ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth)
---|---|---|---|---|---|---|---|
1,300 | cpp | tensorflow/tensorflow | rpc_rendezvous_mgr | tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.cc | tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr_test.cc | #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_RPC_RENDEZVOUS_MGR_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_RPC_RENDEZVOUS_MGR_H_
#include "tensorflow/core/distributed_runtime/base_rendezvous_mgr.h"
#include "tensorflow/core/distributed_runtime/worker_env.h"
#include "tensorflow/core/platform/macros.h"
namespace tensorflow {
class DeviceMgr;
class RpcRendezvousMgr : public BaseRendezvousMgr {
public:
explicit RpcRendezvousMgr(const WorkerEnv* env);
protected:
tsl::core::RefCountPtr<BaseRemoteRendezvous> Create(
int64_t step_id, const WorkerEnv* worker_env) override;
private:
RpcRendezvousMgr(const RpcRendezvousMgr&) = delete;
void operator=(const RpcRendezvousMgr&) = delete;
};
}
#endif
#include "tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/distributed_runtime/request_id.h"
#include "tensorflow/core/distributed_runtime/tensor_coding.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/distributed_runtime/worker_interface.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
class RpcRemoteRendezvous : public BaseRemoteRendezvous {
public:
RpcRemoteRendezvous(const WorkerEnv* env, int64_t step_id)
: BaseRemoteRendezvous(env, step_id) {}
protected:
void RecvFromRemoteAsync(const Rendezvous::ParsedKey& parsed,
const Rendezvous::Args& args,
DoneCallback done) override;
private:
~RpcRemoteRendezvous() override {}
RpcRemoteRendezvous(const RpcRemoteRendezvous&) = delete;
void operator=(const RpcRemoteRendezvous&) = delete;
};
class RpcRecvTensorCall : public BaseRecvTensorCall {
public:
RpcRecvTensorCall() : wi_(nullptr), dst_device_(nullptr) {}
void Init(WorkerInterface* wi, int64_t step_id, StringPiece key,
AllocatorAttributes alloc_attrs, Device* dst_device,
const Rendezvous::Args& recv_args, Rendezvous::DoneCallback done) {
wi_ = wi;
alloc_attrs_ = alloc_attrs;
dst_device_ = dst_device;
recv_args_ = recv_args;
done_ = std::move(done);
req_.set_step_id(step_id);
req_.set_rendezvous_key(key.data(), key.size());
req_.set_request_id(GetUniqueRequestId());
}
void Reset() {
DCHECK_EQ(static_cast<WorkerInterface*>(nullptr), wi_)
<< "Leaking WorkerInterface in RpcRecvTensorCall::Reset().";
alloc_attrs_ = AllocatorAttributes();
dst_device_ = nullptr;
req_.Clear();
resp_.Clear();
{
mutex_lock l(mu_);
status_ = absl::OkStatus();
}
done_ = nullptr;
}
~RpcRecvTensorCall() override {
CHECK_EQ(static_cast<WorkerInterface*>(nullptr), wi_)
<< "Leaking WorkerInterface in RpcRecvTensorCall destructor.";
}
void Start(std::function<void()> recv_done) override {
StartRTCall(std::move(recv_done));
}
void StartAbort(const Status& s) override {
{
mutex_lock l(mu_);
status_.Update(s);
}
opts_.StartCancel();
}
Status status() const override {
mutex_lock l(mu_);
return status_;
}
void ReleaseWorker(WorkerCacheInterface* worker_cache) {
DCHECK_NE(static_cast<WorkerInterface*>(nullptr), wi_)
<< "RpcRecvTensorCall::ReleaseWorker() called twice.";
worker_cache->ReleaseWorker(src_worker_, wi_);
wi_ = nullptr;
}
const Tensor& tensor() const { return resp_.tensor(); }
bool is_dead() const { return resp_.metadata().is_dead(); }
Device* dst_device() const { return dst_device_; }
const Rendezvous::Args& recv_args() const { return recv_args_; }
const Rendezvous::DoneCallback& done() const { return done_; }
private:
friend class RpcRemoteRendezvous;
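  // Issues the RecvTensor RPC and checks for a racing StartAbort(). The done
  // callback blocks on abort_checked so that the post-issue status check
  // below (which may call StartCancel()) finishes before recv_done runs.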
void StartRTCall(std::function<void()> recv_done) {
resp_.InitAlloc(dst_device_, alloc_attrs_);
auto abort_checked = std::make_shared<Notification>();
auto cb = [this, abort_checked,
recv_done = std::move(recv_done)](const Status& s) {
abort_checked->WaitForNotification();
if (!s.ok()) {
mutex_lock l(mu_);
status_.Update(s);
}
recv_done();
};
wi_->RecvTensorAsync(&opts_, &req_, &resp_, std::move(cb));
Status s;
{
mutex_lock l(mu_);
s = status_;
}
if (!s.ok()) {
opts_.StartCancel();
}
abort_checked->Notify();
}
string src_worker_;
string src_rel_device_;
WorkerInterface* wi_;
AllocatorAttributes alloc_attrs_;
Device* dst_device_;
CallOptions opts_;
RecvTensorRequest req_;
TensorResponse resp_;
Rendezvous::Args recv_args_;
Rendezvous::DoneCallback done_;
mutable mutex mu_;
Status status_ TF_GUARDED_BY(mu_);
RpcRecvTensorCall(const RpcRecvTensorCall&) = delete;
void operator=(const RpcRecvTensorCall&) = delete;
};
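// A process-wide pool of idle RpcRecvTensorCall objects. Remote receives are
// frequent, so reusing up to kMaxObjects call objects avoids an allocation
// and deallocation per RecvTensor RPC.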
class RpcRecvTensorFreeList {
public:
RpcRecvTensorFreeList() {}
~RpcRecvTensorFreeList() {
for (size_t i = 0; i < objects_.size(); i++) {
delete objects_[i];
}
}
RpcRecvTensorCall* New() {
{
mutex_lock l(mu_);
if (!objects_.empty()) {
RpcRecvTensorCall* result = objects_.back();
objects_.pop_back();
return result;
}
}
return new RpcRecvTensorCall;
}
void Release(RpcRecvTensorCall* obj) {
obj->Reset();
{
mutex_lock l(mu_);
if (objects_.size() < kMaxObjects) {
objects_.push_back(obj);
return;
}
}
delete obj;
}
private:
static constexpr int kMaxObjects = 1000;
mutex mu_;
std::vector<RpcRecvTensorCall*> objects_ TF_GUARDED_BY(mu_);
};
static RpcRecvTensorFreeList* get_call_freelist() {
static RpcRecvTensorFreeList* call_freelist = new RpcRecvTensorFreeList();
return call_freelist;
}
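// Issues a RecvTensor RPC to the worker owning parsed.src_device and routes
// the result to dst_device. The call is registered for abort handling while
// in flight and returned to the free list once done.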
void RpcRemoteRendezvous::RecvFromRemoteAsync(
const Rendezvous::ParsedKey& parsed, const Rendezvous::Args& recv_args,
DoneCallback done) {
CHECK(is_initialized());
Status s;
RpcRecvTensorCall* call = get_call_freelist()->New();
if (!DeviceNameUtils::SplitDeviceName(parsed.src_device, &call->src_worker_,
&call->src_rel_device_)) {
s = errors::Internal(parsed.src_device,
" is invalid remote source device.");
}
WorkerSession* sess = session();
std::shared_ptr<WorkerCacheInterface> worker_cache =
sess->GetSharedWorkerCache();
WorkerInterface* rwi = worker_cache->GetOrCreateWorker(call->src_worker_);
if (s.ok() && rwi == nullptr) {
s = errors::Internal("No worker known as ", call->src_worker_);
}
Device* dst_device;
if (s.ok()) {
s = sess->device_mgr()->LookupDevice(parsed.dst_device, &dst_device);
}
if (!s.ok()) {
if (rwi != nullptr) {
sess->worker_cache()->ReleaseWorker(call->src_worker_, rwi);
}
get_call_freelist()->Release(call);
done(s, Args(), recv_args, Tensor{}, false);
return;
}
call->Init(rwi, step_id_, parsed.FullKey(), recv_args.alloc_attrs, dst_device,
recv_args, std::move(done));
RegisterCall(call, recv_args);
if (!call->status().ok()) {
DeregisterCall(call, recv_args);
call->ReleaseWorker(sess->worker_cache());
call->done()(call->status(), Args(), Args(), Tensor(), false);
get_call_freelist()->Release(call);
return;
}
Ref();
call->Start([this, call, recv_args, worker_cache]() {
DeregisterCall(call, recv_args);
Status s = call->status();
call->ReleaseWorker(session()->worker_cache());
call->done()(s, Args(), call->recv_args(), call->tensor(), call->is_dead());
get_call_freelist()->Release(call);
Unref();
});
}
}
RpcRendezvousMgr::RpcRendezvousMgr(const WorkerEnv* env)
: BaseRendezvousMgr(env) {}
tsl::core::RefCountPtr<BaseRemoteRendezvous> RpcRendezvousMgr::Create(
int64_t step_id, const WorkerEnv* worker_env) {
return tsl::core::RefCountPtr<BaseRemoteRendezvous>(
new RpcRemoteRendezvous(worker_env, step_id));
}
} | #include "tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/distributed_runtime/test_utils.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/control_flow.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
Tensor V(const string& content) {
Tensor tensor(DT_STRING, TensorShape({}));
tensor.scalar<tstring>()() = content;
return tensor;
}
string V(const Tensor& tensor) {
CHECK_EQ(tensor.dtype(), DT_STRING);
CHECK(TensorShapeUtils::IsScalar(tensor.shape()));
return tensor.scalar<tstring>()();
}
Rendezvous::ParsedKey MakeKey(const string& s) {
Rendezvous::ParsedKey key;
CHECK(Rendezvous::ParseKey(s, &key).ok());
return key;
}
namespace {
class DummyWorker : public TestWorkerInterface {
public:
void RecvTensorAsync(CallOptions* opts, const RecvTensorRequest* request,
TensorResponse* response, StatusCallback done) override {
SchedClosure([done = std::move(done)]() {
const int64_t t_us = random::New64() % 100 * 1000;
Env::Default()->SleepForMicroseconds(t_us);
done(absl::OkStatus());
});
}
};
class DummyWorkerCache : public WorkerCacheInterface {
void ListWorkers(std::vector<string>* workers) const override {}
void ListWorkersInJob(const string& job_name,
std::vector<string>* workers) const override {}
WorkerInterface* GetOrCreateWorker(const string& target) override {
if (dummy_remote_worker_ == nullptr) {
dummy_remote_worker_ = new DummyWorker;
}
return dummy_remote_worker_;
}
Status GetEagerClientCache(
std::unique_ptr<eager::EagerClientCache>* eager_client_cache) override {
return errors::Unimplemented("Unimplemented.");
}
Status GetCoordinationClientCache(
std::unique_ptr<CoordinationClientCache>* coord_client_cache) override {
return errors::Unimplemented("Unimplemented.");
}
bool GetDeviceLocalityNonBlocking(const string& device,
DeviceLocality* locality) override {
return false;
}
void GetDeviceLocalityAsync(const string& device, DeviceLocality* locality,
StatusCallback done) override {}
private:
DummyWorker* dummy_remote_worker_ = nullptr;
};
static Device* CreateDevice(const char* type, const char* name) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr) : Device(nullptr, attr) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
};
DeviceAttributes attr;
attr.set_name(name);
attr.set_device_type(type);
return new FakeDevice(attr);
}
static DeviceMgr* CreateDeviceMgr() {
std::unique_ptr<Device> d0(
CreateDevice("CPU", "/job:mnist/replica:1/task:2/cpu:1"));
std::vector<std::unique_ptr<Device>> devices;
devices.emplace_back(std::move(d0));
return new StaticDeviceMgr(std::move(devices));
}
}
class RpcRendezvousMgrTest : public ::testing::Test {
protected:
RpcRendezvousMgrTest()
: cache_(new DummyWorkerCache),
worker_session_("rpc_session", "/job:mnist/replica:1/task:2",
std::unique_ptr<WorkerCacheInterface>(cache_),
std::unique_ptr<DeviceMgr>(CreateDeviceMgr()),
std::unique_ptr<GraphMgr>(), nullptr,
[](WorkerSession* worker_session, bool called,
DeviceMgr* remote_device_mgr) { return nullptr; }),
rmgr_(&env) {
env.env = Env::Default();
}
DummyWorkerCache* cache_;
WorkerEnv env;
WorkerSession worker_session_;
RpcRendezvousMgr rmgr_;
};
TEST_F(RpcRendezvousMgrTest, LocalSendRecv) {
const int64_t step_id = 123;
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
{
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
Rendezvous::Args args;
TF_ASSERT_OK(rendez->Send(key, args, V("peach"), false));
}
{
Tensor val(DT_FLOAT);
bool val_dead = false;
TF_ASSERT_OK(rmgr_.RecvLocal(step_id, key, &val, &val_dead));
EXPECT_EQ(V(val), "peach");
}
rmgr_.Cleanup(step_id);
}
TEST_F(RpcRendezvousMgrTest, LocalAbort) {
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
{
const int64_t step_id = 123;
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
SchedClosure([this, rendez = rendez.GetNewRef()]() {
env.env->SleepForMicroseconds(100 * 1000);
rendez->StartAbort(errors::Aborted(""));
});
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
EXPECT_TRUE(errors::IsAborted(rendez->Recv(key, args, &val, &val_dead)));
}
{
const int64_t step_id = 321;
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
SchedClosure([this, step_id]() {
env.env->SleepForMicroseconds(100 * 1000);
rmgr_.Cleanup(step_id);
});
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
EXPECT_TRUE(errors::IsAborted(rendez->Recv(key, args, &val, &val_dead)));
}
}
TEST_F(RpcRendezvousMgrTest, LocalCancel) {
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
auto* cm = new CancellationManager();
const int64_t step_id = 123;
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
Notification n;
SchedClosure([this, cm, &n]() {
env.env->SleepForMicroseconds(100 * 1000);
cm->StartCancel();
n.Notify();
});
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
args.cancellation_manager = cm;
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
EXPECT_TRUE(errors::IsCancelled(rendez->Recv(key, args, &val, &val_dead)));
n.WaitForNotification();
delete cm;
}
TEST_F(RpcRendezvousMgrTest, CancelAfterReceived) {
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
auto* cm = new CancellationManager();
const int64_t step_id = 123;
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
Notification n;
SchedClosure([this, rendez = rendez.get(), key, cm, &n]() {
env.env->SleepForMicroseconds(100 * 1000);
TF_ASSERT_OK(rendez->Send(key, Rendezvous::Args(), V("peach"), false));
cm->StartCancel();
n.Notify();
});
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
args.cancellation_manager = cm;
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
TF_ASSERT_OK(rendez->Recv(key, args, &val, &val_dead));
EXPECT_EQ(V(val), "peach");
n.WaitForNotification();
delete cm;
}
namespace {
class DummyDeviceContext : public DeviceContext {
public:
explicit DummyDeviceContext(int stream_id) : stream_id_(stream_id) {}
~DummyDeviceContext() override {}
int stream_id() const { return stream_id_; }
private:
const int stream_id_;
};
}
TEST_F(RpcRendezvousMgrTest, TransferDummyDeviceContext) {
DummyDeviceContext* dc = new DummyDeviceContext(123);
const int64_t step_id = 123;
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
{
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
Rendezvous::Args args;
args.device_context = dc;
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
TF_ASSERT_OK(rendez->Send(key, args, V("peach"), false));
}
{
Notification n;
rmgr_.RecvLocalAsync(
step_id, key,
[&n](const Status& s, const Rendezvous::Args send_args,
const Rendezvous::Args recv_args, const Tensor& val,
bool is_dead) {
auto send_dev_context =
static_cast<DummyDeviceContext*>(send_args.device_context);
CHECK_EQ(123, send_dev_context->stream_id());
CHECK_EQ(V(val), "peach");
n.Notify();
});
n.WaitForNotification();
}
rmgr_.Cleanup(step_id);
dc->Unref();
}
TEST_F(RpcRendezvousMgrTest, RemoteRecvOne) {
const int64_t step_id = 123;
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:worker/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
{
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
Rendezvous::Args args;
Tensor val(DT_STRING);
bool val_dead = false;
TF_ASSERT_OK(rendez->Recv(key, args, &val, &val_dead));
}
rmgr_.Cleanup(step_id);
}
TEST_F(RpcRendezvousMgrTest, RemoteRecvAsyncMany) {
const int64_t step_id = 123;
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:worker/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
{
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
Rendezvous::Args args;
int num_requests = 10000;
Tensor val(DT_STRING);
mutex mu_;
Status status = absl::OkStatus();
BlockingCounter counter(num_requests);
for (int i = 0; i < num_requests; i++) {
rendez->RecvAsync(
key, args,
[&mu_, &status, &counter](const Status& s, const Rendezvous::Args&,
const Rendezvous::Args&, const Tensor&,
const bool) {
{
mutex_lock l(mu_);
status.Update(s);
}
counter.DecrementCount();
});
}
counter.Wait();
TF_ASSERT_OK(status);
}
rmgr_.Cleanup(step_id);
}
} |
1,301 | cpp | tensorflow/tensorflow | grpc_worker_cache | tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.cc | tensorflow/core/distributed_runtime/rpc/grpc_worker_cache_test.cc | #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_GRPC_WORKER_CACHE_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_GRPC_WORKER_CACHE_H_
#include "tensorflow/core/distributed_runtime/rpc/grpc_channel.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_client_cq_tag.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/threadpool.h"
namespace tensorflow {
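// GrpcWorkerEnv owns the gRPC completion queues, their polling threads, and a
// thread pool shared by all remote workers created through the cache below.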
class GrpcWorkerEnv {
public:
GrpcWorkerEnv(size_t num_completion_queues, size_t num_threads);
~GrpcWorkerEnv();
thread::ThreadPool* GetThreadPool() const { return threadpool_.get(); }
size_t CompletionQueueSize() const { return threads_.size(); }
::grpc::CompletionQueue* GetCompletionQueue(size_t index) const {
return threads_.at(index).completion_queue();
}
private:
class GrpcWorkerCacheThread {
public:
GrpcWorkerCacheThread();
~GrpcWorkerCacheThread();
::grpc::CompletionQueue* completion_queue() const {
return &completion_queue_;
}
private:
mutable ::grpc::CompletionQueue completion_queue_;
std::unique_ptr<Thread> thread_;
};
std::unique_ptr<thread::ThreadPool> threadpool_;
std::vector<GrpcWorkerCacheThread> threads_;
};
GrpcWorkerEnv* CreateGrpcWorkerEnv();
WorkerCacheInterface* NewGrpcWorkerCache(std::shared_ptr<GrpcChannelCache> cc,
GrpcWorkerEnv* worker_env);
WorkerCacheInterface* NewGrpcWorkerCacheWithLocalWorker(
std::shared_ptr<GrpcChannelCache> cc, GrpcWorkerEnv* worker_env,
WorkerInterface* local_worker, const string& local_target);
}
#endif
#include "tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.h"
#include "tensorflow/core/distributed_runtime/rpc/coordination/grpc_coordination_client.h"
#include "tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/distributed_runtime/worker_cache_logger.h"
#include "tensorflow/core/distributed_runtime/worker_cache_partial.h"
#include "tensorflow/core/distributed_runtime/worker_interface.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
namespace {
class GrpcWorkerCache : public WorkerCachePartial {
public:
explicit GrpcWorkerCache(std::shared_ptr<GrpcChannelCache> channel_cache,
WorkerInterface* local_worker,
const string& local_target,
GrpcWorkerEnv* worker_env)
: local_target_(local_target),
local_worker_(local_worker),
channel_cache_(channel_cache),
worker_env_(worker_env),
next_round_robin_assignment_(0) {}
void ListWorkers(std::vector<string>* workers) const override {
channel_cache_->ListWorkers(workers);
}
void ListWorkersInJob(const string& job_name,
std::vector<string>* workers) const override {
channel_cache_->ListWorkersInJob(job_name, workers);
}
WorkerInterface* GetOrCreateWorker(const string& target) override {
if (target == local_target_) {
return local_worker_;
} else {
SharedGrpcChannelPtr channel = channel_cache_->FindWorkerChannel(target);
if (!channel) {
return nullptr;
}
size_t index = AssignWorkerToThread(target);
return NewGrpcRemoteWorker(
channel, worker_env_->GetCompletionQueue(index),
worker_env_->GetThreadPool(), &logger_, target);
}
}
void ReleaseWorker(const string& target, WorkerInterface* worker) override {
if (target == local_target_) {
CHECK_EQ(worker, local_worker_)
<< "Releasing a worker that was not returned by this WorkerCache";
} else {
WorkerCacheInterface::ReleaseWorker(target, worker);
}
}
Status GetEagerClientCache(
std::unique_ptr<eager::EagerClientCache>* eager_client_cache) override {
eager_client_cache->reset(eager::NewGrpcEagerClientCache(channel_cache_));
return absl::OkStatus();
}
Status GetCoordinationClientCache(std::unique_ptr<CoordinationClientCache>*
coordination_client_cache) override {
coordination_client_cache->reset(
NewGrpcCoordinationClientCache(channel_cache_));
return absl::OkStatus();
}
void SetLogging(bool v) override { logger_.SetLogging(v); }
void ClearLogs() override { logger_.ClearLogs(); }
bool RetrieveLogs(int64_t step_id, StepStats* ss) override {
return logger_.RetrieveLogs(step_id, ss);
}
private:
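  // Returns a stable completion-queue index for this target, assigned
  // round-robin on first use, so all RPCs to one worker share a queue.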
size_t AssignWorkerToThread(const string& target) {
mutex_lock lock(assignment_mu_);
auto it = target_assignments_.find(target);
if (it == target_assignments_.end()) {
it = target_assignments_
.insert(std::make_pair(target,
(next_round_robin_assignment_++) %
worker_env_->CompletionQueueSize()))
.first;
}
return it->second;
}
const string local_target_;
WorkerInterface* const local_worker_;
std::shared_ptr<GrpcChannelCache> channel_cache_;
WorkerCacheLogger logger_;
GrpcWorkerEnv* worker_env_;
mutex assignment_mu_;
std::unordered_map<std::string, size_t> target_assignments_
TF_GUARDED_BY(assignment_mu_);
size_t next_round_robin_assignment_ TF_GUARDED_BY(assignment_mu_);
};
}
GrpcWorkerEnv::GrpcWorkerEnv(size_t num_completion_queues, size_t num_threads)
: threadpool_(new thread::ThreadPool(
Env::Default(), ThreadOptions(), "GrpcWorkerEnvQueues", num_threads,
false, nullptr)),
threads_(num_completion_queues) {}
GrpcWorkerEnv::~GrpcWorkerEnv() { threads_.clear(); }
GrpcWorkerEnv::GrpcWorkerCacheThread::GrpcWorkerCacheThread() {
thread_.reset(Env::Default()->StartThread(
ThreadOptions(), "GrpcWorkerEnvPool", [this]() {
void* tag;
bool ok;
while (completion_queue_.Next(&tag, &ok)) {
GrpcClientCQTag* callback_tag = static_cast<GrpcClientCQTag*>(tag);
callback_tag->OnCompleted(ok);
}
}));
}
GrpcWorkerEnv::GrpcWorkerCacheThread::~GrpcWorkerCacheThread() {
completion_queue_.Shutdown();
thread_.reset();
}
GrpcWorkerEnv* CreateGrpcWorkerEnv() {
int num_cpus = port::NumSchedulableCPUs();
int64_t num_completion_queues;
Status status = ReadInt64FromEnvVar("TF_GRPC_WORKER_CACHE_QUEUES", 64,
&num_completion_queues);
if (!status.ok()) {
LOG(ERROR) << "Error parsing TF_GRPC_WORKER_CACHE_QUEUES: " << status;
}
int64_t num_threads;
status = ReadInt64FromEnvVar("TF_GRPC_WORKER_CACHE_THREADS", num_cpus,
&num_threads);
if (!status.ok()) {
LOG(ERROR) << "Error parsing TF_GRPC_WORKER_CACHE_THREADS: " << status;
}
return new GrpcWorkerEnv(num_completion_queues, num_threads);
}
WorkerCacheInterface* NewGrpcWorkerCache(std::shared_ptr<GrpcChannelCache> cc,
GrpcWorkerEnv* worker_env) {
  return new GrpcWorkerCache(cc, /*local_worker=*/nullptr, /*local_target=*/"",
                             worker_env);
}
WorkerCacheInterface* NewGrpcWorkerCacheWithLocalWorker(
std::shared_ptr<GrpcChannelCache> cc, GrpcWorkerEnv* worker_env,
WorkerInterface* local_worker, const string& local_target) {
return new GrpcWorkerCache(cc, local_worker, local_target, worker_env);
}
} | #include "tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_channel.h"
#include "tensorflow/core/distributed_runtime/test_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/threadpool.h"
namespace tensorflow {
TEST(GrpcWorkerCacheTest, NewGrpcWorkerCache) {
GrpcChannelSpec spec;
TF_ASSERT_OK(
spec.AddHostPortsJob("worker", {{0, "a:0"}, {1, "b:1"}, {2, "c:2"}}));
ChannelCreationFunction channel_func =
ConvertToChannelCreationFunction(NewHostPortGrpcChannel);
auto channel_cache = std::shared_ptr<GrpcChannelCache>(
NewGrpcChannelCache(spec, channel_func));
std::unique_ptr<GrpcWorkerEnv> grpc_worker_env(CreateGrpcWorkerEnv());
std::unique_ptr<WorkerCacheInterface> worker_cache(
NewGrpcWorkerCache(channel_cache, grpc_worker_env.get()));
WorkerInterface* wi;
wi = worker_cache->GetOrCreateWorker("/job:worker/replica:0/task:0");
EXPECT_NE(wi, nullptr);
worker_cache->ReleaseWorker("/job:worker/replica:0/task:0", wi);
wi = worker_cache->GetOrCreateWorker("/job:worker/replica:0/task:1");
EXPECT_NE(wi, nullptr);
worker_cache->ReleaseWorker("/job:worker/replica:0/task:1", wi);
wi = worker_cache->GetOrCreateWorker("/job:worker/replica:0/task:2");
EXPECT_NE(wi, nullptr);
worker_cache->ReleaseWorker("/job:worker/replica:0/task:2", wi);
wi = worker_cache->GetOrCreateWorker("/job:worker/replica:0/task:3");
EXPECT_EQ(wi, nullptr);
std::unique_ptr<TestWorkerInterface> local_wi;
worker_cache.reset(NewGrpcWorkerCacheWithLocalWorker(
channel_cache, grpc_worker_env.get(), local_wi.get(), "local_target"));
wi = worker_cache->GetOrCreateWorker("local_target");
EXPECT_EQ(wi, local_wi.get());
}
TEST(GrpcWorkerCacheTest, DestructWorkerCacheInThreadPool) {
GrpcChannelSpec spec;
TF_ASSERT_OK(
spec.AddHostPortsJob("worker", {{0, "a:0"}, {1, "b:1"}, {2, "c:2"}}));
ChannelCreationFunction channel_func =
ConvertToChannelCreationFunction(NewHostPortGrpcChannel);
auto channel_cache = std::shared_ptr<GrpcChannelCache>(
NewGrpcChannelCache(spec, channel_func));
std::unique_ptr<GrpcWorkerEnv> grpc_worker_env(CreateGrpcWorkerEnv());
WorkerCacheInterface* worker_cache =
NewGrpcWorkerCache(channel_cache, grpc_worker_env.get());
thread::ThreadPool* tp = grpc_worker_env->GetThreadPool();
Notification n;
tp->Schedule([worker_cache, &n] {
delete worker_cache;
n.Notify();
});
n.WaitForNotification();
}
} |
1,302 | cpp | tensorflow/tensorflow | grpc_util | third_party/xla/xla/tsl/distributed_runtime/rpc/grpc_util.cc | third_party/xla/xla/tsl/distributed_runtime/rpc/grpc_util_test.cc | #ifndef XLA_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_UTIL_H_
#define XLA_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_UTIL_H_
#include <memory>
#include <string>
#include "grpcpp/grpcpp.h"
#include "grpcpp/support/byte_buffer.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
#include "tsl/platform/stringpiece.h"
#include "tsl/platform/stringprintf.h"
#include "tsl/protobuf/distributed_runtime_payloads.pb.h"
namespace tsl {
constexpr char kGrpcPayloadsLost[] =
"type.googleapis.com/tensorflow.distributed_runtime.GrpcPayloadsLost";
constexpr char kStreamRemovedMessage[] = "Stream removed";
inline bool IsStreamRemovedError(const ::grpc::Status& s) {
return !s.ok() && s.error_code() == ::grpc::StatusCode::UNKNOWN &&
s.error_message() == kStreamRemovedMessage;
}
inline std::string SerializePayloads(const absl::Status& s) {
tensorflow::distributed_runtime::GrpcPayloadContainer container;
s.ForEachPayload([&container](StringPiece key, const absl::Cord& value) {
(*container.mutable_payloads())[std::string(key)] = std::string(value);
});
return container.SerializeAsString();
}
inline void InsertSerializedPayloads(absl::Status& s, std::string payloads) {
tensorflow::distributed_runtime::GrpcPayloadContainer container;
if (container.ParseFromString(payloads)) {
for (const auto& key_val : container.payloads()) {
s.SetPayload(key_val.first, absl::Cord(key_val.second));
}
} else {
s.SetPayload(kGrpcPayloadsLost,
absl::Cord(tensorflow::distributed_runtime::GrpcPayloadsLost()
.SerializeAsString()));
}
}
inline absl::Status FromGrpcStatus(const ::grpc::Status& s) {
if (s.ok()) {
return absl::OkStatus();
} else {
absl::Status converted;
    if (IsStreamRemovedError(s)) {
      // Convert "stream removed" errors to UNAVAILABLE so callers can retry.
      converted =
          absl::Status(absl::StatusCode::kUnavailable, s.error_message());
    } else {
      converted = absl::Status(static_cast<absl::StatusCode>(s.error_code()),
                               s.error_message());
    }
InsertSerializedPayloads(converted, s.error_details());
return converted;
}
}
inline ::grpc::Status ToGrpcStatus(const absl::Status& s) {
if (s.ok()) {
return ::grpc::Status::OK;
} else {
    if (s.message().size() > 3072) {
string scratch = strings::Printf("%.3072s ... [truncated]",
absl::StatusMessageAsCStr(s));
LOG(ERROR) << "Truncated error message: " << s;
return ::grpc::Status(static_cast<::grpc::StatusCode>(s.code()), scratch,
SerializePayloads(s));
}
return ::grpc::Status(static_cast<::grpc::StatusCode>(s.code()),
std::string(s.message()), SerializePayloads(s));
}
}
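// A minimal round-trip sketch using the helpers above (illustrative only; the
// payload key "k" and value are made up):
//   absl::Status s = absl::InternalError("boom");
//   s.SetPayload("k", absl::Cord("v"));
//   absl::Status back = FromGrpcStatus(ToGrpcStatus(s));
//   // back has the same code and message, and back.GetPayload("k") == "v",
//   // because ToGrpcStatus serializes payloads into the gRPC error details
//   // and FromGrpcStatus restores them via InsertSerializedPayloads().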
typedef std::shared_ptr<::grpc::Channel> SharedGrpcChannelPtr;
::grpc::Status GrpcMaybeUnparseProto(const protobuf::Message& src,
::grpc::ByteBuffer* dst);
bool GrpcMaybeParseProto(::grpc::ByteBuffer* src, protobuf::Message* dst);
::grpc::Status GrpcMaybeUnparseProto(const string& src,
::grpc::ByteBuffer* dst);
bool GrpcMaybeParseProto(::grpc::ByteBuffer* src, string* dst);
bool GrpcMaybeParseProto(::grpc::ByteBuffer* src, tstring* dst);
}
#endif
#include "xla/tsl/distributed_runtime/rpc/grpc_util.h"
#include <algorithm>
#include <vector>
#include "grpcpp/impl/codegen/proto_utils.h"
#include "tsl/platform/protobuf.h"
namespace tsl {
::grpc::Status GrpcMaybeUnparseProto(const protobuf::Message& src,
grpc::ByteBuffer* dst) {
bool own_buffer;
return ::grpc::SerializationTraits<protobuf::Message>::Serialize(src, dst,
&own_buffer);
}
bool GrpcMaybeParseProto(::grpc::ByteBuffer* src, protobuf::Message* dst) {
return ::grpc::SerializationTraits<protobuf::Message>::Deserialize(src, dst)
.ok();
}
::grpc::Status GrpcMaybeUnparseProto(const string& src, grpc::ByteBuffer* dst) {
::grpc::Slice s(src.data(), src.size());
::grpc::ByteBuffer buffer(&s, 1);
dst->Swap(&buffer);
return ::grpc::Status::OK;
}
bool GrpcMaybeParseProto(grpc::ByteBuffer* src, string* dst) {
dst->clear();
dst->reserve(src->Length());
std::vector<::grpc::Slice> slices;
if (!src->Dump(&slices).ok()) {
return false;
}
for (const ::grpc::Slice& s : slices) {
dst->append(reinterpret_cast<const char*>(s.begin()), s.size());
}
return true;
}
bool GrpcMaybeParseProto(grpc::ByteBuffer* src, tstring* dst) {
dst->clear();
dst->reserve(src->Length());
std::vector<::grpc::Slice> slices;
if (!src->Dump(&slices).ok()) {
return false;
}
for (const ::grpc::Slice& s : slices) {
dst->append(reinterpret_cast<const char*>(s.begin()), s.size());
}
return true;
}
} | #include "xla/tsl/distributed_runtime/rpc/grpc_util.h"
#include <algorithm>
#include <cmath>
#include <vector>
#include "grpcpp/grpcpp.h"
#include "xla/tsl/distributed_runtime/rpc/test_request.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace tsl {
namespace {
using tsl::test::TestRequest;
string ToString(const grpc::ByteBuffer& buf) {
std::vector<grpc::Slice> slices;
CHECK(buf.Dump(&slices).ok());
string result;
for (const grpc::Slice& s : slices) {
result.append(reinterpret_cast<const char*>(s.begin()), s.size());
}
return result;
}
grpc::ByteBuffer MakeBuffer(const string& str, int num_slices) {
std::vector<::grpc::Slice> slices;
const size_t per_slice = (str.size() + num_slices - 1) / num_slices;
for (size_t pos = 0; pos < str.size();) {
const size_t n = std::min(str.size() - pos, per_slice);
slices.emplace_back(&str[pos], n);
pos += n;
}
if (slices.empty()) {
slices.emplace_back();
}
return ::grpc::ByteBuffer(&slices[0], slices.size());
}
TestRequest MakeProto(int size) {
int approx_size = 0;
TestRequest proto;
int index = 0;
while (approx_size < size) {
int item_size = std::min(size - approx_size, 1024);
proto.add_data(string(item_size, 'a' + static_cast<char>(index % 26)));
approx_size += item_size + 3;
index++;
}
return proto;
}
TEST(PayloadSerialization, PayloadsAreTransmitted) {
absl::Status status = errors::InvalidArgument("invalid arg message");
status.SetPayload("a", absl::Cord("\\xFF\\x02\\x03"));
absl::Status status_recovered = FromGrpcStatus(ToGrpcStatus(status));
ASSERT_TRUE(status_recovered.GetPayload("a").has_value());
EXPECT_EQ(status_recovered.GetPayload("a").value(), "\\xFF\\x02\\x03");
}
TEST(PayloadSerialization, PayloadsCorrupted) {
::grpc::Status status(
::grpc::StatusCode::INVALID_ARGUMENT, "invalid arg message",
"string that can not be serialized to the GrpcPayloadContainer proto");
absl::Status converted = FromGrpcStatus(status);
EXPECT_TRUE(converted.GetPayload(kGrpcPayloadsLost).has_value());
}
TEST(GrpcProto, Unparse) {
TestRequest proto;
proto.add_data("hello");
proto.add_data("world");
grpc::ByteBuffer buf;
ASSERT_TRUE(GrpcMaybeUnparseProto(proto, &buf).ok());
TestRequest parsed;
ASSERT_TRUE(parsed.ParseFromString(ToString(buf)));
ASSERT_EQ(proto.DebugString(), parsed.DebugString());
}
TEST(GrpcProto, UnparseToString) {
TestRequest proto;
proto.add_data("hello");
proto.add_data("world");
string str;
CHECK(proto.SerializeToString(&str));
grpc::ByteBuffer buf;
ASSERT_TRUE(GrpcMaybeUnparseProto(str, &buf).ok());
TestRequest parsed;
ASSERT_TRUE(parsed.ParseFromString(ToString(buf)));
ASSERT_EQ(proto.DebugString(), parsed.DebugString());
}
TEST(GrpcProto, Parse) {
struct Case {
int length;
int slices;
};
for (Case c : std::vector<Case>{
{0, 1},
{20, 1},
{100, 1},
{1 << 20, 1},
{100, 5},
{10000, 50},
}) {
TestRequest proto = MakeProto(c.length);
::grpc::ByteBuffer src = MakeBuffer(proto.SerializeAsString(), c.slices);
TestRequest parsed;
ASSERT_TRUE(GrpcMaybeParseProto(&src, &parsed))
<< c.length << " " << c.slices;
ASSERT_EQ(proto.DebugString(), parsed.DebugString());
}
}
TEST(GrpcProto, ParseFromString) {
struct Case {
int length;
int slices;
};
for (Case c : std::vector<Case>{
{0, 1},
{20, 1},
{100, 1},
{1 << 20, 1},
{100, 5},
{10000, 50},
}) {
TestRequest proto = MakeProto(c.length);
::grpc::ByteBuffer src = MakeBuffer(proto.SerializeAsString(), c.slices);
string parsed_str;
TestRequest parsed;
ASSERT_TRUE(GrpcMaybeParseProto(&src, &parsed_str))
<< c.length << " " << c.slices;
ASSERT_TRUE(parsed.ParseFromString(parsed_str));
ASSERT_EQ(proto.DebugString(), parsed.DebugString());
}
}
static void BM_UnparseGrpc(::testing::benchmark::State& state) {
const int size = state.range(0);
auto proto = MakeProto(size);
for (auto s : state) {
grpc::ByteBuffer buf;
CHECK(GrpcMaybeUnparseProto(proto, &buf).ok());
}
}
BENCHMARK(BM_UnparseGrpc)->Arg(1)->Arg(1 << 10)->Arg(1 << 20);
static void BM_UnparseString(::testing::benchmark::State& state) {
const int size = state.range(0);
auto proto = MakeProto(size);
for (auto s : state) {
string buf;
proto.SerializeToString(&buf);
}
}
BENCHMARK(BM_UnparseString)->Arg(1)->Arg(1 << 10)->Arg(1 << 20);
static void BM_ParseGrpc(::testing::benchmark::State& state) {
const int size = state.range(0);
const int num_slices = state.range(1);
TestRequest proto = MakeProto(size);
auto buf = MakeBuffer(proto.SerializeAsString(), num_slices);
for (auto s : state) {
CHECK(GrpcMaybeParseProto(&buf, &proto));
}
}
BENCHMARK(BM_ParseGrpc)
->ArgPair(1, 1)
->ArgPair(1 << 10, 1)
->ArgPair(1 << 10, 4)
->ArgPair(1 << 20, 1)
->ArgPair(1 << 20, 4);
static void BM_ParseString(::testing::benchmark::State& state) {
const int size = state.range(0);
TestRequest proto = MakeProto(size);
string serial = proto.SerializeAsString();
for (auto s : state) {
CHECK(proto.ParseFromString(serial));
}
}
BENCHMARK(BM_ParseString)->Arg(1)->Arg(1 << 10)->Arg(1 << 20);
}
} |
1,303 | cpp | tensorflow/tensorflow | grpc_tensor_coding | tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.cc | tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding_test.cc | #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_GRPC_TENSOR_CODING_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_GRPC_TENSOR_CODING_H_
#include "grpcpp/impl/codegen/byte_buffer.h"
namespace tensorflow {
class Tensor;
class RecvTensorResponse;
namespace grpc {
void EncodeRecvTensorResponseToByteBuffer(const RecvTensorResponse& proto,
::grpc::ByteBuffer* result);
void EncodeTensorToByteBuffer(bool is_dead, const Tensor& val, bool require_ack,
::grpc::ByteBuffer* result);
}
}
#endif
#include "tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.h"
#include "grpcpp/support/byte_buffer.h"
#include "grpcpp/support/slice.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_reference.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/io/proto_encode_helper.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/protobuf/worker.pb.h"
namespace tensorflow {
namespace grpc {
void EncodeRecvTensorResponseToByteBuffer(const RecvTensorResponse& proto,
::grpc::ByteBuffer* result) {
::grpc::Slice slice(proto.ByteSizeLong());
proto.SerializeWithCachedSizesToArray(
const_cast<uint8*>(reinterpret_cast<const uint8*>(slice.begin())));
::grpc::ByteBuffer tmp(&slice, 1);
result->Swap(&tmp);
}
static int VarLengthEncodingSize(uint32 tag, size_t bytes) {
return core::VarintLength(tag << 3) + core::VarintLength(bytes) + bytes;
}
static int SkeletonEncodingSizeUpperBound(const Tensor& val) {
static const int kVarintMax64 = 10;
const int ndims = val.shape().dims();
  return (2 * kVarintMax64) +           // dtype
         (ndims * (4 * kVarintMax64));  // Shape: up to 4 varints per dim
}
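// Hand-encodes the dtype and tensor_shape fields of a TensorProto into "e",
// byte-for-byte identical to what protobuf serialization would produce (the
// NDEBUG block below verifies this), so the tensor contents can later be
// appended without materializing a full TensorProto.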
static void EncodeSkeleton(const Tensor& val, io::ProtoEncodeHelper* e) {
e->WriteUint64(TensorProto::kDtypeFieldNumber, val.dtype());
const int ndims = val.shape().dims();
int tensor_shape_bytes = 0;
for (int d = 0; d < ndims; d++) {
int64_t dim_size = val.shape().dim_size(d);
    tensor_shape_bytes +=
        2 +  // TensorShapeProto.dim tag + length prefix
        1 +  // TensorShapeProto_Dim.size tag
        core::VarintLength(dim_size);
}
if (tensor_shape_bytes > 0) {
e->WriteVarlengthBeginning(TensorProto::kTensorShapeFieldNumber,
tensor_shape_bytes);
for (int d = 0; d < ndims; d++) {
int64_t dim_size = val.shape().dim_size(d);
int64_t dim_varlen = 1 +
core::VarintLength(dim_size);
e->WriteVarlengthBeginning(TensorShapeProto::kDimFieldNumber, dim_varlen);
e->WriteUint64(TensorShapeProto_Dim::kSizeFieldNumber, dim_size);
}
}
#ifndef NDEBUG
{
TensorProto skeleton;
skeleton.set_dtype(val.dtype());
val.shape().AsProto(skeleton.mutable_tensor_shape());
string tensor_except_contents;
skeleton.AppendToString(&tensor_except_contents);
TensorProto skeleton2;
skeleton2.ParseFromString(string(e->data(), e->size()));
string out;
    skeleton2.AppendToString(&out);  // Re-encode what EncodeSkeleton wrote.
DCHECK_EQ(tensor_except_contents, out) << skeleton.DebugString() << " vs\n"
<< skeleton2.DebugString();
}
#endif
}
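// Encodes the tensor into a RecvTensorResponse ByteBuffer. For memcpy-able
// dtypes the proto framing is hand-encoded into one slice, and tensors larger
// than kLargeTensorBytes contribute their underlying TensorBuffer as a second
// zero-copy slice (ref-counted so the buffer outlives the RPC) instead of
// being copied.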
void EncodeTensorToByteBuffer(bool is_dead, const Tensor& val, bool require_ack,
::grpc::ByteBuffer* result) {
const int kLargeTensorBytes = 1024;
const int64_t kProtoBufLimitBytes = 1LL << 31;
if (val.TotalBytes() > kProtoBufLimitBytes) {
size_t exceeded_bytes = val.TotalBytes() - kProtoBufLimitBytes;
LOG(FATAL) << "Cannot encode a Tensor that exceeds the 2GB protobuf limit. "
"Exceeded bytes: "
<< exceeded_bytes
<< ", tensor shape: " << val.shape().AsProto().DebugString();
}
RecvTensorResponse response;
if (is_dead) {
response.set_is_dead(is_dead);
}
response.set_require_ack(require_ack);
response.set_send_start_micros(Env::Default()->NowMicros());
if (!DataTypeCanUseMemcpy(val.dtype())) {
val.AsProtoTensorContent(response.mutable_tensor());
EncodeRecvTensorResponseToByteBuffer(response, result);
} else {
gtl::InlinedVector<char, 128> skeleton(SkeletonEncodingSizeUpperBound(val));
io::ProtoEncodeHelper e_skeleton(skeleton.data(), skeleton.size());
EncodeSkeleton(val, &e_skeleton);
StringPiece tdata = val.tensor_data();
uint32 overall_tensor_proto_bytesize =
(e_skeleton.size() +
VarLengthEncodingSize(TensorProto::kTensorContentFieldNumber,
tdata.size()));
string header;
response.AppendToString(&header);
size_t expected_size =
(header.size() +
VarLengthEncodingSize(RecvTensorResponse::kTensorFieldNumber,
overall_tensor_proto_bytesize));
bool share_tensor_slice_memory = (tdata.size() > kLargeTensorBytes);
size_t encoder_size = expected_size - tdata.size();
gtl::InlinedVector<char, 1024> space(encoder_size);
io::ProtoEncodeHelper e(space.data(), space.size());
e.WriteRawBytes(header);
e.WriteVarlengthBeginning(RecvTensorResponse::kTensorFieldNumber,
overall_tensor_proto_bytesize);
e.WriteRawBytes(StringPiece(e_skeleton.data(), e_skeleton.size()));
e.WriteVarlengthBeginning(TensorProto::kTensorContentFieldNumber,
tdata.size());
::grpc::Slice slices[2];
int num_slices = 0;
{
size_t slice_len =
e.size() + (share_tensor_slice_memory ? 0 : tdata.size());
slices[0] = ::grpc::Slice(slice_len);
memcpy(const_cast<uint8_t*>(slices[0].begin()), e.data(), e.size());
if (!share_tensor_slice_memory) {
memcpy(const_cast<uint8_t*>(slices[0].begin()) + e.size(), tdata.data(),
tdata.size());
}
num_slices += 1;
}
if (share_tensor_slice_memory) {
const TensorBuffer* buf = DMAHelper::buffer(&val);
buf->Ref();
slices[1] = ::grpc::Slice(
const_cast<void*>(static_cast<const void*>(tdata.data())),
tdata.size(),
[](void* backing) { static_cast<TensorBuffer*>(backing)->Unref(); },
const_cast<TensorBuffer*>(buf));
num_slices += 1;
}
size_t total_bytes = 0;
for (int i = 0; i < num_slices; i++) {
total_bytes += slices[i].size();
}
CHECK_EQ(total_bytes, expected_size);
::grpc::ByteBuffer tmp(&slices[0], num_slices);
result->Swap(&tmp);
}
}
}
} | #include "tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.h"
#include "grpcpp/support/byte_buffer.h"
#include "grpcpp/support/slice.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/worker.pb.h"
namespace tensorflow {
class GrpcTensorCodingTest : public ::testing::Test {
public:
void Validate(const Tensor& t, bool is_dead) {
::grpc::ByteBuffer buf;
grpc::EncodeTensorToByteBuffer(is_dead, t, false, &buf);
std::vector<::grpc::Slice> slices;
(void)buf.Dump(&slices);
string tmp;
for (const auto& s : slices) {
tmp.append(reinterpret_cast<const char*>(s.begin()), s.size());
}
RecvTensorResponse response;
EXPECT_TRUE(response.ParseFromString(tmp));
EXPECT_EQ(response.is_dead(), is_dead);
Tensor result_tensor;
EXPECT_TRUE(result_tensor.FromProto(response.tensor()));
EXPECT_EQ(t.dtype(), result_tensor.dtype());
EXPECT_EQ(t.shape().DebugString(), result_tensor.shape().DebugString());
EXPECT_EQ(t.DebugString(), result_tensor.DebugString());
}
template <typename T>
void DoTest(DataType dt) {
gtl::InlinedVector<T, 4> v;
for (int elems = 0; elems <= 10000; elems++) {
if (elems < 100 || (elems % 1000 == 0)) {
Tensor a(dt, TensorShape({1, static_cast<int64_t>(v.size())}));
test::FillValues<T>(&a, v);
Validate(a, (elems == 0));
}
v.push_back(static_cast<T>(elems));
}
}
void DoTestForStrings(DataType dt) {
gtl::InlinedVector<tstring, 4> v;
for (int elems = 0; elems <= 10000; elems++) {
if (elems < 100 || (elems % 1000 == 0)) {
Tensor a(dt, TensorShape({1, static_cast<int64_t>(v.size())}));
test::FillValues<tstring>(&a, v);
Validate(a, (elems == 0));
}
v.push_back(strings::StrCat("This is string ", elems));
}
}
};
TEST_F(GrpcTensorCodingTest, Simple) {
DoTest<float>(DT_FLOAT);
DoTest<double>(DT_DOUBLE);
DoTest<int32>(DT_INT32);
DoTest<uint16>(DT_UINT16);
DoTest<uint8>(DT_UINT8);
DoTest<int16>(DT_INT16);
DoTest<int8>(DT_INT8);
DoTest<complex64>(DT_COMPLEX64);
DoTest<complex128>(DT_COMPLEX128);
DoTest<int64_t>(DT_INT64);
DoTest<bool>(DT_BOOL);
DoTest<qint8>(DT_QINT8);
DoTest<quint8>(DT_QUINT8);
DoTest<qint16>(DT_QINT16);
DoTest<quint16>(DT_QUINT16);
DoTest<qint32>(DT_QINT32);
DoTest<bfloat16>(DT_BFLOAT16);
DoTest<Eigen::half>(DT_HALF);
}
TEST_F(GrpcTensorCodingTest, StringTensor) { DoTestForStrings(DT_STRING); }
} |
1,304 | cpp | tensorflow/tensorflow | grpc_eager_client | tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.cc | tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client_test.cc | #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_EAGER_GRPC_EAGER_CLIENT_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_EAGER_GRPC_EAGER_CLIENT_H_
#include "tensorflow/core/distributed_runtime/eager/eager_client.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_channel.h"
namespace tensorflow {
namespace eager {
EagerClientCache* NewGrpcEagerClientCache(
std::shared_ptr<tensorflow::GrpcChannelCache> channel);
}
}
#endif
#include "tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.h"
#include <cstdint>
#include <string>
#include "grpcpp/generic/generic_stub.h"
#include "xla/tsl/distributed_runtime/call_options.h"
#include "tensorflow/core/distributed_runtime/call_options.h"
#include "tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_client_cq_tag.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_state.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/protobuf/core_platform_payloads.pb.h"
#include "tensorflow/core/protobuf/eager_service.pb.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
namespace eager {
namespace {
bool EnableStreaming() {
bool result;
TF_CHECK_OK(ReadBoolFromEnvVar("TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE",
true, &result));
return result;
}
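// Polls a completion queue on a dedicated thread. The thread holds a
// self-reference (Ref() in the constructor); each GrpcEagerClient adds a
// reference while it uses the queue, and the loop only exits and shuts the
// queue down once RefCountIsOne(), i.e. when only the self-reference remains.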
class GrpcEagerClientThread : public core::RefCounted {
public:
GrpcEagerClientThread() {
Ref();
thread_.reset(Env::Default()->StartThread(
ThreadOptions(), "eager_client_thread", [this]() {
void* tag;
bool ok;
while (completion_queue_.Next(&tag, &ok)) {
VLOG(4) << "GrpcEagerClientThread got next tag";
GrpcClientCQTag* callback_tag = static_cast<GrpcClientCQTag*>(tag);
callback_tag->OnCompleted(ok);
VLOG(4) << "GrpcEagerClientThread blocking for next tag";
if (RefCountIsOne()) {
break;
}
}
VLOG(4) << "GrpcEagerClientThread exiting";
completion_queue_.Shutdown();
Env::Default()->SchedClosure([this]() { this->Unref(); });
}));
}
~GrpcEagerClientThread() override {}
::grpc::CompletionQueue* completion_queue() { return &completion_queue_; }
private:
::grpc::CompletionQueue completion_queue_;
std::unique_ptr<Thread> thread_;
};
class GrpcEagerClient : public EagerClient {
public:
GrpcEagerClient(const tensorflow::SharedGrpcChannelPtr& channel,
GrpcEagerClientThread* thread, const string& target)
: stub_(channel), thread_(thread), target_(target) {
thread_->Ref();
cq_ = thread->completion_queue();
}
~GrpcEagerClient() override { thread_->Unref(); }
bool allow_multiple_pending_requests() const override {
return EnableStreaming();
}
#define CLIENT_METHOD(method) \
void method##Async(const method##Request* request, \
method##Response* response, StatusCallback done) \
override { \
StatusCallback done_wrapped = callback_wrapper(std::move(done)); \
new RPCState<protobuf::Message>( \
&stub_, cq_, "/tensorflow.eager.EagerService/" #method, *request, \
response, std::move(done_wrapped), nullptr, \
nullptr, 0, true, \
&target_); \
}
CLIENT_METHOD(CreateContext);
CLIENT_METHOD(UpdateContext);
CLIENT_METHOD(WaitQueueDone);
CLIENT_METHOD(KeepAlive);
#undef CLIENT_METHOD
#define CLIENT_METHOD_WITH_TIMEOUT_AND_RETRIES(method) \
void method##Async(const method##Request* request, \
method##Response* response, StatusCallback done, \
int64_t init_timeout_in_ms, int retries) override { \
CallOptions* call_ops = nullptr; \
StatusCallback done_wrapped; \
if (init_timeout_in_ms > 0) { \
call_ops = new CallOptions; \
call_ops->SetTimeout(init_timeout_in_ms); \
auto new_done = [call_ops, done = std::move(done)](const Status& s) { \
done(s); \
delete call_ops; \
}; \
done_wrapped = callback_wrapper(new_done); \
} else { \
done_wrapped = callback_wrapper(std::move(done)); \
} \
new RPCState<protobuf::Message>( \
&stub_, cq_, "/tensorflow.eager.EagerService/" #method, *request, \
response, std::move(done_wrapped), call_ops, nullptr, \
retries, true, &target_); \
}
CLIENT_METHOD_WITH_TIMEOUT_AND_RETRIES(CreateContext);
#undef CLIENT_METHOD_WITH_TIMEOUT_AND_RETRIES
#define CLIENT_CANCELABLE_METHOD(method) \
void method##Async(CallOptions* call_opts, const method##Request* request, \
method##Response* response, StatusCallback done) \
override { \
StatusCallback done_wrapped = callback_wrapper(std::move(done)); \
new RPCState<protobuf::Message>( \
&stub_, cq_, "/tensorflow.eager.EagerService/" #method, *request, \
response, std::move(done_wrapped), call_opts, nullptr, \
0, true, &target_); \
}
CLIENT_CANCELABLE_METHOD(Enqueue);
CLIENT_CANCELABLE_METHOD(RunComponentFunction);
#undef CLIENT_CANCELABLE_METHOD
void CloseContextAsync(const CloseContextRequest* request,
CloseContextResponse* response,
StatusCallback done) override {
StatusCallback done_wrapped = callback_wrapper(std::move(done));
new RPCState<protobuf::Message>(
&stub_, cq_, "/tensorflow.eager.EagerService/CloseContext", *request,
response, std::move(done_wrapped), nullptr,
nullptr, 0, true,
&target_);
VLOG(1) << "Sending RPC to close remote eager context "
<< request->DebugString();
mutex_lock l(mu_);
const auto& it = enqueue_dispatchers_.find(request->context_id());
if (it != enqueue_dispatchers_.end()) {
it->second.CancelCall();
enqueue_dispatchers_.erase(it);
} else if (EnableStreaming()) {
LOG(ERROR) << "Remote EagerContext with id " << request->context_id()
<< " does not seem to exist.";
}
}
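  // Routes enqueues through a per-context StreamingRPCDispatcher when
  // streaming is enabled; otherwise falls back to a blocking unary
  // EnqueueAsync call.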
void StreamingEnqueueAsync(bool enable_streaming_enqueue,
CallOptions* call_opts,
const EnqueueRequest* request,
EnqueueResponse* response,
StatusCallback done) override {
StatusCallback done_wrapped = callback_wrapper(std::move(done));
if (EnableStreaming() && enable_streaming_enqueue) {
mutex_lock l(mu_);
auto it = enqueue_dispatchers_.find(request->context_id());
if (it == enqueue_dispatchers_.end()) {
auto it_and_bool = enqueue_dispatchers_.emplace(
std::piecewise_construct,
std::forward_as_tuple(request->context_id()),
std::forward_as_tuple(
&stub_, cq_,
"/tensorflow.eager.EagerService/StreamingEnqueue"));
it = it_and_bool.first;
}
it->second.SendNextRequest(*request, response, std::move(done_wrapped));
} else {
Notification n;
Status status;
EnqueueAsync(call_opts, request, response,
[&n, &status](const Status& s) {
status.Update(s);
n.Notify();
});
n.WaitForNotification();
done_wrapped(status);
}
}
private:
::grpc::GenericStub stub_;
const GrpcEagerClientThread* thread_;
const string target_;
::grpc::CompletionQueue* cq_;
mutable mutex mu_;
std::unordered_map<uint64, StreamingRPCDispatcher<EnqueueResponse>>
enqueue_dispatchers_ TF_GUARDED_BY(mu_);
StatusCallback callback_wrapper(StatusCallback done) {
Ref();
return [this, done = std::move(done)](const Status& status) {
done(status);
this->Unref();
if (TF_PREDICT_FALSE(!status.ok())) {
auto error_source_payload = status.GetPayload(kErrorSource);
if (error_source_payload.has_value()) {
tensorflow::core::platform::ErrorSourceProto error_source_proto;
error_source_proto.ParseFromString(
std::string(*error_source_payload));
metrics::UpdateEagerClientErrorCounter(
error_source_proto.ErrorSource_Name(
error_source_proto.error_source()),
absl::StatusCodeToString(status.code()));
} else {
metrics::UpdateEagerClientErrorCounter(
"unknown", absl::StatusCodeToString(status.code()));
}
}
};
}
};
class GrpcEagerClientCache : public EagerClientCache {
public:
explicit GrpcEagerClientCache(
std::shared_ptr<tensorflow::GrpcChannelCache> cache)
: next_round_robin_assignment_(0), cache_(cache), threads_(4) {
for (int i = 0, end = threads_.size(); i < end; i++) {
threads_[i].reset(new GrpcEagerClientThread());
}
}
~GrpcEagerClientCache() override { threads_.clear(); }
Status GetClient(const string& target,
core::RefCountPtr<EagerClient>* client) override {
mutex_lock l(clients_mu_);
auto it = clients_.find(target);
if (it == clients_.end()) {
tensorflow::SharedGrpcChannelPtr shared =
cache_->FindWorkerChannel(target);
if (shared == nullptr) {
return errors::InvalidArgument("Client for target ", target,
" not found.");
}
int assigned_index = AssignClientToThread(target);
GrpcEagerClientThread* thread = threads_[assigned_index].get();
core::RefCountPtr<EagerClient> worker(
new GrpcEagerClient(shared, thread, target));
it = clients_.emplace(target, std::move(worker)).first;
}
it->second->Ref();
client->reset(it->second.get());
return absl::OkStatus();
}
private:
mutex assignment_mu_;
std::unordered_map<std::string, size_t> target_assignments_
TF_GUARDED_BY(assignment_mu_);
size_t next_round_robin_assignment_ TF_GUARDED_BY(assignment_mu_);
size_t AssignClientToThread(const string& target) {
mutex_lock lock(assignment_mu_);
auto it = target_assignments_.find(target);
if (it == target_assignments_.end()) {
it = target_assignments_
.insert(std::make_pair(
target, (next_round_robin_assignment_++) % threads_.size()))
.first;
}
return it->second;
}
std::shared_ptr<tensorflow::GrpcChannelCache> cache_;
mutable mutex clients_mu_;
std::unordered_map<string, core::RefCountPtr<EagerClient>> clients_
TF_GUARDED_BY(clients_mu_);
std::vector<core::RefCountPtr<GrpcEagerClientThread>> threads_;
};
}
EagerClientCache* NewGrpcEagerClientCache(
std::shared_ptr<tensorflow::GrpcChannelCache> channel) {
return new GrpcEagerClientCache(channel);
}
}
} | #include "tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_channel.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace eager {
TEST(GrpcEagerClientCache, TestGetClientThreadSafety) {
GrpcChannelSpec spec;
TF_ASSERT_OK(spec.AddHostPortsJob("worker", {{0, "a:1"},
{1, "b:2"},
{2, "c:3"},
{3, "d:4"},
{4, "e:5"},
{5, "f:6"}}));
ChannelCreationFunction channel_func =
ConvertToChannelCreationFunction(NewHostPortGrpcChannel);
auto channel_cache = std::shared_ptr<GrpcChannelCache>(
NewGrpcChannelCache(spec, channel_func));
std::unique_ptr<EagerClientCache> client_cache(
NewGrpcEagerClientCache(channel_cache));
const int num_calls = 10;
BlockingCounter counter(num_calls);
for (int i = 0; i < num_calls; i++) {
Env::Default()->SchedClosure([&client_cache, i, &counter]() {
string target = strings::StrCat("/job:worker/replica:0/task:", i);
core::RefCountPtr<EagerClient> eager_client;
Status s = client_cache->GetClient(target, &eager_client);
error::Code expected_code = i <= 5 ? error::OK : error::INVALID_ARGUMENT;
EXPECT_EQ(expected_code, s.code());
counter.DecrementCount();
});
}
counter.Wait();
}
}
} |
1,305 | cpp | tensorflow/tensorflow | coordination_service_barrier_proxy | tensorflow/core/distributed_runtime/coordination/coordination_service_barrier_proxy.cc | tensorflow/core/distributed_runtime/coordination/coordination_service_barrier_proxy_test.cc | #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_SERVICE_BARRIER_PROXY_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_COORDINATION_COORDINATION_SERVICE_BARRIER_PROXY_H_
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tsl/protobuf/coordination_service.pb.h"
namespace tensorflow {
class BarrierProxy {
public:
BarrierProxy(const BarrierProxy&) = delete;
void operator=(const BarrierProxy&) = delete;
BarrierProxy(tsl::CoordinationServiceAgent* agent,
std::vector<CoordinatedTask> tasks, int num_local_threads,
absl::string_view key, absl::Duration timeout)
: key_(key),
agent_(agent),
tasks_(std::move(tasks)),
timeout_(timeout),
num_local_threads_(num_local_threads) {}
~BarrierProxy() = default;
std::pair<Status, bool> Wait();
private:
const std::string key_;
tsl::CoordinationServiceAgent* agent_;
const std::vector<CoordinatedTask> tasks_;
absl::Duration timeout_;
mutex mu_;
condition_variable cv_ TF_GUARDED_BY(mu_);
const int num_local_threads_;
int num_entered_ TF_GUARDED_BY(mu_) = 0;
int num_to_exit_ TF_GUARDED_BY(mu_) = 0;
Status status_ TF_GUARDED_BY(mu_);
bool status_set_ TF_GUARDED_BY(mu_) = false;
};
class BarrierProxyManager {
public:
BarrierProxyManager(const BarrierProxyManager&) = delete;
void operator=(const BarrierProxyManager&) = delete;
BarrierProxyManager() = default;
~BarrierProxyManager() = default;
Status Wait(tsl::CoordinationServiceAgent* agent,
const std::vector<CoordinatedTask>& tasks, int num_local_threads,
absl::string_view key, absl::Duration timeout);
size_t size() const;
private:
mutable mutex mu_;
absl::flat_hash_map<std::string, std::shared_ptr<BarrierProxy>> barriers_
TF_GUARDED_BY(mu_);
};
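// A minimal usage sketch (assumes a connected CoordinationServiceAgent `agent`
// and the participating `tasks` are already known; the names below are
// illustrative, not part of this API):
//
//   BarrierProxyManager barriers;
//   // Each of the kNumLocalThreads participating threads calls:
//   Status s = barriers.Wait(agent, tasks, kNumLocalThreads,
//                            "init_barrier", absl::Seconds(30));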
}
#endif
#include "tensorflow/core/distributed_runtime/coordination/coordination_service_barrier_proxy.h"
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/profiler/lib/traceme.h"
#include "tsl/profiler/lib/traceme_encode.h"
#include "tsl/protobuf/coordination_service.pb.h"
namespace tensorflow {
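// Blocks the calling thread at the barrier. The last of the
// `num_local_threads_` threads to arrive performs the cross-task
// WaitAtBarrier() through the agent (skipped when there is only one task) and
// broadcasts the resulting status; earlier arrivals block on the condition
// variable until that status is set or the timeout expires.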
std::pair<Status, bool> BarrierProxy::Wait() {
mutex_lock l(mu_);
if (status_set_) {
return std::make_pair(
absl::FailedPreconditionError(absl::StrCat(
"The barrier has already passed or timed out. key=", key_)),
false);
}
if (num_entered_ >= num_local_threads_) {
return std::make_pair(absl::FailedPreconditionError(absl::StrCat(
"Wait() called too many (>", num_local_threads_,
") times. key=", key_)),
false);
}
++num_entered_;
++num_to_exit_;
VLOG(1) << "BarrierProxy " << key_ << " enter: num_entered_=" << num_entered_
<< ", num_to_exit_=" << num_to_exit_;
if (num_entered_ == num_local_threads_) {
if (tasks_.size() != 1) {
tsl::profiler::TraceMe traceme("BarrierProxy::Wait::WaitAtBarrier");
status_ = agent_->WaitAtBarrier(key_, timeout_, tasks_);
} else {
status_ = absl::OkStatus();
}
status_set_ = true;
cv_.notify_all();
} else if (WaitForMilliseconds(&l, &cv_, timeout_ / absl::Milliseconds(1)) ==
kCond_Timeout) {
if (!status_set_) {
if (tasks_.size() != 1) {
agent_->CancelBarrier(key_).IgnoreError();
}
status_ = absl::DeadlineExceededError(
absl::StrCat("BarrierProxy timeout: key=", key_));
status_set_ = true;
cv_.notify_all();
}
} else {
CHECK(status_set_);
}
--num_to_exit_;
VLOG(1) << "BarrierProxy " << key_ << " enter: num_entered_=" << num_entered_
<< ", num_to_exit=" << num_to_exit_;
return std::make_pair(status_, num_to_exit_ == 0);
}
size_t BarrierProxyManager::size() const {
mutex_lock l(mu_);
return barriers_.size();
}
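// Looks up (or lazily creates) the BarrierProxy for `key`, waits on it, and
// erases the entry once the last local thread has exited, so the same key can
// be reused for a later barrier.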
Status BarrierProxyManager::Wait(tsl::CoordinationServiceAgent* agent,
const std::vector<CoordinatedTask>& tasks,
int num_local_threads, absl::string_view key,
absl::Duration timeout) {
if (tasks.size() == 1 && num_local_threads <= 1) return absl::OkStatus();
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode(
"BarrierProxyManager::Wait",
{
{"num_tasks", tasks.size()},
{"num_local_threads", num_local_threads},
});
});
std::shared_ptr<BarrierProxy> barrier;
{
mutex_lock l(mu_);
auto [iter, inserted] = barriers_.try_emplace(key);
if (inserted) {
iter->second = std::make_shared<BarrierProxy>(
agent, tasks, num_local_threads, key, timeout);
VLOG(1) << "BarrierProxy key=" << key << " created.";
}
barrier = iter->second;
}
CHECK(barrier);
auto [status, last_exit] = barrier->Wait();
if (last_exit) {
mutex_lock l(mu_);
barriers_.erase(key);
VLOG(1) << "BarrierProxy key=" << key << " removed.";
}
return status;
}
} | #include "tensorflow/core/distributed_runtime/coordination/coordination_service_barrier_proxy.h"
#include <atomic>
#include <cstdint>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "xla/tsl/distributed_runtime/call_options.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_client.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/threadpool.h"
#include "tsl/protobuf/coordination_config.pb.h"
#include "tsl/protobuf/coordination_service.pb.h"
namespace tensorflow {
namespace {
using ::testing::_;
using ::testing::Return;
using tsl::CallOptions;
using tsl::CoordinationClient;
using tsl::CoordinationServiceAgent;
class MockCoordinationServiceAgent : public CoordinationServiceAgent {
public:
MOCK_METHOD(Status, WaitAtBarrier,
(std::string_view barrier_id, absl::Duration timeout,
const std::vector<CoordinatedTask>& tasks),
(override));
MOCK_METHOD(Status, CancelBarrier, (std::string_view barrier_id), (override));
MOCK_METHOD(Status, Initialize,
(Env * env, std::string_view job_name, int task_id,
const CoordinationServiceConfig& configs,
std::unique_ptr<CoordinationClient> leader_client,
StatusCallback error_fn),
(override));
MOCK_METHOD(Status, Initialize,
(Env * env, const CoordinatedTask& task,
const CoordinationServiceConfig& configs,
std::unique_ptr<CoordinationClient> leader_client,
StatusCallback error_fn),
(override));
MOCK_METHOD(bool, IsInitialized, (), (override));
MOCK_METHOD(bool, IsConnected, (), (override));
MOCK_METHOD(bool, IsError, (), (override));
MOCK_METHOD(Status, Connect, (), (override));
MOCK_METHOD(Status, WaitForAllTasks, (const DeviceInfo& local_devices),
(override));
MOCK_METHOD(const DeviceInfo&, GetClusterDeviceInfo, (), (override));
MOCK_METHOD(absl::StatusOr<CoordinatedTask>, GetOwnTask, (), (override));
MOCK_METHOD(absl::StatusOr<std::vector<CoordinatedTaskStateInfo>>,
GetTaskState, (const std::vector<CoordinatedTask>& task),
(override));
MOCK_METHOD(Status, ReportError, (const Status& error), (override));
MOCK_METHOD(Status, Shutdown, (), (override));
MOCK_METHOD(Status, Reset, (), (override));
MOCK_METHOD(absl::StatusOr<std::string>, GetKeyValue, (std::string_view key),
(override));
MOCK_METHOD(absl::StatusOr<std::string>, GetKeyValue,
(std::string_view key, absl::Duration timeout), (override));
MOCK_METHOD(std::shared_ptr<CallOptions>, GetKeyValueAsync,
(std::string_view key, StatusOrValueCallback done), (override));
MOCK_METHOD(absl::StatusOr<std::string>, TryGetKeyValue,
(std::string_view key), (override));
MOCK_METHOD(absl::StatusOr<std::vector<KeyValueEntry>>, GetKeyValueDir,
(std::string_view key), (override));
MOCK_METHOD(void, GetKeyValueDirAsync,
(std::string_view key, StatusOrValueDirCallback done),
(override));
MOCK_METHOD(Status, InsertKeyValue,
(std::string_view key, std::string_view value), (override));
MOCK_METHOD(Status, InsertKeyValue,
(std::string_view key, std::string_view value,
bool allow_overwrite),
(override));
MOCK_METHOD(Status, DeleteKeyValue, (std::string_view key), (override));
MOCK_METHOD(Status, UpdateKeyValue,
(std::string_view key, std::string_view value), (override));
MOCK_METHOD(Status, StartWatchKey,
(std::string_view key, ChangedKeyValuesCallback on_change),
(override));
MOCK_METHOD(Status, StopWatchKey, (std::string_view key), (override));
MOCK_METHOD(void, WaitAtBarrierAsync,
(std::string_view barrier_id, absl::Duration timeout,
const std::vector<CoordinatedTask>& tasks, StatusCallback done),
(override));
MOCK_METHOD(void, CancelBarrierAsync,
(std::string_view barrier_id, StatusCallback done), (override));
MOCK_METHOD(absl::StatusOr<Env*>, GetEnv, (), (override));
MOCK_METHOD(void, SetError, (const Status& error), (override));
MOCK_METHOD(Status, ActivateWatch,
(std::string_view key,
(const std::map<std::string, std::string>&)),
(override));
};
constexpr auto kTestKey = "test_key";
constexpr auto kTestTimeout = absl::Seconds(1);
const int kThreadPoolSize = 32;
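// Test harness: spawns `num_threads_entered` threads against a barrier planned
// for `num_threads_planned`, stubs the agent's WaitAtBarrier() with
// `agent_wait_status` (or expects it never to be called when unset), and
// checks how many waiters observe an OK status.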
void TestBarrierProxyWait(
int num_tasks, int num_threads_planned, int num_threads_entered,
int expected_ok_count, std::optional<Status> agent_wait_status,
std::optional<Status> expected_same_exit_status_for_all_threads) {
auto agent = std::make_unique<MockCoordinationServiceAgent>();
const std::vector<CoordinatedTask> tasks(num_tasks);
BarrierProxy barrier(agent.get(), tasks, num_threads_planned, kTestKey,
kTestTimeout);
std::atomic<int> last_exit_count = 0;
std::atomic<int> actual_ok_count = 0;
if (agent_wait_status.has_value()) {
EXPECT_CALL(*agent, WaitAtBarrier(kTestKey, kTestTimeout, _))
.WillOnce(Return(agent_wait_status.value()));
} else {
EXPECT_CALL(*agent, WaitAtBarrier(kTestKey, kTestTimeout, _)).Times(0);
}
{
thread::ThreadPool pool(Env::Default(), "TestPool",
kThreadPoolSize);
for (int i = 0; i < num_threads_entered; ++i) {
pool.Schedule([&]() {
auto [status, last_exit] = barrier.Wait();
if (expected_same_exit_status_for_all_threads.has_value()) {
ASSERT_EQ(status, expected_same_exit_status_for_all_threads.value());
}
actual_ok_count += status.ok();
last_exit_count += last_exit;
});
}
}
ASSERT_EQ(actual_ok_count, expected_ok_count);
ASSERT_EQ(last_exit_count, 1);
}
TEST(BarrierProxyTest, AllThreadsExitBarrier) {
  TestBarrierProxyWait(
      /*num_tasks=*/2,
      /*num_threads_planned=*/8,
      /*num_threads_entered=*/8,
      /*expected_ok_count=*/8,
      /*agent_wait_status=*/absl::OkStatus(),
      /*expected_same_exit_status_for_all_threads=*/absl::OkStatus());
}
TEST(BarrierProxyTest, AgentErrorBroadcastedToAllThreads) {
  TestBarrierProxyWait(
      /*num_tasks=*/2,
      /*num_threads_planned=*/8,
      /*num_threads_entered=*/8,
      /*expected_ok_count=*/0,
      /*agent_wait_status=*/errors::Internal(""),
      /*expected_same_exit_status_for_all_threads=*/errors::Internal(""));
}
TEST(BarrierProxyTest, AgentIsIgnoredIfThereIsOnlyOneTask) {
  TestBarrierProxyWait(
      /*num_tasks=*/1,
      /*num_threads_planned=*/8,
      /*num_threads_entered=*/8,
      /*expected_ok_count=*/8,
      /*agent_wait_status=*/{},
      /*expected_same_exit_status_for_all_threads=*/absl::OkStatus());
}
TEST(BarrierProxyTest, TimeoutIfNotEnoughThreadEntered) {
  TestBarrierProxyWait(
      /*num_tasks=*/2,
      /*num_threads_planned=*/8,
      /*num_threads_entered=*/7,
      /*expected_ok_count=*/0,
      /*agent_wait_status=*/{},
      /*expected_same_exit_status_for_all_threads=*/
      errors::DeadlineExceeded("BarrierProxy timeout: key=", kTestKey));
}
TEST(BarrierProxyTest, ExtraThreadsEnteringTheBarrierGetErrors) {
  TestBarrierProxyWait(
      /*num_tasks=*/2,
      /*num_threads_planned=*/8,
      /*num_threads_entered=*/10,
      /*expected_ok_count=*/8,
      /*agent_wait_status=*/absl::OkStatus(),
      /*expected_same_exit_status_for_all_threads=*/{});
}
void TestBarrierProxyManagerWaitSingleKey(
int num_threads_planned, int num_threads_entered,
std::optional<Status> agent_wait_status, int expected_ok_count) {
auto agent = std::make_unique<MockCoordinationServiceAgent>();
const std::vector<CoordinatedTask> tasks;
BarrierProxyManager mgr;
std::atomic<int> actual_ok_count = 0;
if (agent_wait_status.has_value()) {
EXPECT_CALL(*agent, WaitAtBarrier(kTestKey, kTestTimeout, _))
.WillOnce(Return(agent_wait_status.value()));
}
{
thread::ThreadPool pool(Env::Default(), "TestPool",
num_threads_planned);
for (int i = 0; i < num_threads_entered; ++i) {
pool.Schedule([&]() {
actual_ok_count += mgr.Wait(agent.get(), tasks, num_threads_planned,
kTestKey, kTestTimeout)
.ok();
});
}
}
ASSERT_EQ(actual_ok_count, expected_ok_count);
ASSERT_EQ(mgr.size(), 0);
}
TEST(BarrierProxyManagerTest, AllThreadExited) {
  TestBarrierProxyManagerWaitSingleKey(
      /*num_threads_planned=*/8,
      /*num_threads_entered=*/8,
      /*agent_wait_status=*/absl::OkStatus(),
      /*expected_ok_count=*/8);
}
TEST(BarrierProxyManagerTest, AllThreadTimedOut) {
  TestBarrierProxyManagerWaitSingleKey(
      /*num_threads_planned=*/8,
      /*num_threads_entered=*/7,
      /*agent_wait_status=*/{},
      /*expected_ok_count=*/0);
}
TEST(BarrierProxyManagerTest, CoordinationServiceError) {
  TestBarrierProxyManagerWaitSingleKey(
      /*num_threads_planned=*/8,
      /*num_threads_entered=*/8,
      /*agent_wait_status=*/errors::Internal(""),
      /*expected_ok_count=*/0);
}
TEST(BarrierProxyManagerTest, ExtraThreadsEnteringTheSameKeyGetErrors) {
  TestBarrierProxyManagerWaitSingleKey(
      /*num_threads_planned=*/8,
      /*num_threads_entered=*/10,
      /*agent_wait_status=*/absl::OkStatus(),
      /*expected_ok_count=*/8);
}
TEST(BarrierProxyManagerTest, DifferentKeysDoNotInterfereWithEachOther) {
constexpr int kNumThreads = 8;
auto agent = std::make_unique<MockCoordinationServiceAgent>();
const std::vector<CoordinatedTask> tasks;
BarrierProxyManager mgr;
EXPECT_CALL(*agent, WaitAtBarrier("key0", kTestTimeout, _))
.WillOnce(Return(absl::OkStatus()));
EXPECT_CALL(*agent, WaitAtBarrier("key1", kTestTimeout, _))
.WillOnce(Return(absl::OkStatus()));
{
thread::ThreadPool pool(Env::Default(), "TestPool",
kThreadPoolSize);
for (int i = 0; i < kNumThreads * 2; ++i) {
pool.Schedule([&, key = absl::StrCat("key", i % 2)]() {
ASSERT_EQ(mgr.Wait(agent.get(), tasks, kNumThreads, key, kTestTimeout),
absl::OkStatus());
});
}
}
}
}
} |
1,306 | cpp | tensorflow/tensorflow | runtime_client | tensorflow/core/function/runtime_client/runtime_client.cc | tensorflow/core/function/runtime_client/runtime_client_test.cc | #ifndef TENSORFLOW_CORE_FUNCTION_RUNTIME_CLIENT_RUNTIME_CLIENT_H_
#define TENSORFLOW_CORE_FUNCTION_RUNTIME_CLIENT_RUNTIME_CLIENT_H_
#include <vector>
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/stringpiece.h"
namespace tensorflow {
namespace core {
namespace function {
struct OpaqueTfgGraphFuncOp;
struct OpaqueTfFuncOp;
EagerContext& GlobalPythonEagerContext();
EagerContext& GlobalEagerContext();
using ReturnValues = std::vector<ImmediateTensorHandlePtr>;
class Runtime {
public:
explicit Runtime(EagerContext& eager_ctx) : eager_ctx_(eager_ctx) {}
enum class Dialect {
TFG,
TF,
};
absl::StatusOr<FunctionDef> GetFunctionProto(StringPiece name);
Status CreateFunction(const FunctionDef& fdef);
Status CreateFunction(OpaqueTfgGraphFuncOp* fop);
Status CreateFunction(OpaqueTfFuncOp* fop);
Status TransformFunction(StringPiece name, StringPiece pipeline_name,
Dialect dialect = Dialect::TFG);
absl::StatusOr<ReturnValues> CallFunction(
StringPiece name, absl::Span<AbstractTensorHandle* const> args);
private:
EagerContext& eager_ctx_;
};
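// A minimal usage sketch (assumes an initialized EagerContext `ctx` and a
// FunctionDef `fdef` built elsewhere; error handling elided):
//
//   Runtime rt(ctx);
//   Status s = rt.CreateFunction(fdef);
//   absl::StatusOr<ReturnValues> rets =
//       rt.CallFunction(fdef.signature().name(), /*args=*/{});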
}
}
}
#endif
#include "tensorflow/core/function/runtime_client/runtime_client.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/immediate_execution_context.h"
#include "tensorflow/c/eager/immediate_execution_operation.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#if !defined(DISABLE_MLIR)
#include "tensorflow/compiler/mlir/python/mlir.h"
#endif
#include "tensorflow/compiler/mlir/tensorflow/translate/export_graphdef.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/common_runtime/function_def_utils.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/ir/importexport/graphdef_export.h"
#include "tensorflow/core/ir/importexport/graphdef_import.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace core {
namespace function {
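// Returns a lazily constructed, process-wide EagerContext backed by the local
// devices; it is intentionally never destroyed so it outlives all callers.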
EagerContext& GlobalEagerContext() {
static EagerContext* global_ctx = []() {
SessionOptions opts;
std::vector<std::unique_ptr<Device>> devices;
Status&& device_init_status = DeviceFactory::AddDevices(
opts, "/job:localhost/replica:0/task:0", &devices);
CHECK(device_init_status.ok());
return new EagerContext(
opts, ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
        /*async=*/false,
        /*device_mgr=*/new DynamicDeviceMgr(std::move(devices)),
        /*device_mgr_owned=*/true,
        /*rendezvous=*/nullptr,
        /*cluster_flr=*/nullptr,
        /*collective_executor_mgr=*/nullptr,
        /*run_eager_op_as_function=*/true);
}();
return *global_ctx;
}
EagerContext& GlobalPythonEagerContext() {
EagerContext* ctx = reinterpret_cast<EagerContext*>(GetCEagerContext());
DCHECK(ctx) << "The Python eager context must be initialized first.";
return *ctx;
}
absl::StatusOr<FunctionDef> Runtime::GetFunctionProto(StringPiece name) {
EagerContext& ctx = this->eager_ctx_;
const FunctionDef* f = ctx.FindFunctionDef(std::string(name));
if (f == nullptr) {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("Could not find an attribute for key ", name));
}
return *f;
}
Status Runtime::CreateFunction(const FunctionDef& fdef) {
const auto& fname = fdef.signature().name();
if (this->eager_ctx_.FindFunctionByName(fname)) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(this->eager_ctx_.RemoveFunction(fname),
"removing function ", fname);
}
return this->eager_ctx_.AddFunctionDef(fdef);
}
Status Runtime::CreateFunction(OpaqueTfgGraphFuncOp* fop) {
mlir::tfg::GraphFuncOp fop_proper =
*reinterpret_cast<mlir::tfg::GraphFuncOp*>(fop);
return mlir::tfg::ConvertToFunctionDef(fop_proper,
*this->eager_ctx_.FuncLibDef());
}
Status Runtime::CreateFunction(OpaqueTfFuncOp* fop) {
mlir::func::FuncOp fop_proper = *reinterpret_cast<mlir::func::FuncOp*>(fop);
const auto& fname = fop_proper.getName().str();
GraphExportConfig config;
FunctionDef fdef;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
tf2xla::v2::ConvertMlirFunctionToFunctionLibraryDef(fop_proper, config,
&fdef),
"creating function ", fname);
return CreateFunction(fdef);
}
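// Round-trips the named function through MLIR: parses `pipeline_name` into a
// pass pipeline, imports the FunctionDef into the requested dialect (TFG or
// TF), runs the passes, and re-registers every function found in the
// resulting module.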
Status Runtime::TransformFunction(StringPiece name, StringPiece pipeline_name,
Dialect dialect) {
mlir::MLIRContext ctx;
mlir::PassManager pm(&ctx);
std::string error;
llvm::raw_string_ostream error_stream(error);
if (mlir::failed(mlir::parsePassPipeline(std::string(pipeline_name), pm,
error_stream))) {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("locating pass pipeline ", pipeline_name, ": ",
error_stream.str()));
}
auto fn = GetFunctionProto(name);
TF_RETURN_WITH_CONTEXT_IF_ERROR(fn.status(), "loading function ", name);
GraphDef graph;
*graph.mutable_library()->add_function() = *fn;
tensorflow::GraphDebugInfo debug_info;
if (dialect == Dialect::TFG) {
auto mlir_fn = mlir::tfg::ImportGraphDef(&ctx, debug_info, graph);
TF_RETURN_WITH_CONTEXT_IF_ERROR(mlir_fn.status(), "importing function ",
name);
mlir::StatusScopedDiagnosticHandler diagnostics_handler(&ctx);
if (failed(pm.run(mlir_fn->get()))) {
return diagnostics_handler.Combine(
Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("running pass pipeline ", pipeline_name, ": ")));
}
for (auto fn : mlir_fn->get().getBody()->getOps<mlir::tfg::GraphFuncOp>()) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(
CreateFunction(reinterpret_cast<OpaqueTfgGraphFuncOp*>(&fn)),
absl::StrCat("updating function ", fn.getName().str()));
}
return absl::OkStatus();
}
if (dialect == Dialect::TF) {
Status status;
FunctionLibraryDefinition& flib_def = *this->eager_ctx_.FuncLibDef();
std::unique_ptr<FunctionBody> fbody;
status = FunctionDefToBodyHelper(*fn, AttrSlice(), &flib_def, &fbody);
TF_RETURN_WITH_CONTEXT_IF_ERROR(status, "importing function ", name);
auto mlir_fn = ConvertFunctionToMlir(fbody.get(), flib_def, &ctx);
TF_RETURN_WITH_CONTEXT_IF_ERROR(mlir_fn.status(), "importing function ",
name);
mlir::StatusScopedDiagnosticHandler diagnostics_handler(&ctx);
if (failed(pm.run(mlir_fn->get()))) {
return diagnostics_handler.Combine(
Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("running pass pipeline ", pipeline_name, ": ")));
}
for (auto fn : mlir_fn->get().getBody()->getOps<mlir::func::FuncOp>()) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(
CreateFunction(reinterpret_cast<OpaqueTfFuncOp*>(&fn)),
absl::StrCat("updating function ", fn.getName().str()));
}
return absl::OkStatus();
}
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("Unsupported dialect: ", dialect,
". Supported dialects are Dialect::TFG and Dialect::TF."));
}
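// Executes a registered function eagerly: builds a call op, attaches `args`,
// and sizes the return vector from the function signature's output arity.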
absl::StatusOr<ReturnValues> Runtime::CallFunction(
StringPiece name, absl::Span<AbstractTensorHandle* const> args) {
EagerContext& ctx = this->eager_ctx_;
ImmediateOpPtr op(ctx.CreateOperation());
TF_RETURN_WITH_CONTEXT_IF_ERROR(op->Reset(name.data(), nullptr),
"initializing call op for ", name);
TF_RETURN_WITH_CONTEXT_IF_ERROR(op->AddInputList(args),
"preparing call args for ", name);
const FunctionDef* fn_def = ctx.GetFunctionDef(string(name));
int num_retvals = fn_def->signature().output_arg_size();
int actual_retvals = num_retvals;
std::vector<ImmediateExecutionTensorHandle*> retvals(num_retvals);
TF_RETURN_WITH_CONTEXT_IF_ERROR(
op->Execute(absl::MakeSpan(
reinterpret_cast<AbstractTensorHandle**>(retvals.data()),
num_retvals),
&actual_retvals),
"executing call op for ", name);
DCHECK(num_retvals == actual_retvals);
ReturnValues final_returns;
for (const auto& r : retvals) {
final_returns.emplace_back(ImmediateTensorHandlePtr(r));
}
return final_returns;
}
}
}
} | #include "tensorflow/core/function/runtime_client/runtime_client.h"
#include <stdint.h>
#include <memory>
#include <utility>
#include <vector>
#include "absl/types/span.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/c/tensor_interface.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/function/testing/test_pass.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace core {
namespace function {
namespace {
EagerContextPtr TestingEagerCtx() {
SessionOptions opts;
std::vector<std::unique_ptr<Device>> devices;
Status&& device_init_status = DeviceFactory::AddDevices(
opts, "/job:localhost/replica:0/task:0", &devices);
CHECK(device_init_status.ok());
return EagerContextPtr(new EagerContext(
opts, ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
      /*async=*/false,
      /*device_mgr=*/new DynamicDeviceMgr(std::move(devices)),
      /*device_mgr_owned=*/true,
      /*rendezvous=*/nullptr,
      /*cluster_flr=*/nullptr,
      /*collective_executor_mgr=*/nullptr,
      /*run_eager_op_as_function=*/true));
}
int IntValue(ImmediateExecutionTensorHandle& h) {
Status status;
AbstractTensorPtr t(h.Resolve(&status));
DCHECK(status.ok());
switch (h.DataType()) {
case DT_INT32:
return *(static_cast<int32_t*>(t->Data()));
case DT_INT64:
return *(static_cast<int64_t*>(t->Data()));
default:
DCHECK(false) << "invalid data type";
return 0;
}
}
ImmediateTensorHandlePtr IntScalarTensor(EagerContext& ctx, int value) {
AbstractTensorPtr tensor(ctx.CreateInt32Scalar(value));
ImmediateTensorHandlePtr handle(ctx.CreateLocalHandle(tensor.get()));
return handle;
}
FunctionDef MakeNullaryFunction() {
FunctionDef fd;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(signature {
name: 'NullaryFunction'
output_arg { name: 'o' type: DT_INT32 }
}
node_def {
name: 'retval'
op: 'Const'
attr {
key: 'dtype'
value { type: DT_INT32 }
}
attr {
key: 'value'
value {
tensor {
dtype: DT_INT32
tensor_shape {}
int_val: 1
}
}
}
}
ret { key: 'o' value: 'retval:output' })pb",
&fd));
return fd;
}
FunctionDef MakeUnaryFunction() {
FunctionDef fd;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(signature {
name: "UnaryFunction"
input_arg { name: "x" type: DT_INT32 }
output_arg { name: "ret" type: DT_INT32 }
}
node_def {
name: "ret"
op: "Identity"
input: "x"
attr {
key: "T"
value { type: DT_INT32 }
}
}
ret { key: "ret" value: "ret:output:0" })pb",
&fd));
return fd;
}
FunctionDef MakeBinaryFunction() {
FunctionDef fd;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(signature {
name: "BinaryFunction"
input_arg { name: "x" type: DT_INT32 }
input_arg { name: "y" type: DT_INT32 }
output_arg { name: "ret" type: DT_INT32 }
}
node_def {
name: "x_plus_y"
op: "AddV2"
input: "x"
input: "y"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "ret"
op: "Identity"
input: "x_plus_y:z:0"
attr {
key: "T"
value { type: DT_INT32 }
}
}
ret { key: "ret" value: "ret:output:0" })pb",
&fd));
return fd;
}
FunctionDef MakeMultiplyFunction() {
FunctionDef fd;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(signature {
name: "MultiplyFunction"
input_arg { name: "x" type: DT_INT32 }
input_arg { name: "y" type: DT_INT32 }
output_arg { name: "ret" type: DT_INT32 }
}
node_def {
name: "x_times_y"
op: "Mul"
input: "x"
input: "y"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "ret"
op: "Identity"
input: "x_times_y:z:0"
attr {
key: "T"
value { type: DT_INT32 }
}
}
ret { key: "ret" value: "ret:output:0" })pb",
&fd));
return fd;
}
TEST(GlobalContext, Basic) {
Runtime rt(GlobalEagerContext());
TF_ASSERT_OK(rt.CreateFunction(MakeNullaryFunction()));
absl::StatusOr<ReturnValues> rets = rt.CallFunction("NullaryFunction", {});
TF_ASSERT_OK(rets.status());
ASSERT_EQ(rets->size(), 1);
ASSERT_EQ(rets->at(0)->DataType(), DT_INT32);
EXPECT_EQ(IntValue(*(rets->at(0))), 1);
}
TEST(CreateTest, Call) {
EagerContextPtr ctx = TestingEagerCtx();
Runtime rt(*ctx);
TF_ASSERT_OK(rt.CreateFunction(MakeNullaryFunction()));
absl::StatusOr<ReturnValues> rets = rt.CallFunction("NullaryFunction", {});
TF_ASSERT_OK(rets.status());
ASSERT_EQ(rets->size(), 1);
ASSERT_EQ(rets->at(0)->DataType(), DT_INT32);
EXPECT_EQ(IntValue(*(rets->at(0))), 1);
}
TEST(CreateTest, GetRoundtrip) {
EagerContextPtr ctx = TestingEagerCtx();
Runtime rt(*ctx);
TF_ASSERT_OK(rt.CreateFunction(MakeNullaryFunction()));
absl::StatusOr<FunctionDef> fdef_ret = rt.GetFunctionProto("NullaryFunction");
TF_ASSERT_OK(fdef_ret.status());
FunctionDef fdef = *fdef_ret;
fdef.mutable_signature()->set_name("SecondFunction");
TF_ASSERT_OK(rt.CreateFunction(fdef));
absl::StatusOr<ReturnValues> rets = rt.CallFunction("SecondFunction", {});
TF_ASSERT_OK(rets.status());
ASSERT_EQ(rets->size(), 1);
ASSERT_EQ(rets->at(0)->DataType(), DT_INT32);
EXPECT_EQ(IntValue(*(rets->at(0))), 1);
}
TEST(CreateTest, MlirFromGraphDef) {
mlir::MLIRContext mctx;
mctx.getOrLoadDialect<mlir::tfg::TFGraphDialect>();
auto m = mlir::parseSourceString<mlir::ModuleOp>(
R"mlir(
module {
tfg.func @NullaryFunction()
-> (tensor<i32> {tfg.dtype = i32, tfg.name = "o"})
{
%Const, %ctl = Const name("retval") {dtype = i32, value = dense<1> : tensor<i32>} : () -> (tensor<i32>)
return(%Const) : tensor<i32>
}
}
)mlir",
&mctx);
mlir::tfg::GraphFuncOp fop =
*m->getBody()->op_begin<mlir::tfg::GraphFuncOp>();
EagerContextPtr ectx = TestingEagerCtx();
Runtime rt(*ectx);
OpaqueTfgGraphFuncOp* opaque_fop =
reinterpret_cast<OpaqueTfgGraphFuncOp*>(&fop);
TF_ASSERT_OK(rt.CreateFunction(opaque_fop));
absl::StatusOr<ReturnValues> rets = rt.CallFunction("NullaryFunction", {});
TF_ASSERT_OK(rets.status());
ASSERT_EQ(rets->size(), 1);
ASSERT_EQ(rets->at(0)->DataType(), DT_INT32);
EXPECT_EQ(IntValue(*(rets->at(0))), 1);
}
TEST(CallTest, Nullary) {
EagerContextPtr ctx = TestingEagerCtx();
Runtime rt(*ctx);
TF_ASSERT_OK(rt.CreateFunction(MakeNullaryFunction()));
absl::StatusOr<ReturnValues> rets = rt.CallFunction("NullaryFunction", {});
TF_ASSERT_OK(rets.status());
ASSERT_EQ(rets->size(), 1);
ASSERT_EQ(rets->at(0)->DataType(), DT_INT32);
EXPECT_EQ(IntValue(*(rets->at(0))), 1);
}
TEST(CallTest, Unary) {
EagerContextPtr ctx = TestingEagerCtx();
Runtime rt(*ctx);
TF_ASSERT_OK(rt.CreateFunction(MakeUnaryFunction()));
auto x = IntScalarTensor(*ctx, 1);
absl::StatusOr<ReturnValues> rets =
rt.CallFunction("UnaryFunction", {x.get()});
TF_ASSERT_OK(rets.status());
ASSERT_EQ(rets->size(), 1);
ASSERT_EQ(rets->at(0)->DataType(), DT_INT32);
EXPECT_EQ(IntValue(*(rets->at(0))), 1);
}
TEST(CallTest, Binary) {
EagerContextPtr ctx = TestingEagerCtx();
Runtime rt(*ctx);
TF_ASSERT_OK(rt.CreateFunction(MakeBinaryFunction()));
auto x = IntScalarTensor(*ctx, 1);
auto y = IntScalarTensor(*ctx, 1);
absl::StatusOr<ReturnValues> rets =
rt.CallFunction("BinaryFunction", {x.get(), y.get()});
TF_ASSERT_OK(rets.status());
ASSERT_EQ(rets->size(), 1);
ASSERT_EQ(rets->at(0)->DataType(), DT_INT32);
EXPECT_EQ(IntValue(*(rets->at(0))), 2);
}
TEST(TransformTest, TestPassOnBinaryFunction) {
EagerContextPtr ctx = TestingEagerCtx();
Runtime rt(*ctx);
TF_ASSERT_OK(rt.CreateFunction(MakeBinaryFunction()));
testing::RegisterTestPass();
TF_EXPECT_OK(rt.TransformFunction("BinaryFunction", "test-pass"));
auto x = IntScalarTensor(*ctx, 2);
auto y = IntScalarTensor(*ctx, 3);
absl::StatusOr<ReturnValues> rets =
rt.CallFunction("BinaryFunction", {x.get(), y.get()});
TF_ASSERT_OK(rets.status());
ASSERT_EQ(rets->size(), 1);
ASSERT_EQ(rets->at(0)->DataType(), DT_INT32);
EXPECT_EQ(IntValue(*(rets->at(0))), 6);
}
TEST(TransformTest, TestPassOnMultiplyFunction) {
EagerContextPtr ctx = TestingEagerCtx();
Runtime rt(*ctx);
TF_ASSERT_OK(rt.CreateFunction(MakeMultiplyFunction()));
testing::RegisterTestPass();
TF_EXPECT_OK(rt.TransformFunction("MultiplyFunction", "test-pass-tf-dialect",
Runtime::Dialect::TF));
auto x = IntScalarTensor(*ctx, 2);
auto y = IntScalarTensor(*ctx, 3);
absl::StatusOr<ReturnValues> rets =
rt.CallFunction("MultiplyFunction", {x.get(), y.get()});
TF_ASSERT_OK(rets.status());
ASSERT_EQ(rets->size(), 1);
ASSERT_EQ(rets->at(0)->DataType(), DT_INT32);
EXPECT_EQ(IntValue(*(rets->at(0))), 5);
}
TEST(TransformTest, TestMixedPassesOnBinaryFunction) {
EagerContextPtr ctx = TestingEagerCtx();
Runtime rt(*ctx);
TF_ASSERT_OK(rt.CreateFunction(MakeBinaryFunction()));
testing::RegisterTestPass();
TF_EXPECT_OK(rt.TransformFunction("BinaryFunction", "test-pass"));
TF_EXPECT_OK(rt.TransformFunction("BinaryFunction", "test-pass-tf-dialect",
Runtime::Dialect::TF));
auto x = IntScalarTensor(*ctx, 2);
auto y = IntScalarTensor(*ctx, 3);
absl::StatusOr<ReturnValues> rets =
rt.CallFunction("BinaryFunction", {x.get(), y.get()});
TF_ASSERT_OK(rets.status());
ASSERT_EQ(rets->size(), 1);
ASSERT_EQ(rets->at(0)->DataType(), DT_INT32);
EXPECT_EQ(IntValue(*(rets->at(0))), 5);
}
}
}
}
} |
1,307 | cpp | tensorflow/tensorflow | graph_partition | tensorflow/core/graph/graph_partition.cc | tensorflow/core/graph/graph_partition_test.cc | #ifndef TENSORFLOW_CORE_GRAPH_GRAPH_PARTITION_H_
#define TENSORFLOW_CORE_GRAPH_GRAPH_PARTITION_H_
#include <functional>
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/graph/costmodel.h"
#include "tensorflow/core/graph/graph.h"
namespace tensorflow {
struct PartitionOptions {
typedef std::function<string(const Node*)> NodeToLocFunc;
NodeToLocFunc node_to_loc = nullptr;
typedef std::function<string(const string&)> NewNameFunc;
NewNameFunc new_name = nullptr;
static constexpr uint64 kIllegalIncarnation = 0;
typedef std::function<uint64(const string&)> GetIncarnationFunc;
GetIncarnationFunc get_incarnation = nullptr;
const FunctionLibraryDefinition* flib_def = nullptr;
bool control_flow_added = false;
typedef std::function<DataType(const Edge*)> ShouldCastFunc;
ShouldCastFunc should_cast = nullptr;
bool scheduling_for_recvs = false;
bool need_to_record_start_times = false;
std::vector<Microseconds> start_times;
std::function<string(const Edge*)> get_tensor_name_attr = nullptr;
bool can_make_destructive_changes = false;
};
Status Partition(const PartitionOptions& opts, Graph* input,
std::unordered_map<string, GraphDef>* partitions);
Status AddControlEdges(const PartitionOptions& opts,
std::unordered_map<string, GraphDef>* partitions);
}
#endif
#include "tensorflow/core/graph/graph_partition.h"
#include <deque>
#include <memory>
#include <queue>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/memory_types.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_debug_info_builder.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
inline bool IsMerge(const NodeDef& node_def) {
return node_def.op() == "Merge" || node_def.op() == "RefMerge" ||
node_def.op() == "_XlaMerge";
}
inline bool IsNextIteration(const NodeDef& node_def) {
return node_def.op() == "NextIteration" ||
node_def.op() == "RefNextIteration";
}
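// Key used to deduplicate Recv nodes: multiple edges from the same source
// node/port into one partition (with the same host/device placement of the
// received output) can share a single Recv.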
struct DupRecvKey {
int src_node_id;
int src_output_slot;
GraphDef* dst_graph;
bool recv_output_on_host;
template <typename H>
friend H AbslHashValue(H h, const DupRecvKey& c) {
return H::combine(std::move(h), c.src_node_id, c.src_output_slot,
reinterpret_cast<std::uintptr_t>(c.dst_graph),
c.recv_output_on_host);
}
friend bool operator==(const DupRecvKey& x, const DupRecvKey& y) {
return (x.src_node_id == y.src_node_id) &&
(x.src_output_slot == y.src_output_slot) &&
(x.dst_graph == y.dst_graph) &&
(x.recv_output_on_host == y.recv_output_on_host);
}
};
struct RecvInfo {
NodeDef* recv;
NodeDef* real_recv;
int64_t start_time;
};
typedef absl::flat_hash_map<DupRecvKey, RecvInfo> DupRecvTable;
struct NodePort {
int node_id;
int index;
friend bool operator==(const NodePort& x, const NodePort& y) {
return x.node_id == y.node_id && x.index == y.index;
}
template <typename H>
friend H AbslHashValue(H h, const NodePort& c) {
return H::combine(std::move(h), c.node_id, c.index);
}
};
typedef absl::flat_hash_map<NodePort, MemoryType> MemoryTypeMap;
struct GraphInfo {
std::vector<DeviceType> device_types;
MemoryTypeMap input_types;
MemoryTypeMap output_types;
std::vector<ControlFlowInfo> cf_info;
};
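// Data type carried over an edge; control edges are materialized as dummy
// empty float tensors during partitioning (see AddDummyConst), hence DT_FLOAT.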
DataType EdgeType(const Edge* e) {
if (e->IsControlEdge()) {
return DT_FLOAT;
} else {
return e->dst()->input_type(e->dst_input());
}
}
bool NeedSameDeviceSendRecv(const Edge* edge, const GraphInfo& info) {
if (edge->IsControlEdge()) {
return false;
}
const Node* src = edge->src();
const Node* dst = edge->dst();
if (src->assigned_device_name() == dst->assigned_device_name()) {
int src_port = edge->src_output();
int dst_port = edge->dst_input();
if (info.device_types[src->id()] != DEVICE_CPU) {
auto src_it = info.output_types.find({src->id(), src_port});
DCHECK(src_it != info.output_types.end());
auto dst_it = info.input_types.find({dst->id(), dst_port});
DCHECK(dst_it != info.input_types.end());
return src_it->second != dst_it->second;
}
}
return false;
}
bool IsDstInputOnHost(const Edge* edge, const GraphInfo& info) {
const Node* dst = edge->dst();
int dst_port = edge->dst_input();
if (info.device_types[dst->id()] != DEVICE_CPU) {
if (edge->IsControlEdge()) return false;
auto dst_it = info.input_types.find({dst->id(), dst_port});
DCHECK(dst_it != info.input_types.end());
return dst_it->second == HOST_MEMORY;
}
return true;
}
void AddReadControl(const std::vector<NodeDef*>& recvs,
const std::vector<string>& inputs) {
for (NodeDef* recv : recvs) {
for (const string& input : inputs) {
recv->add_input(strings::StrCat("^", input));
}
}
}
void SetSendRecvAttrs(const PartitionOptions& opts, const Edge* edge,
const string& tensor_name_attr, NodeDefBuilder* builder) {
builder->Attr("tensor_name", tensor_name_attr);
builder->Attr("send_device", edge->src()->assigned_device_name());
builder->Attr("send_device_incarnation",
static_cast<int64_t>(
opts.get_incarnation(edge->src()->assigned_device_name())));
builder->Attr("recv_device", edge->dst()->assigned_device_name());
builder->Attr("client_terminated", false);
builder->Attr("_src", edge->src()->name());
builder->Attr("_dst", edge->dst()->name());
}
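// Emits the Send half of a cross-device edge. If the caller requested a dtype
// cast (opts.should_cast) and the pair is not a same-device transfer, a
// Cast/_HostCast is inserted before the _Send/_HostSend; the host variants are
// chosen when the source output lives in host memory.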
NodeDef* AddSend(const PartitionOptions& opts, const GraphInfo& g_info,
GraphDef* gdef, const Edge* edge,
NodeDefBuilder::NodeOut send_from, int64_t start_time,
const string& tensor_name_attr, Status* status) {
const DataType dtype = send_from.data_type;
const DataType cast_dtype = opts.should_cast ? opts.should_cast(edge) : dtype;
const Node* src = edge->src();
const int src_port = edge->src_output();
bool host_memory = false;
if (!edge->IsControlEdge()) {
auto src_it = g_info.output_types.find({src->id(), src_port});
DCHECK(src_it != g_info.output_types.end());
host_memory = (src_it->second == HOST_MEMORY);
}
if (dtype != cast_dtype && !NeedSameDeviceSendRecv(edge, g_info)) {
const string cast_op = (host_memory) ? "_HostCast" : "Cast";
NodeDefBuilder cast_builder(opts.new_name(src->name()), cast_op,
NodeDebugInfo(*src));
cast_builder.Device(src->assigned_device_name()).Input(send_from);
if (opts.scheduling_for_recvs) {
cast_builder.Attr("_start_time", start_time);
}
cast_builder.Attr("DstT", cast_dtype);
if (cast_dtype == DT_BFLOAT16) {
cast_builder.Attr("Truncate", true);
}
NodeDef* cast = gdef->add_node();
*status = cast_builder.Finalize(cast, true);
if (!status->ok()) return nullptr;
send_from.Reset(cast->name(), 0, cast_dtype);
}
const string send_op = (host_memory) ? "_HostSend" : "_Send";
NodeDefBuilder send_builder(opts.new_name(src->name()), send_op,
NodeDebugInfo(*src));
SetSendRecvAttrs(opts, edge, tensor_name_attr, &send_builder);
send_builder.Device(src->assigned_device_name()).Input(send_from);
if (opts.scheduling_for_recvs) {
send_builder.Attr("_start_time", start_time);
}
NodeDef* send = gdef->add_node();
*status = send_builder.Finalize(send, true);
return send;
}
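// Emits the Recv half. Mirrors AddSend's cast logic on the receiving side,
// appends a Cast back to the original dtype when needed, and materializes an
// Identity after the Recv for control edges so downstream nodes have a real
// node to depend on.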
NodeDef* AddRecv(const PartitionOptions& opts, const GraphInfo& g_info,
GraphDef* gdef, const Edge* edge, NodeDef** real_recv,
const string& tensor_name_attr, Status* status) {
const DataType dtype = EdgeType(edge);
const Node* src = edge->src();
const Node* dst = edge->dst();
const int dst_port = edge->dst_input();
DataType cast_dtype = dtype;
if (opts.should_cast && !NeedSameDeviceSendRecv(edge, g_info)) {
cast_dtype = opts.should_cast(edge);
}
bool host_memory = false;
if (!edge->IsControlEdge()) {
auto dst_it = g_info.input_types.find({dst->id(), dst_port});
DCHECK(dst_it != g_info.input_types.end());
host_memory = (dst_it->second == HOST_MEMORY);
bool src_host_memory = false;
if (VLOG_IS_ON(1)) {
const int src_port = edge->src_output();
auto src_it = g_info.output_types.find({src->id(), src_port});
DCHECK(src_it != g_info.output_types.end());
src_host_memory = (src_it->second == HOST_MEMORY);
}
VLOG(1) << "Receiving data"
<< " from " << src->name() << " (" << src->type_string() << ")"
<< " on " << src->assigned_device_name() << " in "
<< (src_host_memory ? "host memory" : "device memory") << " for "
<< dst->name() << " (" << dst->type_string() << ")"
<< " on " << dst->assigned_device_name() << " in "
<< (host_memory ? "host memory" : "device memory");
} else {
VLOG(1) << "Receiving control"
<< " from " << src->name() << " (" << src->type_string() << ")"
<< " on " << src->assigned_device_name() << " for " << dst->name()
<< " (" << dst->type_string() << ")"
<< " on " << dst->assigned_device_name();
}
const string recv_op = (host_memory) ? "_HostRecv" : "_Recv";
NodeDefBuilder recv_builder(opts.new_name(src->name()), recv_op,
NodeDebugInfo(*src));
SetSendRecvAttrs(opts, edge, tensor_name_attr, &recv_builder);
recv_builder.Device(dst->assigned_device_name())
.Attr("tensor_type", cast_dtype);
NodeDef* recv = gdef->add_node();
*status = recv_builder.Finalize(recv, true);
if (!status->ok()) return nullptr;
*real_recv = recv;
if (dtype != cast_dtype) {
const string cast_op = (host_memory) ? "_HostCast" : "Cast";
NodeDefBuilder cast_builder(opts.new_name(src->name()), cast_op,
NodeDebugInfo(*src));
cast_builder.Attr("DstT", dtype);
cast_builder.Device(dst->assigned_device_name())
.Input(recv->name(), 0, cast_dtype);
NodeDef* cast = gdef->add_node();
*status = cast_builder.Finalize(cast, true);
if (!status->ok()) return nullptr;
return cast;
} else if (edge->IsControlEdge()) {
NodeDefBuilder id_builder(opts.new_name(src->name()), "Identity",
NodeDebugInfo(*src));
id_builder.Device(dst->assigned_device_name())
.Input(recv->name(), 0, cast_dtype);
NodeDef* id = gdef->add_node();
*status = id_builder.Finalize(id, true);
if (!status->ok()) return nullptr;
return id;
} else {
return recv;
}
}
NodeDef* AddDummyConst(const PartitionOptions& opts, GraphDef* gdef,
const Edge* edge, Status* status) {
const Node* src = edge->src();
Tensor tensor(DT_FLOAT, TensorShape({0}));
NodeDef* result = gdef->add_node();
*status = NodeDefBuilder(opts.new_name(strings::StrCat(src->name(), "/ctrl")),
"Const")
.Device(src->assigned_device_name())
.Attr("dtype", DT_FLOAT)
.Attr("value", tensor)
.Finalize(result, true);
return result;
}
NodeDef* AddControlTrigger(const PartitionOptions& opts, GraphDef* gdef,
const string& assigned_device_name, int64_t epoch,
int64_t starttime, Status* status) {
NodeDef* result = gdef->add_node();
*status = NodeDefBuilder(opts.new_name(strings::StrCat("synch_", epoch)),
"ControlTrigger")
.Device(assigned_device_name)
.Attr("_start_time", starttime)
.Finalize(result, true);
return result;
}
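// Re-colocates control-flow nodes to reduce cross-device hops: Switch follows
// its data input, Exit follows its non-control input, and non-ref
// Enter/NextIteration nodes follow their first data consumer.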
void OptimizeControlFlowColocation(Graph* graph) {
auto visit = [](Node* node) {
if (IsSwitch(node)) {
for (const Edge* in_edge : node->in_edges()) {
if (in_edge->dst_input() == 0) {
node->set_assigned_device_name(
in_edge->src()->assigned_device_name());
return;
}
}
} else if (IsExit(node)) {
for (const Edge* in_edge : node->in_edges()) {
if (!in_edge->IsControlEdge()) {
node->set_assigned_device_name(
in_edge->src()->assigned_device_name());
return;
}
}
} else {
if ((IsEnter(node) && !IsRefType(node->input_type(0))) ||
IsNextIteration(node)) {
const Edge* data_edge = nullptr;
for (const Edge* out_edge : node->out_edges()) {
if (!out_edge->IsControlEdge()) {
data_edge = out_edge;
break;
}
}
if (data_edge) {
node->set_assigned_device_name(
data_edge->dst()->assigned_device_name());
}
}
}
};
DFS(*graph, visit, {});
}
string ControlLoopName(const string& name) {
return strings::StrCat("_cloop", name);
}
bool IsControlLoop(const Node* node) {
const string& name = node->name();
return absl::StartsWith(name, "_cloop");
}
Node* AddControlEnter(Graph* g, const string& node_name,
const string& device_name, const string& frame_name,
const int parallel_iterations, Status* status) {
NodeBuilder node_builder(node_name, "Enter", g->op_registry());
node_builder.Input({"dummy", 0, DT_FLOAT});
node_builder.Attr("frame_name", frame_name);
node_builder.Attr("parallel_iterations", parallel_iterations);
Node* res_node;
*status = node_builder.Finalize(g, &res_node, true);
if (!status->ok()) return nullptr;
res_node->set_assigned_device_name(device_name);
return res_node;
}
Node* AddControlMerge(const string& in_name1, const string& in_name2, Graph* g,
const string& node_name, const string& device_name,
Status* status) {
NodeBuilder node_builder(node_name, "Merge", g->op_registry());
node_builder.Input({{in_name1, 0, DT_FLOAT}, {in_name2, 0, DT_FLOAT}});
Node* res_node;
*status = node_builder.Finalize(g, &res_node, true);
if (!status->ok()) return nullptr;
res_node->set_assigned_device_name(device_name);
return res_node;
}
Node* AddControlSwitch(NodeBuilder::NodeOut input1, NodeBuilder::NodeOut input2,
const string& device_name,
const GraphDefBuilder::Options& bopts) {
Node* res_node =
ops::BinaryOp("Switch", std::move(input1), std::move(input2), bopts);
if (bopts.HaveError()) return nullptr;
res_node->set_assigned_device_name(device_name);
return res_node;
}
Node* AddControlNext(NodeBuilder::NodeOut input, const string& device_name,
const GraphDefBuilder::Options& bopts) {
Node* res_node = ops::UnaryOp("NextIteration", std::move(input), bopts);
if (bopts.HaveError()) return nullptr;
res_node->set_assigned_device_name(device_name);
return res_node;
}
Node* EmptyConst(const GraphDefBuilder::Options& options) {
if (options.HaveError()) return nullptr;
NodeBuilder node_builder(options.GetNameForOp("Const"), "Const",
options.op_registry());
const DataType dt = DataTypeToEnum<float>::v();
TensorProto proto;
proto.set_dtype(dt);
TensorShape empty_shape({0});
empty_shape.AsProto(proto.mutable_tensor_shape());
node_builder.Attr("dtype", dt).Attr("value", proto);
return options.FinalizeBuilder(&node_builder);
}
Node* AddControlConst(const string& device_name,
const GraphDefBuilder::Options& bopts) {
Node* res_node = EmptyConst(bopts);
if (bopts.HaveError()) return nullptr;
res_node->set_assigned_device_name(device_name);
return res_node;
}
struct ControlLoop {
Node* enter = nullptr;
Node* merge = nullptr;
Node* switch_node = nullptr;
};
void AddControlFlowInfo(const Node* node, const Node* src,
std::vector<ControlFlowInfo>* cf_info) {
int id = node->id();
if (static_cast<size_t>(id) >= cf_info->size()) {
cf_info->resize(id + 1);
}
const ControlFlowInfo& src_info = (*cf_info)[src->id()];
ControlFlowInfo* info = &(*cf_info)[id];
info->frame = src_info.frame;
info->parent_frame = src_info.parent_frame;
info->frame_name = src_info.frame_name;
}
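// Builds the per-device control loop (Enter -> Merge -> Switch ->
// NextIteration) that mirrors a cross-device while loop's termination
// condition on the destination device, so Recvs on that device can be driven
// once per iteration.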
Status AddControlLoop(const PartitionOptions& opts, Graph* g, const Node* src,
const Edge* edge, Node* loop_cond,
std::vector<ControlFlowInfo>* cf_info,
ControlLoop* loop) {
Status status;
GraphDefBuilder::Options bopts(g, &status);
const ControlFlowInfo& src_info = (*cf_info)[src->id()];
const string& device_name = edge->dst()->assigned_device_name();
const string& frame_name = src_info.frame_name;
int parallel_iterations;
status = GetNodeAttr(src_info.frame->attrs(), "parallel_iterations",
                       &parallel_iterations);
if (!status.ok()) return status;
const string& enter_name =
ControlLoopName(opts.new_name(edge->dst()->name()));
const string& merge_name =
ControlLoopName(opts.new_name(edge->dst()->name()));
const string& switch_name =
ControlLoopName(opts.new_name(edge->dst()->name()));
const string& next_name = ControlLoopName(opts.new_name(edge->dst()->name()));
Node* enter = AddControlEnter(g, enter_name, device_name, frame_name,
parallel_iterations, &status);
if (!status.ok()) return status;
Node* merge = AddControlMerge(enter_name, next_name, g, merge_name,
device_name, &status);
if (!status.ok()) return status;
Node* switch_node = AddControlSwitch(merge, loop_cond, device_name,
bopts.WithName(switch_name));
if (!status.ok()) return status;
Node* next =
AddControlNext({switch_node, 1}, device_name, bopts.WithName(next_name));
if (!status.ok()) return status;
AddControlFlowInfo(enter, src, cf_info);
AddControlFlowInfo(merge, src, cf_info);
AddControlFlowInfo(switch_node, src, cf_info);
AddControlFlowInfo(next, src, cf_info);
g->AddEdge(enter, 0, merge, 0);
g->AddEdge(next, 0, merge, 1);
loop->enter = enter;
loop->merge = merge;
loop->switch_node = switch_node;
return absl::OkStatus();
}
Status BuildMemoryDeviceInfo(const Graph& g, GraphInfo* info) {
MemoryTypeVector input_memory_types;
MemoryTypeVector output_memory_types;
info->device_types.resize(g.num_node_ids(), DEVICE_CPU);
for (const Node* node : g.op_nodes()) {
DeviceNameUtils::ParsedName parsed;
if (!DeviceNameUtils::ParseFullName(node->assigned_device_name(),
&parsed)) {
return errors::Internal("Malformed assigned device '",
node->assigned_device_name(), "'");
}
TF_RETURN_IF_ERROR(MemoryTypesForNode(
g.op_registry(), DeviceType(parsed.type), node->def(),
&input_memory_types, &output_memory_types));
int node_id = node->id();
info->device_types[node_id] = DeviceType(parsed.type);
for (int i = 0; i < input_memory_types.size(); ++i) {
info->input_types[{node_id, i}] = input_memory_types[i];
}
for (int i = 0; i < output_memory_types.size(); ++i) {
info->output_types[{node_id, i}] = output_memory_types[i];
}
}
return absl::OkStatus();
}
const Node* InputFrame(const Node* node,
const std::vector<ControlFlowInfo>& cf_info) {
if (!node->IsEnter()) {
return node;
}
return cf_info[node->id()].parent_frame;
}
const Node* OutputFrame(const Node* node,
const std::vector<ControlFlowInfo>& cf_info) {
if (!node->IsExit()) {
return node;
}
return cf_info[node->id()].parent_frame;
}
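// Walks every cross-device edge inside a while-loop frame and attaches the
// control loops needed on the destination device, chaining nested frames to
// their parents via the loops' Enter/Merge nodes.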
Status AddControlFlow(const PartitionOptions& opts, Graph* g,
GraphInfo* g_info) {
Status status;
GraphDefBuilder::Options bopts(g, &status);
std::vector<ControlFlowInfo>& cf_info = g_info->cf_info;
status = BuildControlFlowInfo(g, &cf_info);
if (!status.ok()) return status;
OptimizeControlFlowColocation(g);
std::unordered_map<string, Node*> frame_cond_map;
int num_node_ids = g->num_node_ids();
for (int i = 0; i < num_node_ids; ++i) {
Node* node = g->FindNodeId(i);
if (node == nullptr) continue;
if (IsLoopCond(node)) {
const string& frame_name = cf_info[node->id()].frame_name;
DCHECK(!frame_name.empty());
frame_cond_map[frame_name] = node;
}
}
std::unordered_map<string, ControlLoop> control_loops;
int num_edge_ids = g->num_edge_ids();
for (int i = 0; i < num_edge_ids; ++i) {
const Edge* edge = g->FindEdgeId(i);
if (edge == nullptr) continue;
const Node* src = edge->src();
const Node* dst = edge->dst();
if (!src->IsOp() || !dst->IsOp()) continue;
const string& src_device = src->assigned_device_name();
const string& dst_device = dst->assigned_device_name();
if (src_device == dst_device) continue;
const Node* src_frame = OutputFrame(src, cf_info);
const Node* dst_frame = InputFrame(dst, cf_info);
const string& src_frame_name = cf_info[src_frame->id()].frame_name;
const string& dst_frame_name = cf_info[dst_frame->id()].frame_name;
if (src_frame_name.empty() || src_frame_name != dst_frame_name) {
continue;
}
ControlLoop child_loop;
while (true) {
const string& curr_frame_name = cf_info[src_frame->id()].frame_name;
if (curr_frame_name.empty()) {
if (child_loop.merge != nullptr) {
const string& node_name = opts.new_name(edge->dst()->name());
const string& device_name = edge->dst()->assigned_device_name();
Node* const_node =
AddControlConst(device_name, bopts.WithName(node_name));
if (!status.ok()) return status;
AddControlFlowInfo(const_node, src_frame, &cf_info);
g->AddEdge(const_node, 0, child_loop.enter, 0);
}
break;
}
const string& cl_key = strings::StrCat(curr_frame_name, "$$", dst_device);
auto it = control_loops.find(cl_key);
if (it != control_loops.end()) {
if (child_loop.enter != nullptr) {
g->AddEdge(it->second.merge, 0, child_loop.enter, 0);
}
break;
}
auto cond_it = frame_cond_map.find(curr_frame_name);
if (cond_it == frame_cond_map.end()) {
return errors::InvalidArgument(
"A cross-device loop must have a pivot predicate: ",
curr_frame_name);
}
Node* loop_cond = cond_it->second; | #include "tensorflow/core/graph/graph_partition.h"
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/control_flow_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/random_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/while_loop.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_debug_info_builder.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/equal_graph_def.h"
namespace tensorflow {
extern Status TopologicalSortNodesWithTimePriority(
const GraphDef* gdef,
std::vector<std::pair<const NodeDef*, int64_t>>* nodes,
std::unordered_map<const NodeDef*, int64_t>* node_to_start_time_out);
namespace {
using ops::_Recv;
using ops::_Send;
using ops::Const;
using ops::Identity;
using ops::LoopCond;
using ops::NextIteration;
using ::testing::Eq;
using ::testing::Ne;
const char gpu_device[] = "/job:a/replica:0/task:0/device:GPU:0";
string SplitByDevice(const Node* node) { return node->assigned_device_name(); }
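// Derives a device from the node-name prefix used throughout these tests:
// names starting with 'G' land on the GPU; 'A', 'B', ... map to cpu:0, cpu:1,
// and so on.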
string DeviceName(const Node* node) {
char first = node->name()[0];
if (first == 'G') {
return gpu_device;
} else {
const string cpu_prefix = "/job:a/replica:0/task:0/cpu:";
int index = first - 'A';
return strings::StrCat(cpu_prefix, index);
}
}
void Partition(const GraphDef& graph_def,
std::unordered_map<string, GraphDef>* partitions) {
Graph g(OpRegistry::Global());
GraphConstructorOptions opts;
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, &g));
for (Node* node : g.nodes()) {
string device_name = !node->requested_device().empty()
? node->requested_device()
: DeviceName(node);
node->set_assigned_device_name(device_name);
}
PartitionOptions popts;
popts.node_to_loc = SplitByDevice;
popts.new_name = [&g](const string& prefix) { return g.NewName(prefix); };
popts.get_incarnation = [](const string& name) {
return (name[0] - 'A') + 100;
};
Status s = Partition(popts, &g, partitions);
CHECK(s.ok()) << s;
EXPECT_EQ(graph_def.versions().producer(), TF_GRAPH_DEF_VERSION);
for (auto& it : *partitions) {
EXPECT_EQ(graph_def.versions().producer(), it.second.versions().producer());
EXPECT_EQ(graph_def.versions().min_consumer(),
it.second.versions().min_consumer());
}
}
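// Verifies that partitioning a cross-device loop synthesized the "_cloop"
// control-loop nodes (Enter/Merge/Switch/NextIteration) in each partition and
// that every _Recv is gated by at least one control input.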
void CheckLoopConstruction(const GraphDef& graph_def) {
std::unordered_map<string, GraphDef> partitions;
Partition(graph_def, &partitions);
for (const auto& kv : partitions) {
const GraphDef& gdef = kv.second;
bool has_control_enter = false;
bool has_control_merge = false;
bool has_control_switch = false;
bool has_control_next = false;
for (const NodeDef& ndef : gdef.node()) {
if (ndef.op() == "_Recv") {
bool has_control = false;
for (const string& input_name : ndef.input()) {
if (absl::StartsWith(input_name, "^")) {
has_control = true;
break;
}
}
EXPECT_TRUE(has_control);
}
if (absl::StartsWith(ndef.name(), "_cloop")) {
if (ndef.op() == "Enter") {
has_control_enter = true;
}
if (ndef.op() == "Merge") {
has_control_merge = true;
}
if (ndef.op() == "Switch") {
has_control_switch = true;
}
if (ndef.op() == "NextIteration") {
has_control_next = true;
}
}
}
EXPECT_TRUE(has_control_enter);
EXPECT_TRUE(has_control_merge);
EXPECT_TRUE(has_control_switch);
EXPECT_TRUE(has_control_next);
}
}
REGISTER_OP("FloatInput")
.Output("o: float")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("BoolInput")
.Output("o: bool")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("Combine")
.Input("a: float")
.Input("b: float")
.Output("o: float")
.SetShapeFn(shape_inference::UnknownShape);
Output ConstructOp(const Scope& scope, const string& op_type,
const absl::Span<const Input>& inputs) {
if (!scope.ok()) return Output();
const string unique_name = scope.GetUniqueNameForOp(op_type);
auto builder =
NodeBuilder(unique_name, op_type, scope.graph()->op_registry());
for (auto const& input : inputs) {
builder.Input(ops::NodeOut(input.node(), input.index()));
}
scope.UpdateBuilder(&builder);
Node* ret;
scope.UpdateStatus(builder.Finalize(scope.graph(), &ret));
if (!scope.ok()) return Output();
scope.UpdateStatus(scope.DoShapeInference(ret));
if (!scope.ok()) return Output();
return Output(ret);
}
Output FloatInput(const Scope& scope) {
return ConstructOp(scope, "FloatInput", {});
}
Output BoolInput(const Scope& scope) {
return ConstructOp(scope, "BoolInput", {});
}
Output Combine(const Scope& scope, Input a, Input b) {
return ConstructOp(scope, "Combine", {std::move(a), std::move(b)});
}
std::string FormatStackTrace(const GraphDebugInfo::StackTrace& stack_trace,
const GraphDebugInfo& debug_info) {
std::string result;
for (const GraphDebugInfo::FileLineCol& file_line_col :
stack_trace.file_line_cols()) {
const std::string& file = debug_info.files(file_line_col.file_index());
absl::StrAppend(&result, file_line_col.func(), "@", file, ":",
file_line_col.line(), ".", file_line_col.col(), "\n");
}
return result;
}
class GraphPartitionTest : public ::testing::Test {
protected:
GraphPartitionTest()
: in_(Scope::NewRootScope().ExitOnError()),
scope_a_(Scope::NewRootScope().ExitOnError().WithDevice(
"/job:a/replica:0/task:0/cpu:0")),
scope_b_(Scope::NewRootScope().ExitOnError().WithDevice(
"/job:a/replica:0/task:0/cpu:1")) {}
const GraphDef& ToGraphDef(bool include_debug_info = false) {
TF_EXPECT_OK(in_.ToGraphDef(&in_graph_def_, include_debug_info));
return in_graph_def_;
}
void ExpectMatchA() {
GraphDef graph_def;
TF_EXPECT_OK(scope_a_.ToGraphDef(&graph_def));
string a = "/job:a/replica:0/task:0/cpu:0";
TF_EXPECT_GRAPH_EQ(graph_def, partitions_[a]);
}
void ExpectMatchB() {
GraphDef graph_def;
TF_EXPECT_OK(scope_b_.ToGraphDef(&graph_def));
string b = "/job:a/replica:0/task:0/cpu:1";
TF_EXPECT_GRAPH_EQ(graph_def, partitions_[b]);
}
void ExpectFunctions(const FunctionDefLibrary& library,
const std::set<string>& expected_names) {
std::set<string> actual_names;
for (const FunctionDef& fdef : library.function()) {
actual_names.insert(fdef.signature().name());
}
EXPECT_EQ(actual_names, expected_names);
}
Scope in_;
GraphDef in_graph_def_;
Scope scope_a_;
Scope scope_b_;
std::unordered_map<string, GraphDef> partitions_;
};
TEST_F(GraphPartitionTest, SingleDevice) {
auto a1 = FloatInput(in_.WithOpName("A1"));
Combine(in_.WithOpName("A2"), a1, a1);
Partition(ToGraphDef(), &partitions_);
EXPECT_EQ(1, partitions_.size());
a1 = FloatInput(scope_a_.WithOpName("A1"));
Combine(scope_a_.WithOpName("A2"), a1, a1);
ExpectMatchA();
}
TEST_F(GraphPartitionTest, CrossDeviceData) {
auto a1 = FloatInput(in_.WithOpName("A1"));
auto b1 = FloatInput(in_.WithOpName("B1"));
Combine(in_.WithOpName("B2"), a1, b1);
Partition(ToGraphDef(), &partitions_);
EXPECT_EQ(2, partitions_.size());
string a = "/job:a/replica:0/task:0/cpu:0";
string b = "/job:a/replica:0/task:0/cpu:1";
a1 = FloatInput(scope_a_.WithOpName("A1"));
_Send(scope_a_.WithOpName("A1/_0"), a1, "edge_1_A1", a, 82, b);
ExpectMatchA();
b1 = FloatInput(scope_b_.WithOpName("B1"));
auto recv =
_Recv(scope_b_.WithOpName("A1/_1"), DT_FLOAT, "edge_1_A1", a, 82, b);
Combine(scope_b_.WithOpName("B2"), recv, b1);
ExpectMatchB();
}
TEST_F(GraphPartitionTest, CrossDeviceControl) {
auto a1 = FloatInput(in_.WithOpName("A1"));
auto b1 = FloatInput(in_.WithOpName("B1"));
Combine(in_.WithOpName("B2").WithControlDependencies(a1), b1, b1);
Partition(ToGraphDef(), &partitions_);
EXPECT_EQ(2, partitions_.size());
string a = "/job:a/replica:0/task:0/cpu:0";
string b = "/job:a/replica:0/task:0/cpu:1";
a1 = FloatInput(scope_a_.WithOpName("A1"));
auto c =
Const(scope_a_.WithOpName("A1/ctrl/_0").WithControlDependencies(a1), {});
_Send(scope_a_.WithOpName("A1/_1"), c, "edge_3_A1", a, 82, b);
ExpectMatchA();
auto recv =
_Recv(scope_b_.WithOpName("A1/_2"), DT_FLOAT, "edge_3_A1", a, 82, b);
auto id = Identity(scope_b_.WithOpName("A1/_3"), recv);
b1 = FloatInput(scope_b_.WithOpName("B1"));
Combine(scope_b_.WithOpName("B2").WithControlDependencies(id), b1, b1);
ExpectMatchB();
}
TEST_F(GraphPartitionTest, CrossDeviceData_MultiUse) {
auto a1 = FloatInput(in_.WithOpName("A1"));
auto b1 = FloatInput(in_.WithOpName("B1"));
Combine(in_.WithOpName("B2"), a1, b1);
Combine(in_.WithOpName("B3"), a1, a1);
Partition(ToGraphDef(), &partitions_);
EXPECT_EQ(2, partitions_.size());
string a = "/job:a/replica:0/task:0/cpu:0";
string b = "/job:a/replica:0/task:0/cpu:1";
a1 = FloatInput(scope_a_.WithOpName("A1"));
_Send(scope_a_.WithOpName("A1/_0"), a1, "edge_1_A1", a, 82, b);
ExpectMatchA();
auto recv =
_Recv(scope_b_.WithOpName("A1/_1"), DT_FLOAT, "edge_1_A1", a, 82, b);
b1 = FloatInput(scope_b_.WithOpName("B1"));
Combine(scope_b_.WithOpName("B2"), recv, b1);
Combine(scope_b_.WithOpName("B3"), recv, recv);
ExpectMatchB();
}
TEST_F(GraphPartitionTest, CrossDeviceControl_MultiUse) {
auto a1 = FloatInput(in_.WithOpName("A1"));
auto b1 = FloatInput(in_.WithOpName("B1"));
Combine(in_.WithOpName("B2").WithControlDependencies(a1), b1, b1);
FloatInput(in_.WithOpName("B3").WithControlDependencies(a1));
Partition(ToGraphDef(), &partitions_);
EXPECT_EQ(2, partitions_.size());
string a = "/job:a/replica:0/task:0/cpu:0";
string b = "/job:a/replica:0/task:0/cpu:1";
a1 = FloatInput(scope_a_.WithOpName("A1"));
auto c =
Const(scope_a_.WithOpName("A1/ctrl/_0").WithControlDependencies(a1), {});
_Send(scope_a_.WithOpName("A1/_1"), c, "edge_3_A1", a, 82, b);
ExpectMatchA();
auto recv =
_Recv(scope_b_.WithOpName("A1/_2"), DT_FLOAT, "edge_3_A1", a, 82, b);
auto id = Identity(scope_b_.WithOpName("A1/_3"), recv);
b1 = FloatInput(scope_b_.WithOpName("B1"));
Combine(scope_b_.WithOpName("B2").WithControlDependencies(id), b1, b1);
FloatInput(scope_b_.WithOpName("B3").WithControlDependencies(id));
ExpectMatchB();
}
TEST_F(GraphPartitionTest, CrossDevice_DataControl) {
auto a1 = FloatInput(in_.WithOpName("A1"));
auto b1 = FloatInput(in_.WithOpName("B1"));
Combine(in_.WithOpName("B2"), a1, b1);
FloatInput(in_.WithOpName("B3").WithControlDependencies(a1));
Partition(ToGraphDef(), &partitions_);
EXPECT_EQ(2, partitions_.size());
string a = "/job:a/replica:0/task:0/cpu:0";
string b = "/job:a/replica:0/task:0/cpu:1";
a1 = FloatInput(scope_a_.WithOpName("A1"));
_Send(scope_a_.WithOpName("A1/_0"), a1, "edge_1_A1", a, 82, b);
auto c =
Const(scope_a_.WithOpName("A1/ctrl/_2").WithControlDependencies(a1), {});
_Send(scope_a_.WithOpName("A1/_3"), c, "edge_3_A1", a, 82, b);
ExpectMatchA();
auto recv1 =
_Recv(scope_b_.WithOpName("A1/_4"), DT_FLOAT, "edge_3_A1", a, 82, b);
auto id1 = Identity(scope_b_.WithOpName("A1/_5"), recv1);
auto recv2 =
_Recv(scope_b_.WithOpName("A1/_1"), DT_FLOAT, "edge_1_A1", a, 82, b);
b1 = FloatInput(scope_b_.WithOpName("B1"));
Combine(scope_b_.WithOpName("B2"), recv2, b1);
FloatInput(scope_b_.WithOpName("B3").WithControlDependencies(id1));
ExpectMatchB();
}
TEST_F(GraphPartitionTest, CrossDeviceLoopSimple) {
auto a1 = BoolInput(in_.WithOpName("A1"));
auto a2 = ::tensorflow::ops::internal::Enter(in_.WithOpName("A2"), a1, "foo");
auto a3 = ::tensorflow::ops::Merge(in_.WithOpName("A3"),
{a2, Input("A5", 0, DT_BOOL)})
.output;
LoopCond(in_.WithOpName("A4"), a3);
auto b1 = Identity(in_.WithOpName("B1"), a3);
NextIteration(in_.WithOpName("A5"), b1);
CheckLoopConstruction(ToGraphDef());
}
TEST_F(GraphPartitionTest, CrossDeviceLoopSimple1) {
auto a1 = BoolInput(in_.WithOpName("A1"));
auto a2 = ::tensorflow::ops::internal::Enter(in_.WithOpName("B2"), a1, "foo");
auto a3 = ::tensorflow::ops::Merge(in_.WithOpName("A3"),
{a2, Input("B5", 0, DT_BOOL)})
.output;
LoopCond(in_.WithOpName("A4"), a3);
auto b1 = Identity(in_.WithOpName("B1"), a3);
NextIteration(in_.WithOpName("B5"), b1);
std::unordered_map<string, GraphDef> partitions;
Partition(ToGraphDef(), &partitions);
for (const auto& kv : partitions) {
const GraphDef& gdef = kv.second;
for (const NodeDef& ndef : gdef.node()) {
if (ndef.name() == "A3") {
EXPECT_EQ(ndef.input(0), "B2");
EXPECT_EQ(ndef.input(1), "B5");
}
}
}
}
TEST_F(GraphPartitionTest, CrossDeviceLoopFull) {
Scope cpu0 = in_.WithDevice("/job:a/replica:0/task:0/cpu:0");
auto p1 = ops::Placeholder(cpu0, DT_INT32);
auto p2 = ops::Placeholder(cpu0, DT_INT32);
OutputList outputs;
TF_ASSERT_OK(ops::BuildWhileLoop(
cpu0, {p1, p2},
[](const Scope& s, const std::vector<Output>& inputs, Output* output) {
*output = ops::Less(s, inputs[0], 10);
return s.status();
},
[](const Scope& s, const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
Scope cpu1 = s.WithDevice("/job:a/replica:0/task:0/cpu:1");
outputs->push_back(ops::AddN(cpu1, {inputs[0], inputs[1]}));
outputs->push_back(inputs[1]);
return s.status();
},
"test_loop", &outputs));
CheckLoopConstruction(ToGraphDef());
}
TEST_F(GraphPartitionTest, PartitionIncompleteGraph) {
NodeDef ndef;
Graph g(OpRegistry::Global());
bool parsed = protobuf::TextFormat::ParseFromString(
R"EOF(
name: "N"
op: "Combine"
)EOF",
&ndef);
ASSERT_TRUE(parsed);
Status status;
g.AddNode(ndef, &status);
TF_ASSERT_OK(status);
PartitionOptions popts;
popts.node_to_loc = SplitByDevice;
popts.new_name = [&g](const string& prefix) { return g.NewName(prefix); };
popts.get_incarnation = [](const string&) { return 1; };
std::unordered_map<string, GraphDef> partitions;
status = Partition(popts, &g, &partitions);
EXPECT_EQ(error::INVALID_ARGUMENT, status.code()) << status;
}
TEST_F(GraphPartitionTest, Functions) {
FunctionDefLibrary fdef_lib;
*fdef_lib.add_function() = test::function::XTimesTwo();
*fdef_lib.add_function() = test::function::XTimesFour();
TF_ASSERT_OK(in_.graph()->AddFunctionLibrary(fdef_lib));
auto a1 = FloatInput(in_.WithOpName("A1"));
auto b1 = FloatInput(in_.WithOpName("B1"));
ConstructOp(in_.WithOpName("A2"), "XTimesTwo", {a1});
ConstructOp(in_.WithOpName("B2"), "XTimesFour", {b1});
Partition(ToGraphDef(), &partitions_);
EXPECT_EQ(2, partitions_.size());
string a = "/job:a/replica:0/task:0/cpu:0";
string b = "/job:a/replica:0/task:0/cpu:1";
ExpectFunctions(partitions_[a].library(), {"XTimesTwo"});
ExpectFunctions(partitions_[b].library(), {"XTimesTwo", "XTimesFour"});
}
TEST_F(GraphPartitionTest, SetIncarnation) {
GraphDef gdef;
const char* const kSendRecvAttrs = R"pb(
attr {
key: 'T'
value { type: DT_FLOAT }
}
attr {
key: 'client_terminated'
value { b: false }
}
attr {
key: 'recv_device'
value { s: 'B' }
}
attr {
key: 'send_device'
value { s: 'A' }
}
attr {
key: 'send_device_incarnation'
value { i: 0 }
}
attr {
key: 'tensor_name'
value { s: 'test' }
}
)pb";
CHECK(protobuf::TextFormat::ParseFromString(
strings::StrCat(
"node { name: 'A/Pi' op: 'Const' ",
" attr { key: 'dtype' value { type: DT_FLOAT } } ",
" attr { key: 'value' value { tensor { ",
" dtype: DT_FLOAT tensor_shape {} float_val: 3.14 } } } }",
"node { name: 'A' op: '_Send' input: 'A/Pi' ", kSendRecvAttrs, "}",
"node { name: 'B' op: '_Recv' ", kSendRecvAttrs,
" attr { key: 'tensor_type' value { type:DT_FLOAT}}}"),
&gdef));
gdef.mutable_versions()->set_producer(TF_GRAPH_DEF_VERSION);
Partition(gdef, &partitions_);
EXPECT_EQ(2, partitions_.size());
for (const auto& kv : partitions_) {
const GraphDef& gdef = kv.second;
for (const NodeDef& ndef : gdef.node()) {
if (ndef.name() == "A" || ndef.name() == "B") {
int64_t val;
TF_CHECK_OK(GetNodeAttr(ndef, "send_device_incarnation", &val));
EXPECT_EQ(val, 100);
}
}
}
}
TEST_F(GraphPartitionTest, GraphDebugInfo) {
GraphDef graph_def;
Output a1 = FloatInput(in_.WithOpName("A1"));
Output b1 = FloatInput(in_.WithOpName("B1"));
Combine(in_.WithOpName("B2"), a1, b1);
Node *a1_node = nullptr, *b1_node = nullptr, *b2_node = nullptr;
for (Node* node : in_.graph()->op_nodes()) {
if (node->name() == "A1") {
a1_node = node;
} else if (node->name() == "B1") {
b1_node = node;
} else if (node->name() == "B2") {
b2_node = node;
}
}
EXPECT_NE(a1_node, nullptr);
EXPECT_NE(b1_node, nullptr);
EXPECT_NE(b2_node, nullptr);
std::vector<StackFrame> a1_stack_trace{{"main.cc", 20, "x"},
{"alpha.cc", 30, "a1"}};
std::vector<StackFrame> b1_stack_trace{{"window.cc", 21, "y"},
{"beta.cc", 35, "b1"}};
std::vector<StackFrame> b2_stack_trace{{"cache.cc", 22, "bar"},
{"beta.cc", 39, "b2"}};
a1_node->SetStackTrace(std::make_shared<FrozenStackTrace>(a1_stack_trace));
b1_node->SetStackTrace(std::make_shared<FrozenStackTrace>(b1_stack_trace));
b2_node->SetStackTrace(std::make_shared<FrozenStackTrace>(b2_stack_trace));
TF_EXPECT_OK(in_.ToGraphDef(&graph_def, true));
Partition(ToGraphDef(true), &partitions_);
EXPECT_EQ(2, partitions_.size());
string a = "/job:a/replica:0/task:0/cpu:0";
const GraphDebugInfo& a_debug_info = partitions_[a].debug_info();
StackTracesMap traces = LoadTracesFromDebugInfo(a_debug_info);
const auto& a_it = traces.find("A1");
EXPECT_THAT(a_it, Ne(traces.end()));
EXPECT_THAT(a_it->second->ToString({}),
::testing::ContainsRegex("alpha.cc.*30"));
string b = "/job:a/replica:0/task:0/cpu:1";
const GraphDebugInfo& b_debug_info = partitions_[b].debug_info();
traces = LoadTracesFromDebugInfo(b_debug_info);
const auto& b1_it = traces.find("B1");
const auto& b2_it = traces.find("B2");
EXPECT_THAT(b1_it, Ne(traces.end()));
EXPECT_THAT(b2_it, Ne(traces.end()));
EXPECT_THAT(b1_it->second->ToString({}),
::testing::ContainsRegex("beta.cc.*35"));
EXPECT_THAT(b2_it->second->ToString({}),
::testing::ContainsRegex("beta.cc.*39"));
}
TEST(TopologicalSortNodesWithTimePriority, NoDependencies) {
Scope root = Scope::NewRootScope().ExitOnError();
std::vector<int> indexes;
for (int i = 0; i < 20; ++i) {
indexes.push_back((i + 2001) % 20);
}
std::vector<ops::Placeholder> placeholders;
for (int i : indexes) {
placeholders.emplace_back(root.WithOpName(strings::StrCat("p", i)),
DT_FLOAT);
placeholders.back().node()->AddAttr("_start_time", i + 1);
}
GraphDef gdef;
TF_EXPECT_OK(root.ToGraphDef(&gdef));
std::vector<std::pair<const NodeDef*, int64_t>> nodes;
std::unordered_map<const NodeDef*, int64_t> node_to_start_time;
TF_CHECK_OK(
TopologicalSortNodesWithTimePriority(&gdef, &nodes, &node_to_start_time));
ASSERT_EQ(nodes.size(), 20);
for (int i = 0; i < nodes.size(); ++i) {
EXPECT_EQ(strings::StrCat("p", i), nodes[i].first->name());
EXPECT_EQ(i + 1, nodes[i].second);
}
}
TEST(TopologicalSortNodesWithTimePriority, Dependencies) {
Scope root = Scope::NewRootScope().ExitOnError();
std::vector<int> indexes;
std::vector<ops::Placeholder> placeholders_in_order;
const int num_leaves = 20;
for (int i = 0; i < num_leaves; ++i) {
indexes.push_back((i + 2001) % num_leaves);
placeholders_in_order.emplace_back(root.WithOpName(strings::StrCat("p", i)),
DT_FLOAT);
placeholders_in_order.back().node()->AddAttr("_start_time", i + 1);
}
std::vector<ops::Placeholder> placeholders;
for (int i : indexes) {
placeholders.push_back(placeholders_in_order[i]);
}
std::vector<ops::Square> squares;
for (int i : indexes) {
squares.emplace_back(root.WithOpName(strings::StrCat("s", i)),
placeholders[i]);
squares.back().node()->AddAttr("_start_time", 50 - (i + 1));
}
std::vector<Input> inputs;
for (const auto& s : squares) inputs.push_back(s);
ops::AddN addn =
ops::AddN(root.WithOpName("addn"), absl::Span<const Input>(inputs));
addn.node()->AddAttr("_start_time", 1);
GraphDef gdef;
TF_EXPECT_OK(root.ToGraphDef(&gdef));
std::vector<std::pair<const NodeDef*, int64_t>> nodes;
std::unordered_map<const NodeDef*, int64_t> node_to_start_time;
TF_CHECK_OK(
TopologicalSortNodesWithTimePriority(&gdef, &nodes, &node_to_start_time));
ASSERT_EQ(1 + squares.size() + placeholders.size(), nodes.size());
for (int i = 0; i < placeholders.size(); ++i) {
const NodeDef* node = nodes[i].first;
EXPECT_EQ(strings::StrCat("p", i), node->name());
EXPECT_EQ(i + 1, nodes[i].second);
EXPECT_EQ(i + 1, node_to_start_time[node]);
}
for (int i = 0; i < squares.size(); ++i) {
int node_index = placeholders.size() + i;
int square_index = num_leaves - 1 - i;
const NodeDef* node = nodes[node_index].first;
EXPECT_EQ(strings::StrCat("s", square_index), node->name());
EXPECT_EQ(50 - (square_index + 1), nodes[node_index].second);
EXPECT_EQ(50 - (square_index + 1), node_to_start_time[node]);
}
EXPECT_EQ("addn", nodes.back().first->name());
EXPECT_EQ(50, nodes.back().second);
EXPECT_EQ(50, node_to_start_time[nodes.back().first]);
}
TEST(TopologicalSortNodesWithTimePriority, WhileLoop) {
using namespace ::tensorflow::ops;
using namespace ::tensorflow::ops::internal;
Scope root = Scope::NewRootScope().ExitOnError();
std::vector<int> indexes;
std::vector<Placeholder> placeholders_in_order;
const int num_leaves = 20;
for (int i = 0; i < num_leaves; ++i) {
indexes.push_back((i + 2001) % num_leaves);
placeholders_in_order.emplace_back(root.WithOpName(strings::StrCat("p", i)),
DT_FLOAT);
placeholders_in_order.back().node()->AddAttr("_start_time", i + 1);
}
std::vector<Placeholder> placeholders;
placeholders.reserve(indexes.size());
for (int i : indexes) {
placeholders.push_back(placeholders_in_order[i]);
}
std::vector<Exit> while_exits;
const int nodes_per_loop = 8;
for (int i : indexes) {
Scope scope = root.NewSubScope(strings::StrCat("while", i));
auto dummy = Placeholder(scope, DT_FLOAT);
Enter enter(scope, placeholders[i], strings::StrCat("frame", i));
Merge merge(scope, std::initializer_list<Input>{enter, dummy});
auto cv = Const(scope.WithControlDependencies({merge.output}), false);
LoopCond loop_cond(scope, cv);
Switch switch_node(scope, merge.output, loop_cond);
Identity identity(scope, switch_node.output_true);
NextIteration next_iteration(scope, identity);
while_exits.emplace_back(scope.WithOpName("exit"),
switch_node.output_false);
scope.graph()->RemoveNode(dummy.node());
scope.graph()->AddEdge(next_iteration.node(), 0, merge.output.node(), 1);
int base_start_time = i * 10 + 100;
for (const auto& op : std::initializer_list<Output>{
enter, merge.output, cv, loop_cond, switch_node.output_false,
identity, next_iteration, while_exits.back()}) {
op.node()->AddAttr("_start_time", base_start_time++);
}
}
std::vector<Square> squares;
squares.reserve(indexes.size());
for (int i : indexes) {
squares.emplace_back(root.WithOpName(strings::StrCat("s", i)),
while_exits[i]);
squares.back().node()->AddAttr("_start_time", 500 - (i + 1));
}
GraphDef gdef;
TF_EXPECT_OK(root.ToGraphDef(&gdef));
std::vector<std::pair<const NodeDef*, int64_t>> nodes;
std::unordered_map<const NodeDef*, int64_t> node_to_start_time;
TF_CHECK_OK(
TopologicalSortNodesWithTimePriority(&gdef, &nodes, &node_to_start_time));
ASSERT_LT(while_exits.size() + squares.size() + placeholders.size(),
nodes.size());
int node_index = 0;
for (int i = 0; i < placeholders.size(); ++i, ++node_index) {
const NodeDef* node = nodes[i].first;
EXPECT_EQ(strings::StrCat("p", i), node->name());
EXPECT_EQ(i + 1, nodes[i].second);
EXPECT_EQ(i + 1, node_to_start_time[node]);
}
for (int i = 0; i < while_exits.size(); ++i, node_index += nodes_per_loop) {
const NodeDef* node = nodes[node_index].first;
EXPECT_EQ(strings::StrCat("while", i, "/Enter"), node->name());
EXPECT_EQ(100 + i * 10, nodes[node_index].second);
EXPECT_EQ(100 + i * 10, node_to_start_time[node]);
}
for (int i = 0; i < squares.size(); ++i, ++node_index) {
int square_index = num_leaves - 1 - i;
const NodeDef* node = nodes[node_index].first;
EXPECT_EQ(strings::StrCat("s", square_index), node->name());
EXPECT_EQ(500 - (square_index + 1), nodes[node_index].second);
EXPECT_EQ(500 - (square_index + 1), node_to_start_time[node]);
}
}
}
} |
1,308 | cpp | tensorflow/tensorflow | fallback_tensor | tensorflow/core/tfrt/utils/fallback_tensor.cc | tensorflow/core/tfrt/utils/fallback_tensor_test.cc | #ifndef TENSORFLOW_CORE_TFRT_UTILS_FALLBACK_TENSOR_H_
#define TENSORFLOW_CORE_TFRT_UTILS_FALLBACK_TENSOR_H_
#include <utility>
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
namespace tfrt_stub {
class ImmutableTensor {
public:
ImmutableTensor() = default;
static ImmutableTensor Create(tensorflow::Tensor tensor);
tensorflow::Tensor& tensor() { return tensor_; }
const tensorflow::Tensor& tensor() const { return tensor_; }
private:
explicit ImmutableTensor(tensorflow::Tensor tensor)
: tensor_(std::move(tensor)) {
DCHECK(!tensor_.RefCountIsOne())
<< "Immutable tensors' buffers cannot be forwarded.";
}
tensorflow::Tensor tensor_;
};
class FallbackTensor {
public:
FallbackTensor() = default;
explicit FallbackTensor(const tensorflow::Tensor& tensor) : tensor_(tensor) {}
explicit FallbackTensor(tensorflow::Tensor&& tensor)
: tensor_(std::move(tensor)) {}
explicit FallbackTensor(ImmutableTensor* immutable_tensor)
: tensor_(immutable_tensor->tensor()), is_immutable_(true) {}
FallbackTensor(const FallbackTensor& other) { *this = other; }
FallbackTensor& operator=(const FallbackTensor& other) {
tsl::profiler::TraceMe trace_me("FallbackTensor::Copy");
if (!other.is_immutable() && other.buffer() != nullptr) {
tensor_ = std::move(
tensorflow::tfrt_stub::ImmutableTensor::Create(other.tensor())
.tensor());
} else {
tensor_ = other.tensor();
}
is_immutable_ = true;
return *this;
}
FallbackTensor(FallbackTensor&&) noexcept = default;
FallbackTensor& operator=(FallbackTensor&&) noexcept = default;
const TensorBuffer* buffer() const {
return tensorflow::DMAHelper::buffer(&tensor());
}
TensorBuffer* buffer() { return tensorflow::DMAHelper::buffer(&tensor()); }
bool is_immutable() const { return is_immutable_; }
tensorflow::Tensor& tensor() { return tensor_; }
const tensorflow::Tensor& tensor() const { return tensor_; }
private:
tensorflow::Tensor tensor_;
bool is_immutable_ = false;
};
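// Usage sketch (illustrative, not part of the original header): copying a
// mutable FallbackTensor deep-copies its buffer into an immutable one exactly
// once; every later copy then shares that immutable buffer.
//
//   tensorflow::Tensor t(int32_t{42});
//   FallbackTensor mutable_ft(t);      // is_immutable() == false
//   FallbackTensor copy = mutable_ft;  // promoted: copy.is_immutable() == true
//   FallbackTensor again = copy;       // shares the same immutable buffer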
}
}
#endif
#include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include <utility>
#include "tensorflow/core/common_runtime/dma_helper.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
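// Wraps a tensorflow::Tensor so its buffer can be shared without being
// forwarded or freed: OwnsMemory() reports false, and root_buffer() forwards
// to the original underlying buffer so aliasing checks still work.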
class ImmutableTensorBuffer final : public tensorflow::TensorBuffer {
public:
static tensorflow::core::RefCountPtr<ImmutableTensorBuffer> Create(
tensorflow::Tensor tensor);
explicit ImmutableTensorBuffer(tensorflow::Tensor tensor)
: tensorflow::TensorBuffer(tensor.data()), tensor_(std::move(tensor)) {
if (auto* buf = tensorflow::DMAHelper::buffer(&tensor_)) {
root_buffer_ = buf->root_buffer();
} else {
root_buffer_ = this;
}
}
~ImmutableTensorBuffer() override = default;
size_t size() const override {
return tensorflow::DMAHelper::buffer(&tensor_)->size();
}
bool OwnsMemory() const override { return false; }
tensorflow::TensorBuffer* root_buffer() override { return root_buffer_; }
void FillAllocationDescription(AllocationDescription* proto) const override {}
bool GetAllocatedBytes(size_t*) const override { return false; }
private:
tensorflow::Tensor tensor_;
tensorflow::TensorBuffer* root_buffer_ = nullptr;
};
tensorflow::core::RefCountPtr<ImmutableTensorBuffer>
ImmutableTensorBuffer::Create(tensorflow::Tensor tensor) {
return tensorflow::core::RefCountPtr<ImmutableTensorBuffer>(
new ImmutableTensorBuffer(std::move(tensor)));
}
}
ImmutableTensor ImmutableTensor::Create(tensorflow::Tensor tensor) {
auto dtype = tensor.dtype();
auto shape = tensor.shape();
auto immutable_buffer = ImmutableTensorBuffer::Create(std::move(tensor));
return ImmutableTensor(
tensorflow::Tensor(dtype, shape, std::move(immutable_buffer)));
}
}
} | #include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/tensor_shape.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
TEST(FallbackTensorTest, ImmutableTensor) {
int32_t scalar = 123;
tensorflow::Tensor tensor(scalar);
auto immutable_tensor = ImmutableTensor::Create(tensor);
ASSERT_EQ(immutable_tensor.tensor().NumElements(), 1);
ASSERT_EQ(immutable_tensor.tensor().dtype(), tensorflow::DT_INT32);
auto flat = immutable_tensor.tensor().flat<int32_t>();
EXPECT_EQ(flat(0), 123);
EXPECT_FALSE(immutable_tensor.tensor().RefCountIsOne());
EXPECT_EQ(tensor.TotalBytes(), immutable_tensor.tensor().TotalBytes());
}
TEST(FallbackTensorTest, StringImmutableTensor) {
tensorflow::tstring scalar = "string";
tensorflow::Tensor tensor(scalar);
auto immutable_tensor = ImmutableTensor::Create(tensor);
ASSERT_EQ(immutable_tensor.tensor().NumElements(), 1);
ASSERT_EQ(immutable_tensor.tensor().dtype(), tensorflow::DT_STRING);
auto flat = immutable_tensor.tensor().flat<tensorflow::tstring>();
EXPECT_EQ(flat(0), "string");
EXPECT_FALSE(immutable_tensor.tensor().RefCountIsOne());
EXPECT_EQ(tensor.TotalBytes(), immutable_tensor.tensor().TotalBytes());
}
TEST(FallbackTensorTest, FallbackTensor) {
int32_t scalar = 123;
tensorflow::Tensor tensor(scalar);
{
FallbackTensor fallback_tensor(tensor);
EXPECT_FALSE(fallback_tensor.is_immutable());
ASSERT_EQ(fallback_tensor.tensor().NumElements(), 1);
ASSERT_EQ(fallback_tensor.tensor().dtype(), tensorflow::DT_INT32);
auto flat = fallback_tensor.tensor().flat<int32_t>();
EXPECT_EQ(flat(0), 123);
FallbackTensor copy(fallback_tensor);
FallbackTensor assign;
assign = fallback_tensor;
ASSERT_EQ(copy.tensor().NumElements(), 1);
ASSERT_EQ(copy.tensor().dtype(), tensorflow::DT_INT32);
EXPECT_EQ(copy.tensor().flat<int32_t>()(0), 123);
ASSERT_EQ(assign.tensor().NumElements(), 1);
ASSERT_EQ(assign.tensor().dtype(), tensorflow::DT_INT32);
EXPECT_EQ(assign.tensor().flat<int32_t>()(0), 123);
fallback_tensor = {};
ASSERT_EQ(copy.tensor().NumElements(), 1);
ASSERT_EQ(copy.tensor().dtype(), tensorflow::DT_INT32);
EXPECT_EQ(copy.tensor().flat<int32_t>()(0), 123);
ASSERT_EQ(assign.tensor().NumElements(), 1);
ASSERT_EQ(assign.tensor().dtype(), tensorflow::DT_INT32);
EXPECT_EQ(assign.tensor().flat<int32_t>()(0), 123);
}
auto immutable_tensor = ImmutableTensor::Create(tensor);
{
FallbackTensor fallback_tensor(&immutable_tensor);
EXPECT_TRUE(fallback_tensor.is_immutable());
ASSERT_EQ(fallback_tensor.tensor().NumElements(), 1);
ASSERT_EQ(fallback_tensor.tensor().dtype(), tensorflow::DT_INT32);
auto flat = fallback_tensor.tensor().flat<int32_t>();
EXPECT_EQ(flat(0), 123);
FallbackTensor copy(fallback_tensor);
FallbackTensor assign;
assign = fallback_tensor;
ASSERT_EQ(copy.tensor().NumElements(), 1);
ASSERT_EQ(copy.tensor().dtype(), tensorflow::DT_INT32);
EXPECT_EQ(copy.tensor().flat<int32_t>()(0), 123);
ASSERT_EQ(assign.tensor().NumElements(), 1);
ASSERT_EQ(assign.tensor().dtype(), tensorflow::DT_INT32);
EXPECT_EQ(assign.tensor().flat<int32_t>()(0), 123);
fallback_tensor = {};
ASSERT_EQ(copy.tensor().NumElements(), 1);
ASSERT_EQ(copy.tensor().dtype(), tensorflow::DT_INT32);
EXPECT_EQ(copy.tensor().flat<int32_t>()(0), 123);
ASSERT_EQ(assign.tensor().NumElements(), 1);
ASSERT_EQ(assign.tensor().dtype(), tensorflow::DT_INT32);
EXPECT_EQ(assign.tensor().flat<int32_t>()(0), 123);
}
}
TEST(FallbackTensorTest, FallbackTensorCopy) {
int32_t scalar = 123;
tensorflow::Tensor tensor(scalar);
{
FallbackTensor fallback_tensor(tensor);
EXPECT_FALSE(fallback_tensor.is_immutable());
auto copy = fallback_tensor;
EXPECT_TRUE(copy.is_immutable());
}
auto immutable_tensor = ImmutableTensor::Create(tensor);
{
FallbackTensor fallback_tensor(&immutable_tensor);
EXPECT_TRUE(fallback_tensor.is_immutable());
auto copy = fallback_tensor;
EXPECT_TRUE(copy.is_immutable());
}
}
TEST(FallbackTensorTest, FallbackTensorCopyRootBuffer) {
int32_t scalar = 123;
tensorflow::Tensor tensor(scalar);
auto immutable_tensor = ImmutableTensor::Create(tensor);
FallbackTensor fallback_tensor(&immutable_tensor);
EXPECT_TRUE(fallback_tensor.is_immutable());
EXPECT_EQ(fallback_tensor.buffer()->root_buffer(),
tensorflow::DMAHelper::buffer(&tensor));
FallbackTensor copy = fallback_tensor;
EXPECT_TRUE(copy.is_immutable());
EXPECT_EQ(copy.buffer()->root_buffer(),
tensorflow::DMAHelper::buffer(&tensor));
}
TEST(FallbackTensorTest, EmptyTensor) {
tensorflow::Tensor tensor(tensorflow::DT_FLOAT,
tensorflow::TensorShape({1, 0}));
FallbackTensor fallback_tensor(tensor);
auto copy = fallback_tensor;
ASSERT_FALSE(copy.buffer());
}
}
}
} |
1,309 | cpp | tensorflow/tensorflow | tensor_util | tensorflow/core/tfrt/utils/tensor_util.cc | tensorflow/core/tfrt/utils/tensor_util_test.cc | #ifndef TENSORFLOW_CORE_FRAMEWORK_TENSOR_UTIL_H_
#define TENSORFLOW_CORE_FRAMEWORK_TENSOR_UTIL_H_
#include <algorithm>
#include <vector>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/type_traits.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace tensor {
Tensor DeepCopy(const Tensor& other);
void DeepCopy(const Tensor& input, Tensor* output);
Status Concat(const absl::Span<const Tensor>& tensors,
Tensor* result) TF_MUST_USE_RESULT;
Status Split(const Tensor& tensor, const absl::Span<const int64_t>& sizes,
std::vector<Tensor>* result) TF_MUST_USE_RESULT;
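// Usage sketch (illustrative): Split and Concat round-trip a tensor along
// dimension 0.
//
//   Tensor t(DT_INT32, TensorShape({10, 2}));
//   std::vector<Tensor> parts;
//   TF_CHECK_OK(tensor::Split(t, {4, 6}, &parts));  // shapes [4,2] and [6,2]
//   Tensor joined;
//   TF_CHECK_OK(tensor::Concat(parts, &joined));    // shape [10,2] again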
namespace internal {
void SetTensorProtoShape(absl::Span<const size_t> shape,
TensorShapeProto* shape_proto);
template <typename Type>
class TensorProtoFieldHelper : public std::false_type {};
#define DEFINE_PROTO_FIELD_HELPER(TYPE, FIELDNAME) \
template <> \
class TensorProtoFieldHelper<TYPE> : public std::true_type { \
public: \
typedef decltype( \
std::declval<TensorProto>().FIELDNAME##_val(0)) FieldType; \
typedef decltype( \
std::declval<TensorProto>().FIELDNAME##_val()) RepeatedFieldType; \
typedef decltype(std::declval<TensorProto>().mutable_##FIELDNAME##_val()) \
MutableRepeatedFieldType; \
static MutableRepeatedFieldType GetMutableField(TensorProto* proto) { \
return proto->mutable_##FIELDNAME##_val(); \
} \
static RepeatedFieldType& GetField(const TensorProto& proto) { \
return proto.FIELDNAME##_val(); \
} \
}
DEFINE_PROTO_FIELD_HELPER(float, float);
DEFINE_PROTO_FIELD_HELPER(double, double);
DEFINE_PROTO_FIELD_HELPER(int8, int);
DEFINE_PROTO_FIELD_HELPER(uint8, int);
DEFINE_PROTO_FIELD_HELPER(int16, int);
DEFINE_PROTO_FIELD_HELPER(uint16, int);
DEFINE_PROTO_FIELD_HELPER(int32, int);
DEFINE_PROTO_FIELD_HELPER(uint32, uint32);
DEFINE_PROTO_FIELD_HELPER(int64_t, int64);
DEFINE_PROTO_FIELD_HELPER(uint64, uint64);
DEFINE_PROTO_FIELD_HELPER(bool, bool);
DEFINE_PROTO_FIELD_HELPER(qint8, int);
DEFINE_PROTO_FIELD_HELPER(quint8, int);
DEFINE_PROTO_FIELD_HELPER(qint16, int);
DEFINE_PROTO_FIELD_HELPER(quint16, int);
DEFINE_PROTO_FIELD_HELPER(qint32, int);
DEFINE_PROTO_FIELD_HELPER(Eigen::half, half);
DEFINE_PROTO_FIELD_HELPER(bfloat16, half);
DEFINE_PROTO_FIELD_HELPER(complex64, scomplex);
DEFINE_PROTO_FIELD_HELPER(complex128, dcomplex);
#undef DEFINE_PROTO_FIELD_HELPER
template <typename T>
struct CopyHelper {
template <typename SrcIter, typename DstIter>
static void ToArray(SrcIter begin, SrcIter end, DstIter dst) {
using SrcType = typename std::iterator_traits<SrcIter>::value_type;
using DstType = typename std::iterator_traits<DstIter>::value_type;
std::transform(begin, end, dst, [](const SrcType& x) -> DstType {
return static_cast<DstType>(x);
});
}
template <typename SrcIter>
static void ToArray(SrcIter begin, SrcIter end, SrcIter dst) {
std::copy(begin, end, dst);
}
template <typename SrcIter, typename DstIter>
static void FromArray(SrcIter begin, SrcIter end, DstIter dst) {
ToArray(begin, end, dst);
}
};
template <>
struct CopyHelper<Eigen::half> {
template <typename SrcIter>
static void ToArray(SrcIter begin, SrcIter end, Eigen::half* dst) {
std::transform(begin, end, dst, [](int x) -> Eigen::half {
return Eigen::numext::bit_cast<Eigen::half>(static_cast<uint16>(x));
});
}
template <typename SrcIter, typename DstIter>
static void FromArray(SrcIter begin, SrcIter end, DstIter dst) {
std::transform(begin, end, dst, [](Eigen::half h) -> int {
return static_cast<int>(Eigen::numext::bit_cast<uint16>(h));
});
}
};
template <>
struct CopyHelper<bfloat16> {
template <typename SrcIter>
static void ToArray(SrcIter begin, SrcIter end, bfloat16* dst) {
std::transform(begin, end, dst, [](int x) -> bfloat16 {
return Eigen::numext::bit_cast<bfloat16>(static_cast<uint16>(x));
});
}
template <typename SrcIter, typename DstIter>
static void FromArray(SrcIter begin, SrcIter end, DstIter dst) {
std::transform(begin, end, dst, [](bfloat16 bf16) -> int {
return static_cast<int>(Eigen::numext::bit_cast<uint16>(bf16));
});
}
};
template <typename RealType>
struct CopyHelper<std::complex<RealType>> {
template <typename SrcIter>
static void ToArray(SrcIter begin, SrcIter end, std::complex<RealType>* dst) {
RealType* real_dst = reinterpret_cast<RealType*>(dst);
std::copy(begin, end, real_dst);
}
template <typename SrcIter, typename DstIter>
static void FromArray(SrcIter begin, SrcIter end, DstIter dst) {
size_t n = std::distance(begin, end);
const RealType* real_begin = reinterpret_cast<const RealType*>(&(*begin));
std::copy_n(real_begin, 2 * n, dst);
}
};
template <typename T>
class TensorProtoHelper : public std::true_type {
public:
using FieldHelper = TensorProtoFieldHelper<T>;
using FieldType = typename TensorProtoFieldHelper<T>::FieldType;
static DataType GetDataType() { return DataTypeToEnum<T>::value; }
static size_t NumValues(const TensorProto& proto) {
size_t raw_size = FieldHelper::GetField(proto).size();
return is_complex<T>::value ? raw_size / 2 : raw_size;
}
static void AddValue(const T& value, TensorProto* proto) {
const T* val_ptr = &value;
AddValues(val_ptr, val_ptr + 1, proto);
}
static T GetValue(size_t index, const TensorProto& proto) {
const size_t stride = is_complex<T>::value ? 2 : 1;
T val;
CopyHelper<T>::ToArray(
FieldHelper::GetField(proto).begin() + stride * index,
FieldHelper::GetField(proto).begin() + stride * (index + 1), &val);
return val;
}
template <typename IterType>
static void AddValues(IterType begin, IterType end, TensorProto* proto) {
size_t n = std::distance(begin, end);
FieldType* dst = AppendUninitialized(n, proto);
CopyHelper<T>::FromArray(begin, end, dst);
}
template <typename IterType>
static void CopyValues(IterType dst, const TensorProto& proto) {
CopyHelper<T>::ToArray(FieldHelper::GetField(proto).begin(),
FieldHelper::GetField(proto).end(), dst);
}
static void Truncate(size_t new_size, TensorProto* proto) {
if (is_complex<T>::value) new_size *= 2;
FieldHelper::GetMutableField(proto)->Truncate(new_size);
}
static FieldType* AppendUninitialized(size_t n, TensorProto* proto) {
if (is_complex<T>::value) n *= 2;
auto* field = FieldHelper::GetMutableField(proto);
field->Reserve(field->size() + n);
return reinterpret_cast<FieldType*>(field->AddNAlreadyReserved(n));
}
};
template <>
class TensorProtoHelper<string> : public std::true_type {
public:
static DataType GetDataType() { return DataType::DT_STRING; }
static void AddValue(const string& value, TensorProto* proto) {
*proto->mutable_string_val()->Add() = value;
}
template <typename IterType>
static void AddValues(IterType begin, IterType end, TensorProto* proto) {
for (IterType it = begin; it != end; ++it) {
AddValue(*it, proto);
}
}
template <typename IterType>
static void CopyToTensorContent(IterType begin, IterType end,
TensorProto* proto) {
AddValues(begin, end, proto);
}
};
template <typename Type, typename IterType>
typename std::enable_if<internal::TensorProtoHelper<Type>::value,
TensorProto>::type
CreateTensorProto(IterType values_begin, IterType values_end,
const size_t values_size,
const absl::Span<const size_t> shape) {
TensorProto tensor;
TensorShapeProto tensor_shape_proto;
internal::SetTensorProtoShape(shape, &tensor_shape_proto);
if (TensorShape(tensor_shape_proto).num_elements() != values_size) {
LOG(ERROR) << "Shape and number of values (" << values_size
<< ") are incompatible.";
return tensor;
}
using TypeHelper = internal::TensorProtoHelper<Type>;
tensor.set_dtype(TypeHelper::GetDataType());
*tensor.mutable_tensor_shape() = std::move(tensor_shape_proto);
TypeHelper::AddValues(values_begin, values_end, &tensor);
return tensor;
}
}
template <typename Type>
typename std::enable_if<internal::TensorProtoHelper<Type>::value,
TensorProto>::type
CreateTensorProtoSpan(const absl::Span<const Type> values,
const absl::Span<const size_t> shape) {
return internal::CreateTensorProto<Type>(values.begin(), values.end(),
values.size(), shape);
}
template <typename Type>
typename std::enable_if<internal::TensorProtoHelper<Type>::value,
TensorProto>::type
CreateTensorProto(const std::vector<Type>& values,
const absl::Span<const size_t> shape) {
return internal::CreateTensorProto<Type>(values.begin(), values.end(),
values.size(), shape);
}
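// Usage sketch (illustrative):
//   auto proto = tensor::CreateTensorProto(std::vector<int32>{1, 2, 3}, {3});
//   // proto.dtype() == DT_INT32, tensor_shape has one dim of size 3, and the
//   // values land in the int_val repeated field.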
bool CompressTensorProtoInPlace(int64_t min_num_elements,
float min_compression_ratio,
TensorProto* tensor);
inline bool CompressTensorProtoInPlace(TensorProto* tensor) {
static const int64_t kDefaultMinNumElements = 64;
static const float kDefaultMinCompressionRatio = 2.0f;
return CompressTensorProtoInPlace(kDefaultMinNumElements,
kDefaultMinCompressionRatio, tensor);
}
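// Illustrative effect of the defaults above: a proto whose 64 float values
// are all identical is compressed down to a single repeated-field entry.
//
//   TensorProto p =
//       tensor::CreateTensorProto(std::vector<float>(64, 1.0f), {64});
//   bool changed = tensor::CompressTensorProtoInPlace(&p);
//   // changed == true; p now holds one float_val (1.0) instead of 64.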
Status MakeShape(const Tensor& shape_t, TensorShape* out);
}
}
#endif
#include "tensorflow/core/framework/tensor_util.h"
#include <cmath>
#include <vector>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/type_traits.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/tensor_coding.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace tensor {
Tensor DeepCopy(const Tensor& other) {
Tensor tmp = Tensor(other.dtype(), other.shape());
DeepCopy(other, &tmp);
return tmp;
}
void DeepCopy(const Tensor& input, Tensor* output) {
if (DataTypeCanUseMemcpy(input.dtype())) {
if (input.NumElements() > 0) {
StringPiece input_data = input.tensor_data();
StringPiece output_data = output->tensor_data();
memcpy(const_cast<char*>(output_data.data()), input_data.data(),
input_data.size());
}
} else if (input.dtype() == DT_STRING) {
output->unaligned_flat<tstring>() = input.unaligned_flat<tstring>();
} else {
CHECK_EQ(DT_VARIANT, input.dtype());
output->unaligned_flat<Variant>() = input.unaligned_flat<Variant>();
}
}
Status Concat(const absl::Span<const Tensor>& tensors, Tensor* result) {
if (tensors.empty()) {
return errors::InvalidArgument("Cannot concatenate zero tensors");
}
int64_t total_dim0_size = 0;
for (const Tensor& tensor : tensors) {
if (tensor.dims() == 0) {
return errors::InvalidArgument(
"Cannot concatenate a zero-dimensional tensor");
}
total_dim0_size += tensor.dim_size(0);
}
TensorShape shape = tensors[0].shape();
shape.set_dim(0, total_dim0_size);
const DataType dtype = tensors[0].dtype();
for (int i = 1; i < tensors.size(); ++i) {
if (tensors[i].dtype() != dtype) {
return errors::InvalidArgument(
"Cannot concatenate tensors that have different data types.", " Got ",
DataTypeString(dtype), " and ", DataTypeString(tensors[i].dtype()),
".");
}
}
*result = Tensor(dtype, shape);
StringPiece to_data = result->tensor_data();
if (DataTypeCanUseMemcpy(dtype)) {
int64_t offset = 0;
for (const Tensor& tensor : tensors) {
StringPiece from_data = tensor.tensor_data();
CHECK_LE(offset + from_data.size(), to_data.size());
memcpy(const_cast<char*>(to_data.data()) + offset, from_data.data(),
from_data.size());
offset += from_data.size();
}
} else {
if (dtype != DT_STRING) {
return errors::Internal("Unexpected data type");
}
tstring* to_strings =
reinterpret_cast<tstring*>(const_cast<char*>(to_data.data()));
int64_t offset = 0;
for (const Tensor& tensor : tensors) {
auto from_strings = tensor.flat<tstring>();
CHECK_LE(offset + tensor.NumElements(), result->NumElements());
for (int i = 0; i < tensor.NumElements(); ++i) {
to_strings[offset + i] = from_strings(i);
}
offset += tensor.NumElements();
}
}
return absl::OkStatus();
}
Status Split(const Tensor& tensor, const absl::Span<const int64_t>& sizes,
std::vector<Tensor>* result) {
if (tensor.dims() == 0) {
return errors::InvalidArgument("Cannot split a zero-dimensional tensor");
}
int64_t total_size = 0;
for (int64_t size : sizes) {
total_size += size;
}
if (total_size != tensor.dim_size(0)) {
return errors::InvalidArgument(
"The values in 'sizes' do not sum to the zeroth-dimension size of "
"'tensor'");
}
StringPiece from_data = tensor.tensor_data();
if (DataTypeCanUseMemcpy(tensor.dtype())) {
int64_t offset = 0;
for (int64_t size : sizes) {
TensorShape shape = tensor.shape();
shape.set_dim(0, size);
result->emplace_back(tensor.dtype(), shape);
Tensor* split = &(*result)[result->size() - 1];
StringPiece to_data = split->tensor_data();
CHECK_LE(offset + to_data.size(), from_data.size());
memcpy(const_cast<char*>(to_data.data()), from_data.data() + offset,
to_data.size());
offset += to_data.size();
}
} else {
if (tensor.dtype() != DT_STRING) {
return errors::Internal("Unexpected data type");
}
auto from_strings = tensor.flat<tstring>();
int64_t offset = 0;
for (int64_t size : sizes) {
TensorShape shape = tensor.shape();
shape.set_dim(0, size);
result->emplace_back(tensor.dtype(), shape);
Tensor& split = (*result)[result->size() - 1];
tstring* to_strings = reinterpret_cast<tstring*>(
const_cast<char*>(split.tensor_data().data()));
CHECK_LE(offset + split.NumElements(), tensor.NumElements());
for (int i = 0; i < split.NumElements(); ++i) {
to_strings[i] = from_strings(offset + i);
}
offset += split.NumElements();
}
}
return absl::OkStatus();
}
namespace internal {
void SetTensorProtoShape(const absl::Span<const size_t> shape,
TensorShapeProto* shape_proto) {
for (auto dim : shape) {
shape_proto->mutable_dim()->Add()->set_size(dim);
}
}
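// Tries to shrink a proto stored as packed tensor_content: trailing bytes
// that merely repeat the last value are dropped (a pure zero splat clears the
// content entirely), and the remainder is re-encoded as a repeated field when
// doing so saves at least min_compression_ratio.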
template <typename T>
bool CompressTensorContent(float min_compression_ratio,
const TensorShape& shape, TensorProto* tensor) {
using TypeHelper = internal::TensorProtoHelper<T>;
using FieldType = typename internal::TensorProtoHelper<T>::FieldType;
const int64_t num_tensor_values = shape.num_elements();
const int64_t num_bytes = tensor->tensor_content().size();
const int64_t num_raw_values = num_bytes / sizeof(T);
if (num_raw_values != num_tensor_values) {
return false;
}
int64_t last_offset = num_bytes - 1;
int64_t prev_offset = last_offset - sizeof(T);
while (prev_offset >= 0) {
if (tensor->tensor_content()[prev_offset] !=
tensor->tensor_content()[last_offset]) {
break;
}
--last_offset;
--prev_offset;
}
if (prev_offset == -1) {
T splat_value;
port::CopySubrangeToArray(tensor->tensor_content(), 0, sizeof(T),
reinterpret_cast<char*>(&splat_value));
if (splat_value == T(0)) {
tensor->clear_tensor_content();
return true;
}
}
const int64_t new_num_values = last_offset / sizeof(T) + 1;
if (new_num_values * (is_complex<T>::value ? 2 : 1) * sizeof(FieldType) >
static_cast<int64_t>(num_bytes / min_compression_ratio)) {
return false;
}
if constexpr (sizeof(FieldType) == sizeof(T)) {
FieldType* dst_ptr =
TypeHelper::AppendUninitialized(new_num_values, tensor);
port::CopySubrangeToArray(tensor->tensor_content(), 0,
new_num_values * sizeof(T),
reinterpret_cast<char*>(dst_ptr));
tensor->clear_tensor_content();
} else if constexpr (sizeof(T) > 1) {
gtl::InlinedVector<T, 64> tmp;
if (new_num_values >= tmp.max_size()) return false;
tmp.resize(new_num_values);
port::CopySubrangeToArray(tensor->tensor_content(), 0,
new_num_values * sizeof(T),
reinterpret_cast<char*>(tmp.data()));
tensor->clear_tensor_content();
TypeHelper::AddValues(tmp.begin(), tmp.end(), tensor);
} else {
for (int64_t i = 0; i < new_num_values; ++i) {
char c = tensor->tensor_content()[i];
TypeHelper::AddValue(static_cast<T>(c), tensor);
}
tensor->clear_tensor_content();
}
return true;
}
template <typename T>
inline bool PackedValuesNotEqual(T a, T b) {
return a != b;
}
template <>
inline bool PackedValuesNotEqual(float a, float b) {
return reinterpret_cast<int32_t&>(a) != reinterpret_cast<int32_t&>(b);
}
template <>
inline bool PackedValuesNotEqual(double a, double b) {
return reinterpret_cast<int64_t&>(a) != reinterpret_cast<int64_t&>(b);
}
template <typename RealType>
inline bool PackedValuesNotEqual(const std::complex<RealType>& a,
const std::complex<RealType>& b) {
return PackedValuesNotEqual(a.real(), b.real()) ||
PackedValuesNotEqual(a.imag(), b.imag());
}
template <typename T,
typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
static bool IsNegativeZero(T value) {
return false;
}
template <typename T,
typename std::enable_if<!std::is_integral<T>::value>::type* = nullptr>
static bool IsNegativeZero(T value) {
return value == T(0) && std::signbit(value);
}
template <typename T>
static bool IsNegativeZero(std::complex<T> value) {
return IsNegativeZero(value.real()) || IsNegativeZero(value.imag());
}
static bool IsNegativeZero(Eigen::QUInt8 value) { return false; }
static bool IsNegativeZero(Eigen::QInt8 value) { return false; }
static bool IsNegativeZero(Eigen::QUInt16 value) { return false; }
static bool IsNegativeZero(Eigen::QInt16 value) { return false; }
static bool IsNegativeZero(Eigen::QInt32 value) { return false; }
static bool IsNegativeZero(Eigen::half value) {
return IsNegativeZero<float>(static_cast<float>(value));
}
static bool IsNegativeZero(Eigen::bfloat16 value) {
return IsNegativeZero<float>(static_cast<float>(value));
}
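// Counterpart of CompressTensorContent for protos already stored as a
// repeated field: truncates trailing repeats (an all-zero tensor truncates to
// nothing), or re-packs the values into tensor_content when the packed byte
// form is smaller.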
template <typename T>
bool CompressRepeatedField(float min_compression_ratio,
const TensorShape& shape, TensorProto* tensor) {
using TypeHelper = internal::TensorProtoHelper<T>;
using FieldType = typename internal::TensorProtoHelper<T>::FieldType;
const int64_t num_tensor_values = shape.num_elements();
const int64_t num_proto_values = TypeHelper::NumValues(*tensor);
if (num_proto_values == 0) return false;
const T last_value = TypeHelper::GetValue(num_proto_values - 1, *tensor);
int64_t last_index = 0;
for (int64_t i = num_proto_values - 2; i >= 0 && last_index == 0; --i) {
const T cur_value = TypeHelper::GetValue(i, *tensor);
if (PackedValuesNotEqual(cur_value, last_value)) {
last_index = i + 1;
}
}
if (last_index == 0 && last_value == T(0) && !IsNegativeZero(last_value)) {
TypeHelper::Truncate(0, tensor);
return true;
}
const int64_t num_truncated_proto_values = last_index + 1;
const int64_t num_bytes_as_field =
num_truncated_proto_values * sizeof(FieldType);
const int64_t num_bytes_as_tensor_content = num_tensor_values * sizeof(T);
const int64_t num_bytes_before = num_proto_values * sizeof(FieldType);
if (std::min(num_bytes_as_field, num_bytes_as_tensor_content) >
static_cast<int64_t>(num_bytes_before / min_compression_ratio)) {
return false;
}
if (num_bytes_as_field <= num_bytes_as_tensor_content) {
TypeHelper::Truncate(num_truncated_proto_values, tensor);
} else {
gtl::InlinedVector<T, 64> tmp;
if (num_proto_values == 1) {
tmp.resize(num_tensor_values, last_value);
} else {
tmp.resize(num_tensor_values, T(0));
TypeHelper::CopyValues(tmp.begin(), *tensor);
}
TypeHelper::Truncate(0, tensor);
port::CopyFromArray(tensor->mutable_tensor_content(),
reinterpret_cast<const char*>(tmp.data()),
num_bytes_as_tensor_content);
}
return true;
}
template <typename T>
bool CompressTensorProtoInPlaceImpl(int64_t min_num_elements,
float min_compression_ratio,
TensorProto* tensor) {
const TensorShape shape(tensor->tensor_shape());
const int64_t num_tensor_values = shape.num_elements();
if (num_tensor_values < min_num_elements) {
return false;
}
if (tensor->tensor_content().empty()) {
return CompressRepeatedField<T>(min_compression_ratio, shape, tensor);
} else {
return CompressTensorContent<T>(min_compression_ratio, shape, tensor);
}
}
}
#define HANDLE_COMPRESS_CASE(TF_TYPE) \
case TF_TYPE: \
return internal::CompressTensorProtoInPlaceImpl< \
EnumToDataType<TF_TYPE>::Type>(min_num_elements, \
min_compression_ratio, tensor); \
break
bool CompressTensorProtoInPlace(int64_t min_num_elements,
float min_compression_ratio,
TensorProto* tensor) {
switch (tensor->dtype()) {
HANDLE_COMPRESS_CASE(DT_FLOAT);
HANDLE_COMPRESS_CASE(DT_DOUBLE);
HANDLE_COMPRESS_CASE(DT_COMPLEX64);
HANDLE_COMPRESS_CASE(DT_COMPLEX128);
HANDLE_COMPRESS_CASE(DT_UINT8);
HANDLE_COMPRESS_CASE(DT_INT8);
HANDLE_COMPRESS_CASE(DT_UINT16);
HANDLE_COMPRESS_CASE(DT_INT16);
HANDLE_COMPRESS_CASE(DT_UINT32);
HANDLE_COMPRESS_CASE(DT_INT32);
HANDLE_COMPRESS_CASE(DT_UINT64);
HANDLE_COMPRESS_CASE(DT_INT64);
HANDLE_COMPRESS_CASE(DT_BOOL);
HANDLE_COMPRESS_CASE(DT_QUINT8);
HANDLE_COMPRESS_CASE(DT_QINT8);
HANDLE_COMPRESS_CASE(DT_QUINT16);
HANDLE_COMPRESS_CASE(DT_QINT16);
HANDLE_COMPRESS_CASE(DT_QINT32);
HANDLE_COMPRESS_CASE(DT_HALF);
HANDLE_COMPRESS_CASE(DT_BFLOAT16);
default:
return false;
}
}
#undef HANDLE_COMPRESS_CASE
Status MakeShape(const Tensor& shape, TensorShape* out) {
if (!TensorShapeUtils::IsVector(shape.shape())) {
return errors::InvalidArgument(
"shape must be a vector of {int32,int64}, got shape ",
shape.shape().DebugString());
}
if (shape.dtype() == DataType::DT_INT32) {
auto vec = shape.flat<int32>();
return TensorShapeUtils::MakeShape(vec.data(), vec.size(), out);
} else if (shape.dtype() == DataType::DT_INT64) {
auto vec = shape.flat<int64_t>();
return TensorShapeUtils::MakeShape(vec.data(), vec.size(), out);
} else {
return errors::InvalidArgument("shape must be a vector of {int32,int64}.");
}
}
}
} | #include "tensorflow/core/framework/tensor_util.h"
#include <vector>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/framework/variant_encode_decode.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(TensorUtil, DeepCopy0d) {
Tensor x(DT_FLOAT, TensorShape({}));
x.scalar<float>()() = 10.0;
Tensor y = tensor::DeepCopy(x);
y.scalar<float>()() = 20.0;
EXPECT_EQ(10.0, x.scalar<float>()());
x.scalar<float>()() = 30.0;
EXPECT_EQ(20.0, y.scalar<float>()());
Tensor z = tensor::DeepCopy(y);
y.scalar<float>()() = 40.0;
EXPECT_EQ(20.0, z.scalar<float>()());
EXPECT_EQ(30.0, x.scalar<float>()());
EXPECT_EQ(40.0, y.scalar<float>()());
EXPECT_EQ(TensorShape({}), x.shape());
EXPECT_EQ(TensorShape({}), y.shape());
EXPECT_EQ(TensorShape({}), z.shape());
EXPECT_EQ(DT_FLOAT, x.dtype());
EXPECT_EQ(DT_FLOAT, y.dtype());
EXPECT_EQ(DT_FLOAT, z.dtype());
}
TEST(TensorUtil, DeepCopyZeroElements) {
Tensor x;
Tensor y = tensor::DeepCopy(x);
EXPECT_EQ(TensorShape({0}), y.shape());
EXPECT_EQ(DT_FLOAT, y.dtype());
EXPECT_EQ(0, y.NumElements());
}
TEST(TensorUtil, DeepCopy) {
Tensor x(DT_FLOAT, TensorShape({1}));
x.flat<float>()(0) = 10.0;
Tensor y = tensor::DeepCopy(x);
y.flat<float>()(0) = 20.0;
EXPECT_EQ(10.0, x.flat<float>()(0));
x.flat<float>()(0) = 30.0;
EXPECT_EQ(20.0, y.flat<float>()(0));
Tensor z = tensor::DeepCopy(y);
y.flat<float>()(0) = 40.0;
EXPECT_EQ(20.0, z.flat<float>()(0));
EXPECT_EQ(30.0, x.flat<float>()(0));
EXPECT_EQ(40.0, y.flat<float>()(0));
EXPECT_EQ(TensorShape({1}), x.shape());
EXPECT_EQ(TensorShape({1}), y.shape());
EXPECT_EQ(TensorShape({1}), z.shape());
EXPECT_EQ(DT_FLOAT, x.dtype());
EXPECT_EQ(DT_FLOAT, y.dtype());
EXPECT_EQ(DT_FLOAT, z.dtype());
Tensor str1(DT_STRING, TensorShape({2}));
str1.flat<tstring>()(0) = "foo1";
str1.flat<tstring>()(1) = "foo2";
Tensor str2 = tensor::DeepCopy(str1);
str2.flat<tstring>()(0) = "bar1";
str2.flat<tstring>()(1) = "bar2";
EXPECT_NE(str2.flat<tstring>()(0), str1.flat<tstring>()(0));
}
TEST(TensorUtil, DeepCopySlice) {
Tensor x(DT_INT32, TensorShape({10}));
x.flat<int32>().setConstant(1);
Tensor y = x.Slice(2, 6);
Tensor z = tensor::DeepCopy(y);
x.flat<int32>().setConstant(2);
EXPECT_EQ(TensorShape({10}), x.shape());
EXPECT_EQ(TensorShape({4}), y.shape());
EXPECT_EQ(TensorShape({4}), z.shape());
EXPECT_EQ(DT_INT32, x.dtype());
EXPECT_EQ(DT_INT32, y.dtype());
EXPECT_EQ(DT_INT32, z.dtype());
for (int i = 0; i < 10; ++i) {
EXPECT_EQ(2, x.flat<int32>()(i));
}
for (int i = 0; i < 4; ++i) {
EXPECT_EQ(2, y.unaligned_flat<int32>()(i));
EXPECT_EQ(1, z.flat<int32>()(i));
}
}
TEST(TensorUtil, DeepCopySliceString) {
Tensor x(DT_STRING, TensorShape({10}));
x.flat<tstring>().setConstant("hello");
Tensor y = x.Slice(3, 7);
Tensor z = tensor::DeepCopy(y);
x.flat<tstring>().setConstant("goodbye");
EXPECT_EQ(TensorShape({10}), x.shape());
EXPECT_EQ(TensorShape({4}), y.shape());
EXPECT_EQ(TensorShape({4}), z.shape());
EXPECT_EQ(DT_STRING, x.dtype());
EXPECT_EQ(DT_STRING, y.dtype());
EXPECT_EQ(DT_STRING, z.dtype());
for (int i = 0; i < 10; ++i) {
EXPECT_EQ("goodbye", x.flat<tstring>()(i));
}
for (int i = 0; i < 4; ++i) {
EXPECT_EQ("goodbye", y.unaligned_flat<tstring>()(i));
EXPECT_EQ("hello", z.flat<tstring>()(i));
}
}
TEST(TensorUtil, DeepCopySliceVariant) {
Tensor x(DT_VARIANT, TensorShape({10}));
x.flat<Variant>().setConstant(Tensor(42.0f));
Tensor y = x.Slice(3, 7);
Tensor z = tensor::DeepCopy(y);
x.flat<Variant>().setConstant(Tensor("foo"));
EXPECT_EQ(TensorShape({10}), x.shape());
EXPECT_EQ(TensorShape({4}), y.shape());
EXPECT_EQ(TensorShape({4}), z.shape());
EXPECT_EQ(DT_VARIANT, x.dtype());
EXPECT_EQ(DT_VARIANT, y.dtype());
EXPECT_EQ(DT_VARIANT, z.dtype());
for (int i = 0; i < 10; ++i) {
EXPECT_EQ("foo", x.flat<Variant>()(i).get<Tensor>()->scalar<tstring>()());
}
for (int i = 0; i < 4; ++i) {
EXPECT_EQ(
"foo",
y.unaligned_flat<Variant>()(i).get<Tensor>()->scalar<tstring>()());
EXPECT_EQ(42.0, z.flat<Variant>()(i).get<Tensor>()->scalar<float>()());
}
}
TEST(TensorUtil, Concat) {
std::vector<int64_t> sizes = {1, 4, 5};
std::vector<Tensor> to_concat;
int64_t total_size = 0;
int offset = 0;
for (size_t entry = 0; entry < sizes.size(); ++entry) {
const int64_t size = sizes[entry];
Tensor tensor(DT_INT32, TensorShape({size, 2}));
for (int i = offset; i < offset + size; ++i) {
for (int j = 0; j < 2; ++j) {
tensor.matrix<int32>()(i - offset, j) = 2 * i + j;
}
}
to_concat.push_back(tensor);
total_size += size;
offset += size;
}
Tensor concated;
TF_ASSERT_OK(tensor::Concat(to_concat, &concated));
ASSERT_EQ(TensorShape({total_size, 2}), concated.shape());
for (int i = 0; i < total_size; ++i) {
for (int j = 0; j < 2; ++j) {
EXPECT_EQ(2 * i + j, concated.matrix<int32>()(i, j));
}
}
}
TEST(TensorUtil, Split) {
Tensor to_split(DT_INT64, TensorShape({10, 2}));
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < 2; ++j) {
to_split.matrix<int64_t>()(i, j) = 2 * i + j;
}
}
std::vector<int64_t> sizes = {1, 4, 5};
std::vector<Tensor> splits;
TF_ASSERT_OK(tensor::Split(to_split, sizes, &splits));
ASSERT_EQ(sizes.size(), splits.size());
int offset = 0;
for (size_t entry = 0; entry < splits.size(); ++entry) {
const int64_t size = sizes[entry];
const Tensor& split = splits[entry];
ASSERT_EQ(TensorShape({size, 2}), split.shape());
for (int i = offset; i < offset + size; ++i) {
for (int j = 0; j < 2; ++j) {
EXPECT_EQ(2 * i + j, split.matrix<int64_t>()(i - offset, j));
}
}
offset += size;
}
}
TEST(TensorUtil, ConcatSplitStrings) {
Tensor x(DT_STRING, TensorShape({4, 3}));
for (int i = 0; i < 4 * 3; ++i) {
x.flat<tstring>()(i) = strings::StrCat("foo_", i);
}
std::vector<Tensor> split;
TF_ASSERT_OK(tensor::Split(x, {2, 1, 1}, &split));
Tensor x_round_tripped;
TF_ASSERT_OK(tensor::Concat(split, &x_round_tripped));
ASSERT_EQ(x.shape(), x_round_tripped.shape());
for (int i = 0; i < 4 * 3; ++i) {
EXPECT_EQ(x.flat<tstring>()(i), x_round_tripped.flat<tstring>()(i));
}
for (int i = 0; i < 4 * 3; ++i) {
x_round_tripped.flat<tstring>()(i) = strings::StrCat("bar_", i);
}
for (int i = 0; i < 4 * 3; ++i) {
EXPECT_NE(x.flat<tstring>()(i), x_round_tripped.flat<tstring>()(i));
}
}
TEST(TensorProtoUtil, CreateTensorProtoSpan_string) {
string s[2] = {"a", "b"};
std::vector<size_t> shape{1, 2};
auto proto = tensor::CreateTensorProtoSpan<string>(s, shape);
TensorProto expected_tensor_proto;
expected_tensor_proto.set_dtype(DT_STRING);
expected_tensor_proto.mutable_tensor_shape()->add_dim()->set_size(1);
expected_tensor_proto.mutable_tensor_shape()->add_dim()->set_size(2);
expected_tensor_proto.add_string_val("a");
expected_tensor_proto.add_string_val("b");
EXPECT_EQ(proto.DebugString(), expected_tensor_proto.DebugString());
}
TEST(TensorProtoUtil, CreateTensorProtoSpan_int32) {
int32 s[2] = {123, 456};
std::vector<size_t> shape{1, 2};
auto proto = tensor::CreateTensorProtoSpan<int32>(s, shape);
TensorProto expected_tensor_proto;
expected_tensor_proto.set_dtype(DT_INT32);
expected_tensor_proto.mutable_tensor_shape()->add_dim()->set_size(1);
expected_tensor_proto.mutable_tensor_shape()->add_dim()->set_size(2);
expected_tensor_proto.add_int_val(123);
expected_tensor_proto.add_int_val(456);
EXPECT_EQ(proto.DebugString(), expected_tensor_proto.DebugString());
}
TEST(TensorProtoUtil, CreatesStringTensorProto) {
std::vector<string> values{"a", "b", "c"};
std::vector<size_t> shape{1, 3};
auto proto = tensor::CreateTensorProto(values, shape);
TensorProto expected_tensor_proto;
protobuf::TextFormat::ParseFromString(
"dtype: DT_STRING\n"
"tensor_shape {\n"
" dim {\n"
" size: 1\n"
" }\n"
" dim {\n"
" size: 3\n"
" }\n"
"}\n"
"string_val: \"a\"\n"
"string_val: \"b\"\n"
"string_val: \"c\"\n",
&expected_tensor_proto);
EXPECT_EQ(proto.DebugString(), expected_tensor_proto.DebugString());
}
TEST(TensorProtoUtil, CreatesInt32TensorProto) {
std::vector<int32> values{1, 2};
std::vector<size_t> shape{2};
auto proto = tensor::CreateTensorProto(values, shape);
TensorProto expected_tensor_proto;
protobuf::TextFormat::ParseFromString(
"dtype: DT_INT32\n"
"tensor_shape {\n"
" dim {\n"
" size: 2\n"
" }\n"
"}\n"
"int_val: 1\n"
"int_val: 2\n",
&expected_tensor_proto);
EXPECT_EQ(proto.DebugString(), expected_tensor_proto.DebugString());
}
TEST(TensorProtoUtil, CreatesInt64TensorProto) {
std::vector<int64_t> values{1, 2};
std::vector<size_t> shape{2};
auto proto = tensor::CreateTensorProto(values, shape);
TensorProto expected_tensor_proto;
protobuf::TextFormat::ParseFromString(
"dtype: DT_INT64\n"
"tensor_shape {\n"
" dim {\n"
" size: 2\n"
" }\n"
"}\n"
"int64_val: 1\n"
"int64_val: 2\n",
&expected_tensor_proto);
EXPECT_EQ(proto.DebugString(), expected_tensor_proto.DebugString());
}
TEST(TensorProtoUtil, CreatesUInt32TensorProto) {
std::vector<uint32> values{1, 2};
std::vector<size_t> shape{2};
auto proto = tensor::CreateTensorProto(values, shape);
TensorProto expected_tensor_proto;
protobuf::TextFormat::ParseFromString(
"dtype: DT_UINT32\n"
"tensor_shape {\n"
" dim {\n"
" size: 2\n"
" }\n"
"}\n"
"uint32_val: 1\n"
"uint32_val: 2\n",
&expected_tensor_proto);
EXPECT_EQ(proto.DebugString(), expected_tensor_proto.DebugString());
}
TEST(TensorProtoUtil, CreatesUInt64TensorProto) {
std::vector<uint64> values{1, 2};
std::vector<size_t> shape{2};
auto proto = tensor::CreateTensorProto(values, shape);
TensorProto expected_tensor_proto;
protobuf::TextFormat::ParseFromString(
"dtype: DT_UINT64\n"
"tensor_shape {\n"
" dim {\n"
" size: 2\n"
" }\n"
"}\n"
"uint64_val: 1\n"
"uint64_val: 2\n",
&expected_tensor_proto);
EXPECT_EQ(proto.DebugString(), expected_tensor_proto.DebugString());
}
TEST(TensorProtoUtil, CreatesFloatTensorProto) {
std::vector<float> values{1.1, 2.2};
std::vector<size_t> shape{2};
auto proto = tensor::CreateTensorProto(values, shape);
TensorProto expected_tensor_proto;
protobuf::TextFormat::ParseFromString(
"dtype: DT_FLOAT\n"
"tensor_shape {\n"
" dim {\n"
" size: 2\n"
" }\n"
"}\n"
"float_val: 1.1\n"
"float_val: 2.2\n",
&expected_tensor_proto);
EXPECT_EQ(proto.DebugString(), expected_tensor_proto.DebugString());
}
TEST(TensorProtoUtil, CreatesDoubleTensorProto) {
std::vector<double> values{1.1, 2.2};
std::vector<size_t> shape{2};
auto proto = tensor::CreateTensorProto(values, shape);
TensorProto expected_tensor_proto;
protobuf::TextFormat::ParseFromString(
"dtype: DT_DOUBLE\n"
"tensor_shape {\n"
" dim {\n"
" size: 2\n"
" }\n"
"}\n"
"double_val: 1.1\n"
"double_val: 2.2\n",
&expected_tensor_proto);
EXPECT_EQ(proto.DebugString(), expected_tensor_proto.DebugString());
}
TEST(TensorProtoUtil, CreatesBoolTensorProto) {
std::vector<bool> values{true, false};
std::vector<size_t> shape{2};
auto proto = tensor::CreateTensorProto(values, shape);
TensorProto expected_tensor_proto;
protobuf::TextFormat::ParseFromString(
"dtype: DT_BOOL\n"
"tensor_shape {\n"
" dim {\n"
" size: 2\n"
" }\n"
"}\n"
"bool_val: true\n"
"bool_val: false\n",
&expected_tensor_proto);
EXPECT_EQ(proto.DebugString(), expected_tensor_proto.DebugString());
}
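// The tests below exercise tensor::CompressTensorProtoInPlace. The
// single-argument overload appears to use default thresholds (a minimum of
// 64 elements, matching kLength below): protos with fewer values are left
// untouched, while larger protos whose values are all equal compress down
// to zero explicitly stored values.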
TEST(TensorProtoUtil, CompressTensorProtoInPlaceTooSmall) {
const int kLength = 63;
TensorProto tensor_proto =
tensor::CreateTensorProto(std::vector<float>(kLength), {kLength});
EXPECT_FALSE(tensor::CompressTensorProtoInPlace(&tensor_proto));
tensor_proto =
tensor::CreateTensorProto(std::vector<int>(kLength), {kLength});
EXPECT_FALSE(tensor::CompressTensorProtoInPlace(&tensor_proto));
tensor_proto =
tensor::CreateTensorProto(std::vector<uint8>(kLength), {kLength});
EXPECT_FALSE(tensor::CompressTensorProtoInPlace(&tensor_proto));
tensor_proto =
tensor::CreateTensorProto(std::vector<bool>(kLength), {kLength});
EXPECT_FALSE(tensor::CompressTensorProtoInPlace(&tensor_proto));
tensor_proto =
tensor::CreateTensorProto(std::vector<Eigen::half>(kLength), {kLength});
EXPECT_FALSE(tensor::CompressTensorProtoInPlace(&tensor_proto));
tensor_proto = tensor::CreateTensorProto(
std::vector<std::complex<float>>(kLength), {kLength});
EXPECT_FALSE(tensor::CompressTensorProtoInPlace(&tensor_proto));
}
TEST(TensorProtoUtil, CompressTensorProtoInPlaceAllEqual) {
const int kLength = 64;
TensorProto tensor_proto =
tensor::CreateTensorProto(std::vector<float>(kLength), {kLength});
EXPECT_TRUE(tensor::CompressTensorProtoInPlace(&tensor_proto));
EXPECT_EQ(tensor::internal::TensorProtoHelper<float>::NumValues(tensor_proto),
0);
tensor_proto =
tensor::CreateTensorProto(std::vector<int>(kLength), {kLength});
EXPECT_TRUE(tensor::CompressTensorProtoInPlace(&tensor_proto));
EXPECT_EQ(tensor::internal::TensorProtoHelper<int>::NumValues(tensor_proto),
0);
tensor_proto =
tensor::CreateTensorProto(std::vector<uint8>(kLength), {kLength});
EXPECT_TRUE(tensor::CompressTensorProtoInPlace(&tensor_proto));
EXPECT_EQ(tensor::internal::TensorProtoHelper<uint8>::NumValues(tensor_proto),
0);
tensor_proto =
tensor::CreateTensorProto(std::vector<bool>(kLength), {kLength});
EXPECT_TRUE(tensor::CompressTensorProtoInPlace(&tensor_proto));
EXPECT_EQ(tensor::internal::TensorProtoHelper<bool>::NumValues(tensor_proto),
0);
tensor_proto =
tensor::CreateTensorProto(std::vector<Eigen::half>(kLength), {kLength});
EXPECT_TRUE(tensor::CompressTensorProtoInPlace(&tensor_proto));
EXPECT_EQ(
tensor::internal::TensorProtoHelper<Eigen::half>::NumValues(tensor_proto),
0);
tensor_proto = tensor::CreateTensorProto(
std::vector<std::complex<float>>(kLength), {kLength});
EXPECT_TRUE(tensor::CompressTensorProtoInPlace(&tensor_proto));
EXPECT_EQ(tensor::internal::TensorProtoHelper<std::complex<float>>::NumValues(
tensor_proto),
0);
}
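// Fills `v` with `size` values whose last `tail_length` entries are
// default-initialized (zero), giving the proto a constant tail that the
// compression logic can strip down to a single representative value.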
template <typename T>
void VectorWithConstantTail(int size, int tail_length, std::vector<T>* v) {
CHECK_LE(tail_length, size);
v->clear();
for (int i = 0; i < size; ++i) {
T vi = (i >= size - tail_length) ? T() : T(i);
v->push_back(vi);
}
}
template <>
void VectorWithConstantTail(int size, int tail_length,
std::vector<std::complex<float>>* v) {
CHECK_LE(tail_length, size);
v->clear();
for (int i = 0; i < size; ++i) {
std::complex<float> vi(
0.0f, (i >= (size - tail_length)) ? 0.f : static_cast<float>(i));
v->push_back(vi);
}
}
template <typename T>
TensorProto CreateAsProtoTensorContent(int size, int tail_length) {
std::vector<T> values;
VectorWithConstantTail<T>(size, tail_length, &values);
Tensor tensor(DataTypeToEnum<T>::value, TensorShape({size}));
std::copy(values.begin(), values.end(), tensor.flat<T>().data());
TensorProto tensor_proto;
tensor.AsProtoTensorContent(&tensor_proto);
return tensor_proto;
}
template <typename T>
TensorProto CreateAsProtoField(int size, int tail_length) {
std::vector<T> values;
VectorWithConstantTail<T>(size, tail_length, &values);
Tensor tensor(DataTypeToEnum<T>::value, TensorShape({size}));
std::copy(values.begin(), values.end(), tensor.flat<T>().data());
TensorProto tensor_proto;
tensor.AsProtoField(&tensor_proto);
return tensor_proto;
}
template <typename T>
void CompareTensorValues(const TensorProto& x, const TensorProto& y) {
Tensor x_t;
EXPECT_TRUE(x_t.FromProto(x));
Tensor y_t;
EXPECT_TRUE(y_t.FromProto(y));
test::ExpectTensorEqual<T>(x_t, y_t);
}
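// Round-trips a tensor of `length` values ending in `tail_length` zeros
// through CompressTensorProtoInPlace and checks both the compression
// decision and that no values are lost. For example, with T = float,
// length = 64 and tail_length = 63, the field encoding needs only
// length - tail_length + 1 = 2 of the 64 values (8 bytes instead of 256
// bytes of tensor_content), comfortably beating kMinCompressionRatio = 2.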
template <typename T>
void ConstantTailTest(int64_t length, int64_t tail_length, bool as_field) {
using TensorProtoHelper = tensor::internal::TensorProtoHelper<T>;
using FieldType = typename TensorProtoHelper::FieldType;
const float kMinCompressionRatio = 2.0;
const int64_t kMinSize = 64;
TensorProto tensor_proto =
as_field ? CreateAsProtoField<T>(length, tail_length)
: CreateAsProtoTensorContent<T>(length, tail_length);
TensorProto original_tensor_proto = tensor_proto;
int64_t original_size =
length * (as_field ? (is_complex<T>::value ? 2 : 1) * sizeof(FieldType)
: sizeof(T));
int64_t size_as_tensor_content = length * sizeof(T);
int64_t size_as_field = std::min(length, (length - tail_length + 1)) *
(is_complex<T>::value ? 2 : 1) * sizeof(FieldType);
bool will_compress =
std::min(size_as_tensor_content, size_as_field) <=
static_cast<int64_t>(original_size / kMinCompressionRatio);
EXPECT_EQ(tensor::CompressTensorProtoInPlace(kMinSize, kMinCompressionRatio,
&tensor_proto),
will_compress);
if (will_compress) {
if (size_as_tensor_content < size_as_field) {
EXPECT_EQ(TensorProtoHelper::NumValues(tensor_proto), 0);
EXPECT_FALSE(tensor_proto.tensor_content().empty());
} else {
EXPECT_LE(TensorProtoHelper::NumValues(tensor_proto),
(length - tail_length + 1));
EXPECT_TRUE(tensor_proto.tensor_content().empty());
}
}
CompareTensorValues<T>(tensor_proto, original_tensor_proto);
}
TEST(TensorProtoUtil, CompressTensorProtoConstantTail) {
const int kLength = 64;
for (bool as_field : {true, false}) {
for (int tail_length : {0, 1, 2, 32, 33, 63, 64}) {
ConstantTailTest<float>(kLength, tail_length, as_field);
ConstantTailTest<double>(kLength, tail_length, as_field);
ConstantTailTest<complex64>(kLength, tail_length, as_field);
ConstantTailTest<complex128>(kLength, tail_length, as_field);
ConstantTailTest<int32>(kLength, tail_length, as_field);
ConstantTailTest<uint32>(kLength, tail_length, as_field);
ConstantTailTest<int64_t>(kLength, tail_length, as_field);
ConstantTailTest<uint64>(kLength, tail_length, as_field);
ConstantTailTest<int8>(kLength, tail_length, as_field);
ConstantTailTest<uint8>(kLength, tail_length, as_field);
ConstantTailTest<int16>(kLength, tail_length, as_field);
ConstantTailTest<uint16>(kLength, tail_length, as_field);
ConstantTailTest<Eigen::half>(kLength, tail_length, as_field);
ConstantTailTest<bfloat16>(kLength, tail_length, as_field);
}
}
}
TEST(TensorProtoUtil, CompressTensorProtoNegativeZero) {
TensorProto tensor_proto;
{
Tensor tensor(-0.0);
tensor.AsProtoField(&tensor_proto);
ASSERT_EQ(tensor_proto.double_val(0), -0.0);
ASSERT_TRUE(std::signbit(tensor_proto.double_val(0)));
tensor::CompressTensorProtoInPlace(1, 1.0, &tensor_proto);
ASSERT_EQ(tensor_proto.double_val(0), -0.0);
ASSERT_TRUE(std::signbit(tensor_proto.double_val(0)));
}
{
Tensor tensor(-0.0f);
tensor.AsProtoField(&tensor_proto);
ASSERT_EQ(tensor_proto.float_val(0), -0.0);
ASSERT_TRUE(std::signbit(tensor_proto.float_val(0)));
tensor::CompressTensorProtoInPlace(1, 1.0, &tensor_proto);
ASSERT_EQ(tensor_proto.float_val(0), -0.0);
ASSERT_TRUE(std::signbit(tensor_proto.float_val(0)));
}
{
Tensor tensor(Eigen::half(-0.0f));
tensor.AsProtoField(&tensor_proto);
ASSERT_EQ(tensor_proto.half_val(0), 0x8000);
tensor::CompressTensorProtoInPlace(1, 1.0, &tensor_proto);
ASSERT_TRUE(tensor.FromProto(tensor_proto));
ASSERT_EQ(tensor.scalar<Eigen::half>()(), static_cast<Eigen::half>(0.0f));
ASSERT_TRUE(
std::signbit(static_cast<float>(tensor.scalar<Eigen::half>()())));
}
{
Tensor tensor(std::complex<double>(-0.0, -0.0));
tensor.AsProtoField(&tensor_proto);
ASSERT_EQ(tensor_proto.dcomplex_val(0), -0.0);
ASSERT_EQ(tensor_proto.dcomplex_val(1), -0.0);
tensor::CompressTensorProtoInPlace(1, 1.0, &tensor_proto);
ASSERT_TRUE(tensor.FromProto(tensor_proto));
auto value = tensor.scalar<std::complex<double>>()();
ASSERT_EQ(value.real(), -0.0f);
ASSERT_TRUE(std::signbit(value.real()));
ASSERT_EQ(value.imag(), -0.0f);
ASSERT_TRUE(std::signbit(value.imag()));
}
{
Tensor tensor(std::complex<double>(0.0, -0.0));
tensor.AsProtoField(&tensor_proto);
ASSERT_EQ(tensor_proto.dcomplex_val(0), 0.0);
ASSERT_EQ(tensor_proto.dcomplex_val(1), -0.0);
tensor::CompressTensorProtoInPlace(1, 1.0, &tensor_proto);
ASSERT_TRUE(tensor.FromProto(tensor_proto));
auto value = tensor.scalar<std::complex<double>>()();
ASSERT_EQ(value.real(), 0.0f);
ASSERT_FALSE(std::signbit(value.real()));
ASSERT_EQ(value.imag(), -0.0f);
ASSERT_TRUE(std::signbit(value.imag()));
}
{
Tensor tensor(std::complex<double>(-0.0, 0.0));
tensor.AsProtoField(&tensor_proto);
ASSERT_EQ(tensor_proto.dcomplex_val(0), -0.0);
ASSERT_EQ(tensor_proto.dcomplex_val(1), 0.0);
tensor::CompressTensorProtoInPlace(1, 1.0, &tensor_proto);
ASSERT_TRUE(tensor.FromProto(tensor_proto));
auto value = tensor.scalar<std::complex<double>>()();
ASSERT_EQ(value.real(), -0.0f);
ASSERT_TRUE(std::signbit(value.real()));
ASSERT_EQ(value.imag(), 0.0f);
ASSERT_FALSE(std::signbit(value.imag()));
}
}
}
} |
1,310 | cpp | tensorflow/tensorflow | tfrt_graph_execution_state | tensorflow/core/tfrt/utils/tfrt_graph_execution_state.cc | tensorflow/core/tfrt/utils/tfrt_graph_execution_state_test.cc | #ifndef TENSORFLOW_CORE_TFRT_UTILS_TFRT_GRAPH_EXECUTION_STATE_H_
#define TENSORFLOW_CORE_TFRT_UTILS_TFRT_GRAPH_EXECUTION_STATE_H_
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/core/common_runtime/graph_execution_state.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
namespace tensorflow {
namespace tfrt_stub {
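// Wraps tensorflow::GraphExecutionState for the TFRT stub: it preprocesses
// the incoming GraphDef (resource shared names, optional discovery of
// optimizable functions) and produces pruned, functionalized, and
// Grappler-optimized graphs for a given set of feeds/fetches/targets.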
class TfrtGraphExecutionState {
public:
struct OptimizationResult {
std::unique_ptr<tensorflow::Graph> graph;
absl::Duration functionalization_duration;
absl::Duration grappler_duration;
};
struct Options {
bool run_placer_grappler_on_functions = false;
bool run_placer_on_graph = true;
};
static absl::StatusOr<std::unique_ptr<TfrtGraphExecutionState>> Create(
const Options& options, tensorflow::GraphDef graph_def,
const FallbackState& fallback_state);
TfrtGraphExecutionState(
const Options& options,
std::unique_ptr<tensorflow::GraphExecutionState> graph_execution_state,
const FallbackState& fallback_state,
absl::flat_hash_set<std::string> functions_to_optimize)
: options_(options),
graph_execution_state_(std::move(graph_execution_state)),
fallback_state_(fallback_state),
functions_to_optimize_(std::move(functions_to_optimize)) {}
absl::StatusOr<OptimizationResult> CreateOptimizedGraph(
tensorflow::GraphImportConfig& graph_import_config);
Status Extend(const GraphDef& graph);
const tensorflow::Graph& graph() const {
absl::MutexLock lock(&graph_execution_state_mu_);
DCHECK(graph_execution_state_->full_graph());
return *graph_execution_state_->full_graph();
}
const GraphDef* original_graph_def() const {
absl::MutexLock lock(&graph_execution_state_mu_);
return graph_execution_state_->original_graph_def();
}
const FunctionLibraryDefinition& flib_def() const {
absl::MutexLock lock(&graph_execution_state_mu_);
return graph_execution_state_->flib_def();
}
private:
absl::StatusOr<std::unique_ptr<tensorflow::Graph>> OptimizeGraph(
const tensorflow::Graph& graph,
const tensorflow::BuildGraphOptions& build_graph_options);
Options options_;
std::unique_ptr<tensorflow::GraphExecutionState> graph_execution_state_
ABSL_GUARDED_BY(graph_execution_state_mu_);
mutable absl::Mutex graph_execution_state_mu_;
const FallbackState& fallback_state_;
absl::flat_hash_set<std::string> functions_to_optimize_
ABSL_GUARDED_BY(graph_execution_state_mu_);
};
Status PruneGraphDef(GraphDef& graph_def,
const CallableOptions& callable_options);
Status EliminateRefVariablesFromV1ControlFlow(GraphDef& graph_def);
void RemoveInputShapesInFunctions(tensorflow::GraphDef& graph_def);
}
}
#endif
#include "tensorflow/core/tfrt/utils/tfrt_graph_execution_state.h"
#include <algorithm>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/upgrade_graph.h"
#include "tensorflow/core/common_runtime/function_body.h"
#include "tensorflow/core/common_runtime/function_def_utils.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
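// Returns the names of functions that are referenced only by whitelisted
// call ops (PartitionedCall, StatefulPartitionedCall, BatchFunction),
// whether from the top-level graph or from other functions. Only these
// functions are later considered safe to run Placer/Grappler on.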
absl::flat_hash_set<std::string> FindFunctionsToOptimize(
const GraphDef& graph_def) {
static const auto* const kOpWhitelist = new absl::flat_hash_set<std::string>{
"PartitionedCall", "StatefulPartitionedCall", "BatchFunction"};
  absl::flat_hash_map<std::string /*function_name*/,
                      absl::flat_hash_set<std::string> /*ops*/>
      function_to_ops;
auto build_map = [&](const auto& node_defs) {
for (const auto& node_def : node_defs) {
for (const auto& p : node_def.attr()) {
const AttrValue& attr_value = p.second;
if (!attr_value.has_func()) continue;
function_to_ops[attr_value.func().name()].insert(node_def.op());
}
}
};
build_map(graph_def.node());
for (const auto& function_def : graph_def.library().function()) {
build_map(function_def.node_def());
}
absl::flat_hash_set<std::string> functions_to_optimize;
for (const auto& p : function_to_ops) {
const std::string& function_name = p.first;
const absl::flat_hash_set<std::string>& ops = p.second;
if (std::all_of(ops.begin(), ops.end(), [](const auto& op) {
return kOpWhitelist->contains(op);
})) {
functions_to_optimize.insert(function_name);
}
}
return functions_to_optimize;
}
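// Generates shared names for resources whose shared_name attribute is
// empty, then (when per-function optimization is enabled) returns the set
// of functions eligible for Placer/Grappler runs.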
absl::StatusOr<absl::flat_hash_set<std::string>> PreprocessGraph(
tensorflow::GraphDef& graph_def, bool run_placer_grappler_on_functions) {
if (VLOG_IS_ON(1)) {
DumpGraphDefToFile("before_generate_resource_shared_name_graph_def",
graph_def);
}
TF_RETURN_IF_ERROR(tensorflow::GenerateResourceSharedNameIfEmpty(
graph_def, tensorflow::OpRegistry::Global()));
if (VLOG_IS_ON(2)) {
DumpGraphDefToFile("after_generate_resource_shared_name_graph_def",
graph_def);
}
if (run_placer_grappler_on_functions) {
return FindFunctionsToOptimize(graph_def);
}
return absl::flat_hash_set<std::string>();
}
}
absl::StatusOr<std::unique_ptr<TfrtGraphExecutionState>>
TfrtGraphExecutionState::Create(const TfrtGraphExecutionState::Options& options,
tensorflow::GraphDef graph_def,
const FallbackState& fallback_state) {
TF_ASSIGN_OR_RETURN(
auto functions_to_optimize,
PreprocessGraph(graph_def, options.run_placer_grappler_on_functions));
TF_ASSIGN_OR_RETURN(auto graph_execution_state,
fallback_state.CreateGraphExecutionState(
std::move(graph_def), options.run_placer_on_graph));
return std::make_unique<TfrtGraphExecutionState>(
options, std::move(graph_execution_state), fallback_state,
std::move(functions_to_optimize));
}
namespace {
CallableOptions PopulateCallableOptions(
CallableOptions& callable_options,
absl::Span<const std::string> feed_tensor_names,
absl::Span<const std::string> fetch_tensor_names,
absl::Span<const std::string> target_tensor_names) {
callable_options.mutable_feed()->Reserve(feed_tensor_names.size());
for (const auto& feed : feed_tensor_names) {
callable_options.add_feed(feed);
}
callable_options.mutable_fetch()->Reserve(fetch_tensor_names.size());
for (const auto& fetch : fetch_tensor_names) {
callable_options.add_fetch(fetch);
}
callable_options.mutable_target()->Reserve(target_tensor_names.size());
for (const auto& target : target_tensor_names) {
callable_options.add_target(target);
}
return callable_options;
}
tensorflow::GraphDef CreateGraphDefFromGraphAndFlibDef(
const tensorflow::Graph& graph,
const tensorflow::FunctionLibraryDefinition& flib_def) {
tensorflow::GraphDef graph_def;
graph.ToGraphDef(&graph_def);
*graph_def.mutable_library() = flib_def.ToProto();
return graph_def;
}
absl::StatusOr<std::unique_ptr<tensorflow::Graph>> CreatePrunedGraph(
tensorflow::GraphDef graph_def, const CallableOptions& callable_options) {
VLOG(1) << "Creating pruned graph: " << callable_options.DebugString();
TF_RETURN_IF_ERROR(PruneGraphDef(graph_def, callable_options));
if (VLOG_IS_ON(2)) {
DumpGraphDefToFile("before_eliminate_ref_variables_graph_def", graph_def);
}
TF_RETURN_IF_ERROR(EliminateRefVariablesFromV1ControlFlow(graph_def));
RemoveInputShapesInFunctions(graph_def);
auto pruned_graph =
std::make_unique<tensorflow::Graph>(tensorflow::OpRegistry::Global());
tensorflow::GraphConstructorOptions options;
options.allow_internal_ops = true;
options.add_default_attributes = true;
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(options, std::move(graph_def),
pruned_graph.get()));
return pruned_graph;
}
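// Builds an Identity node named `identity_name` forwarding `input_name`,
// copying the device and the "T" type attribute from `node` so that the
// new node type-checks in place of the original ref-typed input.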
NodeDef CreateNewIdentityNode(const NodeDef& node,
const std::string& input_name,
const std::string& identity_name) {
NodeDef identity;
identity.set_name(identity_name);
identity.set_op("Identity");
identity.add_input(input_name);
identity.set_device(node.device());
for (const auto& name_and_attr : node.attr()) {
if (name_and_attr.first == "T") {
identity.mutable_attr()->insert(name_and_attr);
break;
}
}
return identity;
}
}
absl::StatusOr<TfrtGraphExecutionState::OptimizationResult>
TfrtGraphExecutionState::CreateOptimizedGraph(
tensorflow::GraphImportConfig& graph_import_config) {
OptimizationResult result;
tensorflow::BuildGraphOptions build_graph_options;
std::vector<std::string> inputs;
inputs.reserve(graph_import_config.inputs.size());
for (const auto& input : graph_import_config.inputs) {
inputs.push_back(input.first);
}
PopulateCallableOptions(build_graph_options.callable_options, inputs,
graph_import_config.outputs,
graph_import_config.control_outputs);
auto graph_def = CreateGraphDefFromGraphAndFlibDef(graph(), flib_def());
if (VLOG_IS_ON(1)) {
DumpGraphDefToFile("before_pruning", graph_def);
}
TF_ASSIGN_OR_RETURN(
result.graph,
CreatePrunedGraph(graph_def, build_graph_options.callable_options));
DCHECK(result.graph);
if (VLOG_IS_ON(1)) {
DumpGraphToFile("after_pruning", *result.graph);
}
const auto functionalization_start_time = absl::Now();
TF_RETURN_IF_ERROR(tensorflow::UpgradeLegacyGraph(
result.graph.get(),
const_cast<tensorflow::FunctionLibraryDefinition*>(
&result.graph->flib_def()),
false));
if (VLOG_IS_ON(1)) {
DumpGraphToFile("after_functionalization", *result.graph);
}
auto grappler_start_time = absl::Now();
result.functionalization_duration =
grappler_start_time - functionalization_start_time;
auto status_or_optimized_graph =
OptimizeGraph(*result.graph, build_graph_options);
if (status_or_optimized_graph.ok()) {
result.graph = std::move(status_or_optimized_graph.value());
} else {
LOG(WARNING) << "TFRT failed to optimize graph: "
<< status_or_optimized_graph.status();
}
if (VLOG_IS_ON(1)) {
DumpGraphToFile("after_grappler", *result.graph);
}
result.grappler_duration = absl::Now() - grappler_start_time;
return result;
}
Status TfrtGraphExecutionState::Extend(const GraphDef& graph) {
std::unique_ptr<GraphExecutionState> new_state;
absl::MutexLock lock(&graph_execution_state_mu_);
TF_RETURN_IF_ERROR(graph_execution_state_->Extend(graph, &new_state));
graph_execution_state_.swap(new_state);
auto* graph_def = graph_execution_state_->original_graph_def();
DCHECK_NE(graph_def, nullptr);
TF_ASSIGN_OR_RETURN(
functions_to_optimize_,
PreprocessGraph(*graph_def, options_.run_placer_grappler_on_functions));
return absl::OkStatus();
}
namespace {
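// Walks backwards from an Exit node through its Switch predecessor to the
// LoopCond node of the enclosing while loop, so pruning can keep a loop's
// Exit nodes alive whenever its LoopCond is reachable.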
absl::StatusOr<const NodeDef*> FindLoopCondFromExitNode(
const NodeDef& exit_node,
const absl::flat_hash_map<std::string, NodeDef*>& name_to_node) {
const NodeDef* switch_node = nullptr;
for (const std::string& tensor_name : exit_node.input()) {
const std::string node_name = grappler::NodeName(tensor_name);
if (!name_to_node.contains(node_name)) {
return errors::InvalidArgument("Graph does not contain input ", node_name,
" of exit node ", exit_node.name());
}
const NodeDef* node = name_to_node.at(node_name);
if (node->op() == "Switch") {
switch_node = node;
break;
}
}
if (switch_node == nullptr) {
return errors::InvalidArgument("Exit node ", exit_node.name(),
" does not have a Switch node as its ",
"predecessor.");
}
for (const std::string& tensor_name : switch_node->input()) {
const std::string node_name = grappler::NodeName(tensor_name);
if (!name_to_node.contains(node_name)) {
return errors::InvalidArgument("Graph does not contain input ", node_name,
" of switch node ", switch_node->name());
}
const NodeDef* node = name_to_node.at(node_name);
if (node->op() == "LoopCond") {
return node;
}
}
return errors::InvalidArgument("Switch node ", switch_node->name(),
" does not have a LoopCond node as its ",
"predecessor.");
}
}
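// Prunes `graph_def` down to the transitive fan-in of the fetch, target,
// and feed nodes in `callable_options`. Const feed nodes have their inputs
// cleared; reaching a LoopCond pulls in the corresponding Exit nodes so the
// loop structure stays complete; and a fetched Exit node is renamed with a
// "/tfrt_renamed" suffix and replaced by an Identity under the original
// name (presumably because TFRT cannot fetch an Exit output directly).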
Status PruneGraphDef(GraphDef& graph_def,
const CallableOptions& callable_options) {
absl::flat_hash_map<std::string, NodeDef*> name_to_node;
absl::flat_hash_set<const NodeDef*> exit_nodes;
for (auto& node : *graph_def.mutable_node()) {
name_to_node[node.name()] = &node;
if (node.op() == "Exit") {
exit_nodes.insert(&node);
}
if (node.op() == "_Send" || node.op() == "_Recv") {
return errors::InvalidArgument(
"TFRT prune graphdef cannot handle graphs contains _Send and _Recv "
"ops.");
}
}
absl::flat_hash_map<const NodeDef*, absl::flat_hash_set<const NodeDef*>>
loop_cond_to_exit_nodes;
for (const NodeDef* exit_node : exit_nodes) {
TF_ASSIGN_OR_RETURN(const NodeDef* loop_cond_node,
FindLoopCondFromExitNode(*exit_node, name_to_node));
loop_cond_to_exit_nodes[loop_cond_node].insert(exit_node);
}
std::vector<const NodeDef*> queue;
absl::flat_hash_set<std::string> fetch_node_names;
for (const std::string& tensor_name : callable_options.fetch()) {
const NodeDef* node = name_to_node[grappler::NodeName(tensor_name)];
if (!node) {
return errors::InvalidArgument("Graph does not contain fetch node ",
tensor_name, ".");
}
queue.push_back(node);
fetch_node_names.insert(node->name());
}
for (const std::string& tensor_name : callable_options.target()) {
const NodeDef* node = name_to_node[grappler::NodeName(tensor_name)];
if (!node) {
return errors::InvalidArgument("Graph does not contain target node ",
tensor_name, ".");
}
queue.push_back(node);
fetch_node_names.insert(node->name());
}
absl::flat_hash_set<NodeDef*> feed_node_defs;
for (const std::string& tensor_name : callable_options.feed()) {
NodeDef* node = name_to_node[grappler::NodeName(tensor_name)];
if (!node) {
return errors::InvalidArgument("Graph does not contain feed node ",
tensor_name, ".");
}
if (node->op() == "Const") {
node->clear_input();
}
queue.push_back(node);
feed_node_defs.insert(node);
}
absl::flat_hash_set<const NodeDef*> visited;
std::vector<NodeDef> keep;
while (!queue.empty()) {
const NodeDef* node = queue.back();
queue.pop_back();
if (!visited.insert(node).second) {
continue;
}
keep.push_back(*node);
if (node->op() == "LoopCond") {
for (const NodeDef* exit_node : loop_cond_to_exit_nodes[node]) {
queue.push_back(exit_node);
}
}
for (const std::string& tensor_name : node->input()) {
const NodeDef* in = name_to_node[grappler::NodeName(tensor_name)];
if (!in) {
return errors::InvalidArgument("Graph does not contain input ",
grappler::NodeName(tensor_name),
" of node ", node->name(), ".");
}
queue.push_back(in);
}
}
graph_def.clear_node();
for (auto& node : keep) {
if (fetch_node_names.contains(node.name())) {
if (node.op() == "Exit") {
auto renamed_exit_node = node;
renamed_exit_node.set_name(
absl::StrCat(renamed_exit_node.name(), "/tfrt_renamed"));
node.set_op("Identity");
*node.mutable_input(0) = renamed_exit_node.name();
*graph_def.add_node() = std::move(renamed_exit_node);
}
}
*graph_def.add_node() = std::move(node);
}
return absl::OkStatus();
}
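// Rewrites RefEnter/RefSwitch nodes to their non-ref Enter/Switch forms,
// routing any input that came from a former ref node through a shared
// "<name>/identity" node, and fails with Unimplemented if a downstream op
// genuinely requires a ref input.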
Status EliminateRefVariablesFromV1ControlFlow(tensorflow::GraphDef& graph_def) {
auto* op_factory = OpRegistry::Global();
absl::flat_hash_set<std::string> ref_nodes;
for (const auto& node : graph_def.node()) {
if (node.op() == "RefEnter" || node.op() == "RefSwitch") {
ref_nodes.insert(node.name());
}
}
tensorflow::GraphDef updated_graph_def;
absl::flat_hash_set<std::string> new_identities;
for (auto& node : *graph_def.mutable_node()) {
std::string* ref_input_name = nullptr;
if (node.op() == "RefEnter") {
node.set_op("Enter");
if (node.input_size() != 1) {
return errors::InvalidArgument("RefEnter node ", node.name(),
" does not have exactly 1 input.");
}
ref_input_name = node.mutable_input(0);
} else if (node.op() == "RefSwitch") {
node.set_op("Switch");
if (node.input_size() != 2) {
return errors::InvalidArgument("RefSwitch node", node.name(),
" does not have exactly 2 inputs.");
}
ref_input_name = node.mutable_input(0);
} else {
std::string ref_input;
for (const auto& tensor_name : node.input()) {
std::string input = grappler::NodeName(tensor_name);
if (ref_nodes.contains(input)) {
ref_input = std::move(input);
break;
}
}
if (!ref_input.empty()) {
const OpDef* op_def;
TF_RETURN_IF_ERROR(op_factory->LookUpOpDef(node.op(), &op_def));
for (const auto& input_arg : op_def->input_arg()) {
if (input_arg.is_ref()) {
return errors::Unimplemented(
"Cannot in-place update ref node ", ref_input,
" to the non-ref counterpart since its user node ", node.name(),
" requires its input to be refs.");
}
}
}
}
if (ref_input_name != nullptr) {
std::string identity_name =
absl::StrCat(grappler::NodeName(*ref_input_name), "/identity");
if (!new_identities.contains(identity_name)) {
*updated_graph_def.add_node() =
CreateNewIdentityNode(node, *ref_input_name, identity_name);
new_identities.insert(identity_name);
}
*ref_input_name = std::move(identity_name);
}
*updated_graph_def.add_node() = std::move(node);
}
graph_def.mutable_node()->Swap(updated_graph_def.mutable_node());
return absl::OkStatus();
}
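// Drops the "_input_shapes" attribute from every function in the library,
// likely because stale shape annotations could disagree with shapes
// inferred for the pruned and rewritten graph.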
void RemoveInputShapesInFunctions(tensorflow::GraphDef& graph_def) {
for (tensorflow::FunctionDef& function_def :
*graph_def.mutable_library()->mutable_function()) {
function_def.mutable_attr()->erase("_input_shapes");
}
}
namespace {
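// Runs the standard optimization pipeline over each whitelisted function:
// the FunctionDef is instantiated as a Graph, optimized through a fresh
// GraphExecutionState using its arguments/results/control returns as
// feeds/fetches/targets, and converted back, replacing the original
// FunctionDef. Optimization failures are logged and skipped, not
// propagated.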
Status OptimizeFunctions(
FunctionDefLibrary& flib_proto, const FunctionLibraryDefinition& flib,
const FallbackState& fallback_state,
const absl::flat_hash_set<std::string>& functions_to_optimize) {
for (FunctionDef& fdef : *flib_proto.mutable_function()) {
if (!functions_to_optimize.contains(fdef.signature().name())) {
continue;
}
std::unique_ptr<FunctionBody> fbody;
TF_RETURN_IF_ERROR(
FunctionDefToBodyHelper(fdef, AttrSlice(), &flib, &fbody));
tensorflow::Graph* graph = fbody->graph;
tensorflow::GraphDef graph_def;
graph->ToGraphDef(&graph_def);
*graph_def.mutable_library() = flib.ToProto();
TF_ASSIGN_OR_RETURN(
auto graph_execution_state,
fallback_state.CreateGraphExecutionState(std::move(graph_def)));
std::unique_ptr<tensorflow::Graph> optimized_graph;
std::unique_ptr<tensorflow::FunctionLibraryDefinition> optimized_flib;
tensorflow::BuildGraphOptions build_graph_options;
std::vector<std::string> args;
args.reserve(fbody->arg_nodes.size());
for (const auto& arg : fbody->arg_nodes) args.push_back(arg->name());
std::vector<std::string> rets;
rets.reserve(fbody->ret_nodes.size());
for (const auto& ret : fbody->ret_nodes) rets.push_back(ret->name());
std::vector<std::string> control_rets;
control_rets.reserve(fbody->control_ret_nodes.size());
for (const auto& control_ret : fbody->control_ret_nodes) {
control_rets.push_back(control_ret->name());
}
PopulateCallableOptions(build_graph_options.callable_options, args, rets,
control_rets);
auto status = graph_execution_state->OptimizeGraph(
build_graph_options, *graph_execution_state->full_graph(), &flib,
&optimized_graph, &optimized_flib);
if (!status.ok()) {
LOG(ERROR) << "TFRT failed to optimize graph (converted from function: "
<< fdef.signature().name() << "): " << status;
continue;
}
TF_RETURN_IF_ERROR(
optimized_graph->AddFunctionLibrary(optimized_flib->ToProto()));
FunctionDef new_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*optimized_graph,
fdef.signature().name(), &new_fdef));
fdef = std::move(new_fdef);
}
return absl::OkStatus();
}
}
absl::StatusOr<std::unique_ptr<tensorflow::Graph>>
TfrtGraphExecutionState::OptimizeGraph(
const tensorflow::Graph& graph,
const tensorflow::BuildGraphOptions& build_graph_options) {
std::unique_ptr<tensorflow::Graph> optimized_graph;
std::unique_ptr<tensorflow::FunctionLibraryDefinition> optimized_flib;
{
absl::MutexLock lock(&graph_execution_state_mu_);
TF_RETURN_IF_ERROR(graph_execution_state_->OptimizeGraph(
build_graph_options, graph, &graph.flib_def(), &optimized_graph,
&optimized_flib));
}
FunctionDefLibrary optimized_flib_proto = optimized_flib->ToProto();
if (options_.run_placer_grappler_on_functions) {
TF_RETURN_IF_ERROR(OptimizeFunctions(optimized_flib_proto, *optimized_flib,
fallback_state_,
functions_to_optimize_));
optimized_graph->mutable_flib_def()->Clear();
}
TF_RETURN_IF_ERROR(optimized_graph->AddFunctionLibrary(optimized_flib_proto));
return optimized_graph;
}
}
} | #include "tensorflow/core/tfrt/utils/tfrt_graph_execution_state.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/cc/ops/while_loop.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
using ::testing::_;
using ::testing::ElementsAre;
using ::testing::EqualsProto;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::proto::IgnoringFieldPaths;
using ::testing::proto::IgnoringRepeatedFieldOrdering;
class PruneGraphDefTest : public grappler::GrapplerTest {};
TEST_F(PruneGraphDefTest, ConstFeedWithInput) {
GraphDef graphdef;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output a = ops::Const(scope.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Const(scope.WithControlDependencies(a).WithOpName("b"),
0.0f, {10, 10});
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
}
CallableOptions callable_options;
callable_options.add_feed("b");
callable_options.add_fetch("c");
TF_ASSERT_OK(PruneGraphDef(graphdef, callable_options));
GraphDef expected;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output b = ops::Const(scope.WithOpName("b"), 0.0f, {10, 10});
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&expected));
}
CompareGraphs(expected, graphdef);
}
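// Helpers that supply the condition and body for ops::BuildWhileLoop in the
// tests below, producing V1 control-flow graphs (Enter/Switch/LoopCond/Exit)
// for the pruning rewrites to operate on.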
Status LessThanTenCond(const Scope& scope, const std::vector<Output>& inputs,
Output* output) {
*output = ops::Less(scope, inputs[0], 10);
return scope.status();
}
Status AddOneBody(const Scope& scope, const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
outputs->push_back(ops::AddN(scope, {inputs[0], 1}));
return scope.status();
}
TEST_F(PruneGraphDefTest, InsertIdentityForLoopExitFeed) {
GraphDef graphdef;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
std::vector<Output> inputs;
inputs.push_back(ops::Placeholder(scope.WithOpName("input"), DT_INT32));
std::vector<Output> outputs;
TF_ASSERT_OK(ops::BuildWhileLoop(scope.NewSubScope("while"), inputs,
LessThanTenCond, AddOneBody, "test_loop",
&outputs));
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
}
CallableOptions callable_options;
callable_options.add_feed("input");
callable_options.add_fetch("while/Exit");
TF_ASSERT_OK(PruneGraphDef(graphdef, callable_options));
for (const auto& node : graphdef.node()) {
if (node.op() == "Exit") {
EXPECT_EQ(node.name(), "while/Exit/tfrt_renamed");
}
if (node.name() == "while/Exit") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input().size(), 1);
EXPECT_EQ(node.input(0), "while/Exit/tfrt_renamed");
}
}
}
TEST_F(PruneGraphDefTest, EliminateRefEntersFromControlFlow) {
GraphDef graphdef;
absl::flat_hash_map<std::string, NodeDef> name_to_node;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
std::vector<Output> inputs;
inputs.push_back(ops::Placeholder(scope.WithOpName("input"), DT_INT32));
std::vector<Output> outputs1;
std::vector<Output> outputs2;
TF_ASSERT_OK(ops::BuildWhileLoop(scope.NewSubScope("while"), inputs,
LessThanTenCond, AddOneBody, "test_loop",
&outputs1));
TF_ASSERT_OK(ops::BuildWhileLoop(scope.NewSubScope("while"), inputs,
LessThanTenCond, AddOneBody, "test_loop2",
&outputs2));
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
for (auto& node : *graphdef.mutable_node()) {
if (node.op() == "Enter") {
node.set_op("RefEnter");
}
name_to_node.insert({node.name(), node});
}
}
TF_ASSERT_OK(EliminateRefVariablesFromV1ControlFlow(graphdef));
int num_identity_op = 0;
int num_enter_op = 0;
int num_ref_enter_op = 0;
for (const auto& node : graphdef.node()) {
if (node.op() == "Identity") {
num_identity_op++;
EXPECT_EQ(node.name(), "input/identity");
ASSERT_EQ(node.input().size(), 1);
EXPECT_EQ(node.input(0), "input");
EXPECT_THAT(node.attr(), ElementsAre(Pair("T", _)));
} else if (node.op() == "RefEnter") {
num_ref_enter_op++;
} else if (node.op() == "Enter") {
EXPECT_EQ(num_identity_op, 1);
num_enter_op++;
ASSERT_EQ(node.input().size(), 1);
EXPECT_EQ(node.input(0), "input/identity");
EXPECT_THAT(
node, IgnoringFieldPaths({"input", "op"},
EqualsProto(name_to_node.at(node.name()))));
} else {
EXPECT_THAT(node, EqualsProto(name_to_node.at(node.name())));
}
name_to_node.erase(node.name());
}
EXPECT_EQ(num_identity_op, 1);
EXPECT_EQ(num_enter_op, 2);
EXPECT_EQ(num_ref_enter_op, 0);
EXPECT_THAT(name_to_node, IsEmpty());
}
TEST_F(PruneGraphDefTest, EliminateRefSwitchesFromControlFlow) {
GraphDef graphdef;
absl::flat_hash_map<std::string, NodeDef> name_to_node;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output cond_a = ops::Placeholder(scope.WithOpName("cond_a"), DT_BOOL);
Output cond_b = ops::Placeholder(scope.WithOpName("cond_b"), DT_BOOL);
Output input = ops::Placeholder(scope.WithOpName("input"), DT_FLOAT);
ops::Switch switch_a(scope.WithOpName("switch_a"), input, cond_a);
ops::Switch switch_b(scope.WithOpName("switch_b"), input, cond_b);
Output switch_a_true =
ops::Identity(scope.WithOpName("switch_a_true"), switch_a.output_true);
Output switch_b_true =
ops::Identity(scope.WithOpName("switch_b_true"), switch_b.output_true);
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
for (auto& node : *graphdef.mutable_node()) {
if (node.op() == "Switch") {
node.set_op("RefSwitch");
}
name_to_node.insert({node.name(), node});
}
}
TF_ASSERT_OK(EliminateRefVariablesFromV1ControlFlow(graphdef));
int num_identity_op = 0;
int num_switch_op = 0;
int num_ref_switch_op = 0;
for (const auto& node : graphdef.node()) {
if (node.name() == "switch_a_true" || node.name() == "switch_b_true") {
EXPECT_THAT(node, EqualsProto(name_to_node.at(node.name())));
} else if (node.op() == "Identity") {
num_identity_op++;
EXPECT_EQ(node.name(), "input/identity");
ASSERT_EQ(node.input().size(), 1);
EXPECT_EQ(node.input(0), "input");
EXPECT_THAT(node.attr(), ElementsAre(Pair("T", _)));
} else if (node.op() == "RefSwitch") {
num_ref_switch_op++;
} else if (node.op() == "Switch") {
EXPECT_EQ(num_identity_op, 1);
num_switch_op++;
ASSERT_EQ(node.input().size(), 2);
EXPECT_TRUE(node.input(0) == "input/identity" ||
node.input(1) == "input/identity");
EXPECT_THAT(
node, IgnoringFieldPaths({"input", "op"},
EqualsProto(name_to_node.at(node.name()))));
} else {
EXPECT_THAT(node, EqualsProto(name_to_node.at(node.name())));
}
name_to_node.erase(node.name());
}
EXPECT_EQ(num_identity_op, 1);
EXPECT_EQ(num_switch_op, 2);
EXPECT_EQ(num_ref_switch_op, 0);
EXPECT_THAT(name_to_node, IsEmpty());
}
TEST_F(PruneGraphDefTest, EliminateRefVariablesFromV1ControlFlowFailed) {
GraphDef graphdef;
absl::flat_hash_map<std::string, NodeDef> name_to_node;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output cond = ops::Placeholder(scope.WithOpName("cond"), DT_BOOL);
Output input = ops::Placeholder(scope.WithOpName("input"), DT_FLOAT);
ops::Switch switch_op(scope.WithOpName("switch"), input, cond);
Output var = ops::Variable(scope.WithOpName("var"), {}, DataType::DT_FLOAT);
Output assign =
ops::Assign(scope.WithOpName("assign"), var, switch_op.output_true);
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
for (auto& node : *graphdef.mutable_node()) {
if (node.op() == "Switch") {
node.set_op("RefSwitch");
}
name_to_node.insert({node.name(), node});
}
}
const auto status = EliminateRefVariablesFromV1ControlFlow(graphdef);
EXPECT_FALSE(status.ok());
EXPECT_THAT(status.ToString(), HasSubstr("requires its input to be refs"));
}
TEST_F(PruneGraphDefTest, KeepLoopStructureComplete) {
GraphDef graphdef;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
std::vector<Output> inputs;
inputs.push_back(ops::Placeholder(scope.WithOpName("input"), DT_INT32));
std::vector<Output> outputs;
TF_ASSERT_OK(ops::BuildWhileLoop(scope.NewSubScope("while"), inputs,
LessThanTenCond, AddOneBody, "test_loop",
&outputs));
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
}
CallableOptions callable_options;
callable_options.add_feed("input");
callable_options.add_fetch("while/LoopCond");
GraphDef original_graphdef = graphdef;
TF_ASSERT_OK(PruneGraphDef(graphdef, callable_options));
EXPECT_THAT(graphdef,
IgnoringRepeatedFieldOrdering(EqualsProto(original_graphdef)));
}
class OptimizeGraphTest : public grappler::GrapplerTest {};
TEST_F(OptimizeGraphTest, OptimizeFunctions) {
GraphDef graphdef;
tensorflow::FunctionDefLibrary fdef_lib;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
const Tensor kThree = test::AsScalar<float>(3.0);
auto fdef = tensorflow::FunctionDefHelper::Create(
"Pow3", {"x: float"}, {"y: float"}, {},
{{{"three"}, "Const", {}, {{"dtype", DT_FLOAT}, {"value", kThree}}},
{{"pow3"}, "Pow", {"x", "three:output:0"}, {{"T", DT_FLOAT}}}},
{{"y", "pow3:z:0"}});
tensorflow::FunctionDefLibrary fdef_lib;
*fdef_lib.add_function() = fdef;
TF_ASSERT_OK(scope.graph()->AddFunctionLibrary(fdef_lib));
Output a = ops::Const(scope.WithOpName("a"), 2.0, {1, 1});
std::vector<tensorflow::Output> inputs = {a};
std::vector<tensorflow::DataType> output_dtypes = {
fdef.signature().output_arg(0).type()};
tensorflow::NameAttrList func_attr;
func_attr.set_name(fdef.signature().name());
auto pcall = ops::PartitionedCall(scope, inputs, output_dtypes, func_attr);
Output b = pcall.output.front();
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
}
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create({}, fdef_lib));
TfrtGraphExecutionState::Options options;
options.run_placer_grappler_on_functions = true;
TF_ASSERT_OK_AND_ASSIGN(
auto graph_execution_state,
TfrtGraphExecutionState::Create(options, graphdef, *fallback_state));
tensorflow::GraphImportConfig graph_import_config;
graph_import_config.prune_unused_nodes = true;
graph_import_config.enable_shape_inference = false;
tensorflow::ArrayInfo array_info;
array_info.imported_dtype = DT_FLOAT;
array_info.shape.set_unknown_rank(true);
graph_import_config.inputs["a"] = array_info;
graph_import_config.outputs = {"c"};
TF_ASSERT_OK_AND_ASSIGN(
auto optimized_graph,
graph_execution_state->CreateOptimizedGraph(graph_import_config));
GraphDef optimized_graph_def;
optimized_graph.graph->ToGraphDef(&optimized_graph_def);
GraphDef expected;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
const Tensor kThree = test::AsScalar<float>(3.0);
auto fdef = tensorflow::FunctionDefHelper::Create(
"Pow3", {"x: float"}, {"y_retval: float"}, {},
{{{"ArithmeticOptimizer/ConvertPow__inner_pow3"},
"Square",
{"x"},
{{"dtype", DT_FLOAT}},
{},
"/job:localhost/replica:0/task:0/device:CPU:0"},
{{"pow3"},
"Mul",
{"ArithmeticOptimizer/ConvertPow__inner_pow3:y:0", "x"},
{{"T", DT_FLOAT}},
{},
"/job:localhost/replica:0/task:0/device:CPU:0"}},
{{"y_retval", "pow3:z:0"}});
tensorflow::FunctionDefLibrary fdef_lib;
*fdef_lib.add_function() = fdef;
TF_ASSERT_OK(scope.graph()->AddFunctionLibrary(fdef_lib));
Output a = ops::Const(scope.WithOpName("a"), 2.0, {1, 1});
std::vector<tensorflow::Output> inputs = {a};
std::vector<tensorflow::DataType> output_dtypes = {
fdef.signature().output_arg(0).type()};
tensorflow::NameAttrList func_attr;
func_attr.set_name(fdef.signature().name());
auto pcall = ops::PartitionedCall(scope, inputs, output_dtypes, func_attr);
Output b = pcall.output.front();
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&expected));
}
CompareGraphs(expected, optimized_graph_def);
CompareFunctions(expected.library().function(0),
optimized_graph_def.library().function(0));
}
TEST_F(OptimizeGraphTest, OptimizeFunctionsUsedByFunctionNodes) {
GraphDef graphdef;
tensorflow::FunctionDefLibrary fdef_lib;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
const Tensor kThree = test::AsScalar<float>(3.0);
auto pow3_fdef = tensorflow::FunctionDefHelper::Create(
"Pow3", {"x: float"}, {"y: float"}, {},
{{{"three"}, "Const", {}, {{"dtype", DT_FLOAT}, {"value", kThree}}},
{{"pow3"}, "Pow", {"x", "three:output:0"}, {{"T", DT_FLOAT}}}},
{{"y", "pow3:z:0"}});
const Tensor kOne = test::AsScalar<float>(1.0);
auto base2pow3_fdef = tensorflow::FunctionDefHelper::Create(
"Add1Pow3", {"x: float"}, {"y: float"}, {},
{{{"one"}, "Const", {}, {{"dtype", DT_FLOAT}, {"value", kOne}}},
{{"add"}, "Add", {"x", "one:output:0"}, {{"T", DT_FLOAT}}},
{{"pcall"},
"PartitionedCall",
{"add:z:0"},
{{"Tin", DataTypeSlice({DT_FLOAT})},
{"Tout", DataTypeSlice({DT_FLOAT})},
{"f", tensorflow::FunctionDefHelper::FunctionRef(
"Pow3", {{"T", DT_FLOAT}})}}}},
{{"y", "pcall:output:0"}});
tensorflow::FunctionDefLibrary fdef_lib;
*fdef_lib.add_function() = pow3_fdef;
*fdef_lib.add_function() = base2pow3_fdef;
TF_ASSERT_OK(scope.graph()->AddFunctionLibrary(fdef_lib));
Output a = ops::Const(scope.WithOpName("a"), 1.0, {1, 1});
std::vector<tensorflow::Output> inputs = {a};
std::vector<tensorflow::DataType> output_dtypes = {
base2pow3_fdef.signature().output_arg(0).type()};
tensorflow::NameAttrList func_attr;
func_attr.set_name(base2pow3_fdef.signature().name());
auto pcall = ops::PartitionedCall(scope, inputs, output_dtypes, func_attr);
Output b = pcall.output.front();
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
}
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create({}, fdef_lib));
TfrtGraphExecutionState::Options options;
options.run_placer_grappler_on_functions = true;
TF_ASSERT_OK_AND_ASSIGN(
auto graph_execution_state,
TfrtGraphExecutionState::Create(options, graphdef, *fallback_state));
tensorflow::GraphImportConfig graph_import_config;
graph_import_config.prune_unused_nodes = true;
graph_import_config.enable_shape_inference = false;
tensorflow::ArrayInfo array_info;
array_info.imported_dtype = DT_FLOAT;
array_info.shape.set_unknown_rank(true);
graph_import_config.inputs["a"] = array_info;
graph_import_config.outputs = {"c"};
TF_ASSERT_OK_AND_ASSIGN(
auto optimized_graph,
graph_execution_state->CreateOptimizedGraph(graph_import_config));
GraphDef optimized_graph_def;
optimized_graph.graph->ToGraphDef(&optimized_graph_def);
GraphDef expected;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
const Tensor kThree = test::AsScalar<float>(3.0);
auto pow3_fdef = tensorflow::FunctionDefHelper::Create(
"Pow3", {"x: float"}, {"y_retval: float"}, {},
{{{"ArithmeticOptimizer/ConvertPow__inner_pow3"},
"Square",
{"x"},
{{"dtype", DT_FLOAT}},
{},
"/job:localhost/replica:0/task:0/device:CPU:0"},
{{"pow3"},
"Mul",
{"ArithmeticOptimizer/ConvertPow__inner_pow3:y:0", "x"},
{{"T", DT_FLOAT}},
{},
"/job:localhost/replica:0/task:0/device:CPU:0"}},
{{"y_retval", "pow3:z:0"}});
const Tensor kOne = test::AsScalar<float>(1.0);
auto base2pow3_fdef = tensorflow::FunctionDefHelper::Create(
"Add1Pow3", {"x: float"}, {"y: float"}, {},
{{{"one"}, "Const", {}, {{"dtype", DT_FLOAT}, {"value", kOne}}},
{{"add"}, "Add", {"x", "one:output:0"}, {{"T", DT_FLOAT}}},
{{"pcall"},
"PartitionedCall",
{"add:z:0"},
{{"Tin", DataTypeSlice({DT_FLOAT})},
{"Tout", DataTypeSlice({DT_FLOAT})},
{"f", tensorflow::FunctionDefHelper::FunctionRef(
"Pow3", {{"T", DT_FLOAT}})}}}},
{{"y", "pcall:output:0"}});
tensorflow::FunctionDefLibrary fdef_lib;
*fdef_lib.add_function() = pow3_fdef;
*fdef_lib.add_function() = base2pow3_fdef;
TF_ASSERT_OK(scope.graph()->AddFunctionLibrary(fdef_lib));
Output a = ops::Const(scope.WithOpName("a"), 1.0, {1, 1});
std::vector<tensorflow::Output> inputs = {a};
std::vector<tensorflow::DataType> output_dtypes = {
base2pow3_fdef.signature().output_arg(0).type()};
tensorflow::NameAttrList func_attr;
func_attr.set_name(base2pow3_fdef.signature().name());
auto pcall = ops::PartitionedCall(scope, inputs, output_dtypes, func_attr);
Output b = pcall.output.front();
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&expected));
}
CompareFunctions(expected.library().function(1),
optimized_graph_def.library().function(1));
ASSERT_EQ("Pow3",
optimized_graph_def.library().function(1).signature().name());
}
TEST_F(OptimizeGraphTest, DontOptimizeUnsafeFunction) {
GraphDef graphdef;
tensorflow::FunctionDefLibrary fdef_lib;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
const Tensor kThree = test::AsScalar<float>(3.0);
auto fdef = tensorflow::FunctionDefHelper::Create(
"Pow3", {"x: float"}, {"y: float"}, {},
{{{"three"}, "Const", {}, {{"dtype", DT_FLOAT}, {"value", kThree}}},
{{"pow3"}, "Pow", {"x", "three:output:0"}, {{"T", DT_FLOAT}}}},
{{"y", "pow3:z:0"}});
tensorflow::FunctionDefLibrary fdef_lib;
*fdef_lib.add_function() = fdef;
TF_ASSERT_OK(scope.graph()->AddFunctionLibrary(fdef_lib));
Output a = ops::Const(scope.WithOpName("a"), 2.0, {1, 1});
Output cond = ops::Const(scope.WithOpName("cond"), true, {1, 1});
std::vector<tensorflow::Output> inputs = {a};
std::vector<tensorflow::DataType> output_dtypes = {
fdef.signature().output_arg(0).type()};
tensorflow::NameAttrList func_attr;
func_attr.set_name(fdef.signature().name());
auto if_op =
ops::If(scope, cond, inputs, output_dtypes, func_attr, func_attr);
Output b = if_op.output.front();
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
}
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create({}, fdef_lib));
TfrtGraphExecutionState::Options options;
options.run_placer_grappler_on_functions = true;
TF_ASSERT_OK_AND_ASSIGN(
auto graph_execution_state,
TfrtGraphExecutionState::Create(options, graphdef, *fallback_state));
tensorflow::GraphImportConfig graph_import_config;
graph_import_config.prune_unused_nodes = true;
graph_import_config.enable_shape_inference = false;
tensorflow::ArrayInfo array_info;
array_info.imported_dtype = DT_FLOAT;
array_info.shape.set_unknown_rank(true);
graph_import_config.inputs["a"] = array_info;
graph_import_config.outputs = {"c"};
TF_ASSERT_OK_AND_ASSIGN(
auto optimized_graph,
graph_execution_state->CreateOptimizedGraph(graph_import_config));
GraphDef optimized_graph_def;
optimized_graph.graph->ToGraphDef(&optimized_graph_def);
CompareGraphs(graphdef, optimized_graph_def);
CompareFunctions(graphdef.library().function(0),
optimized_graph_def.library().function(0));
}
TEST_F(OptimizeGraphTest, FunctionBecomeUnsafeIfAnyOpIsUnsafe) {
GraphDef graphdef;
tensorflow::FunctionDefLibrary fdef_lib;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
const Tensor kThree = test::AsScalar<float>(3.0);
auto fdef = tensorflow::FunctionDefHelper::Create(
"Pow3", {"x: float"}, {"y: float"}, {},
{{{"three"}, "Const", {}, {{"dtype", DT_FLOAT}, {"value", kThree}}},
{{"pow3"}, "Pow", {"x", "three:output:0"}, {{"T", DT_FLOAT}}}},
{{"y", "pow3:z:0"}});
tensorflow::FunctionDefLibrary fdef_lib;
*fdef_lib.add_function() = fdef;
TF_ASSERT_OK(scope.graph()->AddFunctionLibrary(fdef_lib));
Output a = ops::Const(scope.WithOpName("a"), 2.0, {1, 1});
Output cond = ops::Const(scope.WithOpName("cond"), true, {1, 1});
std::vector<tensorflow::Output> inputs = {a};
std::vector<tensorflow::DataType> output_dtypes = {
fdef.signature().output_arg(0).type()};
tensorflow::NameAttrList func_attr;
func_attr.set_name(fdef.signature().name());
auto if_op =
ops::If(scope, cond, inputs, output_dtypes, func_attr, func_attr);
Output b = if_op.output.front();
inputs = {b};
auto pcall = ops::PartitionedCall(scope, inputs, output_dtypes, func_attr);
Output c = pcall.output.front();
Output d = ops::Identity(scope.WithOpName("d"), c);
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
}
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create({}, fdef_lib));
TfrtGraphExecutionState::Options options;
options.run_placer_grappler_on_functions = true;
TF_ASSERT_OK_AND_ASSIGN(
auto graph_execution_state,
TfrtGraphExecutionState::Create(options, graphdef, *fallback_state));
tensorflow::GraphImportConfig graph_import_config;
graph_import_config.prune_unused_nodes = true;
graph_import_config.enable_shape_inference = false;
tensorflow::ArrayInfo array_info;
array_info.imported_dtype = DT_FLOAT;
array_info.shape.set_unknown_rank(true);
graph_import_config.inputs["a"] = array_info;
graph_import_config.outputs = {"d"};
TF_ASSERT_OK_AND_ASSIGN(
auto optimized_graph,
graph_execution_state->CreateOptimizedGraph(graph_import_config));
GraphDef optimized_graph_def;
optimized_graph.graph->ToGraphDef(&optimized_graph_def);
CompareFunctions(graphdef.library().function(0),
optimized_graph_def.library().function(0));
}
class ExtendGraphTest : public grappler::GrapplerTest {};
TEST_F(ExtendGraphTest, ExtendGraph) {
GraphDef graphdef;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output a = ops::Const(scope.WithOpName("a"), 0.0f, {10, 10});
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
}
SessionOptions session_options;
session_options.config.mutable_experimental()
->set_disable_optimize_for_static_graph(true);
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(session_options, {}));
TfrtGraphExecutionState::Options options;
options.run_placer_grappler_on_functions = false;
TF_ASSERT_OK_AND_ASSIGN(
auto graph_execution_state,
TfrtGraphExecutionState::Create(options, graphdef, *fallback_state));
GraphDef extension;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output b = ops::Const(scope.WithOpName("b"), 0.0f, {10, 10});
TF_ASSERT_OK(scope.ToGraphDef(&extension));
}
TF_ASSERT_OK(graph_execution_state->Extend(extension));
GraphDef expected;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output a = ops::Const(scope.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Const(scope.WithOpName("b"), 0.0f, {10, 10});
TF_ASSERT_OK(scope.ToGraphDef(&expected));
}
ASSERT_NE(graph_execution_state->original_graph_def(), nullptr);
CompareGraphs(expected, *graph_execution_state->original_graph_def());
}
}
}
} |
1,311 | cpp | tensorflow/tensorflow | node_io_dump_rewriter | tensorflow/core/tfrt/utils/debug/node_io_dump_rewriter.cc | tensorflow/core/tfrt/utils/debug/node_io_dump_rewriter_test.cc | #ifndef TENSORFLOW_CORE_TFRT_UTILS_DEBUG_NODE_IO_DUMP_REWRITER_H_
#define TENSORFLOW_CORE_TFRT_UTILS_DEBUG_NODE_IO_DUMP_REWRITER_H_
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace tensorflow {
namespace tfrt_stub {
Status InsertDumpOps(Graph& graph,
const absl::flat_hash_set<std::string>& nodes_to_dump,
absl::string_view dump_dir = "");
Status InsertDumpOps(MetaGraphDef& meta_graph_def,
const absl::flat_hash_set<std::string>& nodes_to_dump,
absl::string_view dump_dir = "");
}
}
#endif
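// Example usage (a minimal sketch; the node name and dump directory are
// illustrative assumptions, not part of this API's contract):
//
//   tensorflow::MetaGraphDef meta_graph_def = ...;  // e.g. from a SavedModel
//   TF_RETURN_IF_ERROR(tensorflow::tfrt_stub::InsertDumpOps(
//       meta_graph_def, /*nodes_to_dump=*/{"logits"},
//       /*dump_dir=*/"/tmp/node_io_dump"));
//   // At runtime, each listed node's inputs and outputs are written under
//   // the dump directory via the inserted DebugIdentityV3 nodes.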
#include "tensorflow/core/tfrt/utils/debug/node_io_dump_rewriter.h"
#include <cstdlib>
#include <memory>
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/function_body.h"
#include "tensorflow/core/common_runtime/function_def_utils.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
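// Resolves the directory to dump tensors into: an explicit `dump_dir`
// argument wins, otherwise the TF_DUMP_GRAPH_PREFIX environment variable is
// used; it is an error if neither is set.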
absl::StatusOr<std::string> GetDumpDir(absl::string_view dump_dir) {
if (!dump_dir.empty()) return std::string(dump_dir);
const char* prefix = getenv("TF_DUMP_GRAPH_PREFIX");
if (prefix != nullptr) return std::string(prefix);
return errors::InvalidArgument("TF_DUMP_GRAPH_PREFIX not specified");
}
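// Inserts a DebugIdentityV3 node on every non-control input and output edge
// of `node` so the tensors flowing through those edges are written to
// `dump_dir` at runtime.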
Status InsertDumpOpsForNode(Graph& graph, Node& node,
absl::string_view dump_dir) {
auto insert = [&](bool is_input, const std::vector<const Edge*> edges) {
for (const Edge* edge : edges) {
if (edge->IsControlEdge()) continue;
Node* dump_node;
TF_RETURN_IF_ERROR(
NodeBuilder(absl::StrCat(edge->src()->name(), "/", edge->src_output(),
"/debug_identity"),
"DebugIdentityV3")
.Attr("io_of_node", node.name())
.Attr("is_input", is_input)
.Attr("io_index",
is_input ? edge->dst_input() : edge->src_output())
.Attr("tensor_name",
absl::StrCat(edge->src()->name(), ":", edge->src_output()))
.Attr("debug_urls", {absl::StrCat("file:
.Input(edge->src(), edge->src_output())
.Finalize(&graph, &dump_node));
TF_RETURN_IF_ERROR(
graph.UpdateEdge(dump_node, 0, edge->dst(), edge->dst_input()));
}
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(insert(true,
{node.in_edges().begin(), node.in_edges().end()}));
TF_RETURN_IF_ERROR(insert(
false, {node.out_edges().begin(), node.out_edges().end()}));
return absl::OkStatus();
}
}
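// Instruments every node named in `nodes_to_dump`, both in the top-level
// graph and inside every function in the graph's function library.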
Status InsertDumpOps(Graph& graph,
const absl::flat_hash_set<std::string>& nodes_to_dump,
absl::string_view dump_dir) {
TF_ASSIGN_OR_RETURN(auto dir, GetDumpDir(dump_dir));
auto insert = [&](Graph& graph) {
for (Node* node : graph.op_nodes()) {
if (nodes_to_dump.contains(node->name())) {
TF_RETURN_IF_ERROR(InsertDumpOpsForNode(graph, *node, dir));
}
}
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(insert(graph));
for (const auto& fname : graph.flib_def().ListFunctionNames()) {
std::unique_ptr<FunctionBody> fbody;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*graph.flib_def().Find(fname), AttrSlice(), &graph.flib_def(), &fbody));
TF_RETURN_IF_ERROR(insert(*fbody->graph));
FunctionDef new_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*fbody->graph, fname, &new_fdef));
TF_RETURN_IF_ERROR(
graph.mutable_flib_def()->ReplaceFunction(fname, new_fdef));
}
return absl::OkStatus();
}
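// MetaGraphDef overload: round-trips the GraphDef through a Graph so the
// Graph-based overload above can perform the rewrite.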
Status InsertDumpOps(MetaGraphDef& meta_graph_def,
const absl::flat_hash_set<std::string>& nodes_to_dump,
absl::string_view dump_dir) {
Graph graph(OpRegistry::Global());
TF_RETURN_IF_ERROR(
ConvertGraphDefToGraph({}, meta_graph_def.graph_def(), &graph));
TF_RETURN_IF_ERROR(InsertDumpOps(graph, nodes_to_dump, dump_dir));
graph.ToGraphDef(meta_graph_def.mutable_graph_def());
return absl::OkStatus();
}
}
} | #include "tensorflow/core/tfrt/utils/debug/node_io_dump_rewriter.h"
#include <dirent.h>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/saved_model/reader.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/tfrt/saved_model/saved_model.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_testutil.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
constexpr absl::string_view kDumpSubDirName = "node-io-dump";
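// Returns the node with the given name, or nullptr if it is not in the graph.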
const Node* FindNode(const Graph* graph, absl::string_view node_name) {
for (Node* node : graph->nodes()) {
if (node->name() == node_name) return node;
}
return nullptr;
}
const Node* GetInputNode(const Node* node, size_t index) {
const Node* input_node;
CHECK_OK(node->input_node(index, &input_node));
return input_node;
}
const Node* GetOutputNode(const Node* node, size_t index) {
for (const Edge* edge : node->out_edges()) {
if (edge->src_output() == index) return edge->dst();
}
return nullptr;
}
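// Lists the files written under <dump_dir>/node-io-dump/<step_dir>, expecting
// exactly one step directory containing only regular files.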
absl::StatusOr<std::vector<std::string>> GetFilenames(
absl::string_view dump_dir) {
auto dump_sub_dir = absl::StrCat(dump_dir, "/", kDumpSubDirName);
DIR* dir = opendir(dump_sub_dir.data());
if (dir == nullptr) {
return absl::InvalidArgumentError(
absl::StrCat("can't open directory: ", dump_sub_dir));
}
std::vector<std::string> step_dirs;
struct dirent* entry;
while ((entry = readdir(dir)) != nullptr) {
if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) {
continue;
}
if (entry->d_type != DT_DIR) {
return absl::InternalError(absl::StrCat(
"Found non-directory entry under dump_sub_dir: ", entry->d_name));
}
step_dirs.push_back(absl::StrCat(dump_sub_dir, "/", entry->d_name));
}
closedir(dir);
CHECK_EQ(step_dirs.size(), 1);
dir = opendir(step_dirs[0].data());
if (dir == nullptr) {
return absl::InvalidArgumentError(
absl::StrCat("can't open directory: ", step_dirs[0]));
}
std::vector<std::string> filenames;
while ((entry = readdir(dir)) != nullptr) {
if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) {
continue;
}
if (entry->d_type == DT_DIR) {
return absl::InternalError(absl::StrCat(
"Found directory entry under step_dir: ", entry->d_name));
}
filenames.push_back(entry->d_name);
}
closedir(dir);
return filenames;
}
TEST(NodeIoDumpRewriterTest, OnGraph) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Scope scope = Scope::NewRootScope().WithDevice("/device:CPU:0");
auto input_a = ops::Placeholder(scope.WithOpName("input_a"), DT_INT32);
auto input_b = ops::Placeholder(scope.WithOpName("input_b"), DT_INT32);
auto add = ops::Add(scope.WithOpName("add"), input_a, input_b);
auto output = ops::Identity(scope.WithOpName("output"), add);
TF_ASSERT_OK(scope.ToGraph(graph.get()));
Env* env = Env::Default();
const string dump_dir =
::tsl::io::JoinPath(::tsl::testing::TmpDir(), "OnGraph");
if (!env->FileExists(dump_dir).ok()) {
ASSERT_TRUE(env->RecursivelyCreateDir(dump_dir).ok());
}
TF_ASSERT_OK(InsertDumpOps(*graph, {"add"}, dump_dir));
auto* node = FindNode(graph.get(), "add");
EXPECT_EQ(node->num_inputs(), 2);
EXPECT_EQ(GetInputNode(node, 0)->name(), "input_a/0/debug_identity");
EXPECT_EQ(GetInputNode(node, 1)->name(), "input_b/0/debug_identity");
EXPECT_EQ(node->num_outputs(), 1);
EXPECT_EQ(GetOutputNode(node, 0)->name(), "add/0/debug_identity");
}
TEST(NodeIoDumpRewriterTest, OnSavedModelV1) {
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
Env* env = Env::Default();
const string dump_dir =
::tsl::io::JoinPath(::tsl::testing::TmpDir(), "OnSavedModelV1");
if (!env->FileExists(dump_dir).ok()) {
ASSERT_TRUE(env->RecursivelyCreateDir(dump_dir).ok());
}
TF_ASSERT_OK(InsertDumpOps(meta_graph_def, {"Add"}, dump_dir));
auto runtime = DefaultTfrtRuntime(1);
SavedModel::Options options(runtime.get());
options.graph_execution_options.compile_options.enable_grappler = false;
TF_ASSERT_OK_AND_ASSIGN(
auto saved_model,
SavedModelImpl::LoadSavedModel(options, meta_graph_def, saved_model_dir));
std::vector<tensorflow::Tensor> inputs;
inputs.push_back(
CreateTfTensor<int32_t>({1, 3}, {1, 1, 1}));
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(saved_model->Run({}, "another_toy", inputs, &outputs));
ASSERT_EQ(outputs.size(), 2);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({6}));
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[1]),
::testing::ElementsAreArray({12}));
ASSERT_OK_AND_ASSIGN(auto filenames, GetFilenames(dump_dir));
ASSERT_EQ(filenames.size(), 3);
EXPECT_TRUE(absl::StartsWith(filenames[0], "Add:out:0_"));
EXPECT_TRUE(absl::StartsWith(filenames[1], "Add:in:0_"));
EXPECT_TRUE(absl::StartsWith(filenames[2], "Add:in:1_"));
}
TEST(NodeIoDumpRewriterTest, OnSavedModelV2) {
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v2");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
Env* env = Env::Default();
const string dump_dir =
::tsl::io::JoinPath(::tsl::testing::TmpDir(), "OnSavedModelV2");
if (!env->FileExists(dump_dir).ok()) {
ASSERT_TRUE(env->RecursivelyCreateDir(dump_dir).ok());
}
TF_ASSERT_OK(InsertDumpOps(meta_graph_def, {"result"}, dump_dir));
auto runtime = DefaultTfrtRuntime(1);
SavedModel::Options options(runtime.get());
options.graph_execution_options.compile_options.enable_grappler = false;
TF_ASSERT_OK_AND_ASSIGN(
auto saved_model,
SavedModelImpl::LoadSavedModel(options, meta_graph_def, saved_model_dir));
std::vector<tensorflow::Tensor> inputs;
inputs.push_back(
CreateTfTensor<int32_t>({1, 3}, {1, 1, 1}));
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(saved_model->Run({}, "serving_default", inputs, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({6}));
ASSERT_OK_AND_ASSIGN(auto filenames, GetFilenames(dump_dir));
ASSERT_EQ(filenames.size(), 3);
EXPECT_TRUE(absl::StartsWith(filenames[0], "result:out:0_"));
EXPECT_TRUE(absl::StartsWith(filenames[1], "result:in:1_"));
EXPECT_TRUE(absl::StartsWith(filenames[2], "result:in:0_"));
}
}
}
} |
1,312 | cpp | tensorflow/tensorflow | execute | tensorflow/core/tfrt/mlrt/interpreter/execute.cc | tensorflow/core/common_runtime/eager/execute_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_EAGER_EXECUTE_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_EAGER_EXECUTE_H_
#include "absl/container/inlined_vector.h"
#include "absl/types/span.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/common_runtime/eager/eager_operation.h"
#include "tensorflow/core/common_runtime/eager/kernel_and_device.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
Status EagerExecute(EagerOperation* op, TensorHandle** retvals,
int* num_retvals);
Status EagerKernelExecute(
EagerContext* ctx, const absl::InlinedVector<TensorHandle*, 4>& op_inputs,
const absl::optional<EagerFunctionParams>& eager_func_params,
const core::RefCountPtr<KernelAndDevice>& kernel,
GraphCollector* graph_collector, CancellationManager* cancellation_manager,
absl::Span<TensorHandle*> retvals,
const absl::optional<ManagedStackTrace>& stack_trace = {});
Status EagerCopyToDevice(TensorHandle* h, EagerContext* ctx,
EagerExecutor* executor, Device* device, bool mirror,
TensorHandle** result);
void EagerLocalExecuteAsync(EagerOperation* op, TensorHandle** retvals,
int* num_retvals, StatusCallback done);
}
#endif
#include "tensorflow/core/common_runtime/eager/execute.h"
#include <algorithm>
#include <cstddef>
#include <functional>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <string_view>
#include <unordered_map>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/btree_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_replace.h"
#include "tensorflow/core/common_runtime/arg_ret_placement.h"
#include "tensorflow/core/common_runtime/eager/eager_operation.h"
#include "tensorflow/core/common_runtime/eager/small_constants_optimizer.h"
#include "tensorflow/core/common_runtime/eager/summary_optimizer.h"
#include "tensorflow/core/common_runtime/int32_fulltype.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/kernel_def.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/platform.h"
#include "tensorflow/core/platform/protobuf.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include "tensorflow/c/tf_tensor_internal.h"
#include "tensorflow/compiler/jit/defs.h"
#include "xla/tsl/util/env_var.h"
#include "tensorflow/core/common_runtime/colocation_graph.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/common_runtime/eager/copy_to_device_node.h"
#include "tensorflow/core/common_runtime/eager/execute_node.h"
#include "tensorflow/core/common_runtime/eager/kernel_and_device.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/logging.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_reference.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/profiler/lib/scoped_memory_debug_annotation.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tsl/platform/fingerprint.h"
#include "tsl/platform/statusor.h"
#if !defined(IS_MOBILE_PLATFORM)
#include "tensorflow/core/distributed_runtime/eager/eager_client.h"
#include "tensorflow/core/distributed_runtime/eager/remote_copy_node.h"
#include "tensorflow/core/distributed_runtime/eager/remote_execute_node.h"
#include "tensorflow/core/distributed_runtime/eager/remote_mgr.h"
#include "tensorflow/core/protobuf/remote_tensor_handle.pb.h"
#endif
#include "tensorflow/core/common_runtime/eager/eager_op_rewrite_registry.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/util/util.h"
#ifdef INTEL_MKL
#include "tensorflow/core/graph/mkl_graph_util.h"
#endif
namespace tensorflow {
namespace {
constexpr char kEnabled[] = "enabled";
constexpr char kDisabled[] = "disabled";
auto* function_compile_counter =
monitoring::Counter<2>::New("/tensorflow/core/tf_function_compile",
"The number of times that TF function is "
"called for different compilation options.",
"device", "compilation_option");
auto* top_level_jit_compilation_counter = monitoring::Counter<1>::New(
"/tensorflow/core/tf_top_level_jit_compilation",
"The number of times a top-level JIT-compiled function is called.",
"device");
bool SendAsProtosWhenPossible() {
static bool send_as_protos_when_possible = []() {
bool result;
TF_CHECK_OK(tsl::ReadBoolFromEnvVar("TF_SEND_AS_PROTOS_WHEN_POSSIBLE",
false, &result));
return result;
}();
return send_as_protos_when_possible;
}
const string& DeviceNameOrUnspecified(Device* device) {
static string* unspecified_string = new string("<unspecified>");
return (device == nullptr) ? *unspecified_string : device->name();
}
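// Copies input tensor `handle` (input #i of `op`) from `handle_device` to
// `expected_input_device`, honoring the context's device placement policy
// (explicit error, warning, or silent copy).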
Status CopyInputToExpectedDevice(EagerContext* ctx, EagerOperation* op,
Device* op_device,
TensorHandle* handle,
int i, Device* handle_device,
Device* expected_input_device,
TensorHandle** result) {
VLOG(6) << "Expected input device: " << expected_input_device->name()
<< "; handle_device: " << handle_device->name();
DCHECK(expected_input_device != handle_device);
*result = nullptr;
const string& op_device_name = DeviceNameOrUnspecified(op_device);
switch (ctx->GetDevicePlacementPolicy()) {
case DEVICE_PLACEMENT_SILENT_FOR_INT32:
if (handle->dtype == DT_INT32) {
break;
}
VLOG(6) << "DevicePlacementPolicy: DEVICE_PLACEMENT_SILENT_FOR_INT32 but "
"input type is not INT32.";
TF_FALLTHROUGH_INTENDED;
case DEVICE_PLACEMENT_EXPLICIT:
if (op->Name() == "Identity" ||
op->Name() == "IdentityN"
|| op->Name() == "_EagerConst") {
break;
}
return errors::InvalidArgument(
"Tensors on conflicting devices:"
" cannot compute ",
op->Name(), " as input #", i, " was expected to be on ",
expected_input_device->name(), " but is actually on ",
handle_device->name(), " (operation running on ", op_device_name, ")",
" Tensors can be copied explicitly using:"
" `with tf.device(device_name): x = tf.identity(x)`"
" or transparently copied by using"
" tf.config.experimental.set_device_policy('silent')."
" Copying tensors between devices may slow down your model");
case DEVICE_PLACEMENT_WARN:
LOG(WARNING) << "before computing " << op->Name() << " input #" << i
<< " was expected to be on " << expected_input_device->name()
<< " but is actually on " << handle_device->name()
<< " (operation running on " << op_device_name
<< "). This triggers a copy which can be a performance "
"bottleneck.";
break;
case DEVICE_PLACEMENT_SILENT:
break;
}
TensorHandle* result_handle = nullptr;
tsl::profiler::TraceMe activity(
[&] {
return absl::StrCat("_Send input ", i, " from ", handle_device->name(),
" to ", expected_input_device->name());
},
tsl::profiler::TraceMeLevel::kInfo);
Status status =
EagerCopyToDevice(handle, ctx, &op->Executor(), expected_input_device,
true, &result_handle);
activity.Stop();
if (!status.ok()) {
return Status(
status.code(),
absl::StrCat("Failed copying input tensor from ", handle_device->name(),
" to ", expected_input_device->name(), " in order to run ",
op->Name(), ": ", status.message()));
}
*result = result_handle;
return absl::OkStatus();
}
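// Verifies that each input of `op` matches the kernel's expected dtype and
// device, unpacking packed handles and inserting cross-device copies where
// the placement policy allows it.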
Status ValidateInputTypeAndPlacement(
EagerContext* ctx, EagerOperation* op,
const core::RefCountPtr<KernelAndDevice>& kernel) {
tsl::profiler::TraceMe activity("ValidateInputTypeAndPlacement",
tsl::profiler::TraceMeLevel::kInfo);
const int n_inputs = op->Inputs().size();
if (kernel->num_inputs() != n_inputs) {
return errors::InvalidArgument("expected ", kernel->num_inputs(),
" inputs, got ", n_inputs);
}
const bool is_function = kernel->IsFunction();
if (n_inputs > 0) {
const DataType* input_types = &kernel->input_dtypes()[0];
const absl::InlinedVector<TensorHandle*, 4>* handles;
TF_RETURN_IF_ERROR(op->TensorHandleInputs(&handles));
for (int i = 0; i < n_inputs; ++i) {
TensorHandle* handle = (*handles)[i];
Device* expected_device = kernel->InputDevice(i);
if (!kernel->IsFunction() && handle->Type() == TensorHandle::PACKED) {
for (int j = 0; j < handle->NumPackedHandles(); ++j) {
TensorHandle* h = nullptr;
TF_RETURN_IF_ERROR(handle->ExtractPackedHandle(j, &h));
if ((h->op_device() != nullptr) &&
(h->op_device()->name() == op->DeviceName())) {
op->UpdateInput(i, h);
handle = h;
break;
}
}
}
Device* handle_device = handle->DeviceOrHostCPU(*ctx);
const bool maybe_copy =
!is_function || handle->Type() != TensorHandle::REMOTE;
VLOG(6) << "!is_function: " << !is_function;
VLOG(6) << "handle->Type(): " << handle->Type();
if (expected_device != handle_device && maybe_copy) {
TF_RETURN_IF_ERROR(CopyInputToExpectedDevice(ctx, op, kernel->device(),
handle, i, handle_device,
expected_device, &handle));
op->UpdateInput(i, handle);
handle->Unref();
}
if (handle->dtype != input_types[i]) {
return errors::InvalidArgument(
"cannot compute ", op->Name(), " as input #", i, "(zero-based)",
" was expected to be a ", DataTypeString(input_types[i]),
" tensor but is a ", DataTypeString(handle->dtype), " tensor");
}
}
}
return absl::OkStatus();
}
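// Returns true if input `port_id` of a primitive (non-function) op is
// declared as a host-memory argument by the kernel definition.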
bool IsHostMemoryArg(const EagerOperation& op, const NodeDef* node_def,
const Device* op_device, const KernelDef* kernel_def,
const int port_id) {
if (op.is_function()) return false;
if (node_def == nullptr) return false;
if (kernel_def == nullptr || op_device == nullptr) return false;
const auto& host_memory_args = kernel_def->host_memory_arg();
const OpDef& op_def = OpRegistry::Global()->LookUp(op.Name())->op_def;
const int arg_id = OpPortIdToArgId(*node_def, op_def.input_arg(), port_id);
if (arg_id < 0) {
return false;
}
return std::find(host_memory_args.begin(), host_memory_args.end(),
op_def.input_arg(arg_id).name()) != host_memory_args.end();
}
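// Determines the device on which input `tensor_handle` should live for `op`:
// resource handles follow the device recorded inside the handle, while small
// integer tensors may be pinned to host memory based on full type
// information.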
Status GetDeviceForInput(const EagerOperation& op, const EagerContext& ctx,
const bool is_host_memory_arg,
TensorHandle* tensor_handle, Device** result) {
Device* cpu_device = ctx.HostCPU();
string device_name;
if (tensor_handle->Type() != TensorHandle::LOCAL) {
Device* device = tensor_handle->device();
device_name = device != nullptr ? device->name() : cpu_device->name();
*result = (device == nullptr ? cpu_device : device);
} else if (tensor_handle->dtype == DT_RESOURCE) {
const Tensor* tensor;
TF_RETURN_IF_ERROR(tensor_handle->Tensor(&tensor));
if (tensor->NumElements() == 0) {
return errors::InvalidArgument("Empty resource handle");
}
const ResourceHandle& handle = tensor->flat<ResourceHandle>()(0);
device_name = handle.device();
Device* input_device;
TF_RETURN_IF_ERROR(
ctx.FindDeviceFromName(device_name.c_str(), &input_device));
*result = input_device;
} else {
Device* device = tensor_handle->device();
const bool is_tpu = device != nullptr && device->device_type() == "TPU";
FullTypeDef ft = tensor_handle->FullType();
const bool use_host_memory =
is_tpu || (!op.is_function() && device != cpu_device &&
!is_host_memory_arg)
? MTypeFromDTypeIntsOnDevice(tensor_handle->dtype)
: MTypeFromDType(tensor_handle->dtype);
if (use_host_memory) {
Int32FulltypePass int32_ft("GetDeviceForInput");
TF_RETURN_IF_ERROR(int32_ft.Int32FullTypeForTensor(
tensor_handle->dtype, &ft, false));
VLOG(2)
<< "Full type information with TFT_SHAPE_TENSOR for int32 for eager '"
<< tensor_handle->DebugString();
}
TF_RETURN_IF_ERROR(
tensorflow::full_type::CheckMemoryType(use_host_memory, ft));
if (use_host_memory) {
*result = cpu_device;
} else {
if (!op.is_function() && device != cpu_device && !is_host_memory_arg) {
device = std::get<Device*>(op.Device());
}
*result = (device == nullptr ? cpu_device : device);
}
}
return absl::OkStatus();
}
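// Reads boolean attribute `attr_name`, first from the op's own attributes and
// then from the attributes of its function definition.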
Status GetFuncAttr(const EagerOperation* op, const EagerContext& ctx,
const char* attr_name, bool* value) {
Status status = op->Attrs().Get(attr_name, value);
if (status.ok()) {
VLOG(2) << "Caller explicitly specifies " << attr_name
<< (value ? "=true " : "=false, ") << op->DebugString();
return absl::OkStatus();
}
const FunctionDef* function_def = op->GetFunctionDef();
if (function_def == nullptr) {
return errors::NotFound("Failed to find function '", op->Name(), "'");
}
status = GetNodeAttr(AttrSlice(&function_def->attr()), attr_name, value);
if (status.ok()) {
VLOG(2) << "Function definition explicitly specifies " << attr_name
<< (value ? "=true" : "=false");
return absl::OkStatus();
}
return status;
}
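// Sets `has_tpu_replication` to true if the function body contains a
// TPUReplicateMetadata node.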
Status HasTPUReplication(const EagerOperation& op, const EagerContext& ctx,
bool* has_tpu_replication) {
*has_tpu_replication = false;
if (!op.is_function()) {
return absl::OkStatus();
}
const FunctionDef* function_def = op.GetFunctionDef();
if (function_def == nullptr) {
return errors::NotFound("Failed to find function '", op.Name(), "'");
}
for (const NodeDef& node : function_def->node_def()) {
if (node.op() == "TPUReplicateMetadata") {
*has_tpu_replication = true;
return absl::OkStatus();
}
}
return absl::OkStatus();
}
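// Decides whether the operation must be compiled with XLA, based on the
// kXlaMustCompileAttr attribute and the device type the op is placed on.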
Status MustCompileWithXLA(const EagerOperation* op, const EagerContext& ctx,
bool* compile_with_xla) {
#if defined(PLUGGABLE_DEVICE_SUPPORTED_MACOS)
*compile_with_xla = false;
#else
if (!op->is_function()) {
*compile_with_xla = false;
return absl::OkStatus();
}
if (op->eager_func_params().has_value() &&
op->eager_func_params().value().is_component_function) {
*compile_with_xla = false;
return absl::OkStatus();
}
Status status = GetFuncAttr(op, ctx, kXlaMustCompileAttr, compile_with_xla);
if (status.ok()) {
return absl::OkStatus();
}
if (op->GetDeviceParsedName().type == tensorflow::DEVICE_TPU ||
op->GetDeviceParsedName().type == "XLA_GPU" ||
op->GetDeviceParsedName().type == "XLA_CPU") {
VLOG(2) << "Compiling " << op->Name()
<< " with XLA because it is running on an XLA device "
<< op->GetDeviceParsedName().type;
*compile_with_xla = true;
} else {
*compile_with_xla = false;
}
#endif
return absl::OkStatus();
}
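// Breadth-first search over the function call tree for a
// StatefulPartitionedCall node with _XlaMustCompile=true; reports the device
// of the first such node found.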
Status HasNestedJitCompile(const EagerOperation& op, const EagerContext& ctx,
bool* has_jit_compile, string* device) {
*has_jit_compile = false;
const std::string kStatefulPartitionedCallOp = "StatefulPartitionedCall";
const std::string kXlaMustCompile = "_XlaMustCompile";
if (!op.is_function()) {
return absl::OkStatus();
}
std::queue<std::string> function_names;
function_names.push(op.Name());
const FunctionLibraryDefinition* func_lib_def = op.FuncLibDef();
while (!function_names.empty()) {
const string& function_name = function_names.front();
const FunctionDef* function_def = func_lib_def->Find(function_name);
if (function_def == nullptr) {
return errors::NotFound("Failed to find function '", function_name, "'");
}
function_names.pop();
for (const NodeDef& node : function_def->node_def()) {
if (node.op() == kStatefulPartitionedCallOp) {
auto attr = node.attr().find(kXlaMustCompile);
if (attr != node.attr().end() && attr->second.b() == true) {
*has_jit_compile = true;
auto device_attr = node.attr().find("device");
if (device_attr != node.attr().end()) {
*device = device_attr->second.s();
}
return absl::OkStatus();
} else {
auto attr = node.attr().find("f");
if (attr != node.attr().end() &&
!attr->second.func().name().empty()) {
function_names.push(attr->second.func().name());
}
}
}
}
}
return absl::OkStatus();
}
string CanonicalizeDeviceType(std::string_view device_type) {
string canonical_device_type = "Unknown";
if (device_type == "XLA_CPU" || device_type == tensorflow::DEVICE_CPU) {
canonical_device_type = tensorflow::DEVICE_CPU;
}
if (device_type == "XLA_GPU" || device_type == tensorflow::DEVICE_GPU) {
canonical_device_type = tensorflow::DEVICE_GPU;
}
if (device_type == tensorflow::DEVICE_TPU) {
canonical_device_type = tensorflow::DEVICE_TPU;
}
return canonical_device_type;
}
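// Updates the function-compilation metrics: TPU-replicated functions count as
// TPU/enabled, and other calls are keyed by canonical device type and by
// whether XLA compilation is enabled at the top level or in a nested call.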
Status UpdateCompileCounter(const EagerOperation* op, const EagerContext& ctx,
bool compile_with_xla, bool has_tpu_replication) {
if (has_tpu_replication) {
function_compile_counter->GetCell(tensorflow::DEVICE_TPU, kEnabled)
->IncrementBy(1);
return absl::OkStatus();
}
string device_type = CanonicalizeDeviceType(op->GetDeviceParsedName().type);
string compilation_option = kDisabled;
if (!compile_with_xla) {
bool nested_jit_compile;
string device;
TF_RETURN_IF_ERROR(
HasNestedJitCompile(*op, ctx, &nested_jit_compile, &device));
if (nested_jit_compile) {
if (!device.empty()) {
tsl::DeviceNameUtils::ParsedName device_parsed_name;
if (!DeviceNameUtils::ParseFullName(device, &device_parsed_name)) {
return errors::InvalidArgument("Malformed device specification: '",
device);
}
VLOG(1) << "Compilation Device Type: " << device_parsed_name.type;
function_compile_counter
->GetCell(CanonicalizeDeviceType(device_parsed_name.type), kEnabled)
->IncrementBy(1);
return absl::OkStatus();
} else {
compilation_option = kEnabled;
}
}
} else {
top_level_jit_compilation_counter->GetCell(device_type)->IncrementBy(1);
}
if (device_type == tensorflow::DEVICE_TPU || compile_with_xla) {
compilation_option = kEnabled;
}
VLOG(1) << "Compilation Device Type: " << device_type;
function_compile_counter->GetCell(device_type, compilation_option)
->IncrementBy(1);
return absl::OkStatus();
}
using ProtoArgListType = protobuf::RepeatedPtrField<OpDef_ArgDef>;
string EscapeOrigName(const string& orig_name) {
return absl::StrReplaceAll(orig_name, {{"_", "__"}});
}
string GetFlatName(const string orig_name, int index) {
return absl::StrCat(EscapeOrigName(orig_name), "_", index);
}
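// Builds a unique name for the function wrapping a primitive op, encoding the
// escaped op name, the lengths of variadic inputs/outputs, and the device.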
Status BuildWrappedOpName(EagerOperation* op, const OpDef& opdef,
const AbstractOpAttrs* op_attrs, string* name) {
string fname = absl::StrCat("__wrapped__", EscapeOrigName(op->Name()));
auto FillAttrToLen = [op_attrs, op](
const ProtoArgListType& args,
absl::btree_map<string, int>* attr_to_len) {
for (const auto& arg : args) {
if (!arg.type_list_attr().empty()) {
gtl::InlinedVector<DataType, 4> type_list;
TF_RETURN_IF_ERROR(
op_attrs->GetTypeList(arg.type_list_attr(), &type_list));
(*attr_to_len)[arg.type_list_attr()] = type_list.size();
} else if (!arg.number_attr().empty()) {
int64_t number_attr;
if (!op_attrs->GetInt(arg.number_attr(), &number_attr)) {
return errors::Internal("Unable to read attr ", arg.number_attr(),
" for op ", op->Name());
}
(*attr_to_len)[arg.number_attr()] = number_attr;
}
}
return absl::OkStatus();
};
absl::btree_map<string, int> attr_to_len;
TF_RETURN_IF_ERROR(FillAttrToLen(opdef.input_arg(), &attr_to_len));
TF_RETURN_IF_ERROR(FillAttrToLen(opdef.output_arg(), &attr_to_len));
for (auto& name_len : attr_to_len) {
absl::StrAppend(&fname, "_", name_len.first, "_", name_len.second);
}
absl::StrAppend(&fname, "_device_", op->DeviceName());
*name = fname;
return absl::OkStatus();
}
Status BuildWrappedOpSignature(EagerOperation* op, const OpDef& opdef,
co | #include "tensorflow/core/common_runtime/eager/execute.h"
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(ExecuteTest, EagerOperationAsFunction) {
StaticDeviceMgr device_mgr(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_EXPLICIT,
false, &device_mgr, false, nullptr, nullptr);
ctx->SetRunEagerOpAsFunction(true);
auto op = std::make_unique<EagerOperation>(ctx);
TF_ASSERT_OK(op->Reset(
"Mul",
"/job:localhost/replica:0/task:0/device:CPU:0"));
Tensor input1_tensor = test::AsScalar<int64_t>(3);
auto input1 = core::RefCountPtr<ImmediateExecutionTensorHandle>(
ctx->CreateLocalHandleFromTFTensor(input1_tensor,
ctx->HostCPUName().c_str()));
TF_ASSERT_OK(op->AddInput(input1.get()));
Tensor input2_tensor = test::AsScalar<int64_t>(2);
auto input2 = core::RefCountPtr<ImmediateExecutionTensorHandle>(
ctx->CreateLocalHandleFromTFTensor(input2_tensor,
ctx->HostCPUName().c_str()));
TF_ASSERT_OK(op->AddInput(input2.get()));
std::vector<TensorHandle*> retvals(1);
int num_retvals = retvals.size();
TF_ASSERT_OK(EagerExecute(op.get(), retvals.data(), &num_retvals));
retvals[0]->Unref();
retvals[0] = nullptr;
ctx->Unref();
}
TEST(ExecuteTest, SimpleFunction) {
StaticDeviceMgr device_mgr(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_EXPLICIT,
false, &device_mgr, false, nullptr, nullptr);
const Tensor kTwo = test::AsScalar<int64_t>(2);
const string function_name = "XTimesTwo";
const FunctionDef x_times_two = FunctionDefHelper::Define(
function_name,
{"x: int64"},
{"y: int64"},
{},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_INT64}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_INT64}}},
});
TF_ASSERT_OK(ctx->AddFunctionDef(x_times_two));
auto op = std::make_unique<EagerOperation>(ctx);
TF_ASSERT_OK(op->Reset(
function_name.c_str(),
"/job:localhost/replica:0/task:0/device:CPU:0"));
Tensor input_tensor = test::AsScalar<int64_t>(3);
auto input = core::RefCountPtr<ImmediateExecutionTensorHandle>(
ctx->CreateLocalHandleFromTFTensor(input_tensor,
ctx->HostCPUName().c_str()));
TF_ASSERT_OK(op->AddInput(input.get()));
monitoring::testing::CellReader<int64_t> counter_reader(
"/tensorflow/core/tf_function_compile");
std::vector<TensorHandle*> retvals(1);
int num_retvals = retvals.size();
TF_ASSERT_OK(EagerExecute(op.get(), retvals.data(), &num_retvals));
EXPECT_EQ(counter_reader.Delta("CPU", "disabled"), 1);
retvals[0]->Unref();
retvals[0] = nullptr;
ctx->Unref();
}
TEST(ExecuteTest, SimpleFunctionInt32BadFullType) {
StaticDeviceMgr device_mgr(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_EXPLICIT,
false, &device_mgr, false, nullptr,
nullptr, nullptr,
true);
const Tensor kTwo = test::AsScalar<int32_t>(2);
const string function_name = "XTimesTwo";
const FunctionDef x_times_two = FunctionDefHelper::Define(
function_name,
{"x: int32"},
{"y: int32"},
{},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT32}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT32}, {"DstT", DT_INT32}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_INT32}}},
});
TF_ASSERT_OK(ctx->AddFunctionDef(x_times_two));
auto op = std::make_unique<EagerOperation>(ctx);
TF_ASSERT_OK(op->Reset(
function_name.c_str(),
"/job:localhost/replica:0/task:0/device:CPU:0"));
Tensor input_tensor = test::AsScalar<int32_t>(3);
ASSERT_NE(ctx->HostCPUName().c_str(), nullptr);
Device* d = nullptr;
TF_ASSERT_OK(ctx->FindDeviceFromName(ctx->HostCPUName().c_str(), &d));
auto input = core::RefCountPtr<TensorHandle>(
TensorHandle::CreateLocalHandle(std::move(input_tensor), d,
nullptr, ctx));
TF_ASSERT_OK(op->AddInput(input.get()));
FullTypeDef ft;
ft.set_type_id(TFT_TENSOR);
input.get()->SetFullType(ft);
std::vector<TensorHandle*> retvals(1);
int num_retvals = retvals.size();
Status status = EagerExecute(op.get(), retvals.data(), &num_retvals);
ASSERT_TRUE(errors::IsInvalidArgument(status)) << "Actual status: " << status;
EXPECT_TRUE(
absl::StrContains(status.message(), "TFT_TENSOR has 0 args instead of 1"))
<< "Actual: " << status.message();
ASSERT_EQ(retvals[0], nullptr);
ctx->Unref();
}
TEST(ExecuteTest, CompiledFunction) {
StaticDeviceMgr device_mgr(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_EXPLICIT,
false, &device_mgr, false, nullptr, nullptr);
const Tensor kTwo = test::AsScalar<int64_t>(2);
const string function_name = "XTimesTwo";
const FunctionDef x_times_two = FunctionDefHelper::Define(
function_name,
{"x: int64"},
{"y: int64"},
{},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_INT64}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_INT64}}},
});
TF_ASSERT_OK(ctx->AddFunctionDef(x_times_two));
auto op = std::make_unique<EagerOperation>(ctx);
TF_ASSERT_OK(op->Reset(
function_name.c_str(),
"/job:localhost/replica:0/task:0/device:CPU:0"));
TF_ASSERT_OK(op->SetAttrBool("_XlaMustCompile", true));
Tensor input_tensor = test::AsScalar<int64_t>(3);
auto input = core::RefCountPtr<ImmediateExecutionTensorHandle>(
ctx->CreateLocalHandleFromTFTensor(input_tensor,
ctx->HostCPUName().c_str()));
TF_ASSERT_OK(op->AddInput(input.get()));
monitoring::testing::CellReader<int64_t> counter_reader(
"/tensorflow/core/tf_function_compile");
monitoring::testing::CellReader<int64_t> top_level_counter(
"/tensorflow/core/tf_top_level_jit_compilation");
std::vector<TensorHandle*> retvals(1);
int num_retvals = retvals.size();
TF_ASSERT_OK(EagerExecute(op.get(), retvals.data(), &num_retvals));
EXPECT_EQ(counter_reader.Delta("CPU", "enabled"), 1);
EXPECT_EQ(top_level_counter.Delta("CPU"), 1);
retvals[0]->Unref();
retvals[0] = nullptr;
ctx->Unref();
}
TEST(ExecuteTest, NestedCompiledFunction) {
StaticDeviceMgr device_mgr(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_EXPLICIT,
false, &device_mgr, false, nullptr, nullptr);
const Tensor kTwo = test::AsScalar<int64_t>(2);
const string function_name = "XTimesTwo";
const FunctionDef x_times_two = FunctionDefHelper::Define(
function_name,
{"x: int64"},
{"y: int64"},
{},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_INT64}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_INT64}}},
});
TF_ASSERT_OK(ctx->AddFunctionDef(x_times_two));
const string call_function_name = "FunctionCall";
const FunctionDef function_call = FunctionDefHelper::Define(
call_function_name,
{"x: int64"},
{"y: int64"},
{},
{
{{"y"},
"StatefulPartitionedCall",
{"x"},
{{"_XlaMustCompile", true},
{"Tin", DataTypeSlice({DT_INT64})},
{"Tout", DataTypeSlice({DT_INT64})},
{"f", tensorflow::FunctionDefHelper::FunctionRef(
"XTimesTwo", {{"T", DT_INT64}})}}},
});
TF_ASSERT_OK(ctx->AddFunctionDef(function_call));
auto op = std::make_unique<EagerOperation>(ctx);
TF_ASSERT_OK(op->Reset(
call_function_name.c_str(),
"/job:localhost/replica:0/task:0/device:CPU:0"));
Tensor input_tensor = test::AsScalar<int64_t>(3);
auto input = core::RefCountPtr<ImmediateExecutionTensorHandle>(
ctx->CreateLocalHandleFromTFTensor(input_tensor,
ctx->HostCPUName().c_str()));
TF_ASSERT_OK(op->AddInput(input.get()));
monitoring::testing::CellReader<int64_t> counter_reader(
"/tensorflow/core/tf_function_compile");
monitoring::testing::CellReader<int64_t> top_level_counter(
"/tensorflow/core/tf_top_level_jit_compilation");
std::vector<TensorHandle*> retvals(1);
int num_retvals = retvals.size();
TF_ASSERT_OK(EagerExecute(op.get(), retvals.data(), &num_retvals));
EXPECT_EQ(counter_reader.Delta("CPU", "enabled"), 1);
EXPECT_EQ(counter_reader.Delta("CPU", "disabled"), 0);
EXPECT_EQ(top_level_counter.Delta("CPU"), 0);
retvals[0]->Unref();
retvals[0] = nullptr;
ctx->Unref();
}
TEST(ExecuteTest, MultipleNestedCompiledFunction) {
StaticDeviceMgr device_mgr(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_EXPLICIT,
false, &device_mgr, false, nullptr, nullptr);
const Tensor kTwo = test::AsScalar<int64_t>(2);
const string function_name = "XTimesTwo";
const FunctionDef x_times_two = FunctionDefHelper::Define(
function_name,
{"x: int64"},
{"y: int64"},
{},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_INT64}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_INT64}}},
});
TF_ASSERT_OK(ctx->AddFunctionDef(x_times_two));
const string call_function_name = "FunctionCall";
FunctionDef function_call = FunctionDefHelper::Define(
call_function_name,
{"x: int64"},
{"y: int64"},
{},
{
{{"y"},
"StatefulPartitionedCall",
{"x"},
{{"_XlaMustCompile", true},
{"_device", "/job:localhost/replica:0/task:0/device:CPU:0"},
{"Tin", DataTypeSlice({DT_INT64})},
{"Tout", DataTypeSlice({DT_INT64})},
{"f", tensorflow::FunctionDefHelper::FunctionRef(
"XTimesTwo", {{"T", DT_INT64}})}}},
});
for (auto& node_def : *function_call.mutable_node_def()) {
if (node_def.op() == "StatefulPartitionedCall") {
node_def.set_device("/job:localhost/replica:0/task:0/device:CPU:0");
}
}
TF_ASSERT_OK(ctx->AddFunctionDef(function_call));
const string call_function_name2 = "FunctionCall2";
const FunctionDef function_call2 = FunctionDefHelper::Define(
call_function_name2,
{"x: int64"},
{"y: int64"},
{},
{
{{"y"},
"StatefulPartitionedCall",
{"x"},
{{"Tin", DataTypeSlice({DT_INT64})},
{"Tout", DataTypeSlice({DT_INT64})},
{"f", tensorflow::FunctionDefHelper::FunctionRef(
"FunctionCall", {{"T", DT_INT64}})}}},
});
TF_ASSERT_OK(ctx->AddFunctionDef(function_call2));
auto op = std::make_unique<EagerOperation>(ctx);
TF_ASSERT_OK(op->Reset(
call_function_name2.c_str(),
"/job:localhost/replica:0/task:0/device:CPU:0"));
Tensor input_tensor = test::AsScalar<int64_t>(3);
auto input = core::RefCountPtr<ImmediateExecutionTensorHandle>(
ctx->CreateLocalHandleFromTFTensor(input_tensor,
ctx->HostCPUName().c_str()));
TF_ASSERT_OK(op->AddInput(input.get()));
monitoring::testing::CellReader<int64_t> counter_reader(
"/tensorflow/core/tf_function_compile");
monitoring::testing::CellReader<int64_t> top_level_counter(
"/tensorflow/core/tf_top_level_jit_compilation");
std::vector<TensorHandle*> retvals(1);
int num_retvals = retvals.size();
TF_ASSERT_OK(EagerExecute(op.get(), retvals.data(), &num_retvals));
EXPECT_EQ(counter_reader.Delta("CPU", "enabled"), 1);
EXPECT_EQ(counter_reader.Delta("CPU", "disabled"), 0);
EXPECT_EQ(top_level_counter.Delta("CPU"), 0);
retvals[0]->Unref();
retvals[0] = nullptr;
ctx->Unref();
}
}
} |
1,313 | cpp | tensorflow/tensorflow | context | tensorflow/core/tfrt/mlrt/interpreter/context.cc | tensorflow/core/tfrt/mlrt/interpreter/context_test.cc | #ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_CONTEXT_H_
#define TENSORFLOW_TSL_PLATFORM_DEFAULT_CONTEXT_H_
namespace tsl {
class Context {
public:
Context() {}
Context(const ContextKind kind) {}
bool operator==(const Context& other) const { return true; }
};
class WithContext {
public:
explicit WithContext(const Context& x) {}
~WithContext() {}
};
}
#endif
#include "tensorflow/core/common_runtime/eager/context.h"
#include <algorithm>
#include <functional>
#include <memory>
#include <thread>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/c/eager/immediate_execution_context.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/common_runtime/stats_publisher_interface.h"
#include "tensorflow/core/common_runtime/eager/rendezvous_cache.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/nccl/collective_communicator.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/platform.h"
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/c/tf_tensor_internal.h"
#include "xla/tsl/util/env_var.h"
#include "tensorflow/core/common_runtime/collective_executor_mgr.h"
#include "tensorflow/core/common_runtime/collective_param_resolver_local.h"
#include "tensorflow/core/common_runtime/colocation_graph.h"
#include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/eager/small_constants_optimizer.h"
#include "tensorflow/core/common_runtime/eager/summary_optimizer.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tsl/platform/refcount.h"
#include "tsl/platform/statusor.h"
#if !defined(IS_MOBILE_PLATFORM)
#include "tensorflow/core/distributed_runtime/cluster_function_library_runtime.h"
#include "tensorflow/core/distributed_runtime/collective_param_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.h"
#include "tensorflow/core/distributed_runtime/session_mgr.h"
#endif
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/lib/monitoring/gauge.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
namespace {
EagerContext* global_c_eager_context = nullptr;
}
void SetCEagerContext(EagerContext* ctx) { global_c_eager_context = ctx; }
EagerContext* GetCEagerContext() { return global_c_eager_context; }
namespace {
bool ReadBoolFromEnvVar(StringPiece env_var_name, bool default_val) {
bool val;
if (tensorflow::ReadBoolFromEnvVar(env_var_name, default_val, &val).ok()) {
return val;
}
return default_val;
}
auto* eager_context_created =
monitoring::Gauge<bool, 0>::New("/tensorflow/core/eager_context_created",
"True if an eager context was created.");
}
const int64_t EagerContext::kGlobalRendezvousId = -1;
bool SkipRemoteHandleWaitReady() {
static bool skip_remote_handle_wait_ready = []() {
bool result;
TF_CHECK_OK(tsl::ReadBoolFromEnvVar("TF_REMOTE_HANDLE_SKIP_WAIT_FOR_READY",
false, &result));
return result;
}();
return skip_remote_handle_wait_ready;
}
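// Returns the cached local rendezvous for `step_id`, creating one on first
// use.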
tsl::core::RefCountPtr<IntraProcessRendezvous>
EagerContext::LocalRendezvousCache::FindOrCreate(int64_t step_id,
DeviceMgr* device_mgr) {
return cache_->FindOrCreate(step_id, [&]() {
return tsl::core::RefCountPtr<IntraProcessRendezvous>(
new IntraProcessRendezvous(device_mgr));
});
}
EagerContext::EagerContext(
const SessionOptions& opts,
ContextDevicePlacementPolicy default_device_placement_policy, bool async,
DeviceMgr* device_mgr, bool device_mgr_owned,
tsl::core::RefCountPtr<Rendezvous> rendezvous,
DistributedFunctionLibraryRuntime* cluster_flr,
CollectiveExecutorMgrInterface* collective_executor_mgr,
bool run_eager_op_as_function, bool jit_compile_rewrite)
: ImmediateExecutionContext(kEager),
opts_(opts),
default_device_placement_policy_(default_device_placement_policy),
local_device_manager_(device_mgr, device_mgr_owned),
host_cpu_device_(device_mgr->HostCPU()),
rendezvous_(std::move(rendezvous)),
thread_pool_(NewThreadPoolFromSessionOptions(opts)),
cluster_flr_(cluster_flr),
log_device_placement_(opts.config.log_device_placement()),
allow_soft_placement_(opts.config.allow_soft_placement()),
num_active_steps_(0),
step_container_(std::make_unique<ScopedStepContainer>(
0, [this](const string& name) { ClearResourceContainer(name); })),
default_executor_(async,
!opts.config.experimental()
.disable_eager_executor_streaming_enqueue()),
log_memory_(LogMemory::IsEnabled()),
env_(opts.env),
collective_executor_mgr_(collective_executor_mgr, false),
use_send_tensor_rpc_(false),
pin_small_ops_to_cpu_(ReadBoolFromEnvVar(
"TF_EAGER_ENABLE_SMALL_TENSOR_CPU_PINNING", false)),
run_eager_op_as_function_(run_eager_op_as_function),
jit_compile_rewrite_(jit_compile_rewrite),
register_abstract_functions_local_only_(ReadBoolFromEnvVar(
"TF_EAGER_REGISTER_ABSTRACT_FUNCTIONS_LOCAL_ONLY", false)) {
ResetPFLR(device_mgr, opts.env, &opts.config, TF_GRAPH_DEF_VERSION,
&func_lib_def_, opts.config.graph_options().optimizer_options(),
thread_pool_.get(), cluster_flr);
eager_context_created->GetCell()->Set(true);
InitPrioritizedDeviceTypeList();
runner_ = [this](std::function<void()> closure) {
this->thread_pool_->Schedule(std::move(closure));
};
run_metadata_ = std::make_unique<RunMetadata>();
#if !defined(IS_MOBILE_PLATFORM)
context_id_ = kInvalidContextId;
context_view_id_ = 0;
#endif
if (collective_executor_mgr_.Get() == nullptr) {
collective_executor_mgr_.Reset(CreateProdLocalCollectiveExecutorMgr(
opts.config, local_device_mgr(),
MaybeCreateNcclCommunicator(opts.config)));
}
ResetGlobalRendezvousForFunction();
}
AbstractTensorInterface* EagerContext::CreateInt64Scalar(int64_t value) {
return new TensorInterface(Tensor(value));
}
AbstractTensorInterface* EagerContext::CreateUint64Scalar(uint64 value) {
return new TensorInterface(Tensor(value));
}
AbstractTensorInterface* EagerContext::CreateInt32Scalar(int32_t value) {
return new TensorInterface(Tensor(value));
}
AbstractTensorInterface* EagerContext::CreateFloatScalar(float value) {
return new TensorInterface(Tensor(value));
}
AbstractTensorInterface* EagerContext::CreateDoubleScalar(double value) {
return new TensorInterface(Tensor(value));
}
AbstractTensorInterface* EagerContext::CreateHalfScalar(Eigen::half value) {
return new TensorInterface(Tensor(value));
}
AbstractTensorInterface* EagerContext::CreateStringScalar(tstring value) {
return new TensorInterface(Tensor(value));
}
AbstractTensorInterface* EagerContext::CreateComplex128Scalar(
complex128 value) {
return new TensorInterface(Tensor(value));
}
AbstractTensorInterface* EagerContext::CreateBoolScalar(bool value) {
return new TensorInterface(Tensor(value));
}
AbstractTensorInterface* EagerContext::CreateTensor(
DataType dtype, absl::Span<const int64_t> dim_sizes) {
return new TensorInterface(Tensor(dtype, TensorShape(dim_sizes)));
}
AbstractTensorInterface* EagerContext::CreateTensor(
DataType dtype, const int64_t* dims, int num_dims, void* data, size_t len,
MemoryReleaser memory_releaser, void* memory_releaser_arg) {
TF_Tensor* tensor_wrapper =
TF_NewTensor(static_cast<TF_DataType>(dtype), dims, num_dims, data, len,
memory_releaser, memory_releaser_arg);
AbstractTensorInterface* result = nullptr;
std::swap(result, tensor_wrapper->tensor);
TF_DeleteTensor(tensor_wrapper);
return result;
}
void EagerContext::ResetPFLR(const DeviceMgr* device_mgr, Env* env,
const ConfigProto* config, int graph_def_version,
const FunctionLibraryDefinition* lib_def,
const OptimizerOptions& optimizer_options,
thread::ThreadPool* thread_pool,
DistributedFunctionLibraryRuntime* cluster_flr) {
Rendezvous::Factory rendezvous_factory = CreateRendezvousFactory();
const tensorflow::SessionMetadata* session_metadata = nullptr;
if (opts_.config.experimental().has_session_metadata()) {
session_metadata = &opts_.config.experimental().session_metadata();
}
pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>(
device_mgr, env, config, graph_def_version, lib_def, optimizer_options,
thread_pool, cluster_flr, session_metadata, std::move(rendezvous_factory),
StatsPublisherInterface::GetStatsPublisherFactory());
}
void EagerContext::InitPrioritizedDeviceTypeList() {
DeviceSet ds;
for (Device* d : local_device_mgr()->ListDevices()) {
ds.AddDevice(d);
}
auto remote_device_manager = remote_device_mgr();
if (remote_device_manager != nullptr) {
for (Device* d : remote_device_manager->ListDevices()) {
ds.AddDevice(d);
}
}
mutex_lock l(device_type_list_mu_);
prioritized_device_type_list_ =
std::make_shared<std::vector<DeviceType>>(ds.PrioritizedDeviceTypeList());
}
namespace {
std::vector<string> DevicesToString(const PrioritizedDeviceVector& devices) {
std::vector<string> v;
v.reserve(devices.size());
for (const auto& p : devices) {
v.push_back(p.first->name());
}
return v;
}
std::vector<string> DeviceTypesToString(
const PrioritizedDeviceTypeVector& types) {
std::vector<string> v;
v.reserve(types.size());
for (const auto& p : types) {
v.push_back(p.first.type_string());
}
return v;
}
Device* SelectBestMatchingDevice(const DeviceNameUtils::ParsedName& pattern,
const PrioritizedDeviceVector& existing,
const PrioritizedDeviceTypeVector& supported) {
for (const std::pair<DeviceType, int32>& prioritized_type : supported) {
for (const std::pair<Device*, int32>& prioritized_device : existing) {
Device* dev = prioritized_device.first;
if (DeviceType(dev->attributes().device_type()) ==
prioritized_type.first &&
DeviceNameUtils::IsCompleteSpecification(pattern,
dev->parsed_name())) {
return dev;
}
}
}
return nullptr;
}
}
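// Selects a device for `ndef`: first an exact match of the preferred device
// against devices with a registered kernel, then, if soft placement is
// enabled, a match that ignores the requested device type and id.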
Status EagerContext::SelectDevice(DeviceNameUtils::ParsedName preferred,
const NodeDef& ndef, Device** out) const {
DCHECK(out != nullptr);
PrioritizedDeviceTypeVector supported_devs;
auto device_type_list = prioritized_device_type_list();
TF_RETURN_IF_ERROR(SupportedDeviceTypesForNode(
*device_type_list, ndef, &supported_devs, &HostCPU()->parsed_name()));
if (supported_devs.empty()) {
return errors::NotFound("Could not find device for node: ",
errors::FormatNodeNameForError(ndef.name()), " = ",
ndef.op(), "[", SummarizeAttrs(ndef), "]",
"\nAll kernels registered for op ", ndef.op(),
":\n", KernelsRegisteredForOp(ndef.op()));
}
const auto pflr_device_set = pflr()->device_set();
const PrioritizedDeviceVector& existing =
pflr_device_set->prioritized_devices();
*out = SelectBestMatchingDevice(preferred, existing, supported_devs);
if (*out != nullptr) {
return absl::OkStatus();
}
if (AllowSoftPlacement()) {
DeviceNameUtils::ParsedName soft_device_name = preferred;
soft_device_name.type.clear();
soft_device_name.has_type = false;
soft_device_name.has_id = false;
*out = SelectBestMatchingDevice(soft_device_name, existing, supported_devs);
if (*out != nullptr) {
return absl::OkStatus();
}
}
if (DeviceNameUtils::HasSomeDetails(preferred)) {
return errors::InvalidArgument(
"Could not satisfy device specification '", preferred,
"'. enable_soft_placement=", AllowSoftPlacement(),
". Supported device types [",
absl::StrJoin(DeviceTypesToString(supported_devs), ", "),
"]. All available devices [",
absl::StrJoin(DevicesToString(existing), ", "), "].");
}
return errors::InvalidArgument(
"No supported device found in available devices [",
absl::StrJoin(DevicesToString(existing), ", "),
"]. enable_soft_placement=", AllowSoftPlacement(),
". Supported devices types [",
absl::StrJoin(DeviceTypesToString(supported_devs), ", "), "].");
}
void EagerContext::ResetClusterFLR(
DistributedFunctionLibraryRuntime* cluster_flr) {
cluster_flr_.Reset(cluster_flr, true);
}
void EagerContext::UpdateClusterFLRAndInitDevices(
DistributedFunctionLibraryRuntime* cluster_flr) {
ResetClusterFLR(cluster_flr);
const ConfigProto* config = pflr_ ? pflr_->config() : nullptr;
ResetPFLR(
local_device_manager_.Get(), env_, config,
TF_GRAPH_DEF_VERSION, &func_lib_def_,
config ? config->graph_options().optimizer_options() : OptimizerOptions(),
thread_pool_.get(), cluster_flr_.Get());
}
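// Returns the executor registered for the calling thread, falling back to the
// context's default executor.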
EagerExecutor& EagerContext::Executor() {
tf_shared_lock l(executor_map_mu_);
return *gtl::FindWithDefault(thread_local_executor_,
std::this_thread::get_id(), &default_executor_);
}
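// Registers `executor` as the calling thread's executor and installs a
// cleanup callback that unregisters it and resets the global function
// rendezvous if it has entered a bad state.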
void EagerContext::SetExecutorForThread(EagerExecutor* executor) {
tensorflow::mutex_lock l(executor_map_mu_);
if (executor == &default_executor_) {
thread_local_executor_.erase(std::this_thread::get_id());
} else {
auto thread_id = std::this_thread::get_id();
thread_local_executor_[thread_id] = executor;
auto& executors_with_cleanups = has_cleanup_[thread_id];
if (executors_with_cleanups.find(executor) ==
executors_with_cleanups.end()) {
executors_with_cleanups.insert(executor);
std::function<void()> cleanup([this, thread_id, executor]() {
{
tensorflow::mutex_lock l(executor_map_mu_);
auto existing = thread_local_executor_.find(thread_id);
if (existing != thread_local_executor_.end() &&
existing->second == executor) {
thread_local_executor_.erase(thread_id);
}
has_cleanup_[thread_id].erase(executor);
if (!GetGlobalRendezvousForFunctionLocalRendezvousStatus().ok()) {
VLOG(6) << "global_rendezvous_for_functions_ is in bad state. "
"Resetting.";
ResetGlobalRendezvousForFunction();
}
}
});
executor->AddCleanup(reinterpret_cast<intptr_t>(this),
std::move(cleanup));
}
}
}
void EagerContext::ClearCachesAndThreadExecutors() {
std::unordered_map<std::thread::id, EagerExecutor*> executors_copy;
{
mutex_lock l(executor_map_mu_);
executors_copy = thread_local_executor_;
}
for (const auto& entry : executors_copy) {
entry.second->WaitForAllPendingNodes().IgnoreError();
}
ClearCachesAndDefaultExecutor();
}
void EagerContext::ClearCachesAndDefaultExecutor() {
{
mutex_lock ml(cache_mu_);
default_executor_.WaitForAllPendingNodes().IgnoreError();
kernel_cache_.clear();
for (auto& entry : registered_functions_) {
entry.second->cached_kernel_keys->clear();
}
}
{
mutex_lock dl(device_cache_mu_);
device_cache_.clear();
}
{
mutex_lock ml(metadata_mu_);
step_container_ = std::make_unique<ScopedStepContainer>(
0, [this](const string& name) { ClearResourceContainer(name); });
}
}
void EagerContext::SetThreadLocalDevicePlacementPolicy(
ContextDevicePlacementPolicy policy) {
mutex_lock ml(policy_map_mu_);
VLOG(6) << "Setting device placement policy to: " << policy;
device_placement_policy_[std::this_thread::get_id()] = policy;
}
ContextDevicePlacementPolicy EagerContext::GetDevicePlacementPolicy() const {
tf_shared_lock l(policy_map_mu_);
auto policy_map_it =
device_placement_policy_.find(std::this_thread::get_id());
if (policy_map_it != device_placement_policy_.end()) {
VLOG(6) << "ContextDevicePlacementPolicy: " << policy_map_it->second;
return policy_map_it->second;
}
VLOG(6) << "ContextDevicePlacementPolicy not found; returning default.";
return default_device_placement_policy_;
}
#if !defined(IS_MOBILE_PLATFORM)
std::vector<string> EagerContext::GetRemoteContexts() {
tf_shared_lock l(remote_state_mu_);
return remote_contexts_;
}
bool EagerContext::IsRemoteContextsEmpty() {
tf_shared_lock l(remote_state_mu_);
return remote_contexts_.empty();
}
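// Invalidates the master's context id and view id under the lock, then closes
// the snapshotted remote contexts outside of it so the RPCs do not block
// other accesses to the remote state.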
void EagerContext::CloseAndClearAllRemoteContexts() {
uint64 context_id;
uint64 context_view_id;
std::vector<string> remote_contexts_copy;
{
mutex_lock l(remote_state_mu_);
if (!is_master_) return;
context_id = context_id_;
context_view_id = context_view_id_;
context_id_ = kInvalidContextId;
context_view_id_ = 0;
remote_contexts_copy = remote_contexts_;
remote_contexts_.clear();
}
CloseRemoteContexts(remote_contexts_copy, context_id, context_view_id);
}
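// Sends a CloseContext RPC to every worker in `remote_contexts` and blocks
// until all responses (or errors) have been received.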
void EagerContext::CloseRemoteContexts(
const std::vector<string>& remote_contexts, uint64 context_id,
uint64 context_view_id) {
eager::CloseContextRequest request;
request.set_context_id(context_id);
request.set_context_view_id(context_view_id);
std::vector<eager::CloseContextResponse> responses(remote_contexts.size());
BlockingCounter counter(static_cast<int>(remote_contexts.size()));
int i = 0;
for (const auto& worker : remote_contexts) {
core::RefCountPtr<eager::EagerClient> client;
    Status s = GetClient(worker, &client);
    if (!s.ok()) {
      // Without a client the RPC cannot be issued; log the error and still
      // decrement the counter so Wait() below does not hang.
      LOG(ERROR) << "Unable to get an eager client for worker: " << worker
                 << " due to " << s.message();
      counter.DecrementCount();
      i++;
      continue;
    }
    client->CloseContextAsync(
&request, &responses[i],
[&worker, &counter, context_id](const Status& s) {
if (!s.ok()) {
LOG(ERROR) << "Unable to close remote context with ID "
<< context_id << " for worker: " << worker << " due to "
<< s.message();
}
counter.DecrementCount();
});
i++;
}
counter.Wait();
}
#endif
void EagerContext::WaitForAndCloseRemoteContexts() {
ClearCachesAndThreadExecutors();
#if !defined(IS_MOBILE_PLATFORM)
{
mutex_lock l(keep_alive_thread_shutdown_mu_);
shutting_down_ = true;
keep_alive_thread_cv_.notify_all();
}
keep_alive_thread_.reset();
if (!IsRemoteContextsEmpty()) {
CloseAndClearAllRemoteContexts();
}
{
mutex_lock l(remote_state_mu_);
default_executor_.ShutDown().IgnoreError();
std::unordered_map<std::thread::id, EagerExecutor*> executors_copy;
{
mutex_lock l(executor_map_mu_);
executors_copy = thread_local_executor_;
}
for (const auto& it : executors_copy) {
it.second->ShutDown().IgnoreError();
}
remote_eager_workers_ = nullptr;
}
#endif
}
EagerContext::~EagerContext() {
WaitForAndCloseRemoteContexts();
custom_device_op_handler_.Clear();
ClearCachesAndThreadExecutors();
std::unordered_map<std::thread::id, EagerExecutor*> executors_copy;
{
mutex_lock l(executor_map_mu_);
executors_copy = thread_local_executor_;
}
for (const auto& entry : executors_copy) {
entry.second->RemoveCleanups(reinterpret_cast<intptr_t>(this));
}
for (auto& entry : registered_functions_) {
while (!entry.second->Unref()) {
}
}
registered_functions_.clear();
#if !defined(IS_MOBILE_PLATFORM)
if (server_) {
LOG(WARNING) << "Unable to destroy server_ object, so releasing instead. "
"Servers don't support clean shutdown.";
if (server_->worker_env()->session_mgr != nullptr) {
Status s = server_->StopCoordinationService();
if (!s.ok()) {
LOG(ERROR) << "Failed to stop coordination service: " << s;
}
}
server_.release();
}
{
mutex_lock l(keep_alive_thread_shutdown_mu_);
shutting_down_ = true;
keep_alive_thread_cv_.notify_all();
}
keep_alive_thread_.reset();
if (!remote_contexts_.empty()) {
CloseAndClearAllRemoteContexts();
}
if (worker_env_ != nullptr && worker_env_->rendezvous_mgr != nullptr) {
worker_env_->rendezvous_mgr->CleanupAll();
}
#endif
if (resource_deallocator_ != nullptr) {
resource_deallocator_();
}
}
bool EagerContext::FindFunctionByName(const string& name) const {
return func_lib_def_.Find(name) != nullptr;
}
Status EagerContext::FindFunctionOpData(
const string& name, const tensorflow::OpRegistrationData** op_data) {
return func_lib_def_.LookUp(name, op_data);
}
const FunctionDef* EagerContext::FindFunctionDef(const string& name) const {
return func_lib_def_.Find(name);
}
core::RefCountPtr<FunctionRecord> EagerContext::FindRecord(
const string& name) const {
return func_lib_def_.FindRecord(name);
}
std::unique_ptr<RunMetadata> EagerContext::ExportRunMetadata() {
mutex_lock ml(metadata_mu_);
auto result = std::make_unique<RunMetadata>();
run_metadata_.swap(result);
return result;
}
ImmediateExecutionTensorHandle* EagerContext::TFTensorHandleFromInterface(
ImmediateExecutionTensorHandle* handle) {
return handle;
}
Status EagerContext::RegisterFunction(AbstractFunction* f) {
TF_ASSIGN_OR_RETURN(core::RefCountPtr<FunctionRecord> record,
f->GetFunctionRecord());
if (!record) {
return absl::InvalidArgumentError("GetFunctionRecord returned nullptr.");
}
return AddFunctionRecord(std::move(record), FunctionDefLibrary(),
register_abstract_functions_local_only_);
}
bool EagerContext::UsesTFRT() { return false; }
bool EagerContext::RunEagerOpAsFunction() const {
VLOG(3) << "RunEagerOpAsFunction: " << run_eager_op_as_function_;
return run_eager_op_as_function_;
}
void EagerContext::SetRunEagerOpAsFunction(bool enable) {
run_eager_op_as_function_ = enable;
}
bool EagerContext::JitCompileRewrite() const {
VLOG(3) << "JitCompileRewrite: " << jit_compile_rewrite_;
return jit_compile_rewrite_;
}
void EagerContext::SetJitCompileRewrite(bool enable) {
jit_compile_rewrite_ = enable;
}
void EagerContext::ListDevices(
std::vector<tensorflow::DeviceAttributes>* device_attributes) {
std::vector<Device*> devices = ListAllTfDevices();
device_attributes->reserve(devices.size());
for (const auto& dev : devices) {
device_attributes->emplace_back(dev->attributes());
}
}
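// Returns all local devices plus any remote devices whose names do not
// collide with a local device's name.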
std::vector<Device*> EagerContext::ListAllTfDevices() {
std::vector<Device*> devices;
std::unordered_set<string> dev_names;
if (local_device_mgr()) {
for (const auto& dev : local_device_mgr()->ListDevices()) {
devices.emplace_back(dev);
dev_names.emplace(dev->attributes().name());
}
}
if (remote_device_mgr()) {
for (const auto& dev : remote_device_mgr()->ListDevices()) {
Device* device = nullptr;
      // Skip remote devices that share a name with a local device.
      if (!local_device_mgr()->LookupDevice(dev->name(), &device).ok()) {
        devices.emplace_back(dev);
      }
}
}
return devices;
}
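// Partitions the incoming devices into local and remote sets, registers each
// set with the corresponding dynamic device manager (creating the remote
// manager on first use), and refreshes the function runtime and the
// prioritized device type list.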
Status EagerContext::AddDevices(std::vector<std::unique_ptr<Device>> devices) {
std::vector<std::unique_ptr<Device>> local_devices, remote_devices;
  // A single pass with moves avoids the quadratic cost of repeatedly erasing
  // from the front of the vector.
  for (auto& device : devices) {
    if (device->IsLocal()) {
      local_devices.push_back(std::move(device));
    } else {
      remote_devices.push_back(std::move(device));
    }
  }
  devices.clear();
TF_RETURN_IF_ERROR(
reinterpret_cast<DynamicDeviceMgr*>(local_device_manager_.Get())
->AddDevices(std::move(local_devices)));
if (!remote_devices.empty()) {
if (!remote_device_mgr()) {
remote_device_manager_.Reset(
std::make_unique<tensorflow::DynamicDeviceMgr>());
}
TF_RETURN_IF_ERROR(
reinterpret_cast<DynamicDeviceMgr*>(remote_device_manager_.Get())
->AddDevices(std::move(remote_devices)));
}
pflr_->InitializeDeviceAndFlr();
InitPrioritizedDeviceTypeList();
return absl::OkStatus();
}
void EagerContext::StartStep() {
mutex_lock ml(metadata_mu_);
num_active_steps_++;
}
void EagerContext::EndStep() {
mutex_lock ml(metadata_mu_);
num_active_steps_--;
if (num_active_steps_ == 0) {
step_container_ = std::make_unique<ScopedStepContainer>(
0, [this](const string& name) { ClearResourceContainer(name); });
}
}
ScopedStepContainer* EagerContext::StepContainer() {
mutex_lock ml(metadata_mu_);
return step_container_.get();
}
Status EagerContext::MaybeRegisterFunctionRemotely(const FunctionDef& fdef) {
if (!remote_device_manager_.Owned()) return absl::OkStatus();
#if !defined(IS_MOBILE_PLATFORM)
std::shared_ptr<eager::EnqueueRequest> request(new eager::EnqueueRequest);
request->set_context_id(GetContextId());
eager::RegisterFunctionOp* register_function =
request->add_queue()->mutable_register_function();
*register_function->mutable_function_def() = fdef;
StripDefaultAttributes(
*OpRegistry::Global(),
register_function->mutable_function_def()->mutable_node_def());
auto remote_contexts = GetRemoteContexts(); | #include "tensorflow/core/common_runtime/eager/context.h"
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/immediate_execution_context.h"
#include "tensorflow/c/eager/immediate_execution_operation.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/core/common_runtime/composite_device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/context_distributed_manager.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session_options.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace {
using ::testing::HasSubstr;
typedef FunctionDefHelper FDH;
static Device* CreateDevice(const string& type, int n) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr) : Device(nullptr, attr) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
};
DeviceAttributes attr;
attr.set_name("/job:localhost/replica:0/task:0/device:" + type + ":" +
std::to_string(n));
attr.set_device_type(type);
return new FakeDevice(attr);
}
class EagerContextTest : public ::testing::Test {
public:
EagerContext* context() { return context_.get(); }
void InitContext(const SessionOptions& opts,
ContextDevicePlacementPolicy policy, bool async = false) {
ASSERT_EQ(context_, nullptr);
InitDeviceManager();
context_ = core::RefCountPtr<EagerContext>(new EagerContext(
opts, policy, async, device_manager_.get(),
false, nullptr,
nullptr, nullptr,
true));
}
protected:
void InitDeviceManager() {
ASSERT_EQ(device_manager_, nullptr);
device_manager_ = std::make_unique<DynamicDeviceMgr>();
std::vector<std::unique_ptr<Device>> added_devices;
added_devices.emplace_back(CreateDevice(DEVICE_CPU, 0));
added_devices.emplace_back(CreateDevice(DEVICE_CPU, 1));
added_devices.emplace_back(CreateDevice(DEVICE_GPU, 0));
added_devices.emplace_back(CreateDevice(DEVICE_GPU, 1));
added_devices.emplace_back(CreateDevice(DEVICE_TPU, 0));
TF_CHECK_OK(device_manager_->AddDevices(std::move(added_devices)));
}
std::unique_ptr<DynamicDeviceMgr> device_manager_;
core::RefCountPtr<EagerContext> context_;
};
TEST_F(EagerContextTest, CompositeDevice) {
InitContext(SessionOptions(), DEVICE_PLACEMENT_EXPLICIT);
std::vector<string> underlying_devices = {
"/job:worker/replica:0/task:0/device:CPU:0",
"/job:worker/replica:0/task:0/device:CPU:1"};
CompositeDevice* composite_device_0 = nullptr;
TF_ASSERT_OK(context()->FindOrCreateCompositeDevice(underlying_devices,
"",
&composite_device_0));
EXPECT_EQ(composite_device_0->name(),
"/job:localhost/replica:0/task:0/device:COMPOSITE:0");
CompositeDevice* device = nullptr;
TF_EXPECT_OK(context()->FindCompositeDeviceFromName(
"/job:localhost/replica:0/task:0/device:COMPOSITE:0", &device));
EXPECT_EQ(device, composite_device_0);
CompositeDevice* composite_device_1 = nullptr;
TF_ASSERT_OK(context()->FindOrCreateCompositeDevice(underlying_devices,
"",
&composite_device_1));
EXPECT_EQ(composite_device_1, composite_device_0);
underlying_devices.push_back("/job:worker/replica:0/task:0/device:CPU:2");
CompositeDevice* composite_device_2 = nullptr;
TF_ASSERT_OK(context()->FindOrCreateCompositeDevice(underlying_devices,
"",
&composite_device_2));
EXPECT_EQ(composite_device_2->name(),
"/job:localhost/replica:0/task:0/device:COMPOSITE:1");
TF_EXPECT_OK(context()->FindCompositeDeviceFromName(
"/job:localhost/replica:0/task:0/device:COMPOSITE:1", &device));
EXPECT_EQ(device, composite_device_2);
EXPECT_TRUE(absl::IsNotFound(context()->FindCompositeDeviceFromName(
"/job:localhost/replica:0/task:0/device:COMPOSITE:2", &device)));
}
TEST_F(EagerContextTest, CompositeDeviceWithGivenName) {
InitContext(SessionOptions(), DEVICE_PLACEMENT_EXPLICIT);
const std::vector<string> underlying_devices_0 = {
"/job:worker/replica:0/task:0/device:CPU:0",
"/job:worker/replica:0/task:0/device:CPU:1"};
const string composite_device_name =
"/job:worker1/replica:0/task:0/device:COMPOSITE:5";
CompositeDevice* composite_device_0 = nullptr;
TF_ASSERT_OK(context()->FindOrCreateCompositeDevice(
underlying_devices_0, composite_device_name, &composite_device_0));
EXPECT_EQ(composite_device_0->name(), composite_device_name);
CompositeDevice* device = nullptr;
TF_EXPECT_OK(
context()->FindCompositeDeviceFromName(composite_device_name, &device));
EXPECT_EQ(device, composite_device_0);
std::vector<string> underlying_devices_1 = {
"/job:worker/replica:0/task:0/device:CPU:1",
"/job:worker/replica:0/task:0/device:CPU:2"};
CompositeDevice* composite_device_1 = nullptr;
TF_ASSERT_OK(context()->FindOrCreateCompositeDevice(
underlying_devices_1, composite_device_0->name(), &composite_device_1));
EXPECT_EQ(composite_device_1, composite_device_0);
}
TEST_F(EagerContextTest, AddFunctionDef) {
InitContext(SessionOptions(), DEVICE_PLACEMENT_EXPLICIT);
const Tensor kTwo = test::AsScalar<int64_t>(2);
const FunctionDef x_times_two = FDH::Define(
"XTimesTwo",
{"x: T"},
{"y: T"},
{"T: {float, double, int32, int64}"},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", "$T"}}},
});
TF_EXPECT_OK(context()->AddFunctionDef(x_times_two));
}
TEST_F(EagerContextTest, AddFunctionDefRepeatSame) {
InitContext(SessionOptions(), DEVICE_PLACEMENT_EXPLICIT);
const Tensor kTwo = test::AsScalar<int64_t>(2);
const FunctionDef x_times_two = FDH::Define(
"XTimesTwo",
{"x: T"},
{"y: T"},
{"T: {float, double, int32, int64}"},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", "$T"}}},
});
TF_EXPECT_OK(context()->AddFunctionDef(x_times_two));
const FunctionDef x_times_two_copy = FDH::Define(
"XTimesTwo",
{"x: T"},
{"y: T"},
{"T: {float, double, int32, int64}"},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", "$T"}}},
});
TF_EXPECT_OK(context()->AddFunctionDef(x_times_two_copy));
}
TEST_F(EagerContextTest, AddFunctionDefRepeatDifferent) {
InitContext(SessionOptions(), DEVICE_PLACEMENT_EXPLICIT);
const Tensor kTwo = test::AsScalar<int64_t>(2);
const FunctionDef x_times_two = FDH::Define(
"XTimesTwo",
{"x: T"},
{"y: T"},
{"T: {float, double, int32, int64}"},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", "$T"}}},
});
TF_EXPECT_OK(context()->AddFunctionDef(x_times_two));
const Tensor kThree = test::AsScalar<int64_t>(3);
const FunctionDef x_times_two_copy = FDH::Define(
"XTimesTwo",
{"x: T"},
{"y: T"},
{"T: {float, double, int32, int64}"},
{
{{"two"}, "Const", {}, {{"value", kThree}, {"dtype", DT_INT64}}},
{{"scale"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", "$T"}}},
});
Status s = context()->AddFunctionDef(x_times_two_copy);
EXPECT_FALSE(s.ok());
}
TEST_F(EagerContextTest, FunctionErrorRecovery) {
InitContext(SessionOptions(), DEVICE_PLACEMENT_EXPLICIT, true);
const FunctionDef assert_and_identity = FDH::Define(
"AssertAndIdentity",
{"x: float", "condition: bool"},
{"y: float"},
{},
{
{{"assert"},
"Assert",
{"condition", "x"},
{{"T", std::vector<DataType>{DT_FLOAT}}}},
{{"y"},
"Identity",
{"x"},
{{"T", DT_FLOAT}},
{"assert"}},
});
Status s = context()->AddFunctionDef(assert_and_identity);
auto fail_op = ImmediateOpPtr(context()->CreateOperation());
TF_ASSERT_OK(fail_op->Reset("AssertAndIdentity",
"/job:localhost/replica:0/task:0/device:CPU:0"));
Tensor float_tensor = test::AsScalar<float>(3.0);
auto input_float = core::RefCountPtr<ImmediateExecutionTensorHandle>(
context()->CreateLocalHandleFromTFTensor(
float_tensor, context()->HostCPUName().c_str()));
Tensor bool_tensor_false = test::AsScalar<bool>(false);
auto input_bool_false = core::RefCountPtr<ImmediateExecutionTensorHandle>(
context()->CreateLocalHandleFromTFTensor(
bool_tensor_false, context()->HostCPUName().c_str()));
TF_ASSERT_OK(fail_op->AddInput(input_float.get()));
TF_ASSERT_OK(fail_op->AddInput(input_bool_false.get()));
std::vector<AbstractTensorHandle*> retvals(1);
int num_retvals = retvals.size();
StatusGroup op_and_sync_status;
op_and_sync_status.Update(
fail_op->Execute(absl::MakeSpan(retvals), &num_retvals));
op_and_sync_status.Update(context()->SyncExecutors());
ASSERT_THAT(op_and_sync_status.as_summary_status().message(),
HasSubstr("assertion failed"));
if (retvals[0] != nullptr) {
retvals[0]->Unref();
retvals[0] = nullptr;
}
Tensor bool_tensor_true = test::AsScalar<bool>(true);
auto input_bool_true = core::RefCountPtr<ImmediateExecutionTensorHandle>(
context()->CreateLocalHandleFromTFTensor(
bool_tensor_true, context()->HostCPUName().c_str()));
auto success_op = ImmediateOpPtr(context()->CreateOperation());
TF_ASSERT_OK(success_op->Reset(
"AssertAndIdentity", "/job:localhost/replica:0/task:0/device:CPU:0"));
TF_ASSERT_OK(success_op->AddInput(input_float.get()));
TF_ASSERT_OK(success_op->AddInput(input_bool_true.get()));
TF_ASSERT_OK(success_op->Execute(absl::MakeSpan(retvals), &num_retvals));
TF_ASSERT_OK(context()->SyncExecutors());
retvals[0]->Unref();
retvals[0] = nullptr;
}
TEST_F(EagerContextTest, XlaCompileDeviceType) {
InitContext(SessionOptions(), DEVICE_PLACEMENT_EXPLICIT, true);
const Tensor kTwo = test::AsScalar<int64_t>(2);
const FunctionDef x_times_two = FDH::Define(
"XTimesTwo",
{"x: int64"},
{"y: int64"}, {},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"y"}, "Mul", {"x", "two"}, {{"T", DT_INT64}}},
});
Status s = context()->AddFunctionDef(x_times_two);
context()->SetJitCompileRewrite(true);
auto op = ImmediateOpPtr(context()->CreateOperation());
TF_ASSERT_OK(
op->Reset("XTimesTwo", "/job:localhost/replica:0/task:0/device:CPU:0"));
Tensor int_tensor = test::AsScalar<int64_t>(3);
auto input_int = core::RefCountPtr<ImmediateExecutionTensorHandle>(
context()->CreateLocalHandleFromTFTensor(
int_tensor, context()->HostCPUName().c_str()));
TF_ASSERT_OK(op->AddInput(input_int.get()));
std::vector<AbstractTensorHandle*> retvals(1);
int num_retvals = retvals.size();
TF_ASSERT_OK(op->Execute(absl::MakeSpan(retvals), &num_retvals));
retvals[0]->Unref();
retvals[0] = nullptr;
}
TEST_F(EagerContextTest, LocalRendezvousCreation) {
InitContext(SessionOptions(), DEVICE_PLACEMENT_EXPLICIT);
auto rendezvous_creator = context()->RendezvousFactory();
tsl::core::RefCountPtr<Rendezvous> rendezvous_1;
TF_ASSERT_OK(rendezvous_creator(1, nullptr, &rendezvous_1));
EXPECT_EQ(rendezvous_1->RefCount(), 1);
tsl::core::RefCountPtr<Rendezvous> rendezvous_2;
TF_ASSERT_OK(rendezvous_creator(1, nullptr, &rendezvous_2));
EXPECT_EQ(rendezvous_2->RefCount(), 2);
rendezvous_1.reset();
EXPECT_EQ(rendezvous_2->RefCount(), 1);
tsl::core::WeakPtr<Rendezvous> weak2{rendezvous_2.get()};
rendezvous_2.reset();
EXPECT_EQ(weak2.GetNewRef(), nullptr);
}
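// Verifies the global-rendezvous path: the factory hands out references to a
// single context-owned rendezvous (so each creation bumps the refcount), and
// resetting the global rendezvous gives subsequent creations a fresh one.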
void TestGlobalRendezvous(EagerContext* context, bool reuse_global_rendezvous) {
auto rendezvous_creator = context->RendezvousFactory(reuse_global_rendezvous);
tsl::core::RefCountPtr<Rendezvous> rendezvous_1;
TF_ASSERT_OK(rendezvous_creator(-1, nullptr, &rendezvous_1));
EXPECT_EQ(rendezvous_1->RefCount(), 2);
tsl::core::RefCountPtr<Rendezvous> rendezvous_2;
TF_ASSERT_OK(rendezvous_creator(-1, nullptr, &rendezvous_2));
EXPECT_EQ(rendezvous_2->RefCount(), 3);
context->ResetGlobalRendezvousForFunction();
tsl::core::RefCountPtr<Rendezvous> rendezvous_3;
TF_ASSERT_OK(rendezvous_creator(-1, nullptr, &rendezvous_3));
EXPECT_EQ(rendezvous_3->RefCount(), 2);
}
TEST_F(EagerContextTest, GlobalRendezvousCreation) {
InitContext(SessionOptions(), DEVICE_PLACEMENT_EXPLICIT);
TestGlobalRendezvous(context(), false);
}
TEST_F(EagerContextTest, ReuseGlobalRendezvous) {
InitContext(SessionOptions(), DEVICE_PLACEMENT_EXPLICIT);
TestGlobalRendezvous(context(), true);
}
}
} |
1,314 | cpp | tensorflow/tensorflow | attribute | tensorflow/core/tfrt/mlrt/attribute/attribute.cc | tensorflow/core/tfrt/mlrt/attribute/attribute_test.cc | #ifndef TENSORFLOW_CORE_TFRT_MLRT_ATTRIBUTE_ATTRIBUTE_H_
#define TENSORFLOW_CORE_TFRT_MLRT_ATTRIBUTE_ATTRIBUTE_H_
#include <string>
#include "absl/status/statusor.h"
#include "mlir/IR/Attributes.h"
#include "tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
namespace tensorflow {
namespace tf_mlrt {
class ShapeAttr {
public:
struct StorageType {
using Self = StorageType;
DEFINE_BYTECODE_FIELD(uint8_t, unranked);
DEFINE_BYTECODE_FIELD(mlrt::bc::Vector<int64_t>, dims);
};
class Constructor {
public:
Constructor(mlrt::bc::Allocator* allocator, mlrt::bc::BcAddr_t address)
: allocator_(allocator), address_(address) {}
void set_unranked(bool unranked) {
StorageType::construct_unranked(allocator_, address_, unranked);
}
template <typename... Args>
auto construct_shape(Args&&... args) {
return StorageType::construct_dims(allocator_, address_,
std::forward<Args>(args)...);
}
mlrt::bc::BcAddr_t address() const { return address_; }
private:
mlrt::bc::Allocator* allocator_;
mlrt::bc::BcAddr_t address_;
};
using NonTrivialConstructorType = Constructor;
explicit ShapeAttr(const char* p) : p_(p) {}
bool unranked() const { return StorageType::read_unranked(p_); }
mlrt::bc::Vector<int64_t> dims() const { return StorageType::read_dims(p_); }
private:
const char* p_ = nullptr;
};
class TensorAttr {
public:
struct StorageType {
using Self = StorageType;
DEFINE_BYTECODE_FIELD(tensorflow::DataType, dtype);
DEFINE_BYTECODE_FIELD(uint64_t, num_elements);
DEFINE_BYTECODE_FIELD(mlrt::bc::Vector<int64_t>, shape);
DEFINE_BYTECODE_FIELD(mlrt::bc::Vector<char>, data);
};
class Constructor {
public:
Constructor(mlrt::bc::Allocator* allocator, mlrt::bc::BcAddr_t address,
tensorflow::DataType dtype)
: allocator_(allocator), address_(address) {
StorageType::construct_dtype(allocator_, address_, dtype);
}
void set_num_elements(size_t num) {
StorageType::construct_num_elements(allocator_, address_, num);
}
template <typename... Args>
auto construct_shape(Args&&... args) {
return StorageType::construct_shape(allocator_, address_,
std::forward<Args>(args)...);
}
template <typename... Args>
auto construct_data(Args&&... args) {
return StorageType::construct_data(allocator_, address_,
std::forward<Args>(args)...);
}
mlrt::bc::BcAddr_t address() const { return address_; }
private:
mlrt::bc::Allocator* allocator_;
mlrt::bc::BcAddr_t address_;
};
using NonTrivialConstructorType = Constructor;
explicit TensorAttr(const char* p) : p_(p) {}
tensorflow::DataType dtype() const { return StorageType::read_dtype(p_); }
mlrt::bc::Vector<int64_t> shape() const {
return StorageType::read_shape(p_);
}
mlrt::bc::Vector<char> data() const { return StorageType::read_data(p_); }
private:
const char* p_ = nullptr;
};
absl::StatusOr<std::string> EncodeTensorflowAttribute(
const mlrt::ModuleEmitterContext& module_context, mlir::Attribute attr);
}
}
#endif
#include "tensorflow/core/tfrt/mlrt/attribute/attribute.h"
#include <cstring>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_attributes.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace tf_mlrt {
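// Encodes a TensorFlow MLIR attribute into the MLRT bytecode format. The
// dispatch order is: simple attributes handled by mlrt, dense tensor
// attributes (with bool and splat values expanded element-wise), type
// attributes (encoded as a raw DataType), shape attributes, and arrays of
// type attributes; anything else yields an InvalidArgument error.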
absl::StatusOr<std::string> EncodeTensorflowAttribute(
const mlrt::ModuleEmitterContext& module_context, mlir::Attribute attr) {
if (auto result = mlrt::EncodeSimpleAttribute(module_context, attr)) {
return std::move(*result);
}
if (auto dense_attr = mlir::dyn_cast<mlir::DenseElementsAttr>(attr)) {
auto element_type = dense_attr.getElementType();
tensorflow::DataType dtype;
TF_RETURN_IF_ERROR(tensorflow::ConvertToDataType(element_type, &dtype));
if (dtype == tensorflow::DT_STRING) {
return absl::InvalidArgumentError(
"String tensor attribute is not yet supported");
}
mlrt::bc::Buffer buffer;
mlrt::bc::Allocator allocator(&buffer);
auto tensor_ctor = mlrt::bc::New<TensorAttr>(&allocator, dtype);
auto shaped_type = dense_attr.getType();
size_t num_elements = shaped_type.getNumElements();
tensor_ctor.set_num_elements(num_elements);
std::vector<int64_t> shape(shaped_type.getShape().begin(),
shaped_type.getShape().end());
tensor_ctor.construct_shape(shape);
if (dtype == tensorflow::DT_BOOL) {
std::vector<uint8_t> data(num_elements);
int i = 0;
for (auto v : dense_attr.getValues<bool>()) {
data[i++] = static_cast<uint8_t>(v);
}
tensor_ctor.construct_data(data.size())
.Place(reinterpret_cast<const char*>(data.data()), data.size());
} else {
auto raw_data = dense_attr.getRawData();
if (dense_attr.isSplat()) {
std::vector<char> data(raw_data.size() * num_elements);
char* p = data.data();
for (int i = 0; i < num_elements; ++i, p += raw_data.size()) {
std::memcpy(p, raw_data.data(), raw_data.size());
}
tensor_ctor.construct_data(data.size()).Place(data.data(), data.size());
} else {
tensor_ctor.construct_data(raw_data.size())
.Place(raw_data.data(), raw_data.size());
}
}
return std::string(buffer.data(), buffer.size());
}
if (auto type_attr = mlir::dyn_cast<mlir::TypeAttr>(attr)) {
tensorflow::DataType dtype;
TF_RETURN_IF_ERROR(
tensorflow::ConvertToDataType(type_attr.getValue(), &dtype));
std::string data(sizeof(dtype), '\0');
std::memcpy(data.data(), &dtype, sizeof(dtype));
return data;
}
if (auto shape_attr = mlir::dyn_cast<mlir::TF::ShapeAttr>(attr)) {
llvm::ArrayRef<int64_t> shape;
if (!shape_attr.getUnranked()) {
auto shape_or = shape_attr.getValue();
if (!shape_or.has_value()) {
std::string attr_str;
llvm::raw_string_ostream os(attr_str);
attr.print(os);
return absl::InvalidArgumentError(
absl::StrCat("Failed to get shape from shape attr: ", attr_str));
}
shape = *shape_or;
}
mlrt::bc::Buffer buffer;
mlrt::bc::Allocator allocator(&buffer);
auto shape_attr_ctor = mlrt::bc::New<ShapeAttr>(&allocator);
shape_attr_ctor.set_unranked(shape_attr.getUnranked());
std::vector<int64_t> shape_vec(shape.begin(), shape.end());
shape_attr_ctor.construct_shape(shape_vec);
return std::string(buffer.data(), buffer.size());
}
if (auto array_attr = mlir::dyn_cast<mlir::ArrayAttr>(attr)) {
mlrt::bc::Buffer buffer;
mlrt::bc::Allocator allocator(&buffer);
auto ctor = mlrt::bc::New<mlrt::bc::Vector<tensorflow::DataType>>(
&allocator, array_attr.size());
    size_t i = 0;
    for (; i < array_attr.size(); ++i) {
if (auto type_attr = mlir::dyn_cast<mlir::TypeAttr>(array_attr[i])) {
tensorflow::DataType dtype;
TF_RETURN_IF_ERROR(
tensorflow::ConvertToDataType(type_attr.getValue(), &dtype));
ctor.ConstructAt(i, dtype);
} else {
break;
}
}
if (i == array_attr.size()) {
return std::string(buffer.data(), buffer.size());
}
}
std::string attr_str;
llvm::raw_string_ostream os(attr_str);
attr.print(os);
return absl::InvalidArgumentError(
absl::StrCat("Try to encode unsupported attribute: ", attr_str));
}
}
} | #include "tensorflow/core/tfrt/mlrt/attribute/attribute.h"
#include <array>
#include <cstring>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/ArrayRef.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_attributes.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf_mlrt {
namespace {
TEST(AttributeTest, TensorAttr) {
mlir::MLIRContext mlir_context;
mlir::Builder builder(&mlir_context);
std::array<int64_t, 4> data = {0, 1, 2, 3};
auto dense_i64_attr = builder.getI64VectorAttr(data);
mlrt::AttributeEncoderRegistry attribute_encoder_registry;
mlrt::ModuleEmitterContext emitter_context(&attribute_encoder_registry);
TF_ASSERT_OK_AND_ASSIGN(
auto attr_buffer,
EncodeTensorflowAttribute(emitter_context, dense_i64_attr));
TensorAttr tensor_attr(attr_buffer.data());
EXPECT_EQ(tensor_attr.dtype(), tensorflow::DT_INT64);
EXPECT_THAT(tensor_attr.shape(), ::testing::ElementsAreArray({4}));
EXPECT_EQ(
absl::string_view(tensor_attr.data().data(), tensor_attr.data().size()),
absl::string_view(reinterpret_cast<const char*>(data.data()),
data.size() * sizeof(int64_t)));
}
TEST(AttributeTest, BoolTensorAttr) {
mlir::MLIRContext mlir_context;
mlir::Builder builder(&mlir_context);
auto dense_bool_attr = builder.getBoolVectorAttr({true, false, true, false});
mlrt::AttributeEncoderRegistry attribute_encoder_registry;
mlrt::ModuleEmitterContext emitter_context(&attribute_encoder_registry);
TF_ASSERT_OK_AND_ASSIGN(
auto attr_buffer,
EncodeTensorflowAttribute(emitter_context, dense_bool_attr));
TensorAttr tensor_attr(attr_buffer.data());
EXPECT_EQ(tensor_attr.dtype(), tensorflow::DT_BOOL);
EXPECT_THAT(tensor_attr.shape(), ::testing::ElementsAreArray({4}));
std::array<uint8_t, 4> expected_data = {1, 0, 1, 0};
EXPECT_EQ(
absl::string_view(tensor_attr.data().data(), tensor_attr.data().size()),
absl::string_view(reinterpret_cast<const char*>(expected_data.data()),
expected_data.size() * sizeof(uint8_t)));
}
TEST(AttributeTest, SplatTensorAttr) {
mlir::MLIRContext mlir_context;
mlir::Builder builder(&mlir_context);
auto dense_splat_i64_attr = mlir::DenseElementsAttr::get<int64_t>(
mlir::RankedTensorType::get(4, builder.getI64Type()), 100);
mlrt::AttributeEncoderRegistry attribute_encoder_registry;
mlrt::ModuleEmitterContext emitter_context(&attribute_encoder_registry);
TF_ASSERT_OK_AND_ASSIGN(
auto attr_buffer,
EncodeTensorflowAttribute(emitter_context, dense_splat_i64_attr));
TensorAttr tensor_attr(attr_buffer.data());
EXPECT_EQ(tensor_attr.dtype(), tensorflow::DT_INT64);
EXPECT_THAT(tensor_attr.shape(), ::testing::ElementsAreArray({4}));
EXPECT_EQ(tensor_attr.data().size(), 4 * sizeof(int64_t));
const char* p = tensor_attr.data().data();
for (int i = 0; i < 4; ++i, p += sizeof(int64_t)) {
int64_t v;
std::memcpy(&v, p, sizeof(int64_t));
EXPECT_EQ(v, 100);
}
}
TEST(AttributeTest, TypedAttr) {
mlir::MLIRContext mlir_context;
mlir_context.loadDialect<mlir::TF::TensorFlowDialect>();
mlir::Builder builder(&mlir_context);
auto type_attr = mlir::TypeAttr::get(builder.getType<mlir::IntegerType>(32));
mlrt::AttributeEncoderRegistry attribute_encoder_registry;
mlrt::ModuleEmitterContext emitter_context(&attribute_encoder_registry);
TF_ASSERT_OK_AND_ASSIGN(
auto attr_buffer, EncodeTensorflowAttribute(emitter_context, type_attr));
tensorflow::DataType dtype;
std::memcpy(&dtype, attr_buffer.data(), sizeof(dtype));
EXPECT_EQ(dtype, DT_INT32);
}
TEST(AttributeTest, ShapeAttr) {
mlir::MLIRContext mlir_context;
mlir_context.loadDialect<mlir::TF::TensorFlowDialect>();
std::array<int64_t, 4> data = {1, 2, 3, 4};
auto shape_attr = mlir::TF::ShapeAttr::get(
&mlir_context, llvm::ArrayRef<int64_t>(data.begin(), data.end()),
false);
mlrt::AttributeEncoderRegistry attribute_encoder_registry;
mlrt::ModuleEmitterContext emitter_context(&attribute_encoder_registry);
TF_ASSERT_OK_AND_ASSIGN(
auto attr_buffer, EncodeTensorflowAttribute(emitter_context, shape_attr));
ShapeAttr shape_attr_decoded(attr_buffer.data());
EXPECT_EQ(shape_attr_decoded.unranked(), false);
EXPECT_THAT(shape_attr_decoded.dims(),
::testing::ElementsAreArray({1, 2, 3, 4}));
}
TEST(AttributeTest, DtypeArrayAttr) {
mlir::MLIRContext mlir_context;
mlir_context.loadDialect<mlir::TF::TensorFlowDialect>();
mlir::Builder builder(&mlir_context);
std::array<mlir::Attribute, 4> arr = {
mlir::TypeAttr::get(builder.getType<mlir::IntegerType>(32)),
mlir::TypeAttr::get(builder.getType<mlir::IntegerType>(64)),
mlir::TypeAttr::get(builder.getType<mlir::Float32Type>()),
mlir::TypeAttr::get(builder.getType<mlir::IntegerType>(1))};
auto arr_attr = mlir::ArrayAttr::get(
&mlir_context, llvm::ArrayRef<mlir::Attribute>(arr.begin(), arr.end()));
mlrt::AttributeEncoderRegistry attribute_encoder_registry;
mlrt::ModuleEmitterContext emitter_context(&attribute_encoder_registry);
TF_ASSERT_OK_AND_ASSIGN(auto attr_buffer,
EncodeTensorflowAttribute(emitter_context, arr_attr));
mlrt::bc::Vector<tensorflow::DataType> dtype_arr(attr_buffer.data());
EXPECT_THAT(dtype_arr, ::testing::ElementsAreArray(
{DT_INT32, DT_INT64, DT_FLOAT, DT_BOOL}));
}
TEST(AttributeTest, UnsupportedAttr) {
mlir::MLIRContext mlir_context;
mlir_context.loadDialect<mlir::TF::TensorFlowDialect>();
mlir::Builder builder(&mlir_context);
auto dense_string_attr = mlir::DenseStringElementsAttr::get(
mlir::RankedTensorType::get({2}, builder.getType<mlir::TF::StringType>()),
{"a", "b"});
mlrt::AttributeEncoderRegistry attribute_encoder_registry;
mlrt::ModuleEmitterContext emitter_context(&attribute_encoder_registry);
EXPECT_THAT(
EncodeTensorflowAttribute(emitter_context, dense_string_attr),
::tsl::testing::StatusIs(absl::StatusCode::kInvalidArgument,
"String tensor attribute is not yet supported"));
EXPECT_THAT(
EncodeTensorflowAttribute(emitter_context, builder.getUnitAttr()),
::tsl::testing::StatusIs(absl::StatusCode::kInvalidArgument,
"Try to encode unsupported attribute: unit"));
}
}
}
} |
1,315 | cpp | tensorflow/tensorflow | shard_restore_util | tensorflow/core/tfrt/mlrt/kernel/shard_restore_util.cc | tensorflow/core/tfrt/mlrt/kernel/shard_restore_util_test.cc | #ifndef TENSORFLOW_CORE_TFRT_MLRT_KERNEL_SHARD_RESTORE_UTIL_H_
#define TENSORFLOW_CORE_TFRT_MLRT_KERNEL_SHARD_RESTORE_UTIL_H_
#include <cstddef>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
namespace tensorflow {
namespace tf_mlrt {
std::vector<std::vector<int>> ShardVariables(
int num_shards, absl::Span<int64_t> variable_sizes);
}
}
#endif
#include "tensorflow/core/tfrt/mlrt/kernel/shard_restore_util.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <queue>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/types/span.h"
namespace tensorflow {
namespace tf_mlrt {
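// Distributes variables into at most `num_shards` clusters using a greedy
// longest-processing-time heuristic: variables are sorted by size in
// descending order and each is assigned to the currently smallest cluster,
// tracked with a min-heap keyed on total size. Empty clusters are dropped.
// For example, sizes {8, 10, 3} with 2 shards yield shards {1} and {0, 2}.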
std::vector<std::vector<int>> ShardVariables(
int num_shards, absl::Span<int64_t> variable_sizes) {
DCHECK_GT(num_shards, 0);
struct IndexSize {
int index;
int64_t size;
};
std::vector<IndexSize> variable_index_sizes;
variable_index_sizes.reserve(variable_sizes.size());
for (int i = 0; i < variable_sizes.size(); ++i) {
variable_index_sizes.push_back({.index = i, .size = variable_sizes[i]});
}
std::sort(
variable_index_sizes.begin(), variable_index_sizes.end(),
[&](const IndexSize& a, const IndexSize& b) { return a.size > b.size; });
struct RestoreVariableCluster {
std::vector<int> indices;
size_t total_size = 0;
};
auto cmp = [](const RestoreVariableCluster& a,
const RestoreVariableCluster& b) {
return a.total_size > b.total_size;
};
std::priority_queue<RestoreVariableCluster,
std::vector<RestoreVariableCluster>, decltype(cmp)>
min_heap;
for (int i = 0; i < num_shards; ++i) {
min_heap.push(RestoreVariableCluster());
}
for (int i = 0; i < variable_index_sizes.size(); ++i) {
RestoreVariableCluster min_cluster = min_heap.top();
min_heap.pop();
min_cluster.total_size += variable_index_sizes[i].size;
min_cluster.indices.push_back(variable_index_sizes[i].index);
min_heap.push(std::move(min_cluster));
}
std::vector<std::vector<int>> shards;
shards.reserve(min_heap.size());
while (!min_heap.empty()) {
auto& min_cluster = min_heap.top();
if (min_cluster.total_size > 0) {
shards.push_back(min_cluster.indices);
}
min_heap.pop();
}
return shards;
}
}
} | #include "tensorflow/core/tfrt/mlrt/kernel/shard_restore_util.h"
#include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
namespace tensorflow {
namespace tf_mlrt {
namespace {
using ::testing::ElementsAre;
using ::testing::UnorderedElementsAre;
TEST(ShardRestoreUtilTest, Basic) {
int num_shards = 2;
std::vector<int64_t> shard_sizes = {8, 10, 3};
std::vector<std::vector<int>> shards =
ShardVariables(num_shards, absl::MakeSpan(shard_sizes));
EXPECT_EQ(shards.size(), 2);
EXPECT_THAT(shards[0], ElementsAre(1));
EXPECT_THAT(shards[1], ElementsAre(0, 2));
}
TEST(ShardRestoreUtilTest, Imbalance) {
int num_shards = 2;
std::vector<int64_t> shard_sizes = {3, 3, 10, 3};
std::vector<std::vector<int>> shards =
ShardVariables(num_shards, absl::MakeSpan(shard_sizes));
EXPECT_EQ(shards.size(), 2);
EXPECT_THAT(shards[0], UnorderedElementsAre(0, 1, 3));
EXPECT_THAT(shards[1], ElementsAre(2));
}
TEST(ShardRestoreUtilTest, SingleShard) {
int num_shards = 1;
std::vector<int64_t> shard_sizes = {10, 2};
std::vector<std::vector<int>> shards =
ShardVariables(num_shards, absl::MakeSpan(shard_sizes));
EXPECT_EQ(shards.size(), 1);
EXPECT_THAT(shards[0], ElementsAre(0, 1));
}
TEST(ShardRestoreUtilTest, NumVariablesLessThanShard) {
int num_shards = 2;
std::vector<int64_t> shard_sizes = {1};
std::vector<std::vector<int>> shards =
ShardVariables(num_shards, absl::MakeSpan(shard_sizes));
EXPECT_EQ(shards.size(), 1);
EXPECT_THAT(shards[0], ElementsAre(0));
}
}
}
} |
1,316 | cpp | tensorflow/tensorflow | create_pjrt_client_util | tensorflow/core/tfrt/common/create_pjrt_client_util.cc | tensorflow/core/tfrt/common/create_pjrt_client_util_test.cc | #ifndef TENSORFLOW_CORE_TFRT_COMMON_CREATE_PJRT_CLIENT_UTIL_H_
#define TENSORFLOW_CORE_TFRT_COMMON_CREATE_PJRT_CLIENT_UTIL_H_
#include <optional>
#include <set>
#include "absl/status/statusor.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/types.h"
namespace tensorflow {
absl::StatusOr<xla::PjRtClient*> GetOrCreatePjRtClient(
const DeviceType& device_type,
std::optional<std::set<int>> allowed_devices = std::nullopt);
}
#endif
#include "tensorflow/core/tfrt/common/create_pjrt_client_util.h"
#include <optional>
#include <set>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/tfrt/common/global_state.h"
#include "tensorflow/core/tfrt/common/pjrt_state.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
absl::StatusOr<xla::PjRtClient*> GetOrCreatePjRtClient(
const DeviceType& device_type,
std::optional<std::set<int>> allowed_devices) {
ResourceMgr* rmgr = tfrt_global::GetTFGlobalResourceMgr();
PjRtState* pjrt_state;
TF_RETURN_IF_ERROR(rmgr->LookupOrCreate<PjRtState>(
rmgr->default_container(), kPjRtStateResourceName, &pjrt_state,
[&](PjRtState** ret) {
*ret = PjRtState::Create();
return absl::OkStatus();
}));
core::ScopedUnref pjrt_state_ref(pjrt_state);
return pjrt_state->GetOrCreatePjRtClient(device_type);
}
} | #include "tensorflow/core/tfrt/common/create_pjrt_client_util.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/framework/types.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace {
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
TEST(CreatePjRtClientTest, GetNotExistPjRtClientNotImplemented) {
EXPECT_THAT(
GetOrCreatePjRtClient(DEVICE_CPU),
StatusIs(error::NOT_FOUND,
HasSubstr(absl::StrCat("The PJRT client factory of `",
DEVICE_CPU, "` is not registered"))));
}
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
TEST(CreatePjRtClientTest, GetNotExistGpuPjRtClient) {
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client,
GetOrCreatePjRtClient(DEVICE_XLA_GPU));
EXPECT_THAT(pjrt_client, ::testing::NotNull());
}
#endif
}
} |
1,317 | cpp | tensorflow/tensorflow | pjrt_state | tensorflow/core/tfrt/common/pjrt_state.cc | tensorflow/core/tfrt/common/pjrt_state_test.cc | #ifndef TENSORFLOW_CORE_TFRT_COMMON_PJRT_STATE_H_
#define TENSORFLOW_CORE_TFRT_COMMON_PJRT_STATE_H_
#include <map>
#include <memory>
#include <set>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "xla/client/local_client.h"
#include "xla/pjrt/local_device_state.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/stream_executor/integrations/tf_allocator_adapter.h"
#include "xla/tsl/framework/allocator.h"
#include "tensorflow/core/framework/resource_base.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
const char kPjRtStateResourceName[] = "pjrt_state";
using PjRtClientsMap = std::map<DeviceType, std::unique_ptr<xla::PjRtClient>>;
struct PjRtGpuClientCreationInfo {
std::set<int> allowed_devices;
std::unique_ptr<se::MultiDeviceAdapter> allocator;
std::unique_ptr<tsl::Allocator> host_memory_allocator;
std::map<int, std::unique_ptr<xla::LocalDeviceState>> local_device_states;
xla::LocalClient* local_client;
};
class PjRtState : public ResourceBase {
public:
static PjRtState* Create();
absl::StatusOr<xla::PjRtClient*> GetPjRtClient(const DeviceType& device_type);
absl::StatusOr<xla::PjRtClient*> GetOrCreatePjRtClient(
const DeviceType& device_type);
Status SetPjRtClient(const DeviceType& device_type,
std::unique_ptr<xla::PjRtClient> client);
Status MovePjRtClientToUnused(const DeviceType& device_type);
string DebugString() const override;
absl::Status SetPjRtGpuClientCreationInfo(
std::unique_ptr<PjRtGpuClientCreationInfo> info);
PjRtGpuClientCreationInfo* GetPjRtGpuClientCreationInfo();
private:
explicit PjRtState() {}
absl::Mutex mu_;
PjRtClientsMap clients_ ABSL_GUARDED_BY(mu_);
std::vector<std::unique_ptr<xla::PjRtClient>> unused_ ABSL_GUARDED_BY(mu_);
std::unique_ptr<PjRtGpuClientCreationInfo> pjrt_gpu_client_creation_info_
ABSL_GUARDED_BY(mu_);
};
}
#endif
#include "tensorflow/core/tfrt/common/pjrt_state.h"
#include <memory>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/tf_pjrt_client.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_options.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_registry.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
PjRtState* PjRtState::Create() { return new PjRtState(); }
absl::StatusOr<xla::PjRtClient*> PjRtState::GetPjRtClient(
const DeviceType& device_type) {
absl::MutexLock lock(&mu_);
if (auto it = clients_.find(device_type); it != clients_.end()) {
return it->second.get();
}
return errors::NotFound("PjRt client not found for device type ",
device_type);
}
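// Returns the cached client for `device_type` if one exists; otherwise
// creates one via the registered client factory, wraps it in a TfPjRtClient,
// caches it, and returns the cached pointer.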
absl::StatusOr<xla::PjRtClient*> PjRtState::GetOrCreatePjRtClient(
const DeviceType& device_type) {
absl::MutexLock lock(&mu_);
if (auto it = clients_.find(device_type); it != clients_.end()) {
return it->second.get();
}
std::unique_ptr<xla::PjRtClient> pjrt_client;
xla::PjrtClientFactoryOptions options = xla::PjrtClientFactoryOptions();
TF_ASSIGN_OR_RETURN(std::unique_ptr<xla::PjRtClient> client,
xla::PjrtClientFactoryRegistry::Get().GetPjrtClient(
device_type, options));
pjrt_client = xla::TfPjRtClient::CreateTfPjRtClient(std::move(client));
clients_[device_type] = std::move(pjrt_client);
return clients_[device_type].get();
}
Status PjRtState::SetPjRtClient(const DeviceType& device_type,
std::unique_ptr<xla::PjRtClient> client) {
absl::MutexLock lock(&mu_);
if (auto it = clients_.find(device_type); it != clients_.end()) {
unused_.push_back(std::move(it->second));
}
clients_[device_type] = std::move(client);
return absl::OkStatus();
}
Status PjRtState::MovePjRtClientToUnused(const DeviceType& device_type) {
absl::MutexLock lock(&mu_);
if (auto it = clients_.find(device_type); it != clients_.end()) {
unused_.push_back(std::move(it->second));
clients_.erase(it);
return absl::OkStatus();
}
return errors::NotFound("PjRt client not found for device type ",
device_type);
}
Status PjRtState::SetPjRtGpuClientCreationInfo(
std::unique_ptr<PjRtGpuClientCreationInfo> info) {
absl::MutexLock lock(&mu_);
pjrt_gpu_client_creation_info_ = std::move(info);
return absl::OkStatus();
}
PjRtGpuClientCreationInfo* PjRtState::GetPjRtGpuClientCreationInfo() {
absl::MutexLock lock(&mu_);
return pjrt_gpu_client_creation_info_.get();
}
string PjRtState::DebugString() const { return "PjRtState"; }
} | #include "tensorflow/core/tfrt/common/pjrt_state.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/pjrt/cpu/cpu_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace {
using tensorflow::PjRtState;
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
class PjRtStateTestFixture : public testing::Test {
protected:
PjRtStateTestFixture() { pjrt_state_ = PjRtState::Create(); }
~PjRtStateTestFixture() override {
tensorflow::core::ScopedUnref pjrt_state_ref(pjrt_state_);
}
PjRtState* pjrt_state_;
};
TEST_F(PjRtStateTestFixture, SetAndGetPjRtClient) {
TF_ASSERT_OK(pjrt_state_->SetPjRtClient(
tensorflow::DEVICE_CPU,
      xla::GetTfrtCpuClient(true, 1).value()));
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client,
pjrt_state_->GetPjRtClient(tensorflow::DEVICE_CPU));
EXPECT_THAT(pjrt_client, testing::NotNull());
}
TEST_F(PjRtStateTestFixture, AddAlreadyExistsPjRtClient) {
TF_ASSERT_OK(pjrt_state_->SetPjRtClient(
tensorflow::DEVICE_CPU,
      xla::GetTfrtCpuClient(true, 1).value()));
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client_1,
pjrt_state_->GetPjRtClient(tensorflow::DEVICE_CPU));
TF_ASSERT_OK(pjrt_state_->SetPjRtClient(
      tensorflow::DEVICE_CPU,
      xla::GetTfrtCpuClient(true, 1).value()));
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client_2,
pjrt_state_->GetPjRtClient(tensorflow::DEVICE_CPU));
EXPECT_NE(pjrt_client_1, pjrt_client_2);
}
TEST_F(PjRtStateTestFixture, GetNotExistPjRtClient) {
EXPECT_THAT(pjrt_state_->GetPjRtClient(tensorflow::DEVICE_CPU),
StatusIs(tensorflow::error::NOT_FOUND,
HasSubstr("PjRt client not found for device type")));
}
TEST_F(PjRtStateTestFixture, DeletePjRtClient) {
TF_ASSERT_OK_AND_ASSIGN(
auto pjrt_client,
xla::GetTfrtCpuClient(true, 1));
xla::PjRtClient* pjrt_client_ptr = pjrt_client.get();
TF_ASSERT_OK(pjrt_state_->SetPjRtClient(tensorflow::DEVICE_CPU,
std::move(pjrt_client)));
TF_ASSERT_OK(pjrt_state_->MovePjRtClientToUnused(tensorflow::DEVICE_CPU));
EXPECT_THAT(pjrt_state_->GetPjRtClient(tensorflow::DEVICE_CPU),
StatusIs(tensorflow::error::NOT_FOUND,
HasSubstr("PjRt client not found for device type")));
EXPECT_EQ(pjrt_client_ptr->platform_name(), "cpu");
}
TEST_F(PjRtStateTestFixture, DeleteNotExistPjRtClient) {
EXPECT_THAT(pjrt_state_->MovePjRtClientToUnused(tensorflow::DEVICE_CPU),
StatusIs(tensorflow::error::NOT_FOUND,
HasSubstr("PjRt client not found for device type")));
}
TEST_F(PjRtStateTestFixture, GetOrCreatePjRtClientExist) {
TF_ASSERT_OK_AND_ASSIGN(
auto pjrt_client,
xla::GetTfrtCpuClient(true, 1));
auto pjrt_client_ptr = pjrt_client.get();
TF_ASSERT_OK(pjrt_state_->SetPjRtClient(tensorflow::DEVICE_CPU,
std::move(pjrt_client)));
TF_ASSERT_OK_AND_ASSIGN(
auto pjrt_client_get,
pjrt_state_->GetOrCreatePjRtClient(tensorflow::DEVICE_CPU));
EXPECT_THAT(pjrt_client_get, pjrt_client_ptr);
}
TEST_F(PjRtStateTestFixture, GetOrCreatePjRtClientNotExist) {
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, pjrt_state_->GetOrCreatePjRtClient(
tensorflow::DEVICE_CPU));
EXPECT_THAT(pjrt_client, testing::NotNull());
}
} |
1,318 | cpp | tensorflow/tensorflow | metrics | third_party/xla/xla/pjrt/metrics.cc | tensorflow/cc/saved_model/metrics_test.cc | #ifndef XLA_TSL_FRAMEWORK_METRICS_H_
#define XLA_TSL_FRAMEWORK_METRICS_H_
#include <cstdint>
namespace tsl {
namespace metrics {
void UpdateBfcAllocatorDelayTime(const uint64_t delay_usecs);
}
}
#endif
#include "xla/tsl/framework/metrics.h"
#include <cstdint>
#include "tsl/lib/monitoring/counter.h"
namespace tsl {
namespace metrics {
namespace {
auto* bfc_allocator_delay =
    monitoring::Counter<0>::New("/tensorflow/core/bfc_allocator_delay",
                                "The total time, in microseconds, that "
                                "allocations were delayed while the BFC "
                                "allocator waited for memory to be freed.");
}
void UpdateBfcAllocatorDelayTime(const uint64_t delay_usecs) {
static auto* bfc_allocator_delay_cell = bfc_allocator_delay->GetCell();
if (delay_usecs > 0) {
bfc_allocator_delay_cell->IncrementBy(delay_usecs);
}
}
}
} | #include "tensorflow/cc/saved_model/metrics.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "json/json.h"
#include "json/reader.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/fingerprint.pb.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace metrics {
TEST(MetricsTest, TestSavedModelWrite) {
EXPECT_EQ(SavedModelWriteApi("foo").value(), 0);
SavedModelWriteApi("foo").IncrementBy(1);
EXPECT_EQ(SavedModelWriteApi("foo").value(), 1);
EXPECT_EQ(SavedModelWriteCount("1").value(), 0);
SavedModelWriteCount("1").IncrementBy(1);
EXPECT_EQ(SavedModelWriteCount("1").value(), 1);
}
TEST(MetricsTest, TestSavedModelRead) {
SavedModelReadApi("bar").IncrementBy(1);
EXPECT_EQ(SavedModelReadApi("bar").value(), 1);
SavedModelReadCount("2").IncrementBy(1);
EXPECT_EQ(SavedModelReadCount("2").value(), 1);
SavedModelReadApi("baz").IncrementBy(1);
EXPECT_EQ(SavedModelReadApi("baz").value(), 1);
SavedModelReadCount("2").IncrementBy(1);
EXPECT_EQ(SavedModelReadCount("2").value(), 2);
}
TEST(MetricsTest, TestCheckpointRead) {
EXPECT_EQ(CheckpointReadDuration("foo").value().num(), 0);
CheckpointReadDuration("foo").Add(100);
EXPECT_EQ(CheckpointReadDuration("foo").value().num(), 1);
}
TEST(MetricsTest, TestCheckpointWrite) {
EXPECT_EQ(CheckpointWriteDuration("foo").value().num(), 0);
CheckpointWriteDuration("foo").Add(100);
EXPECT_EQ(CheckpointWriteDuration("foo").value().num(), 1);
}
TEST(MetricsTest, TestAsyncCheckpointWrite) {
EXPECT_EQ(AsyncCheckpointWriteDuration("foo").value().num(), 0);
AsyncCheckpointWriteDuration("foo").Add(100);
EXPECT_EQ(AsyncCheckpointWriteDuration("foo").value().num(), 1);
}
TEST(MetricsTest, TestTrainingTimeSaved) {
EXPECT_EQ(TrainingTimeSaved("foo").value(), 0);
TrainingTimeSaved("foo").IncrementBy(100);
EXPECT_EQ(TrainingTimeSaved("foo").value(), 100);
}
TEST(MetricsTest, TestCheckpointSize) {
EXPECT_EQ(CheckpointSize("foo", 10).value(), 0);
CheckpointSize("foo", 10).IncrementBy(1);
EXPECT_EQ(CheckpointSize("foo", 10).value(), 1);
}
TEST(MetricsTest, TestWriteFingerprint) {
EXPECT_EQ(SavedModelWriteFingerprint().value(), "");
SavedModelWriteFingerprint().Set("foo");
EXPECT_EQ(SavedModelWriteFingerprint().value(), "foo");
SavedModelWriteFingerprint().Set("bar");
EXPECT_EQ(SavedModelWriteFingerprint().value(), "bar");
}
TEST(MetricsTest, TestWritePath) {
EXPECT_EQ(SavedModelWritePath().value(), "");
SavedModelWritePath().Set("foo");
EXPECT_EQ(SavedModelWritePath().value(), "foo");
SavedModelWritePath().Set("bar");
EXPECT_EQ(SavedModelWritePath().value(), "bar");
}
TEST(MetricsTest, TestWritePathAndSingleprint) {
EXPECT_EQ(SavedModelWritePathAndSingleprint().value(), "");
SavedModelWritePathAndSingleprint().Set("foo");
EXPECT_EQ(SavedModelWritePathAndSingleprint().value(), "foo");
SavedModelWritePathAndSingleprint().Set("bar");
EXPECT_EQ(SavedModelWritePathAndSingleprint().value(), "bar");
EXPECT_EQ(
MakeSavedModelPathAndSingleprint("path", "singleprint").value_or(""),
"path:singleprint");
}
TEST(MetricsTest, TestInvalidMakePathAndSingleprint) {
EXPECT_THAT(MakeSavedModelPathAndSingleprint("", "singleprint"),
testing::StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(MakeSavedModelPathAndSingleprint("path", ""),
testing::StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(MetricsTest, TestReadFingerprint) {
EXPECT_EQ(SavedModelReadFingerprint().value(), "");
SavedModelReadFingerprint().Set("foo");
EXPECT_EQ(SavedModelReadFingerprint().value(), "foo");
SavedModelReadFingerprint().Set("bar");
EXPECT_EQ(SavedModelReadFingerprint().value(), "bar");
}
TEST(MetricsTest, TestReadPath) {
EXPECT_EQ(SavedModelReadPath().value(), "");
SavedModelReadPath().Set("foo");
EXPECT_EQ(SavedModelReadPath().value(), "foo");
SavedModelReadPath().Set("bar");
EXPECT_EQ(SavedModelReadPath().value(), "bar");
}
TEST(MetricsTest, TestReadPathAndSingleprint) {
EXPECT_EQ(SavedModelReadPathAndSingleprint().value(), "");
SavedModelReadPathAndSingleprint().Set("foo");
EXPECT_EQ(SavedModelReadPathAndSingleprint().value(), "foo");
SavedModelReadPathAndSingleprint().Set("bar");
EXPECT_EQ(SavedModelReadPathAndSingleprint().value(), "bar");
TF_ASSERT_OK_AND_ASSIGN(
auto path_singleprint,
ParseSavedModelPathAndSingleprint("path/model:name:singleprint"));
auto [path, singleprint] = path_singleprint;
EXPECT_EQ(path, "path/model:name");
EXPECT_EQ(singleprint, "singleprint");
}
TEST(MetricsTest, TestMakeFingerprintJson) {
FingerprintDef fingerprint;
fingerprint.set_saved_model_checksum(1);
fingerprint.set_graph_def_program_hash(2);
fingerprint.set_signature_def_hash(3);
fingerprint.set_saved_object_graph_hash(4);
fingerprint.set_checkpoint_hash(5);
std::string serialized_fingerprint_json = MakeFingerprintJson(fingerprint);
EXPECT_EQ(
serialized_fingerprint_json,
"{\n\t\"checkpoint_hash\" : 5,\n\t\"graph_def_program_hash\" : "
"2,\n\t\"saved_model_checksum\" : 1,\n\t\"saved_object_graph_hash\" : "
"4,\n\t\"signature_def_hash\" : 3\n}");
Json::Value fingerprint_json = Json::objectValue;
Json::Reader reader = Json::Reader();
reader.parse(serialized_fingerprint_json, fingerprint_json);
EXPECT_EQ(fingerprint_json["saved_model_checksum"].asUInt64(), 1);
EXPECT_EQ(fingerprint_json["graph_def_program_hash"].asUInt64(), 2);
EXPECT_EQ(fingerprint_json["signature_def_hash"].asUInt64(), 3);
EXPECT_EQ(fingerprint_json["saved_object_graph_hash"].asUInt64(), 4);
EXPECT_EQ(fingerprint_json["checkpoint_hash"].asUInt64(), 5);
}
TEST(MetricsTest, TestFoundFingerprintOnLoad) {
EXPECT_EQ(SavedModelFoundFingerprintOnLoad().value(), "");
SavedModelFoundFingerprintOnLoad().Set(kFingerprintFound);
EXPECT_EQ(SavedModelFoundFingerprintOnLoad().value(), "FOUND");
SavedModelFoundFingerprintOnLoad().Set(kFingerprintNotFound);
EXPECT_EQ(SavedModelFoundFingerprintOnLoad().value(), "NOT_FOUND");
SavedModelFoundFingerprintOnLoad().Set(kFingerprintError);
EXPECT_EQ(SavedModelFoundFingerprintOnLoad().value(), "ERROR");
}
TEST(MetricsTest, TestShardingCallbackDuration) {
EXPECT_EQ(ShardingCallbackDuration().value(), 0);
ShardingCallbackDuration().IncrementBy(100);
EXPECT_EQ(ShardingCallbackDuration().value(), 100);
}
TEST(MetricsTest, TestNumCheckpointShardsWritten) {
EXPECT_EQ(NumCheckpointShardsWritten().value(), 0);
NumCheckpointShardsWritten().IncrementBy(10);
EXPECT_EQ(NumCheckpointShardsWritten().value(), 10);
}
TEST(MetricsTest, TestShardingCallbackDescription) {
EXPECT_EQ(ShardingCallbackDescription().value(), "");
ShardingCallbackDescription().Set("foo");
EXPECT_EQ(ShardingCallbackDescription().value(), "foo");
}
}
} |
1,319 | cpp | tensorflow/tensorflow | async_value_tensor | tensorflow/core/tfrt/common/async_value_tensor.cc | tensorflow/core/tfrt/common/async_value_tensor_test.cc | #ifndef TENSORFLOW_CORE_TFRT_COMMON_ASYNC_VALUE_TENSOR_H_
#define TENSORFLOW_CORE_TFRT_COMMON_ASYNC_VALUE_TENSOR_H_
#include <cstddef>
#include <memory>
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/types.h"
#include "tfrt/support/forward_decls.h"
#include "tfrt/support/ref_count.h"
namespace tensorflow {
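// Lets a tensorflow::Tensor carry a TFRT async value and/or a PJRT buffer
// instead of real backing memory. Instances are handed out by
// AsyncValueAllocator below as tagged opaque pointers.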
class AsyncValueTensor {
public:
static AsyncValueTensor* FromTensor(const Tensor* tensor);
const tfrt::RCReference<tfrt::AsyncValue>& GetAsyncRef();
void SetAsyncRef(tfrt::RCReference<tfrt::AsyncValue> av_ref);
std::shared_ptr<xla::PjRtBuffer> GetBuffer();
void SetBuffer(std::shared_ptr<xla::PjRtBuffer> buffer);
static AsyncValueTensor* FromOpaquePointer(void* ptr);
static void* ToOpaquePointer(AsyncValueTensor* tensor);
private:
tfrt::RCReference<tfrt::AsyncValue> av_ref_;
std::shared_ptr<xla::PjRtBuffer> buffer_;
};
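// Allocator whose "allocations" are tagged AsyncValueTensor handles rather
// than real buffers; AllocatesOpaqueHandle() tells the runtime not to treat
// the returned pointer as addressable tensor memory.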
class AsyncValueAllocator : public Allocator {
public:
void* AllocateRaw(size_t alignment, size_t num_bytes) override;
void DeallocateRaw(void* ptr) override;
bool AllocatesOpaqueHandle() const override { return true; }
string Name() override { return "async-value"; }
};
}
#endif
#include "tensorflow/core/tfrt/common/async_value_tensor.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/log/check.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/tensor.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/support/forward_decls.h"
namespace tensorflow {
namespace {
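// Low bit set on every pointer produced by ToOpaquePointer so that
// FromOpaquePointer can distinguish AsyncValueTensor handles from ordinary
// tensor buffers (which are aligned, so their low bit is clear).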
constexpr uintptr_t kTag = 0x1ULL;
}
AsyncValueTensor* AsyncValueTensor::FromTensor(const Tensor* tensor) {
AsyncValueTensor* av_tensor =
FromOpaquePointer(const_cast<char*>(tensor->tensor_data().data()));
return av_tensor;
}
const tfrt::RCReference<tfrt::AsyncValue>& AsyncValueTensor::GetAsyncRef() {
return av_ref_;
}
void AsyncValueTensor::SetAsyncRef(tfrt::RCReference<tfrt::AsyncValue> av_ref) {
av_ref_ = std::move(av_ref);
}
std::shared_ptr<xla::PjRtBuffer> AsyncValueTensor::GetBuffer() {
return buffer_;
}
void AsyncValueTensor::SetBuffer(std::shared_ptr<xla::PjRtBuffer> buffer) {
buffer_ = std::move(buffer);
}
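// Decodes a tagged handle; returns nullptr when the pointer does not carry
// kTag, i.e. the tensor is backed by ordinary memory.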
AsyncValueTensor* AsyncValueTensor::FromOpaquePointer(void* ptr) {
uintptr_t value = reinterpret_cast<uintptr_t>(ptr);
if (value & kTag) {
return reinterpret_cast<AsyncValueTensor*>(value & ~kTag);
} else {
return nullptr;
}
}
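// Encodes the AsyncValueTensor as an opaque pointer with kTag set; the
// CHECK ensures the low bit is free so the encoding is reversible.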
void* AsyncValueTensor::ToOpaquePointer(AsyncValueTensor* tensor) {
uintptr_t value = reinterpret_cast<uintptr_t>(tensor);
CHECK_EQ(value & kTag, 0);
value |= kTag;
return reinterpret_cast<AsyncValueTensor*>(value);
}
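// Ignores alignment and num_bytes: the "allocation" exists only to smuggle a
// fresh tagged AsyncValueTensor handle through the Tensor machinery.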
void* AsyncValueAllocator::AllocateRaw(size_t alignment, size_t num_bytes) {
return AsyncValueTensor::ToOpaquePointer(new AsyncValueTensor);
}
void AsyncValueAllocator::DeallocateRaw(void* ptr) {
delete AsyncValueTensor::FromOpaquePointer(ptr);
}
} | #include "tensorflow/core/tfrt/common/async_value_tensor.h"
#include <cstdint>
#include <memory>
#include <gtest/gtest.h>
#include "xla/pjrt/pjrt_client.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
namespace tensorflow {
namespace {
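// A tensor from a regular allocator has an untagged data pointer, so
// FromTensor must return nullptr.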
TEST(AsyncValueTensorTest, InvalidTensor) {
tensorflow::Tensor tensor(tensorflow::DT_INT64, tensorflow::TensorShape({1}));
AsyncValueTensor* avt = AsyncValueTensor::FromTensor(&tensor);
ASSERT_EQ(avt, nullptr);
}
TEST(AsyncValueTensorTest, SetAndGetAsyncValue) {
AsyncValueAllocator allocator;
tensorflow::Tensor tensor(&allocator, tensorflow::DT_INT64,
tensorflow::TensorShape({1}));
AsyncValueTensor* avt = AsyncValueTensor::FromTensor(&tensor);
ASSERT_NE(avt, nullptr);
tsl::AsyncValueRef<int32_t> value =
tsl::MakeConstructedAsyncValueRef<int32_t>(123);
avt->SetAsyncRef(value.CopyRCRef());
auto ret_value = avt->GetAsyncRef();
ASSERT_EQ(ret_value, value.CopyRCRef());
}
TEST(AsyncValueTensorTest, SetAndGetBuffer) {
AsyncValueAllocator allocator;
tensorflow::Tensor tensor(&allocator, tensorflow::DT_INT64,
tensorflow::TensorShape({1}));
AsyncValueTensor* avt = AsyncValueTensor::FromTensor(&tensor);
ASSERT_NE(avt, nullptr);
std::shared_ptr<xla::PjRtBuffer> buffer;
avt->SetBuffer(buffer);
auto ret_buffer = avt->GetBuffer();
ASSERT_EQ(ret_buffer, buffer);
}
}
} |
1,320 | cpp | tensorflow/tensorflow | pjrt_util | tensorflow/core/tfrt/common/pjrt_util.cc | tensorflow/core/tfrt/common/pjrt_util_test.cc | #ifndef TENSORFLOW_CORE_TFRT_COMMON_PJRT_UTIL_H_
#define TENSORFLOW_CORE_TFRT_COMMON_PJRT_UTIL_H_
#include <memory>
#include "absl/status/statusor.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tfrt/common/pjrt_state.h"
namespace tensorflow {
Status SetPjRtClientInTFGlobalResourceManager(
const DeviceType& device_type, std::unique_ptr<xla::PjRtClient> client);
absl::StatusOr<xla::PjRtClient*> GetPjRtClient(const DeviceType& device_type);
Status SetPjRtGpuClientCreationInfoInTFGlobalResourceManager(
std::unique_ptr<PjRtGpuClientCreationInfo> info);
absl::StatusOr<PjRtGpuClientCreationInfo*> GetPjRtGpuClientCreationInfo();
}
#endif
#include "tensorflow/core/tfrt/common/pjrt_util.h"
#include <memory>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tfrt/common/global_state.h"
#include "tensorflow/core/tfrt/common/pjrt_state.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
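// Stores `client` in the process-wide ResourceMgr, lazily creating the
// PjRtState resource on first use so callers need no explicit setup.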
Status SetPjRtClientInTFGlobalResourceManager(
const DeviceType& device_type, std::unique_ptr<xla::PjRtClient> client) {
ResourceMgr* rmgr = tfrt_global::GetTFGlobalResourceMgr();
PjRtState* pjrt_state;
TF_RETURN_IF_ERROR(rmgr->LookupOrCreate<PjRtState>(
rmgr->default_container(), kPjRtStateResourceName, &pjrt_state,
[&](PjRtState** ret) {
*ret = PjRtState::Create();
return absl::OkStatus();
}));
core::ScopedUnref pjrt_state_ref(pjrt_state);
if (client == nullptr) {
return errors::InvalidArgument("PJRT client is nullptr.");
}
TF_RETURN_IF_ERROR(pjrt_state->SetPjRtClient(device_type, std::move(client)));
return absl::OkStatus();
}
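// Returns the PJRT client registered for `device_type`, creating the
// PjRtState resource if it does not exist yet.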
absl::StatusOr<xla::PjRtClient*> GetPjRtClient(const DeviceType& device_type) {
ResourceMgr* rmgr = tfrt_global::GetTFGlobalResourceMgr();
PjRtState* pjrt_state;
TF_RETURN_IF_ERROR(rmgr->LookupOrCreate<PjRtState>(
rmgr->default_container(), kPjRtStateResourceName, &pjrt_state,
[&](PjRtState** ret) {
*ret = PjRtState::Create();
return absl::OkStatus();
}));
core::ScopedUnref pjrt_state_ref(pjrt_state);
return pjrt_state->GetPjRtClient(device_type);
}
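// The two functions below stash and retrieve the GPU client creation
// parameters through the same lazily created PjRtState resource.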
absl::Status SetPjRtGpuClientCreationInfoInTFGlobalResourceManager(
std::unique_ptr<PjRtGpuClientCreationInfo> info) {
ResourceMgr* rmgr = tfrt_global::GetTFGlobalResourceMgr();
PjRtState* pjrt_state;
TF_RETURN_IF_ERROR(rmgr->LookupOrCreate<PjRtState>(
rmgr->default_container(), kPjRtStateResourceName, &pjrt_state,
[&](PjRtState** ret) {
*ret = PjRtState::Create();
return absl::OkStatus();
}));
core::ScopedUnref pjrt_state_ref(pjrt_state);
if (info == nullptr) {
return absl::InvalidArgumentError("PJRT client creation info is nullptr.");
}
TF_RETURN_IF_ERROR(pjrt_state->SetPjRtGpuClientCreationInfo(std::move(info)));
return absl::OkStatus();
}
absl::StatusOr<PjRtGpuClientCreationInfo*> GetPjRtGpuClientCreationInfo() {
ResourceMgr* rmgr = tfrt_global::GetTFGlobalResourceMgr();
PjRtState* pjrt_state;
TF_RETURN_IF_ERROR(rmgr->LookupOrCreate<PjRtState>(
rmgr->default_container(), kPjRtStateResourceName, &pjrt_state,
[&](PjRtState** ret) {
*ret = PjRtState::Create();
return absl::OkStatus();
}));
core::ScopedUnref pjrt_state_ref(pjrt_state);
return pjrt_state->GetPjRtGpuClientCreationInfo();
}
} | #include "tensorflow/core/tfrt/common/pjrt_util.h"
#include <memory>
#include <utility>
#include "xla/pjrt/cpu/cpu_client.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/tfrt/common/pjrt_state.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
using ::testing::ElementsAre;
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
TEST(PjRtUtilTest, SetGetAndDeletePjRtClient) {
TF_ASSERT_OK(SetPjRtClientInTFGlobalResourceManager(
DEVICE_CPU,
      xla::GetTfrtCpuClient(/*asynchronous=*/true, /*cpu_device_count=*/1)
          .value()));
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, GetPjRtClient(DEVICE_CPU));
EXPECT_THAT(pjrt_client, ::testing::NotNull());
}
TEST(PjRtStateResourceManagerTest, SetNullPjRtClient) {
EXPECT_THAT(
SetPjRtClientInTFGlobalResourceManager(DEVICE_CPU, nullptr),
StatusIs(error::INVALID_ARGUMENT, HasSubstr("PJRT client is nullptr")));
}
TEST(PjRtGpuClientCreationInfoTest, SetAndGet) {
auto info = std::make_unique<PjRtGpuClientCreationInfo>();
info->allowed_devices.insert(123);
TF_ASSERT_OK(
SetPjRtGpuClientCreationInfoInTFGlobalResourceManager(std::move(info)));
TF_ASSERT_OK_AND_ASSIGN(PjRtGpuClientCreationInfo * retrieved_info,
GetPjRtGpuClientCreationInfo());
EXPECT_THAT(retrieved_info->allowed_devices, ElementsAre(123));
}
}
} |
1,321 | cpp | tensorflow/tensorflow | tfrt_session | tensorflow/core/tfrt/tfrt_session/tfrt_session.cc | tensorflow/core/tfrt/tfrt_session/tfrt_session_test.cc | #ifndef TENSORFLOW_CORE_TFRT_TFRT_SESSION_TFRT_SESSION_H_
#define TENSORFLOW_CORE_TFRT_TFRT_SESSION_TFRT_SESSION_H_
#include <cstdint>
#include <functional>
#include <memory>
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "tensorflow/compiler/mlir/tfrt/backend_compiler.h"
#include "tensorflow/compiler/mlir/tfrt/translate/tfrt_compile_options.h"
#include "tensorflow/core/common_runtime/session_factory.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
struct TfrtThreadpoolOptions {
int32_t num_main_threads = port::MaxParallelism();
absl::Duration init_timeout = absl::Milliseconds(100);
int32_t max_concurrent_handler = 128;
int32_t num_sub_thread_pool = 1;
};
struct TfrtSessionOptions {
TfrtThreadpoolOptions threadpool_options;
tensorflow::tfrt_stub::Runtime* runtime = nullptr;
bool enable_mlrt = false;
bool use_tpu = false;
bool use_gpu = false;
tensorflow::BackendCompiler* backend_compiler = nullptr;
};
class TfrtSessionFactory : public tensorflow::SessionFactory {
public:
TfrtSessionFactory();
bool AcceptsOptions(const SessionOptions& options) override;
Status NewSession(const SessionOptions& options,
Session** out_session) override TF_LOCKS_EXCLUDED(mutex_);
using RuntimeInitializer = absl::Status (*)(tfrt_stub::Runtime*);
static void RegisterInitializer(RuntimeInitializer initializer);
static tfrt_stub::Runtime* GetRuntime();
private:
class ThreadPoolManager;
friend Status InitializeTfrtSession(const TfrtSessionOptions& options);
friend Status UpdateTfrtSessionOptionsLocked(
const TfrtSessionOptions& options);
Status InitializeLocked(const TfrtSessionOptions& options)
TF_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
bool IsInitialized() const TF_EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
return runtime_ != nullptr;
}
mutable absl::Mutex mutex_;
mutable absl::Mutex runtime_mutex_;
tensorflow::tfrt_stub::Runtime* runtime_ TF_GUARDED_BY(mutex_) = nullptr;
std::unique_ptr<tensorflow::tfrt_stub::Runtime> owned_runtime_
TF_GUARDED_BY(mutex_);
TfrtDeviceInfraTarget device_target_ TF_GUARDED_BY(mutex_) =
TfrtDeviceInfraTarget::kCpu;
bool tpu_use_tpu_runner_ TF_GUARDED_BY(mutex_) = false;
bool use_gpu_ TF_GUARDED_BY(mutex_) = false;
std::unique_ptr<ThreadPoolManager> thread_pool_manager_ TF_GUARDED_BY(mutex_);
bool enable_mlrt_ TF_GUARDED_BY(mutex_) = false;
tensorflow::BackendCompiler* backend_compiler_ TF_GUARDED_BY(mutex_);
};
Status InitializeTfrtSession(const TfrtSessionOptions& options);
Status UpdateTfrtSessionOptionsLocked(const TfrtSessionOptions& options);
}
#endif
#include "tensorflow/core/tfrt/tfrt_session/tfrt_session.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/die_if_null.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "Eigen/ThreadPool"
#include "llvm/ADT/STLExtras.h"
#include "tensorflow/compiler/mlir/tfrt/translate/tfrt_compile_options.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/local_session_selection.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/common_runtime/session_factory.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/threadpool.h"
#include "tensorflow/core/platform/threadpool_interface.h"
#include "tensorflow/core/platform/threadpool_options.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/tfrt/graph_executor/graph_execution_options.h"
#include "tensorflow/core/tfrt/graph_executor/graph_executor.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tensorflow/core/tfrt/mlrt/kernel/batch_kernel.h"
#include "tensorflow/core/tfrt/mlrt/kernel/kernel.h"
#include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
#include "tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue.h"
#include "tensorflow/core/tfrt/runtime/work_queue_interface.h"
#include "tensorflow/core/tfrt/utils/utils.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/thread_annotations.h"
#include "tfrt/core_runtime/core_runtime.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/resource_context.h"
namespace tensorflow {
namespace {
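// Adapts a non-owned Eigen::ThreadPoolInterface to the
// tensorflow::thread::ThreadPoolInterface consumed by ThreadPoolOptions.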
class ThreadPoolInterfaceWrapper : public thread::ThreadPoolInterface {
public:
explicit ThreadPoolInterfaceWrapper(Eigen::ThreadPoolInterface* thread_pool)
: thread_pool_{thread_pool} {
DCHECK(thread_pool);
}
void Schedule(std::function<void()> fn) override {
return thread_pool().Schedule(std::move(fn));
}
void ScheduleWithHint(std::function<void()> fn, int start, int end) override {
return thread_pool().ScheduleWithHint(std::move(fn), start, end);
}
void Cancel() override { thread_pool().Cancel(); }
int NumThreads() const override { return thread_pool().NumThreads(); }
int CurrentThreadId() const override {
return thread_pool().CurrentThreadId();
}
private:
Eigen::ThreadPoolInterface& thread_pool() const {
DCHECK(thread_pool_);
return *thread_pool_;
}
Eigen::ThreadPoolInterface* thread_pool_ = nullptr;
};
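// Fixed-size table of inter-op thread pools (not owned), indexed by
// RunOptions::inter_op_thread_pool.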
class TfrtSessionInterOpThreadPools {
public:
TfrtSessionInterOpThreadPools(int size, bool run_in_caller_thread)
: thread_pools_(size), run_in_caller_thread_(run_in_caller_thread) {}
void SetThreadPool(int index, ThreadPoolInterfaceWrapper* thread_pool) {
thread_pools_.at(index) = thread_pool;
}
absl::StatusOr<ThreadPoolInterfaceWrapper*> GetThreadPool(int index) {
if (index < 0 || index >= thread_pools_.size())
return errors::InvalidArgument("Invalid thread pool index ", index);
return thread_pools_[index];
}
bool run_in_caller_thread() const { return run_in_caller_thread_; }
private:
std::vector<ThreadPoolInterfaceWrapper*> thread_pools_;
bool run_in_caller_thread_;
};
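// A tensorflow::Session implementation backed by a TFRT GraphExecutor.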
class TfrtSession : public tensorflow::Session {
public:
explicit TfrtSession(const SessionOptions& options,
tensorflow::tfrt_stub::Runtime* runtime,
TfrtDeviceInfraTarget device_target,
bool tpu_use_tpu_runner, bool use_gpu,
TfrtSessionInterOpThreadPools inter_op_thread_pools,
bool enable_mlrt,
tensorflow::BackendCompiler* backend_compiler)
: runtime_{runtime},
device_target_{device_target},
tpu_use_tpu_runner_{tpu_use_tpu_runner},
use_gpu_{use_gpu},
inter_op_thread_pools_{std::move(inter_op_thread_pools)},
enable_mlrt_(enable_mlrt),
options_{options},
backend_compiler_(backend_compiler) {}
Status Create(const GraphDef& graph) override {
return Create(GraphDef(graph));
}
Status Create(GraphDef&& graph) override {
absl::MutexLock lock(&session_state_lock_);
return CreateLocked(std::move(graph));
}
Status CreateLocked(GraphDef graph)
TF_EXCLUSIVE_LOCKS_REQUIRED(session_state_lock_) {
if (graph.node_size() == 0) {
LOG(ERROR) << "Ignoring empty graph.";
return absl::OkStatus();
}
if (session_state_ == SessionState::kCreated) {
return errors::AlreadyExists(
"A Graph has already been created for this session.");
}
TF_RETURN_IF_ERROR(CheckNotClosedLocked());
auto options = GetGraphExecutionOptions();
tensorflow::tfrt_stub::UpdateTpuTargetByBridgeCompatibility(options, graph);
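    // Drop the legacy ConfigureDistributedTPU node if present; it is not
    // needed on this execution path.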
auto* nodes = graph.mutable_node();
for (auto it = nodes->begin(), end = nodes->end(); it != end; ++it) {
if (it->name() == "ConfigureDistributedTPU") {
nodes->erase(it);
break;
}
}
auto session_options =
tensorflow::tfrt_stub::CreateDefaultSessionOptions(options);
session_options.config.mutable_experimental()
->set_optimize_for_static_graph(
options_.config.experimental().optimize_for_static_graph());
session_options.config.mutable_experimental()
->set_disable_optimize_for_static_graph(
options_.config.experimental().disable_optimize_for_static_graph());
LOG_FIRST_N(INFO, 10) << "SessionOptions: "
<< session_options.config.DebugString();
const auto& fdef_lib = graph.library();
TF_ASSIGN_OR_RETURN(auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
session_options, fdef_lib));
auto kernel_registry = std::make_unique<mlrt::KernelRegistry>();
tensorflow::tf_mlrt::RegisterTfMlrtKernels(*kernel_registry);
tensorflow::tf_mlrt::RegisterTfMlrtBatchKernels(*kernel_registry);
auto resource_context = std::make_unique<tfrt::ResourceContext>();
tfrt_stub::ModelRuntimeContext model_context(
&options, "unknown_export_dir", resource_context.get());
model_context.set_graph_def(&graph);
model_context.set_device_mgr(&fallback_state->device_manager());
model_context.set_is_local_session(
!options_.config.experimental().enable_multi_host());
TF_RETURN_IF_ERROR(options.runtime->CreateRuntimeResources(model_context));
GraphOptimizationPassOptions optimization_options;
optimization_options.session_options = &options_;
FunctionLibraryDefinition flib_def = fallback_state->func_lib_def();
optimization_options.flib_def = &flib_def;
std::unordered_map<string, std::unique_ptr<Graph>> partition_graphs;
auto initial_graph =
std::make_unique<tensorflow::Graph>(tensorflow::OpRegistry::Global());
tensorflow::GraphConstructorOptions opts;
opts.allow_internal_ops = true;
TF_RETURN_IF_ERROR(
tensorflow::ConvertGraphDefToGraph(opts, graph, initial_graph.get()));
partition_graphs["graph"] = std::move(initial_graph);
optimization_options.partition_graphs = &partition_graphs;
OptimizationPassRegistry::Global()->LogAllGroupings(1);
TF_RETURN_IF_ERROR(OptimizationPassRegistry::Global()->RunGrouping(
OptimizationPassRegistry::POST_PARTITIONING, optimization_options));
LOG_FIRST_N(INFO, 10) << "GraphExecutionOptions: " << options;
TF_ASSIGN_OR_RETURN(
graph_executor_,
tensorflow::tfrt_stub::GraphExecutor::Create(
options, std::move(fallback_state), std::move(resource_context),
std::move(graph), std::move(kernel_registry)));
session_state_ = SessionState::kCreated;
return absl::OkStatus();
}
Status Extend(const GraphDef& graph) override {
return Extend(GraphDef(graph));
}
Status Extend(GraphDef&& graph) override {
absl::MutexLock lock(&session_state_lock_);
return ExtendLocked(std::move(graph));
}
Status ExtendLocked(GraphDef graph)
TF_EXCLUSIVE_LOCKS_REQUIRED(session_state_lock_) {
if (session_state_ == SessionState::kCreated) {
return graph_executor_->Extend(graph);
}
return CreateLocked(std::move(graph));
}
Status RunInternal(const RunOptions& run_options,
const std::vector<std::pair<std::string, Tensor>>& inputs,
const std::vector<std::string>& output_tensor_names,
const std::vector<std::string>& target_node_names,
std::vector<Tensor>* outputs,
const thread::ThreadPoolOptions& thread_pool_options) {
{
absl::MutexLock lock(&session_state_lock_);
if (session_state_ == SessionState::kInitialized) {
return errors::Unavailable("Session not created yet.");
}
TF_RETURN_IF_ERROR(CheckNotClosedLocked());
}
DCHECK(outputs || output_tensor_names.empty()) << "No outputs in Run()";
tensorflow::tfrt_stub::GraphExecutionRunOptions
graph_execution_run_options{};
if (run_options.timeout_in_ms() > 0) {
graph_execution_run_options.deadline = absl::ToChronoTime(
absl::Now() + absl::Milliseconds(run_options.timeout_in_ms()));
}
std::unique_ptr<tensorflow::tfrt_stub::WorkQueueInterface> work_queue;
auto* const intra_op_thread_pool = thread_pool_options.intra_op_threadpool;
if (inter_op_thread_pools_.run_in_caller_thread() ||
run_options.inter_op_thread_pool() == -1) {
work_queue = tfrt_stub::WrapDefaultWorkQueue(
tfrt::CreateSingleThreadedWorkQueue(), intra_op_thread_pool);
} else if (thread_pool_options.inter_op_threadpool != nullptr) {
work_queue =
std::make_unique<tensorflow::tfrt_stub::TfThreadPoolWorkQueue>(
tfrt::GetUniqueInt(), intra_op_thread_pool,
thread_pool_options.inter_op_threadpool);
} else {
TF_ASSIGN_OR_RETURN(auto* thread_pool,
inter_op_thread_pools_.GetThreadPool(
run_options.inter_op_thread_pool()));
work_queue =
std::make_unique<tensorflow::tfrt_stub::TfThreadPoolWorkQueue>(
tfrt::GetUniqueInt(), intra_op_thread_pool, thread_pool);
}
graph_execution_run_options.work_queue = work_queue.get();
std::vector<Tensor> output_tensors;
TF_RETURN_IF_ERROR(graph_executor_->Run(
graph_execution_run_options, inputs, output_tensor_names,
target_node_names, &output_tensors));
if (outputs) {
DCHECK_EQ(output_tensors.size(), output_tensor_names.size());
outputs->swap(output_tensors);
} else {
DCHECK(output_tensor_names.empty()) << "No outputs in Run()";
}
return absl::OkStatus();
}
Status Run(const std::vector<std::pair<std::string, Tensor>>& inputs,
const std::vector<std::string>& output_tensor_names,
const std::vector<std::string>& target_node_names,
std::vector<Tensor>* outputs) override {
return RunInternal(RunOptions{}, inputs, output_tensor_names,
target_node_names, outputs, {});
}
Status Run(const RunOptions& run_options,
const std::vector<std::pair<std::string, Tensor>>& inputs,
const std::vector<std::string>& output_tensor_names,
const std::vector<std::string>& target_node_names,
std::vector<Tensor>* outputs, RunMetadata* run_metadata) override {
return Run(run_options, inputs, output_tensor_names, target_node_names,
outputs, run_metadata, {});
}
Status Run(const RunOptions& run_options,
const std::vector<std::pair<std::string, Tensor>>& inputs,
const std::vector<std::string>& output_tensor_names,
const std::vector<std::string>& target_tensor_names,
std::vector<Tensor>* outputs, RunMetadata* run_metadata,
const thread::ThreadPoolOptions& thread_pool_options) override {
return RunInternal(run_options, inputs, output_tensor_names,
target_tensor_names, outputs, thread_pool_options);
}
Status MakeCallable(const CallableOptions& callable_options,
CallableHandle* out_handle) override {
absl::MutexLock lock(&callables_lock_);
*out_handle = next_callable_handle_++;
assert(callables_.find(*out_handle) == callables_.end());
callables_[*out_handle] = {callable_options};
return absl::OkStatus();
}
Status RunCallable(CallableHandle handle,
const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors,
RunMetadata* run_metadata) override {
return RunCallable(handle, feed_tensors, fetch_tensors, run_metadata, {});
}
Status RunCallable(
CallableHandle handle, const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors, RunMetadata* run_metadata,
const thread::ThreadPoolOptions& thread_pool_options) override {
Callable callable;
{
absl::MutexLock lock(&callables_lock_);
auto it = callables_.find(handle);
if (it == callables_.end())
return errors::InvalidArgument("No such callable handle: ", handle);
callable = it->second;
}
if (callable.callable_options.feed_size() != feed_tensors.size())
return errors::InvalidArgument("Invalid number of feed tensors");
std::vector<std::pair<std::string, Tensor>> inputs;
for (const auto& it :
llvm::zip(callable.callable_options.feed(), feed_tensors)) {
inputs.emplace_back(std::make_pair(std::get<0>(it), std::get<1>(it)));
}
std::vector<std::string> output_tensor_names;
for (const auto& tensor_name : callable.callable_options.fetch()) {
output_tensor_names.emplace_back(tensor_name);
}
std::vector<std::string> target_node_names;
for (const auto& node_name : callable.callable_options.target()) {
target_node_names.emplace_back(node_name);
}
return Run(inputs, output_tensor_names, target_node_names, fetch_tensors);
}
Status ReleaseCallable(CallableHandle handle) override {
absl::MutexLock lock(&callables_lock_);
auto it = callables_.find(handle);
if (it == callables_.end())
return errors::InvalidArgument("No such callable handle: ", handle);
callables_.erase(it);
return absl::OkStatus();
}
Status Close() override {
absl::MutexLock lock(&session_state_lock_);
session_state_ = SessionState::kClosed;
return absl::OkStatus();
}
Status ListDevices(std::vector<DeviceAttributes>* response) override {
return errors::Unimplemented("TfrtSession::ListDevices is Unimplemented.");
}
Status LocalDeviceManager(const DeviceMgr** output) override {
*output = &graph_executor_->fallback_state().device_manager();
return absl::OkStatus();
}
private:
tfrt::HostContext* GetHostContext() {
return runtime_->core_runtime()->GetHostContext();
}
tensorflow::tfrt_stub::GraphExecutionOptions GetGraphExecutionOptions()
const {
::tensorflow::tfrt_stub::GraphExecutionOptions options(runtime_);
auto& compile_options = options.compile_options;
    compile_options.variable_device = DeviceNameUtils::FullName(
        /*job=*/"localhost", /*replica=*/0, /*task=*/0, /*type=*/"CPU",
        /*id=*/0);
compile_options.enable_grappler = true;
compile_options.device_target = device_target_;
compile_options.tpu_fuse_ops = tpu_use_tpu_runner_;
compile_options.hoist_invariant_ops = true;
compile_options.sink_in_invariant_ops = false;
compile_options.cost_threshold = 1024;
if (use_gpu_) {
options.enable_tfrt_gpu = true;
options.enable_grappler_function_optimizer = true;
}
compile_options.use_tpu_host_allocator_for_inputs = tpu_use_tpu_runner_;
options.compile_options.backend_compiler = backend_compiler_;
options.model_metadata = options_.config.experimental().session_metadata();
options.enable_mlrt = enable_mlrt_;
return options;
}
Status CheckNotClosedLocked() const
TF_EXCLUSIVE_LOCKS_REQUIRED(session_state_lock_) {
if (session_state_ == SessionState::kClosed) {
return errors::Cancelled("Session has been closed.");
}
return absl::OkStatus();
}
struct Callable {
CallableOptions callable_options;
};
enum class SessionState {
kInitialized,
kCreated,
kClosed,
};
mutable absl::Mutex session_state_lock_;
SessionState session_state_ TF_GUARDED_BY(session_state_lock_) =
SessionState::kInitialized;
std::unique_ptr<::tensorflow::tfrt_stub::GraphExecutor> graph_executor_;
tensorflow::tfrt_stub::Runtime* runtime_ = nullptr;
const TfrtDeviceInfraTarget device_target_;
const bool tpu_use_tpu_runner_;
const bool use_gpu_;
TfrtSessionInterOpThreadPools inter_op_thread_pools_;
mutable absl::Mutex callables_lock_;
CallableHandle next_callable_handle_ TF_GUARDED_BY(callables_lock_) = 0;
absl::flat_hash_map<CallableHandle, Callable> callables_
TF_GUARDED_BY(callables_lock_);
bool enable_mlrt_ = false;
SessionOptions options_ = SessionOptions();
tensorflow::BackendCompiler* backend_compiler_ = nullptr;
};
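// Builds the run-handler work queue from the session's threadpool options,
// splitting num_main_threads evenly across num_sub_thread_pool sub-pools and
// giving the last sub-pool any remainder.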
std::unique_ptr<tensorflow::tfrt_stub::WorkQueueInterface>
CreateRunHandlerWorkQueue(const TfrtThreadpoolOptions& session_options) {
int num_complementary_threads =
std::max(1, session_options.num_main_threads / 2);
tfrt::tf::RunHandlerThreadWorkQueue::Options options;
  options.num_main_threads = session_options.num_main_threads;
  options.num_complementary_threads = num_complementary_threads;
  options.init_timeout_ms =
      absl::ToInt64Milliseconds(session_options.init_timeout);
  options.max_concurrent_handler = session_options.max_concurrent_handler;
  options.num_sub_thread_pool = session_options.num_sub_thread_pool;
std::vector<int> num_threads;
const int num_threads_per_pool =
options.num_main_threads / options.num_sub_thread_pool;
num_threads.resize(options.num_sub_thread_pool - 1, num_threads_per_pool);
num_threads.push_back(options.num_main_threads -
(options.num_sub_thread_pool - 1) *
num_threads_per_pool);
options.num_threads_in_sub_thread_pool = num_threads;
options.sub_thread_request_percentage = {1.0};
options.use_adaptive_waiting_time = true;
LOG_FIRST_N(INFO, 10) << "RunHandlerThreadWorkQueue Options: " << options;
return std::make_unique<tfrt::tf::RunHandlerThreadWorkQueue>(options);
}
}
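// Owns the inter-op thread pools shared across TfrtSessions and maps
// SessionOptions thread-pool configurations onto them.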
class TfrtSessionFactory::ThreadPoolManager {
public:
absl::StatusOr<TfrtSessionInterOpThreadPools> UpdateAndGetInterOpThreadPools(
const SessionOptions& options) {
if (options.config.inter_op_parallelism_threads() > 0) {
LOG(WARNING) << "TFRT session does not support positive "
"inter_op_parallelism_threads for now";
}
if (options.config.use_per_session_threads()) {
return errors::InvalidArgument(
"TFRT session does not yet support use_per_session_threads()");
}
auto session_inter_op_thread_pool_size =
options.config.session_inter_op_thread_pool_size();
if (session_inter_op_thread_pool_size > 0) {
TfrtSessionInterOpThreadPools inter_op_thread_pools{
          session_inter_op_thread_pool_size, /*run_in_caller_thread=*/false};
for (const auto& it :
llvm::enumerate(options.config.session_inter_op_thread_pool())) {
const ThreadPoolOptionProto& pool_options = it.value();
auto pool_index = it.index();
auto num_threads = pool_options.num_threads();
if (num_threads != 0) {
TF_ASSIGN_OR_RETURN(
auto* thread_pool,
GetOrCreateThreadPool(options.env, pool_options, pool_index));
inter_op_thread_pools.SetThreadPool(pool_index, thread_pool);
} else {
inter_op_thread_pools.SetThreadPool(pool_index,
GlobalThreadPool(options));
}
}
return inter_op_thread_pools;
} else if (options.config.inter_op_parallelism_threads() < 0) {
      return TfrtSessionInterOpThreadPools{/*size=*/0,
                                           /*run_in_caller_thread=*/true};
} else if (session_inter_op_thread_pool_size == 0) {
TfrtSessionInterOpThreadPools session_thread_pool_options{
          /*size=*/1, /*run_in_caller_thread=*/false};
session_thread_pool_options.SetThreadPool(0, GlobalThreadPool(options));
return session_thread_pool_options;
} else {
return errors::InvalidArgument(
"session_inter_op_thread_pool_size must be >= 0");
}
}
private:
class ThreadPoolWithNumThreads {
public:
ThreadPoolWithNumThreads(int n | #include "tensorflow/core/tfrt/tfrt_session/tfrt_session.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "absl/time/time.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/saved_model/reader.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/threadpool_options.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_testutil.h"
#include "tensorflow/core/tfrt/utils/thread_pool.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/protobuf.h"
namespace tensorflow {
namespace {
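// Initializes the global TfrtSession factory once for the whole test binary;
// installed via AddGlobalTestEnvironment in main() at the bottom of the file.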
class TfrtSessionEnvironment : public ::testing::Environment {
public:
void SetUp() override {
TfrtSessionOptions options{
.threadpool_options = tensorflow::TfrtThreadpoolOptions{
.num_main_threads = tensorflow::port::MaxParallelism(),
.init_timeout = absl::Milliseconds(100),
.max_concurrent_handler = 128,
.num_sub_thread_pool = 1}};
TF_ASSERT_OK(InitializeTfrtSession(options));
}
};
class TfrtSessionTest : public ::testing::Test {
protected:
void SetUp() override {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
auto* model_metadata =
options.config.mutable_experimental()->mutable_session_metadata();
model_metadata->set_name("toy_v1");
model_metadata->set_version(0);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
TF_ASSERT_OK(session_->Create(meta_graph_def.graph_def()));
    TF_ASSERT_OK(session_->Run(/*inputs=*/{}, /*output_tensor_names=*/{},
                               /*target_node_names=*/{"init"},
                               /*outputs=*/nullptr));
inputs_.push_back(std::make_pair(
"input1", test::AsTensor<int32_t>({1, 1, 1}, TensorShape{1, 3})));
inputs_.push_back(std::make_pair(
"input2", test::AsTensor<int32_t>({2, 2, 2}, TensorShape{1, 3})));
inputs_.push_back(std::make_pair(
"input3", test::AsTensor<int32_t>({3, 3, 3}, TensorShape{1, 3})));
}
std::unique_ptr<Session> session_;
std::vector<std::pair<std::string, Tensor>> inputs_;
std::vector<std::string> output_tensor_names_{"result1", "result21",
"result31"};
std::vector<std::string> target_node_names_{"result22", "result32"};
};
TEST_F(TfrtSessionTest, NoTargetNodes) {
std::vector<Tensor> outputs;
TF_ASSERT_OK(session_->Run(inputs_, output_tensor_names_,
                             /*target_node_names=*/{}, &outputs));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
test::ExpectEqual(outputs[1],
test::AsTensor<int32_t>({12}, TensorShape{1, 1}));
test::ExpectEqual(outputs[2],
test::AsTensor<int32_t>({18}, TensorShape{1, 1}));
}
TEST_F(TfrtSessionTest, RunOptions) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
auto* model_metadata =
options.config.mutable_experimental()->mutable_session_metadata();
model_metadata->set_name("toy_v1");
model_metadata->set_version(0);
auto session = absl::WrapUnique(NewSession(options));
ASSERT_TRUE(session != nullptr);
tensorflow::GraphDef graph_def;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
R"pb(
node: {
name: "input"
op: "Placeholder"
attr: {
key: "dtype"
value: { type: DT_INT32 }
}
}
node: {
name: "sleep_seconds"
op: "Const"
attr: {
key: "dtype"
value: { type: DT_INT32 }
}
attr: {
key: "value"
value: {
tensor: {
tensor_shape: {}
dtype: DT_INT32
int_val: 2
}
}
}
}
node: {
name: "sleep"
op: "SleepIdentityOp"
input: "sleep_seconds:0"
input: "input:0"
attr: {
key: "T"
value: { type: DT_INT32 }
}
})pb"
,
&graph_def));
TF_ASSERT_OK(session->Create(graph_def));
std::vector<Tensor> outputs;
RunMetadata run_metadata;
TF_ASSERT_OK(session->Run(
RunOptions{},
{{"input", test::AsTensor<int32_t>({1}, TensorShape{1})}},
{"sleep"},
      /*target_node_names=*/{}, &outputs, &run_metadata));
ASSERT_EQ(outputs.size(), 1);
test::ExpectEqual(outputs[0], test::AsTensor<int32_t>({1}, TensorShape{1}));
RunOptions run_options;
run_options.set_timeout_in_ms(1);
auto status = session->Run(
run_options,
{{"input", test::AsTensor<int32_t>({1}, TensorShape{1})}},
{"sleep"},
      /*target_node_names=*/{}, &outputs, &run_metadata);
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.ToString(), ::testing::HasSubstr("Deadline exceeded"));
}
TEST_F(TfrtSessionTest, ThreadPoolOptions) {
std::vector<Tensor> outputs;
RunMetadata run_metadata;
tfrt_stub::TfThreadPool intra_op_thread_pool("tf_intra",
1);
tfrt_stub::TfThreadPool inter_op_thread_pool(
"tf_inter",
1);
thread::ThreadPoolOptions thread_pool_options{
.inter_op_threadpool = &inter_op_thread_pool,
.intra_op_threadpool = &intra_op_thread_pool};
TF_ASSERT_OK(session_->Run(RunOptions{}, inputs_, output_tensor_names_,
                             /*target_tensor_names=*/{}, &outputs,
&run_metadata, thread_pool_options));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
}
TEST_F(TfrtSessionTest, ThreadPoolOptions_OnlyInter) {
std::vector<Tensor> outputs;
RunMetadata run_metadata;
  tfrt_stub::TfThreadPool inter_op_thread_pool("tf_inter", /*num_threads=*/1);
thread::ThreadPoolOptions thread_pool_options{
.inter_op_threadpool = &inter_op_thread_pool,
.intra_op_threadpool = nullptr};
TF_ASSERT_OK(session_->Run(RunOptions{}, inputs_, output_tensor_names_,
                             /*target_tensor_names=*/{}, &outputs,
&run_metadata, thread_pool_options));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
}
TEST_F(TfrtSessionTest, ThreadPoolOptions_OnlyIntra) {
std::vector<Tensor> outputs;
RunMetadata run_metadata;
tfrt_stub::TfThreadPool intra_op_thread_pool("tf_intra",
1);
thread::ThreadPoolOptions thread_pool_options{
.inter_op_threadpool = nullptr,
.intra_op_threadpool = &intra_op_thread_pool};
TF_ASSERT_OK(session_->Run(RunOptions{}, inputs_, output_tensor_names_,
                             /*target_tensor_names=*/{}, &outputs,
&run_metadata, thread_pool_options));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
}
TEST_F(TfrtSessionTest, RunInCallerThreadSessionOptions) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
options.config.set_inter_op_parallelism_threads(-1);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
TF_ASSERT_OK(session_->Create(meta_graph_def.graph_def()));
RunMetadata run_metadata;
TF_ASSERT_OK(session_->Run(
      /*run_options=*/{}, /*inputs=*/{}, /*output_tensor_names=*/{},
      /*target_node_names=*/{"init"}, /*outputs=*/nullptr, &run_metadata));
}
TEST_F(TfrtSessionTest, RunInCallerThreadRunOptions) {
std::vector<Tensor> outputs;
RunOptions run_options;
run_options.set_inter_op_thread_pool(-1);
RunMetadata run_metadata;
TF_ASSERT_OK(session_->Run(run_options, inputs_, output_tensor_names_,
                             /*target_node_names=*/{}, &outputs,
&run_metadata));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
}
TEST_F(TfrtSessionTest, IntraOpThreadPoolOptionWarning) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
options.config.set_intra_op_parallelism_threads(1);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
}
TEST_F(TfrtSessionTest, Callable) {
CallableOptions callable_options;
std::vector<Tensor> feed_tensors;
for (auto& input : inputs_) {
callable_options.add_feed(input.first);
feed_tensors.emplace_back(input.second);
}
for (auto& output : output_tensor_names_) {
callable_options.add_fetch(output);
}
for (auto& target : target_node_names_) {
callable_options.add_target(target);
}
Session::CallableHandle callable_handle;
TF_ASSERT_OK(session_->MakeCallable(callable_options, &callable_handle));
std::vector<Tensor> outputs;
RunMetadata run_metadata;
TF_ASSERT_OK(session_->RunCallable(callable_handle, feed_tensors, &outputs,
&run_metadata));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
TF_ASSERT_OK(session_->ReleaseCallable(callable_handle));
}
TEST_F(TfrtSessionTest, WithTargetNodes) {
std::vector<Tensor> outputs;
TF_ASSERT_OK(session_->Run(inputs_, output_tensor_names_, target_node_names_,
&outputs));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
test::ExpectEqual(outputs[1],
test::AsTensor<int32_t>({12}, TensorShape{1, 1}));
test::ExpectEqual(outputs[2],
test::AsTensor<int32_t>({18}, TensorShape{1, 1}));
}
TEST_F(TfrtSessionTest, CreateWithEmptyGraphIsNoop) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
TF_ASSERT_OK(session_->Create(GraphDef()));
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
TF_ASSERT_OK(session_->Create(meta_graph_def.graph_def()));
}
TEST_F(TfrtSessionTest, CreateAgainError) {
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
auto status = session_->Create(meta_graph_def.graph_def());
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.ToString(),
::testing::HasSubstr(
"A Graph has already been created for this session."));
}
TEST_F(TfrtSessionTest, CreateAfterCloseError) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
TF_ASSERT_OK(session_->Close());
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
auto status = session_->Create(meta_graph_def.graph_def());
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.ToString(),
::testing::HasSubstr("Session has been closed."));
}
TEST_F(TfrtSessionTest, ExtendWhenNotCreated) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
TF_ASSERT_OK(session_->Extend(meta_graph_def.graph_def()));
  TF_ASSERT_OK(session_->Run(/*inputs=*/{}, /*output_tensor_names=*/{},
                             /*target_node_names=*/{"init"},
                             /*outputs=*/nullptr));
std::vector<Tensor> outputs;
TF_ASSERT_OK(session_->Run(inputs_, output_tensor_names_,
                             /*target_node_names=*/{}, &outputs));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
test::ExpectEqual(outputs[1],
test::AsTensor<int32_t>({12}, TensorShape{1, 1}));
test::ExpectEqual(outputs[2],
test::AsTensor<int32_t>({18}, TensorShape{1, 1}));
}
TEST_F(TfrtSessionTest, ExtendAfterCreate) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
options.config.mutable_experimental()->set_disable_optimize_for_static_graph(
true);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
GraphDef graph_def;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output a = ops::Const(scope.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Const(scope.WithControlDependencies(a).WithOpName("b"),
0.0f, {10, 10});
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
}
TF_ASSERT_OK(session_->Create(graph_def));
GraphDef extension;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
auto input = ops::Placeholder(scope.WithOpName("input"), DT_INT32);
auto rank = ops::Rank(scope.WithOpName("rank"), input);
TF_ASSERT_OK(scope.ToGraphDef(&extension));
}
TF_ASSERT_OK(session_->Extend(extension));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", tensorflow::tfrt_stub::CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(session_->Run(inputs,
{"rank"},
                             /*target_node_names=*/{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(tensorflow::tfrt_stub::GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({2}));
}
TEST_F(TfrtSessionTest, ExtendAfterCreate_ErrorWithStaticGraphOptimization) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
options.config.mutable_experimental()->set_optimize_for_static_graph(true);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
GraphDef graph_def;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output a = ops::Const(scope.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Const(scope.WithControlDependencies(a).WithOpName("b"),
0.0f, {10, 10});
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
}
TF_ASSERT_OK(session_->Create(graph_def));
GraphDef extension;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
auto input = ops::Placeholder(scope.WithOpName("input"), DT_INT32);
auto rank = ops::Rank(scope.WithOpName("rank"), input);
TF_ASSERT_OK(scope.ToGraphDef(&extension));
}
auto status = session_->Extend(extension);
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.ToString(),
::testing::HasSubstr("Extending the graph is not supported when"));
}
TEST_F(TfrtSessionTest, ExtendAfterCloseError) {
TF_ASSERT_OK(session_->Close());
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
auto status = session_->Extend(meta_graph_def.graph_def());
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.ToString(),
::testing::HasSubstr("Session has been closed."));
}
TEST_F(TfrtSessionTest, RunAfterCloseError) {
TF_ASSERT_OK(session_->Close());
std::vector<Tensor> outputs;
auto status = session_->Run(inputs_, output_tensor_names_,
                              /*target_node_names=*/{}, &outputs);
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.ToString(),
::testing::HasSubstr("Session has been closed."));
}
TEST_F(TfrtSessionTest, InitializeTwiceCrashes) {
TfrtSessionOptions options;
auto second_initialize = [](TfrtSessionOptions options) {
auto status = InitializeTfrtSession(options);
TF_ASSERT_OK(status);
};
ASSERT_DEBUG_DEATH(second_initialize(options), "");
}
TEST_F(TfrtSessionTest, GetRuntime) {
auto runtime = TfrtSessionFactory::GetRuntime();
EXPECT_NE(runtime, nullptr);
}
TEST_F(TfrtSessionTest, RegisterTwiceCrashes) {
TfrtSessionFactory::RegisterInitializer(
[](tfrt_stub::Runtime*) { return absl::OkStatus(); });
ASSERT_DEBUG_DEATH(TfrtSessionFactory::RegisterInitializer(
[](tfrt_stub::Runtime*) { return absl::OkStatus(); }),
"");
}
}
}
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
testing::AddGlobalTestEnvironment(new tensorflow::TfrtSessionEnvironment());
return RUN_ALL_TESTS();
} |
1,322 | cpp | tensorflow/tensorflow | cost_recorder | tensorflow/core/tfrt/fallback/cost_recorder.cc | tensorflow/core/tfrt/fallback/cost_recorder_test.cc | #ifndef TENSORFLOW_CORE_TFRT_FALLBACK_COST_RECORDER_H_
#define TENSORFLOW_CORE_TFRT_FALLBACK_COST_RECORDER_H_
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/thread_annotations.h"
namespace tensorflow {
namespace tfrt_stub {
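// Thread-safe accumulator of op execution times, keyed by op key; the map
// stores (total time, count) pairs so GetCost can report a running average.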
class CostRecorder {
public:
void RecordCost(int64_t op_key, uint64_t execution_time);
uint64_t GetCost(int64_t op_key) const;
Status WriteToFile() const;
size_t size() const;
static const char* MesuredCostPathEnvVarName() {
return "TF_TFRT_MEASURED_COST_PATH";
}
private:
mutable tensorflow::mutex op_cost_map_mutex_;
absl::flat_hash_map<int64_t, std::pair<uint64_t, uint64_t>> op_cost_map_
TF_GUARDED_BY(op_cost_map_mutex_);
};
}
}
#endif
#include "tensorflow/core/tfrt/fallback/cost_recorder.h"
#include <limits>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tfrt/fallback/op_cost_map.pb.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
namespace tfrt_stub {
void CostRecorder::RecordCost(int64_t op_key, uint64_t execution_time) {
mutex_lock l(op_cost_map_mutex_);
op_cost_map_[op_key].first += execution_time;
op_cost_map_[op_key].second += 1;
}
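// Returns the average recorded cost for `op_key`, clamped to at least 1;
// unseen keys default to uint32_t max.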
uint64_t CostRecorder::GetCost(int64_t op_key) const {
tf_shared_lock l(op_cost_map_mutex_);
const auto iter = op_cost_map_.find(op_key);
if (iter == op_cost_map_.end()) return std::numeric_limits<uint32_t>::max();
const auto total_cost = iter->second.first;
const auto num_ops = iter->second.second;
  auto r = std::max(static_cast<uint64_t>(1),
                    static_cast<uint64_t>(total_cost / num_ops));
VLOG(2) << "Get cost for op_key=" << op_key << ", cost=" << r;
return r;
}
Status CostRecorder::WriteToFile() const {
OpCostMapProto op_cost_map_proto;
{
tf_shared_lock l(op_cost_map_mutex_);
for (const auto& [op_key, op_cost] : op_cost_map_) {
const uint64_t avg_op_cost = op_cost.first / op_cost.second;
(*op_cost_map_proto.mutable_op_cost_map())[op_key] = avg_op_cost;
}
}
std::string measured_cost_path;
TF_RETURN_IF_ERROR(ReadStringFromEnvVar(MesuredCostPathEnvVarName(), "",
&measured_cost_path));
return tensorflow::WriteTextProto(tensorflow::Env::Default(),
measured_cost_path, op_cost_map_proto);
}
size_t CostRecorder::size() const {
tf_shared_lock l(op_cost_map_mutex_);
return op_cost_map_.size();
}
}
} | #include "tensorflow/core/tfrt/fallback/cost_recorder.h"
#include <limits>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/tfrt/fallback/op_cost_map.pb.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
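// kTestAvgCost == (kTestCost + 2 * kTestCost) / 2, the average the recorder
// should report after two RecordCost calls.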
constexpr int64_t kTestOpKey = 1;
constexpr uint64_t kTestCost = 1234;
constexpr uint64_t kTestAvgCost = 1851;
TEST(CostRecorderTest, RecordCostTest) {
CostRecorder recorder;
recorder.RecordCost(kTestOpKey, kTestCost);
recorder.RecordCost(kTestOpKey, kTestCost);
EXPECT_EQ(recorder.size(), 1);
}
TEST(CostRecorderTest, GetCostTest) {
CostRecorder recorder;
recorder.RecordCost(kTestOpKey, kTestCost);
recorder.RecordCost(kTestOpKey, 2 * kTestCost);
EXPECT_EQ(recorder.size(), 1);
EXPECT_EQ(recorder.GetCost(kTestOpKey), kTestAvgCost);
}
TEST(CostRecorderTest, GetCostDefaultValueTest) {
CostRecorder recorder;
ASSERT_EQ(recorder.size(), 0);
EXPECT_EQ(recorder.GetCost(kTestOpKey),
std::numeric_limits<uint32_t>::max());
}
TEST(CostRecorderTest, WriteToFileTest) {
CostRecorder recorder;
ASSERT_EQ(recorder.size(), 0);
std::string measured_cost_path;
tensorflow::Env::Default()->LocalTempFilename(&measured_cost_path);
ASSERT_EQ(setenv("TF_TFRT_MEASURED_COST_PATH", measured_cost_path.c_str(), 1),
0);
TF_CHECK_OK(recorder.WriteToFile());
OpCostMapProto op_cost_map_proto;
TF_CHECK_OK(tensorflow::ReadTextProto(
tensorflow::Env::Default(), measured_cost_path, &op_cost_map_proto));
EXPECT_EQ(op_cost_map_proto.op_cost_map_size(), 0);
}
TEST(CostRecorderTest, ProtoRecordsTest) {
CostRecorder recorder;
recorder.RecordCost(kTestOpKey, kTestCost);
recorder.RecordCost(kTestOpKey, 2 * kTestCost);
ASSERT_EQ(recorder.size(), 1);
std::string measured_cost_path;
tensorflow::Env::Default()->LocalTempFilename(&measured_cost_path);
ASSERT_EQ(setenv(CostRecorder::MesuredCostPathEnvVarName(),
measured_cost_path.c_str(), 1),
0);
TF_CHECK_OK(recorder.WriteToFile());
OpCostMapProto op_cost_map_proto;
TF_CHECK_OK(tensorflow::ReadTextProto(
tensorflow::Env::Default(), measured_cost_path, &op_cost_map_proto));
EXPECT_EQ(op_cost_map_proto.op_cost_map().find(kTestOpKey)->second,
kTestAvgCost);
}
}
}
} |
1,323 | cpp | tensorflow/tensorflow | fallback_state | tensorflow/core/tfrt/fallback/fallback_state.cc | tensorflow/core/tfrt/fallback/fallback_state_test.cc | #ifndef TENSORFLOW_CORE_TFRT_FALLBACK_FALLBACK_STATE_H_
#define TENSORFLOW_CORE_TFRT_FALLBACK_FALLBACK_STATE_H_
#include <memory>
#include <vector>
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/graph_execution_state.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace tfrt_stub {
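// Bundles the devices, function library, and ProcessFunctionLibraryRuntime
// used by the TFRT fallback, plus a helper to build a GraphExecutionState
// from a GraphDef.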
class FallbackState {
public:
static absl::StatusOr<std::unique_ptr<FallbackState>> Create(
const SessionOptions &session_options,
const tensorflow::FunctionDefLibrary &fdef_lib);
static absl::StatusOr<std::unique_ptr<FallbackState>> CreateWithCpuDevice(
const SessionOptions &session_options,
const tensorflow::FunctionDefLibrary &fdef_lib);
static absl::StatusOr<std::unique_ptr<FallbackState>> CreateWithMockGpuDevice(
const SessionOptions &session_options,
const tensorflow::FunctionDefLibrary &fdef_lib);
FallbackState(const SessionOptions &session_options,
std::vector<std::unique_ptr<Device>> devices,
const tensorflow::FunctionDefLibrary &fdef_lib);
absl::StatusOr<std::unique_ptr<GraphExecutionState>>
CreateGraphExecutionState(GraphDef graph_def, bool run_placer = true) const;
Status AddFunctionDef(const FunctionDef &func_def);
const SessionOptions &session_options() const { return session_options_; }
const DeviceMgr &device_manager() const { return device_manager_; }
DeviceMgr &device_manager() { return device_manager_; }
const DeviceSet &device_set() const { return device_set_; }
const ProcessFunctionLibraryRuntime &process_function_library_runtime()
const {
return pflr_;
}
const FunctionLibraryDefinition &func_lib_def() const {
return func_lib_def_;
}
private:
SessionOptions session_options_;
StaticDeviceMgr device_manager_;
DeviceSet device_set_;
FunctionLibraryDefinition func_lib_def_;
ProcessFunctionLibraryRuntime pflr_;
};
}
}
#endif
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/types.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/tpu/virtual_device.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
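// Builds a fully qualified device name, e.g.
// "/job:localhost/replica:0/task:0/device:GPU:0".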
string DeviceName(absl::string_view name_prefix, absl::string_view device_type,
int32_t task_id, size_t device_id) {
return strings::StrCat(absl::StripSuffix(name_prefix, "0"), task_id,
"/device:", device_type, ":", device_id);
}
DeviceAttributes BuildDeviceAttributes(absl::string_view name_prefix,
const char *device_type, int32_t task_id,
size_t device_id) {
const DeviceAttributes attrs = Device::BuildDeviceAttributes(
DeviceName(name_prefix, device_type, task_id, device_id),
DeviceType(device_type), Bytes(16ULL << 30), DeviceLocality(),
strings::StrCat("device: ", device_type, " device"));
return attrs;
}
}
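// The factory functions below differ only in the devices they register: all
// local devices, CPU only, or CPU plus one virtual GPU device.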
absl::StatusOr<std::unique_ptr<FallbackState>> FallbackState::Create(
const SessionOptions &session_options,
const tensorflow::FunctionDefLibrary &fdef_lib) {
std::vector<std::unique_ptr<Device>> devices;
TF_RETURN_IF_ERROR(DeviceFactory::AddDevices(
session_options, "/job:localhost/replica:0/task:0", &devices));
return std::make_unique<FallbackState>(session_options, std::move(devices),
fdef_lib);
}
absl::StatusOr<std::unique_ptr<FallbackState>>
FallbackState::CreateWithCpuDevice(
const SessionOptions &session_options,
const tensorflow::FunctionDefLibrary &fdef_lib) {
std::vector<std::unique_ptr<Device>> devices;
TF_RETURN_IF_ERROR(DeviceFactory::AddCpuDevices(
session_options, "/job:localhost/replica:0/task:0", &devices));
return std::make_unique<FallbackState>(session_options, std::move(devices),
fdef_lib);
}
absl::StatusOr<std::unique_ptr<FallbackState>>
FallbackState::CreateWithMockGpuDevice(
const SessionOptions &session_options,
const tensorflow::FunctionDefLibrary &fdef_lib) {
std::vector<std::unique_ptr<Device>> devices;
TF_RETURN_IF_ERROR(DeviceFactory::AddCpuDevices(
session_options, "/job:localhost/replica:0/task:0", &devices));
auto device_attrs =
BuildDeviceAttributes("/job:localhost/replica:0/task:0", "GPU", 0, 0);
devices.push_back(
std::make_unique<VirtualDevice>(session_options.env, device_attrs));
return std::make_unique<FallbackState>(session_options, std::move(devices),
fdef_lib);
}
FallbackState::FallbackState(const SessionOptions &session_options,
std::vector<std::unique_ptr<Device>> devices,
const tensorflow::FunctionDefLibrary &fdef_lib)
: session_options_(session_options),
device_manager_(std::move(devices)),
func_lib_def_(OpRegistry::Global(), fdef_lib),
pflr_(&device_manager_, session_options.env, &session_options.config,
TF_GRAPH_DEF_VERSION, &func_lib_def_,
session_options.config.graph_options().optimizer_options(),
nullptr, nullptr,
nullptr,
Rendezvous::Factory{[](const int64, const DeviceMgr *device_mgr,
tsl::core::RefCountPtr<Rendezvous> *r) {
*r = tsl::core::RefCountPtr<Rendezvous>(
new IntraProcessRendezvous(device_mgr));
return absl::OkStatus();
}}) {
for (auto *d : device_manager_.ListDevices()) {
device_set_.AddDevice(d);
}
device_set_.set_client_device(device_manager_.HostCPU());
}
absl::StatusOr<std::unique_ptr<GraphExecutionState>>
FallbackState::CreateGraphExecutionState(GraphDef graph_def,
bool run_placer) const {
GraphExecutionStateOptions options;
options.device_set = &device_set_;
options.session_options = &session_options_;
options.session_handle = "tfrt_fallback_handle";
options.run_placer = run_placer;
std::unique_ptr<GraphExecutionState> execution_state;
TF_RETURN_IF_ERROR(GraphExecutionState::MakeForBaseGraph(
std::move(graph_def), options, &execution_state));
return execution_state;
}
Status FallbackState::AddFunctionDef(const FunctionDef &func_def) {
return func_lib_def_.AddFunctionDef(func_def);
}
}
} | #include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include <utility>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tsl/lib/core/status_test_util.h"
namespace tensorflow {
namespace {
using ::tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
using ::testing::Not;
TEST(FallbackStateTest, CreateRendezvous) {
FunctionDefLibrary flib;
*flib.add_function() = FunctionDefHelper::Define(
"dummy_fn",
{},
{},
{},
{});
TF_ASSERT_OK_AND_ASSIGN(auto fallback_state,
tfrt_stub::FallbackState::Create({}, flib));
const ProcessFunctionLibraryRuntime& pflr =
fallback_state->process_function_library_runtime();
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:localhost/replica:0/task:0";
opts.remote_execution = true;
auto status = pflr.RunSync(opts, pflr.GetHandle("dummy_fn"), {}, nullptr);
EXPECT_THAT(status, Not(StatusIs(error::FAILED_PRECONDITION,
HasSubstr("rendezvous"))));
}
TEST(FallbackStateTest, CreateGraphExecutionState) {
tensorflow::SessionOptions session_options;
tensorflow::FunctionDefLibrary fdef_lib;
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tfrt_stub::FallbackState::CreateWithCpuDevice(session_options, fdef_lib));
GraphDef graphdef;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
Output a = ops::Const(scope.WithOpName("a"), 2.0, {1, 1});
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
}
TF_ASSERT_OK_AND_ASSIGN(
auto graph_execution_state,
fallback_state->CreateGraphExecutionState(std::move(graphdef)));
}
TEST(FallbackStateTest, CreateWithMockGpuDevice) {
tensorflow::SessionOptions session_options;
tensorflow::FunctionDefLibrary fdef_lib;
TF_ASSERT_OK_AND_ASSIGN(auto fallback_state,
tfrt_stub::FallbackState::CreateWithMockGpuDevice(
session_options, fdef_lib));
const auto& device_manager = fallback_state->device_manager();
EXPECT_GT(device_manager.NumDeviceType("GPU"), 0);
}
}
} |
1,324 | cpp | tensorflow/tensorflow | op_kernel_runner | tensorflow/core/tfrt/fallback/op_kernel_runner.cc | tensorflow/core/tfrt/fallback/op_kernel_runner_test.cc | #ifndef TENSORFLOW_CORE_TFRT_FALLBACK_OP_KERNEL_RUNNER_H_
#define TENSORFLOW_CORE_TFRT_FALLBACK_OP_KERNEL_RUNNER_H_
#include <assert.h>
#include <stddef.h>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
namespace tfrt_stub {
class OpKernelRunner {
public:
static absl::StatusOr<OpKernelRunner> Create(
absl::string_view op_name, absl::string_view node_name,
absl::string_view device_name, int num_args,
const std::function<Status(tensorflow::AttrValueMap*)>& attr_builder,
const tensorflow::DeviceMgr& device_manager,
const tensorflow::ProcessFunctionLibraryRuntime&
process_function_library_runtime);
ABSL_DEPRECATED("Please use the Create() method that takes node_name.")
static absl::StatusOr<OpKernelRunner> Create(
absl::string_view op_name, absl::string_view device_name, int num_args,
const std::function<Status(tensorflow::AttrValueMap*)>& attr_builder,
const tensorflow::DeviceMgr& device_manager,
const tensorflow::ProcessFunctionLibraryRuntime&
process_function_library_runtime) {
return Create(op_name, op_name, device_name, num_args,
attr_builder, device_manager,
process_function_library_runtime);
}
static absl::StatusOr<OpKernelRunner> Create(
absl::string_view op_name, absl::string_view node_name, int num_args,
const std::function<Status(tensorflow::AttrValueMap*)>& attr_builder,
const tensorflow::ProcessFunctionLibraryRuntime&
process_function_library_runtime,
tensorflow::Device* device);
ABSL_DEPRECATED("Please use the Create() method that takes node_name.")
static absl::StatusOr<OpKernelRunner> Create(
absl::string_view op_name, int num_args,
const std::function<Status(tensorflow::AttrValueMap*)>& attr_builder,
const tensorflow::ProcessFunctionLibraryRuntime&
process_function_library_runtime,
tensorflow::Device* device) {
return Create(op_name, op_name, num_args, attr_builder,
process_function_library_runtime, device);
}
OpKernelRunner() = default;
explicit operator bool() const { return op_kernel_ != nullptr; }
void Run(OpKernelContext* context) const {
DVLOG(1) << "KernelFallbackExecuteCompat Running Op: "
<< op_kernel_->def().DebugString()
<< ", on Device: " << context->device()->name();
op_kernel_->Compute(context);
}
void RunAsync(OpKernelContext* context,
AsyncOpKernel::DoneCallback done_callback) const;
bool IsAsync() const { return info_->is_async; }
tensorflow::OpKernel* op_kernel() const { return op_kernel_.get(); }
tensorflow::Device* device() const { return info_->device; }
tensorflow::FunctionLibraryRuntime* function_library_runtime() const {
return info_->function_library_runtime;
}
tensorflow::ResourceMgr* resource_manager() const {
return info_->resource_manager;
}
absl::Span<const AllocatorAttributes> input_alloc_attrs() const {
return input_alloc_attrs_;
}
absl::Span<const AllocatorAttributes> output_alloc_attrs() const {
return output_alloc_attrs_;
}
private:
explicit OpKernelRunner(
tensorflow::Device* device,
tensorflow::FunctionLibraryRuntime* function_library_runtime,
std::unique_ptr<OpKernel> op_kernel);
std::unique_ptr<OpKernel> op_kernel_;
absl::Span<const AllocatorAttributes> input_alloc_attrs_;
absl::Span<const AllocatorAttributes> output_alloc_attrs_;
struct Info {
tensorflow::Device* device = nullptr;
tensorflow::FunctionLibraryRuntime* function_library_runtime = nullptr;
tensorflow::ResourceMgr* resource_manager = nullptr;
bool is_async = false;
absl::InlinedVector<AllocatorAttributes, 4UL> input_alloc_attrs;
absl::InlinedVector<AllocatorAttributes, 1UL> output_alloc_attrs;
};
std::unique_ptr<Info> info_;
};
struct OpKernelRunState {
std::vector<const tensorflow::TensorBuffer*> tensor_buffers;
std::vector<tensorflow::TensorValue> input_tf_tensor_values;
OpKernelContext::Params params;
absl::InlinedVector<tensorflow::Tensor, 4UL> input_tf_tensors;
OpKernelRunState() = default;
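  // Deep-copies the given tensors into `input_tf_tensors` and repoints
  // `params.inputs` at those copies, so the run state owns its inputs. The
  // self-referential pointers in `input_tf_tensor_values` are why this struct
  // is non-copyable.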
OpKernelRunState(absl::Span<const tensorflow::TensorValue> tensor_values,
const OpKernelContext::Params& p,
tensorflow::DeviceBase* device = nullptr) {
input_tf_tensors.reserve(tensor_values.size());
for (const auto& tensor_value : tensor_values) {
input_tf_tensors.push_back(*tensor_value.tensor);
}
for (auto& tensor : input_tf_tensors) {
input_tf_tensor_values.emplace_back(&tensor);
}
params = p;
params.inputs = input_tf_tensor_values;
params.eigen_gpu_device = nullptr;
if (device != nullptr) params.device = device;
}
OpKernelRunState(const OpKernelRunState& other) = delete;
OpKernelRunState& operator=(const OpKernelRunState& other) = delete;
~OpKernelRunState() = default;
};
class OpKernelRunnerTable {
public:
OpKernelRunnerTable() = default;
bool Insert(int64_t index, OpKernelRunner runner) {
if (runners_.size() <= index) runners_.resize(index + 1);
if (runners_[index]) return false;
runners_[index] = std::move(runner);
return true;
}
const OpKernelRunner* Get(int64_t index) const {
CHECK_GT(runners_.size(), index)
<< "runner index is out of bounds: index=" << index
<< " size=" << runners_.size();
CHECK(runners_[index])
<< "runner is not available: index=" << index;
return GetUnsafe(index);
}
const OpKernelRunner* GetUnsafe(int64_t index) const {
DCHECK_GT(runners_.size(), index);
auto& result = runners_[index];
DCHECK(result);
return &result;
}
private:
std::vector<OpKernelRunner> runners_;
};
}
}
#endif
#include "tensorflow/core/tfrt/fallback/op_kernel_runner.h"
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
Status CheckOpDefCompatibility(const tensorflow::OpDef& op_def) {
auto check_arg_def = [&](const auto& arg_def) {
if (arg_def.is_ref())
return tensorflow::errors::Internal(
"TFRT kernel fallback error: Unsupported ref args in ",
op_def.name());
return absl::OkStatus();
};
for (const auto& arg_def : op_def.input_arg())
TF_RETURN_IF_ERROR(check_arg_def(arg_def));
for (const auto& arg_def : op_def.output_arg())
TF_RETURN_IF_ERROR(check_arg_def(arg_def));
return absl::OkStatus();
}
absl::StatusOr<tensorflow::NodeDef> BuildNodeDef(
const tensorflow::OpDef& op_def, absl::string_view node_name, int num_args,
const std::function<Status(tensorflow::AttrValueMap*)>& attr_builder) {
tensorflow::NodeDef node_def;
node_def.set_name(std::string(node_name));
node_def.set_op(op_def.name());
for (int i = 0; i < num_args; ++i) {
node_def.add_input("dummy_input");
}
auto* attr_value_map = node_def.mutable_attr();
TF_RETURN_IF_ERROR(attr_builder(attr_value_map));
for (const auto& attr_def : op_def.attr()) {
if (attr_def.has_default_value()) {
attr_value_map->insert({attr_def.name(), attr_def.default_value()});
}
}
return node_def;
}
tensorflow::Status CreateOpKernel(
tensorflow::FunctionLibraryRuntime* flr, tensorflow::NodeDef ndef,
std::unique_ptr<tensorflow::OpKernel>* result) {
std::shared_ptr<const tensorflow::NodeProperties> props;
TF_RETURN_IF_ERROR(tensorflow::NodeProperties::CreateFromNodeDef(
std::move(ndef), flr->GetFunctionLibraryDefinition(), &props));
tensorflow::OpKernel* k = nullptr;
TF_RETURN_IF_ERROR(flr->CreateKernel(props, &k));
result->reset(k);
return absl::OkStatus();
}
}
absl::StatusOr<OpKernelRunner> OpKernelRunner::Create(
absl::string_view op_name, absl::string_view node_name,
absl::string_view device_name, int num_args,
const std::function<Status(tensorflow::AttrValueMap*)>& attr_builder,
const tensorflow::DeviceMgr& device_manager,
const tensorflow::ProcessFunctionLibraryRuntime&
process_function_library_runtime) {
tensorflow::Device* device = nullptr;
Status s = device_manager.LookupDevice(device_name, &device);
if (!s.ok()) {
LOG_EVERY_N_SEC(WARNING, 30)
<< "Failed to find device " << device_name
<< " when creating OpKernel: " << op_name << ". Error: " << s
<< ", fallback to host device instead";
device = device_manager.HostCPU();
}
return Create(op_name, node_name, num_args, attr_builder,
process_function_library_runtime, device);
}
absl::StatusOr<OpKernelRunner> OpKernelRunner::Create(
absl::string_view op_name, absl::string_view node_name, int num_args,
const std::function<Status(tensorflow::AttrValueMap*)>& attr_builder,
const tensorflow::ProcessFunctionLibraryRuntime&
process_function_library_runtime,
tensorflow::Device* device) {
const OpDef* op_def = nullptr;
TF_RETURN_IF_ERROR(tensorflow::OpRegistry::Global()->LookUpOpDef(
std::string(op_name), &op_def));
TF_RETURN_IF_ERROR(CheckOpDefCompatibility(*op_def));
VLOG(1) << "KernelFallbackExecuteCompat creating op from OpDef: "
<< op_def->DebugString();
TF_ASSIGN_OR_RETURN(auto node_def,
BuildNodeDef(*op_def, node_name, num_args, attr_builder));
VLOG(1) << "KernelFallbackExecuteCompat created NodeDef: "
<< node_def.DebugString();
tensorflow::FunctionLibraryRuntime* function_library_runtime = nullptr;
function_library_runtime =
process_function_library_runtime.GetFLR(device->name());
std::unique_ptr<OpKernel> op_kernel;
TF_RETURN_IF_ERROR(CreateOpKernel(function_library_runtime,
std::move(node_def), &op_kernel));
return OpKernelRunner(device, function_library_runtime, std::move(op_kernel));
}
OpKernelRunner::OpKernelRunner(
tensorflow::Device* device,
tensorflow::FunctionLibraryRuntime* function_library_runtime,
std::unique_ptr<tensorflow::OpKernel> op_kernel)
: op_kernel_(std::move(op_kernel)), info_(std::make_unique<Info>()) {
DCHECK(device);
DCHECK(function_library_runtime);
info_->device = device;
info_->function_library_runtime = function_library_runtime;
info_->resource_manager = device->resource_manager();
info_->is_async = (op_kernel_->AsAsync() != nullptr);
const auto& input_memory_types = op_kernel_->input_memory_types();
auto& input_alloc_attrs = info_->input_alloc_attrs;
auto& output_alloc_attrs = info_->output_alloc_attrs;
input_alloc_attrs.resize(op_kernel_->num_inputs());
for (size_t i = 0, e = op_kernel_->num_inputs(); i < e; ++i) {
input_alloc_attrs[i].set_on_host(input_memory_types[i] ==
tensorflow::HOST_MEMORY);
}
const auto& output_memory_types = op_kernel_->output_memory_types();
output_alloc_attrs.resize(op_kernel_->num_outputs());
for (size_t i = 0, e = output_alloc_attrs.size(); i < e; ++i) {
output_alloc_attrs[i].set_on_host(output_memory_types[i] ==
tensorflow::HOST_MEMORY);
}
input_alloc_attrs_ = input_alloc_attrs;
output_alloc_attrs_ = output_alloc_attrs;
}
void OpKernelRunner::RunAsync(OpKernelContext* context,
AsyncOpKernel::DoneCallback done_callback) const {
DVLOG(1) << "KernelFallbackExecuteCompat Running Async Op: "
<< op_kernel_->def().DebugString()
<< ", on Device: " << context->device()->name();
AsyncOpKernel* async = op_kernel_->AsAsync();
DCHECK(async);
async->ComputeAsync(context, std::move(done_callback));
}
}
} | #include "tensorflow/core/tfrt/fallback/op_kernel_runner.h"
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/tfrt/fallback/op_kernel_runner_cache.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
using ::testing::IsNull;
using ::testing::SizeIs;
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
constexpr const char* kDeviceType = "GPU";
#else
constexpr const char* kDeviceType = "CPU";
#endif
class TestOpKernel : public OpKernel {
public:
using OpKernel::OpKernel;
~TestOpKernel() override = default;
void Compute(OpKernelContext* context) override {
context->set_output(0, context->input(0));
}
};
REGISTER_KERNEL_BUILDER(Name("TestOp").Device(DEVICE_CPU), TestOpKernel);
REGISTER_OP("TestOp").Input("x: int32").Output("y: int32");
TEST(OpKernelRunnerTest, Create) {
tensorflow::SessionOptions session_options;
tensorflow::FunctionDefLibrary fdef_lib;
TF_ASSERT_OK_AND_ASSIGN(auto fallback_state,
FallbackState::Create(session_options, fdef_lib));
TF_ASSERT_OK_AND_ASSIGN(
auto runner,
OpKernelRunner::Create(
"TestOp", "TestOp_node_name",
"/job:localhost/replica:0/task:0/device:CPU:0",
1,
[](tensorflow::AttrValueMap*) { return absl::OkStatus(); },
fallback_state->device_manager(),
fallback_state->process_function_library_runtime()));
ASSERT_TRUE(runner);
EXPECT_EQ(runner.op_kernel()->name(), "TestOp_node_name");
}
TEST(OpKernelRunnerTest, OpKernelRunnerCache) {
tensorflow::SessionOptions session_options;
tensorflow::FunctionDefLibrary fdef_lib;
TF_ASSERT_OK_AND_ASSIGN(auto fallback_state,
FallbackState::Create(session_options, fdef_lib));
OpKernelRunnerCache cache;
tfrt::Location loc(nullptr, 100);
TF_ASSERT_OK_AND_ASSIGN(
auto* runner,
cache.GetOrCreate(
loc,
"TestOp",
"/job:localhost/replica:0/task:0/device:CPU:0",
1,
[](tensorflow::AttrValueMap*) { return absl::OkStatus(); },
fallback_state->device_manager(),
fallback_state->process_function_library_runtime()));
ASSERT_TRUE(runner);
EXPECT_EQ(runner->op_kernel()->name(), "TestOp_100_0");
TF_ASSERT_OK_AND_ASSIGN(
runner,
cache.GetOrCreate(
loc,
"TestOp",
"/job:localhost/replica:0/task:0/device:CPU:0",
1,
[](tensorflow::AttrValueMap*) { return absl::OkStatus(); },
fallback_state->device_manager(),
fallback_state->process_function_library_runtime()));
ASSERT_TRUE(runner);
EXPECT_EQ(runner->op_kernel()->name(), "TestOp_100_0");
}
TEST(OpKernelRunnerTest, OpKernelRunState) {
SessionOptions options;
auto* device_count = options.config.mutable_device_count();
device_count->insert({kDeviceType, 1});
std::vector<std::unique_ptr<Device>> devices;
TF_ASSERT_OK(DeviceFactory::GetFactory(kDeviceType)
->CreateDevices(options,
"/job:a/replica:0/task:0",
&devices));
ASSERT_EQ(devices.size(), 1);
OpKernelContext::Params params;
params.device = devices[0].get();
params.ensure_eigen_gpu_device();
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
ASSERT_THAT(params.eigen_gpu_device, ::testing::NotNull());
#endif
Tensor a(DT_FLOAT, TensorShape({}));
Tensor b(DT_INT32, TensorShape({}));
absl::InlinedVector<TensorValue, 4UL> inputs{TensorValue(&a),
TensorValue(&b)};
params.inputs = inputs;
Tensor c(DT_UINT8, TensorShape({}));
absl::InlinedVector<TensorValue, 4UL> new_inputs{TensorValue(&c)};
OpKernelRunState run_state(new_inputs, params);
EXPECT_THAT(run_state.input_tf_tensors, SizeIs(1));
EXPECT_THAT(run_state.input_tf_tensor_values, SizeIs(1));
EXPECT_EQ(run_state.params.inputs.data(),
run_state.input_tf_tensor_values.data());
EXPECT_THAT(run_state.params.eigen_gpu_device, IsNull());
}
}
}
} |
1,325 | cpp | tensorflow/tensorflow | serialize_utils | tensorflow/core/tfrt/saved_model/utils/serialize_utils.cc | tensorflow/core/tfrt/saved_model/utils/serialize_utils_test.cc | #ifndef TENSORFLOW_CORE_TFRT_SAVED_MODEL_UTILS_SERIALIZE_UTILS_H_
#define TENSORFLOW_CORE_TFRT_SAVED_MODEL_UTILS_SERIALIZE_UTILS_H_
#include <memory>
#include <string>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "llvm/Support/ToolOutputFile.h"
#include "mlir/Support/FileUtilities.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/executable.h"
#include "tsl/platform/env.h"
#include "tfrt/bef/bef_buffer.h"
namespace tensorflow {
namespace tfrt_stub {
absl::Status SerializeBEF(const tfrt::BefBuffer &bef,
const std::string &filepath);
absl::StatusOr<tfrt::BefBuffer> DeserializeBEFBuffer(
const std::string &filepath);
absl::Status SerializeMLRTBytecode(const mlrt::bc::Buffer &byteCode,
const std::string &filepath);
absl::StatusOr<mlrt::bc::Buffer> DeserializeMlrtBytecodeBuffer(
const std::string &filepath);
}
}
#endif
#include "tensorflow/core/tfrt/saved_model/utils/serialize_utils.h"
#include <cstring>
#include <memory>
#include <string>
#include "absl/status/status.h"
#include "llvm/Support/ToolOutputFile.h"
#include "mlir/Support/FileUtilities.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tsl/platform/env.h"
#include "tfrt/bef/bef_buffer.h"
namespace tensorflow {
namespace tfrt_stub {
absl::Status SerializeBEF(const tfrt::BefBuffer &bef,
const std::string &filepath) {
std::string errorMessage;
  auto output = mlir::openOutputFile(filepath, &errorMessage);
  // Guard against a failed open so we don't dereference a null stream.
  if (!output) {
    return absl::InternalError(errorMessage);
  }
  (output->os()).write(reinterpret_cast<const char *>(bef.data()), bef.size());
output->keep();
LOG(INFO) << "Completed serializing BEF to: " << filepath;
return absl::OkStatus();
}
absl::StatusOr<tfrt::BefBuffer> DeserializeBEFBuffer(
const std::string &filepath) {
std::string data;
TF_CHECK_OK(ReadFileToString(tsl::Env::Default(), filepath, &data));
tfrt::BefBuffer bef(data.begin(), data.end());
LOG(INFO) << "Successfully loaded serialized BEF from: " << filepath;
return bef;
}
absl::Status SerializeMLRTBytecode(const mlrt::bc::Buffer &bytecode,
const std::string &filepath) {
std::string errorMessage;
  auto output = mlir::openOutputFile(filepath, &errorMessage);
  // Guard against a failed open so we don't dereference a null stream.
  if (!output) {
    return absl::InternalError(errorMessage);
  }
  (output->os())
      .write(reinterpret_cast<const char *>(bytecode.data()), bytecode.size());
output->keep();
LOG(INFO) << "Completed serializing MLRTBytecode to: " << filepath;
return absl::OkStatus();
}
absl::StatusOr<mlrt::bc::Buffer> DeserializeMlrtBytecodeBuffer(
const std::string &filepath) {
std::string bytecode_data;
TF_CHECK_OK(ReadFileToString(tsl::Env::Default(), filepath, &bytecode_data));
mlrt::bc::Buffer buffer;
mlrt::bc::Allocator allocator(&buffer);
allocator.Allocate(bytecode_data.length(), alignof(char));
memcpy(buffer.data(), bytecode_data.data(), bytecode_data.length());
LOG(INFO) << "Successfully loaded serialized MLRTBytecode from: " << filepath;
return buffer;
}
}
} | #include "tensorflow/core/tfrt/saved_model/utils/serialize_utils.h"
#include <cstdlib>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/mlrt/import_model.h"
#include "tensorflow/compiler/mlir/tfrt/translate/import_model.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_testutil.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_util.h"
#include "tensorflow/core/tfrt/utils/utils.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tfrt/bef/bef_buffer.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
TEST(SerializeBEFTest, HandlesCompleteProcess) {
tfrt::BefBuffer old_bef;
const std::string saved_model_mlir_path =
"third_party/tensorflow/compiler/mlir/tfrt/tests/saved_model/testdata/"
"test.mlir";
mlir::DialectRegistry registry;
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
auto module =
mlir::parseSourceFile<mlir::ModuleOp>(saved_model_mlir_path, &context);
ASSERT_TRUE(module);
std::unique_ptr<Runtime> runtime =
tensorflow::tfrt_stub::Runtime::Create(1);
tfrt_stub::GraphExecutionOptions options(runtime.get());
tfrt::ResourceContext resource_context;
tfrt_stub::ModelRuntimeContext model_context(
&options, options.compile_options.saved_model_dir, &resource_context);
TF_ASSERT_OK(ConvertTfMlirToBef(options.compile_options, module.get(),
&old_bef, model_context));
const std::string filepath =
io::JoinPath(getenv("TEST_UNDECLARED_OUTPUTS_DIR"),
std::string("serialized_bef.mlir.bef"));
TF_ASSERT_OK(tensorflow::tfrt_stub::SerializeBEF(old_bef, filepath));
ASSERT_NE(old_bef.size(), 0);
TF_ASSERT_OK_AND_ASSIGN(const tfrt::BefBuffer bef,
DeserializeBEFBuffer(filepath));
ASSERT_TRUE(old_bef.size() == bef.size());
std::unique_ptr<Runtime> default_runtime =
DefaultTfrtRuntime(1);
SavedModel::Options default_options =
DefaultSavedModelOptions(default_runtime.get());
TF_EXPECT_OK(tfrt::CreateBefFileFromBefBuffer(
*default_options.graph_execution_options.runtime, bef)
.status());
}
TEST(SerializeMLRTTest, HandlesSerializeAndDeserializeProcess) {
mlrt::bc::Buffer old_bytecode;
const std::string saved_model_mlir_path =
"third_party/tensorflow/compiler/mlir/tfrt/tests/saved_model/testdata/"
"test.mlir";
mlir::DialectRegistry registry;
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
auto module =
mlir::parseSourceFile<mlir::ModuleOp>(saved_model_mlir_path, &context);
ASSERT_TRUE(module);
mlir::OwningOpRef<mlir::ModuleOp> module_with_op_keys;
std::unique_ptr<Runtime> runtime =
tensorflow::tfrt_stub::Runtime::Create(1);
tfrt_stub::GraphExecutionOptions options(runtime.get());
options.enable_mlrt = true;
tfrt::ResourceContext resource_context;
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<tfrt_stub::FallbackState> fallback_state,
tfrt_stub::FallbackState::Create(SessionOptions(), FunctionDefLibrary()));
tfrt_stub::ModelRuntimeContext model_context(
&options, options.compile_options.saved_model_dir, &resource_context);
TF_ASSERT_OK_AND_ASSIGN(
old_bytecode, mlrt_compiler::ConvertTfMlirToBytecode(
options.compile_options, *fallback_state, module.get(),
model_context, &module_with_op_keys));
const std::string aot_package_path =
GetAotPackagePath(getenv("TEST_UNDECLARED_OUTPUTS_DIR"));
tsl::Env* env = tsl::Env::Default();
TF_ASSERT_OK(env->RecursivelyCreateDir(aot_package_path));
const std::string filepath =
io::JoinPath(aot_package_path, std::string("serialized_mlrt.mlir.mlrt"));
TF_ASSERT_OK(
tensorflow::tfrt_stub::SerializeMLRTBytecode(old_bytecode, filepath));
ASSERT_NE(old_bytecode.size(), 0);
mlrt::bc::Buffer bytecode;
TF_ASSERT_OK_AND_ASSIGN(bytecode, DeserializeMlrtBytecodeBuffer(filepath));
ASSERT_TRUE(old_bytecode.size() == bytecode.size());
EXPECT_STREQ(old_bytecode.data(), bytecode.data());
TF_ASSERT_OK_AND_ASSIGN(
bytecode,
LoadMlrtAndMlir(options.compile_options, module_with_op_keys.get(),
getenv("TEST_UNDECLARED_OUTPUTS_DIR"),
fallback_state.get()));
ASSERT_TRUE(old_bytecode.size() == bytecode.size());
EXPECT_STREQ(old_bytecode.data(), bytecode.data());
}
}
}
} |
1,326 | cpp | tensorflow/tensorflow | stream_ops_util | tensorflow/core/tfrt/kernels/stream_ops_util.cc | tensorflow/core/tfrt/kernels/stream_ops_util_test.cc | #ifndef TENSORFLOW_CORE_TFRT_KERNELS_STREAM_OPS_UTIL_H_
#define TENSORFLOW_CORE_TFRT_KERNELS_STREAM_OPS_UTIL_H_
#include <cstdint>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
namespace tfrt_stub {
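// Splits batched stream results back into per-request responses. `step_ids` is
// either a scalar step id or a 1-D int64 tensor of batched request ids whose
// upper bits encode the step id; each tensor in `tensors` is sliced along
// dimension 0 into the per-request output lists.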
absl::StatusOr<std::vector<std::pair<int64_t, std::vector<tensorflow::Tensor>>>>
UnbatchStreamResults(const tensorflow::Tensor& step_ids,
absl::Span<const tensorflow::Tensor> tensors);
}
}
#endif
#include "tensorflow/core/tfrt/kernels/stream_ops_util.h"
#include <cstdint>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/tfrt/kernels/stream_ops_util_constants.h"
namespace tensorflow {
namespace tfrt_stub {
absl::StatusOr<std::vector<std::pair<int64_t, std::vector<tensorflow::Tensor>>>>
UnbatchStreamResults(const tensorflow::Tensor& step_ids,
absl::Span<const tensorflow::Tensor> tensors) {
std::vector<std::pair<int64_t, std::vector<tensorflow::Tensor>>> responses;
if (step_ids.dims() > 0) {
if (step_ids.dtype() != tensorflow::DT_INT64 || step_ids.dims() != 1) {
return absl::InvalidArgumentError(absl::StrCat(
"Expected a 1-D int64 tensor for batched step ids but got dtype=",
tensorflow::DataTypeString(step_ids.dtype()),
" shape=", step_ids.shape().DebugString()));
}
const int batch_size = step_ids.dim_size(0);
for (int i = 0; i < tensors.size(); ++i) {
const tensorflow::TensorShape& shape = tensors[i].shape();
if (shape.dims() < 1 || shape.dim_size(0) != batch_size) {
return absl::InvalidArgumentError(absl::StrCat(
"All inputs to PwStreamResults inside tf.batch_function are "
"required to be batched (batch_size=",
batch_size, ") but input #", i, " has shape ",
shape.DebugString()));
}
}
std::vector<int> sizes;
absl::flat_hash_set<int64_t> unique_step_ids;
for (int i = 0; i < step_ids.NumElements(); ++i) {
const int64_t request_id = step_ids.flat<int64_t>()(i);
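      // The step id occupies the upper kStepIdBitSize bits of the 64-bit
      // request id; the remaining low bits index the example within the
      // request.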
const int64_t step_id =
static_cast<uint64_t>(request_id) >> (64 - kStepIdBitSize);
VLOG(1) << "PwStreamResults op is unbatching request_id=" << request_id
<< ", step_id=" << step_id;
if (step_id <= 0) {
return absl::InternalError(
absl::StrCat("Invalid step id=", step_id,
"; this usually indicates that `PwStreamResults` "
"was called from an unsupported nested context"));
}
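      // tf.batch_function pads a partial batch by repeating examples; once the
      // first request id reappears past position 0, the remaining entries are
      // treated as padding.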
if (i != 0 && request_id == step_ids.flat<int64_t>()(0)) {
break;
}
if (!responses.empty() && responses.back().first == step_id) {
sizes.back()++;
} else {
responses.push_back({step_id, {}});
sizes.push_back(1);
const bool inserted = unique_step_ids.insert(step_id).second;
if (!inserted) {
return absl::InternalError(absl::StrCat(
"Non-contiguous step ids found in the step id batch: ",
step_ids.DebugString(batch_size)));
}
}
}
int offset = 0;
for (int i = 0; i < responses.size(); ++i) {
auto& outputs = responses[i].second;
outputs.resize(tensors.size());
const int limit = offset + sizes[i];
for (int j = 0; j < tensors.size(); ++j) {
outputs[j] = tensors[j].Slice(offset, limit);
}
offset = limit;
}
} else {
const int64_t step_id = step_ids.flat<int64_t>()(0);
if (step_id <= 0) {
return absl::InternalError(
"Invalid step id; this usually indicates that `PwStreamResults` was "
"called from an unsupported nested context");
}
responses.push_back({step_id, std::vector<tensorflow::Tensor>(
tensors.begin(), tensors.end())});
}
return responses;
}
}
} | #include "tensorflow/core/tfrt/kernels/stream_ops_util.h"
#include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_matcher.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/tfrt/kernels/stream_ops_util_constants.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
using ::tensorflow::test::AsScalar;
using ::tensorflow::test::AsTensor;
using ::tensorflow::test::TensorEq;
using ::testing::ElementsAre;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
using ::testing::status::IsOkAndHolds;
int64_t RequestId(int64_t step_id, uint32_t id) {
return (step_id << kStepIdBitSize) | id;
}
TEST(UnbatchStreamResultsTest, ScalarStepId) {
const tensorflow::Tensor step_ids = AsScalar<int64_t>(1);
const std::vector<tensorflow::Tensor> tensors = {
AsScalar<int32_t>(1),
AsTensor<int32_t>({2, 3}),
};
EXPECT_THAT(UnbatchStreamResults(step_ids, tensors),
IsOkAndHolds(UnorderedElementsAre(
Pair(1, ElementsAre(TensorEq(AsScalar<int32_t>(1)),
TensorEq(AsTensor<int32_t>({2, 3})))))));
}
TEST(UnbatchStreamResultsTest, Batched) {
const tensorflow::Tensor step_ids = AsTensor<int64_t>(
{RequestId(1, 0), RequestId(1, 1), RequestId(2, 0), RequestId(3, 0)});
const std::vector<tensorflow::Tensor> tensors = {
AsTensor<int32_t>({1, 2, 3, 4}),
AsTensor<int32_t>({5, 6, 7, 8}),
};
EXPECT_THAT(UnbatchStreamResults(step_ids, tensors),
IsOkAndHolds(UnorderedElementsAre(
Pair(1, ElementsAre(TensorEq(AsTensor<int32_t>({1, 2})),
TensorEq(AsTensor<int32_t>({5, 6})))),
Pair(2, ElementsAre(TensorEq(AsTensor<int32_t>({3})),
TensorEq(AsTensor<int32_t>({7})))),
Pair(3, ElementsAre(TensorEq(AsTensor<int32_t>({4})),
TensorEq(AsTensor<int32_t>({8})))))));
}
TEST(UnbatchStreamResultsTest, BatchedUnordered) {
const tensorflow::Tensor step_ids = AsTensor<int64_t>(
{RequestId(2, 0), RequestId(1, 0), RequestId(1, 1), RequestId(3, 0)});
const std::vector<tensorflow::Tensor> tensors = {
AsTensor<int32_t>({20, 10, 10, 30}),
};
EXPECT_THAT(UnbatchStreamResults(step_ids, tensors),
IsOkAndHolds(UnorderedElementsAre(
Pair(1, ElementsAre(TensorEq(AsTensor<int32_t>({10, 10})))),
Pair(2, ElementsAre(TensorEq(AsTensor<int32_t>({20})))),
Pair(3, ElementsAre(TensorEq(AsTensor<int32_t>({30})))))));
}
TEST(UnbatchStreamResultsTest, PaddingOneExample) {
const tensorflow::Tensor step_ids = AsTensor<int64_t>(
{RequestId(1, 0), RequestId(1, 0), RequestId(1, 0), RequestId(1, 0)});
const std::vector<tensorflow::Tensor> tensors = {
AsTensor<int32_t>({10, 10, 10, 10}),
};
EXPECT_THAT(UnbatchStreamResults(step_ids, tensors),
IsOkAndHolds(UnorderedElementsAre(
Pair(1, ElementsAre(TensorEq(AsTensor<int32_t>({10})))))));
}
TEST(UnbatchStreamResultsTest, PaddingMultipleExamples) {
const tensorflow::Tensor step_ids = AsTensor<int64_t>(
{RequestId(1, 0), RequestId(1, 1), RequestId(2, 0), RequestId(1, 0)});
const std::vector<tensorflow::Tensor> tensors = {
AsTensor<int32_t>({10, 20, 30, 10}),
};
EXPECT_THAT(UnbatchStreamResults(step_ids, tensors),
IsOkAndHolds(UnorderedElementsAre(
Pair(1, ElementsAre(TensorEq(AsTensor<int32_t>({10, 20})))),
Pair(2, ElementsAre(TensorEq(AsTensor<int32_t>({30})))))));
}
}
}
} |
1,327 | cpp | tensorflow/tensorflow | ifrt_program_ops | tensorflow/core/tfrt/kernels/ifrt_program_ops.cc | tensorflow/core/tfrt/kernels/ifrt_program_ops_test.cc | #ifndef TENSORFLOW_CORE_TFRT_KERNELS_IFRT_PROGRAM_OPS_H_
#define TENSORFLOW_CORE_TFRT_KERNELS_IFRT_PROGRAM_OPS_H_
#include <stdint.h>
#include <string>
#include <vector>
#include "absl/base/call_once.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h"
namespace tensorflow {
namespace tfrt_stub {
class IfrtCallOp : public tensorflow::OpKernel {
public:
explicit IfrtCallOp(tensorflow::OpKernelConstruction* ctx);
IfrtCallOp(const IfrtCallOp& other) = delete;
IfrtCallOp& operator=(const IfrtCallOp& other) = delete;
void Compute(tensorflow::OpKernelContext* ctx) override;
private:
int64_t program_id_;
std::vector<std::string> variable_names_;
std::vector<int> variable_arg_indices_;
absl::once_flag init_once_;
tensorflow::ifrt_serving::IfrtServingExecutable* executable_;
};
}
}
#endif
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
namespace tensorflow {
namespace tfrt_stub {
REGISTER_OP("IfrtCall")
.Input("args: Tin")
.Output("results: Tout")
.Attr("Tin: list(type) >= 0")
.Attr("Tout: list(type) >= 0")
.Attr("program_id: int")
.Attr("variable_arg_indices: list(int)")
.SetIsStateful()
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
.Doc(R"(
Calls an IFRT program identified by the given program id.
This op looks up a `ServingExecutable` from `ServingExecutableRegistry` using
the program id, calls the executable with the op's inputs as arguments, and
returns its results as the op's outputs.
Note that this op is not part of a stable interface. Users must not use this op
in their SavedModel and instead rely on IFRT Serving's mechanism that
automatically inserts this op via graph rewrite.
program_id: int64 id that can be used to look up compiled programs from
  `ServingExecutableRegistry`.
variable_arg_indices: must be in sorted ascending order. The argument at position
  variable_arg_indices[k] in the TPU program is already loaded as an IFRT array,
  and the input `args[variable_arg_indices[k]]` is the key used to look up this
  loaded array.
)");
REGISTER_OP("IfrtLoadVariable")
.Input("variable: Tin")
.Output("array_key: Tout")
.Output("tensor: Tout")
.Attr("Tin: type")
.Attr("Tout: type")
.Attr("used_by_host: bool")
.SetIsStateful()
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
.Doc(R"(
This op loads a restored variable tensor as a tensor future. It is a replacement for `tf.ReadVariableOp`.
This op returns a scalar string tensor containing the restored variable name, which
is composed from `container_name` and `shared_name` from a `var_handle` and can be
used as a key within the runtime, as well as a future for the tensor.
Note that this op is not part of a stable interface. Users must not use this op
in their SavedModel and instead rely on IFRT Serving's mechanism that
automatically inserts this op via graph rewrite.
variable: the variable handle of the variable tensor to be loaded.
array_key: the key used by the `IfrtCall` op to look up the loaded array.
tensor: the future of the loaded tensor. The future contains a valid tensor if `used_by_host` is true.
used_by_host: a boolean indicating whether the variable is used by the host op
  or exclusively by the TPU.
)");
}
} | #include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/pjrt/cpu/cpu_client.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/python/pjrt_ifrt/pjrt_client.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "xla/tsl/framework/test_util/mock_serving_device_selector.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_matcher.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_executable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test_util.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
using tensorflow::ifrt_serving::ServingExecutableRegistry;
using tensorflow::ifrt_serving::test_utils::GetMlirModulePath;
using tensorflow::ifrt_serving::test_utils::IfrtServingExecutableTestHelper;
using tensorflow::test::AsTensor;
using tensorflow::test::TensorEq;
using ::testing::Return;
class IfrtCallOpTest : public OpsTestBase {
protected:
Status Init(int64_t program_id, int num_inputs, DataType input_type,
const std::vector<int>& variable_arg_indices,
const std::vector<DataType>& output_type_list) {
TF_CHECK_OK(NodeDefBuilder("op", "IfrtCall")
.Input(FakeInput(num_inputs, input_type))
.Attr("program_id", program_id)
.Attr("variable_arg_indices", variable_arg_indices)
.Attr("Tout", output_type_list)
.Finalize(node_def()));
return InitOp();
}
};
TEST_F(IfrtCallOpTest, Basic) {
int64_t program_id = 123;
TF_ASSERT_OK(Init(
program_id,
2,
DT_INT32,
{},
{DT_INT32}));
tsl::test_util::MockServingDeviceSelector selector;
IfrtServingExecutableTestHelper helper(&selector);
EXPECT_CALL(selector, ReserveDevice(absl::StrCat(program_id)))
.Times(1)
.WillOnce(Return(tsl::DeviceReservation(0, nullptr)));
auto executable =
helper.MakeExecutable(program_id, GetMlirModulePath("executable.mlir"));
TF_ASSERT_OK_AND_ASSIGN(
ServingExecutableRegistry::Handle handle,
ServingExecutableRegistry::Register(program_id, std::move(executable)));
auto handle_cleaner = gtl::MakeCleanup([&handle] { handle.Release(); });
AddInputFromArray<int32_t>(TensorShape({1, 3}), {1, 2, 3});
AddInputFromArray<int32_t>(TensorShape({3, 1}), {1, 2, 3});
for (int i = 0; i < helper.num_cores() + 1; ++i) {
TF_ASSERT_OK(RunOpKernel());
}
Tensor expected_out = AsTensor<int32_t>({14}, TensorShape({1, 1}));
EXPECT_THAT(*GetOutput(0), TensorEq(expected_out));
}
}
}
} |
1,328 | cpp | tensorflow/tensorflow | graph_executor | tensorflow/core/tfrt/graph_executor/graph_executor.cc | tensorflow/core/tfrt/graph_executor/graph_executor_test.cc | #ifndef TENSORFLOW_CORE_TFRT_GRAPH_EXECUTOR_GRAPH_EXECUTOR_H_
#define TENSORFLOW_CORE_TFRT_GRAPH_EXECUTOR_GRAPH_EXECUTOR_H_
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/strings/string_view.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tfrt/backend_compiler.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include "tensorflow/core/tfrt/fallback/cost_recorder.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/tfrt/fallback/op_kernel_runner.h"
#include "tensorflow/core/tfrt/graph_executor/executable_context.h"
#include "tensorflow/core/tfrt/graph_executor/graph_execution_options.h"
#include "tensorflow/core/tfrt/graph_executor/sync_resource_state.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/function.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/value.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
#include "tensorflow/core/tfrt/runtime/stream.h"
#include "tensorflow/core/tfrt/runtime/work_queue_interface.h"
#include "tensorflow/core/tfrt/utils/tfrt_graph_execution_state.h"
#include "tsl/lib/monitoring/sampler.h"
#include "tsl/platform/thread_annotations.h"
#include "tfrt/bef/bef_buffer.h"
#include "tfrt/bef_executor/bef_file.h"
#include "tfrt/core_runtime/core_runtime.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/function.h"
#include "tfrt/host_context/request_deadline_tracker.h"
#include "tfrt/host_context/resource_context.h"
#include "tfrt/support/ref_count.h"
namespace tensorflow {
namespace tfrt_stub {
struct RequestInfo {
tfrt::RCReference<tfrt::RequestContext> tfrt_request_context;
std::unique_ptr<WorkQueueInterface> request_queue_owner;
WorkQueueInterface* request_queue = nullptr;
std::function<void(std::function<void()>)> runner;
tensorflow::CancellationManager cancellation_manager;
};
struct SymbolUids {
std::string tf_symbol_uid;
std::string tfrt_symbol_uid;
};
absl::StatusOr<std::unique_ptr<RequestInfo>> CreateRequestInfo(
const GraphExecutionOptions& options,
const GraphExecutionRunOptions& run_options,
tensorflow::tfrt_stub::WorkQueueInterface* work_queue,
tfrt::ResourceContext* resource_context,
tfrt::ResourceContext* client_graph_resource_context,
OpKernelRunnerTable* runner_table,
tfd::FallbackResourceArray* resource_array, FallbackState& fallback_state,
const ProcessFunctionLibraryRuntime& process_function_library_runtime,
CostRecorder* cost_recorder = nullptr);
tensorflow::Status GraphExecutionRunOnFunction(
const GraphExecutionOptions& options,
const GraphExecutionRunOptions& run_options,
absl::string_view signature_name, const SymbolUids& symbol_uids,
const tfrt::Function* func, const mlrt::LoadedExecutable* loaded_executable,
absl::Span<const tensorflow::Tensor> inputs,
std::vector<tensorflow::Tensor>* outputs,
tfrt::ResourceContext* resource_context,
tfrt::ResourceContext* client_graph_resource_context,
OpKernelRunnerTable* runner_table,
tfd::FallbackResourceArray* resource_array, const Runtime& runtime,
FallbackState& fallback_state,
const tensorflow::ProcessFunctionLibraryRuntime&
process_function_library_runtime,
tfrt::RequestDeadlineTracker* req_deadline_tracker,
std::optional<StreamCallbackId> stream_callback_id,
CostRecorder* cost_recorder = nullptr);
tensorflow::Status RunMlrtFunction(
mlrt::bc::Function function,
const mlrt::LoadedExecutable& loaded_executable,
const tsl::RCReference<tfrt::RequestContext>& request_context,
tfrt::ConcurrentWorkQueue& work_queue,
absl::Span<const tensorflow::Tensor> inputs,
std::vector<tensorflow::Tensor>* outputs,
SyncResourceState* sync_resource_state);
class GraphExecutor {
public:
using Options = GraphExecutionOptions;
using RunOptions = GraphExecutionRunOptions;
class LoadedClientGraph {
public:
LoadedClientGraph(std::string name, SymbolUids symbol_uids,
GraphExecutor* graph_executor,
std::unique_ptr<mlir::MLIRContext> mlir_context,
mlir::OwningOpRef<mlir::ModuleOp> tf_mlir_with_op_keys,
mlir::OwningOpRef<mlir::ModuleOp> tfrt_mlir,
std::shared_ptr<ExecutableContext> executable_context,
std::optional<StreamCallbackId> stream_callback_id,
bool is_restore, FunctionLibraryDefinition flib_def,
tsl::monitoring::SamplerCell* latency_sampler);
CostRecorder* MaybeGetCostRecorder(absl::Time now, bool* do_recompilation);
Status UpdateCost(const CostRecorder& cost_recorder,
const Runtime& runtime);
void UpdateCostAnalysisData(absl::Time now, bool do_recompilation);
std::shared_ptr<ExecutableContext> executable_context() const {
tensorflow::mutex_lock lock(executable_context_mu_);
return executable_context_;
}
absl::string_view name() const { return name_; }
const SymbolUids& symbol_uids() const { return symbol_uids_; }
OpKernelRunnerTable& runner_table() { return runner_table_; }
tfd::FallbackResourceArray& resource_array() { return resource_array_; }
SyncResourceState& sync_resource_state() { return sync_resource_state_; }
std::optional<StreamCallbackId> stream_callback_id() const {
return stream_callback_id_;
}
bool is_restore() const { return is_restore_; }
const ProcessFunctionLibraryRuntime& process_function_library_runtime()
const {
return pflr_;
}
tsl::monitoring::SamplerCell* latency_sampler() { return latency_sampler_; }
private:
std::string name_;
SymbolUids symbol_uids_;
GraphExecutor* graph_executor_ = nullptr;
std::unique_ptr<mlir::MLIRContext> mlir_context_;
struct CostAnalysisData {
mutable tensorflow::mutex mu;
bool is_available TF_GUARDED_BY(mu) = false;
std::unique_ptr<CostRecorder> cost_recorder;
mlir::OwningOpRef<mlir::ModuleOp> tf_mlir_with_op_keys;
mlir::OwningOpRef<mlir::ModuleOp> tfrt_mlir;
absl::Time start_time TF_GUARDED_BY(mu) = absl::Now();
int num_cost_updates TF_GUARDED_BY(mu) = 0;
};
CostAnalysisData cost_analysis_data_;
OpKernelRunnerTable runner_table_;
tfd::FallbackResourceArray resource_array_;
mutable tensorflow::mutex executable_context_mu_;
std::shared_ptr<ExecutableContext> executable_context_
TF_GUARDED_BY(executable_context_mu_);
SyncResourceState sync_resource_state_;
std::optional<StreamCallbackId> stream_callback_id_;
bool is_restore_;
FunctionLibraryDefinition flib_def_;
ProcessFunctionLibraryRuntime pflr_;
tsl::monitoring::SamplerCell* latency_sampler_;
};
struct ClientGraph {
std::string name;
tensorflow::GraphImportConfig::InputArrays input_nodes;
std::vector<std::string> output_nodes;
std::vector<std::string> target_nodes;
};
static absl::StatusOr<std::unique_ptr<GraphExecutor>> Create(
Options options, std::unique_ptr<FallbackState> fallback_state,
std::unique_ptr<tfrt::ResourceContext> resource_context,
tensorflow::GraphDef graph_def,
std::unique_ptr<mlrt::KernelRegistry> kernel_registry);
GraphExecutor(Options options, std::unique_ptr<FallbackState> fallback_state,
std::unique_ptr<tfrt::ResourceContext> resource_context,
std::unique_ptr<tensorflow::tfrt_stub::TfrtGraphExecutionState>
graph_execution_state,
std::unique_ptr<mlrt::KernelRegistry> kernel_registry);
tensorflow::Status Run(
const RunOptions& run_options,
absl::Span<const std::pair<std::string, tensorflow::Tensor>> inputs,
absl::Span<const std::string> output_tensor_names,
absl::Span<const std::string> target_tensor_names,
std::vector<tensorflow::Tensor>* outputs);
tensorflow::Status RunWithSyncInterpreter(
const std::string& graph_name, absl::Span<mlrt::Value> input_values,
absl::Span<const std::string> input_names,
absl::Span<const tensorflow::DataType> input_dtypes,
absl::Span<const std::string> output_tensor_names,
absl::Span<const std::string> target_tensor_names,
absl::Span<mlrt::Value> outputs);
tensorflow::Status Extend(const GraphDef& graph);
tensorflow::tfrt_stub::TfrtGraphExecutionState& graph_execution_state()
const {
return *graph_execution_state_;
}
const tensorflow::tfrt_stub::Runtime& runtime() const {
DCHECK(options_.runtime);
return *options_.runtime;
}
tfrt::ResourceContext& resource_context() { return *resource_context_; }
const Options& options() const { return options_; }
const FallbackState& fallback_state() const { return *fallback_state_; }
FallbackState& fallback_state() { return *fallback_state_; }
tensorflow::Status CompileGraph(
const std::string& graph_name,
absl::Span<const std::string> input_tensor_names,
absl::Span<const tensorflow::DataType> input_tensor_dtypes,
absl::Span<const std::string> output_tensor_names,
absl::Span<const std::string> target_tensor_names);
const mlrt::KernelRegistry& kernel_registry() const {
return *kernel_registry_;
}
private:
absl::StatusOr<std::unique_ptr<GraphExecutor::LoadedClientGraph>>
LoadClientGraph(
const GraphExecutor::ClientGraph& client_graph,
tensorflow::tfrt_stub::WorkQueueInterface* work_queue,
absl::Span<const std::pair<std::string, tensorflow::Tensor>> inputs);
absl::StatusOr<std::unique_ptr<GraphExecutor::LoadedClientGraph>>
ImportAndCompileClientGraph(
const GraphExecutor::ClientGraph& client_graph,
absl::Span<const std::pair<std::string, tensorflow::Tensor>> inputs);
absl::StatusOr<
std::pair<FunctionLibraryDefinition, mlir::OwningOpRef<mlir::ModuleOp>>>
ImportClientGraphToMlirModule(const GraphExecutor::ClientGraph& client_graph,
mlir::MLIRContext* context) const;
absl::StatusOr<tfrt::BefBuffer> CompileMlirModuleToBef(
mlir::ModuleOp module) const;
tensorflow::Status InitBef(
LoadedClientGraph* loaded_client_graph,
tensorflow::tfrt_stub::WorkQueueInterface* work_queue);
tensorflow::Status InitBytecode(LoadedClientGraph* loaded_graph);
absl::StatusOr<std::reference_wrapper<GraphExecutor::LoadedClientGraph>>
GetOrCreateLoadedClientGraph(
const RunOptions& run_options,
absl::Span<const std::string> input_tensor_names,
absl::Span<const tensorflow::DataType> input_tensor_dtypes,
absl::Span<const std::string> output_tensor_names,
absl::Span<const std::string> target_tensor_names,
tensorflow::tfrt_stub::WorkQueueInterface* work_queue,
absl::string_view graph_name = "",
absl::Span<const std::pair<std::string, tensorflow::Tensor>> inputs = {})
TF_LOCKS_EXCLUDED(loaded_client_graphs_mu_);
Options options_;
std::unique_ptr<FallbackState> fallback_state_;
std::unique_ptr<tensorflow::tfrt_stub::TfrtGraphExecutionState>
graph_execution_state_;
tfrt::RequestDeadlineTracker req_deadline_tracker_;
tensorflow::mutex loaded_client_graphs_mu_;
  absl::flat_hash_map<std::string, std::unique_ptr<LoadedClientGraph>>
      loaded_client_graphs_ TF_GUARDED_BY(loaded_client_graphs_mu_);
std::unique_ptr<mlrt::KernelRegistry> kernel_registry_;
std::unique_ptr<tfrt::ResourceContext> resource_context_;
protected:
absl::Duration simulated_duration_ = absl::ZeroDuration();
tensorflow::mutex num_recompilations_mu_;
int num_recompilations_ TF_GUARDED_BY(num_recompilations_mu_) = 0;
};
void RegisterMlirDialect(mlir::DialectRegistry& registry,
tensorflow::BackendCompiler* backend_compiler);
}
}
#endif
#include "tensorflow/core/tfrt/graph_executor/graph_executor.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <functional>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/Extensions/AllExtensions.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinDialect.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/mlrt/import_model.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/update_op_cost_in_tfrt_mlir.h"
#include "tensorflow/compiler/mlir/tfrt/translate/import_model.h"
#include "tensorflow/compiler/mlir/tfrt/translate/tfrt_compile_options.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/rendezvous.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/monitoring/gauge.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_utils.h"
#include "tensorflow/core/tfrt/common/metrics.h"
#include "tensorflow/core/tfrt/fallback/cost_recorder.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/tfrt/fallback/op_kernel_runner.h"
#include "tensorflow/core/tfrt/graph_executor/executable_context.h"
#include "tensorflow/core/tfrt/graph_executor/export_mlir.h"
#include "tensorflow/core/tfrt/graph_executor/graph_execution_options.h"
#include "tensorflow/core/tfrt/graph_executor/sync_resource_state.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/executable.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/function.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/execute.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/value.h"
#include "tensorflow/core/tfrt/mlrt/kernel/context.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
#include "tensorflow/core/tfrt/runtime/step_id.h"
#include "tensorflow/core/tfrt/runtime/stream.h"
#include "tensorflow/core/tfrt/runtime/work_queue_interface.h"
#include "tensorflow/core/tfrt/stubs/tfrt_native_lowering_stub.h"
#include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include "tensorflow/core/tfrt/utils/tfrt_graph_execution_state.h"
#include "tensorflow/core/tfrt/utils/utils.h"
#include "tsl/lib/monitoring/sampler.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/refcount.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/traceme.h"
#include "tfrt/bef/bef_buffer.h"
#include "tfrt/bef_converter/mlir_to_bef.h"
#include "tfrt/core_runtime/core_runtime.h"
#include "tfrt/host_context/async_dispatch.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/host_context/async_value_ref.h"
#include "tfrt/host_context/chain.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/function.h"
#include "tfrt/host_context/host_context.h"
#include "tfrt/host_context/request_deadline_tracker.h"
#include "tfrt/host_context/resource_context.h"
#include "tfrt/support/forward_decls.h"
#include "tfrt/support/ref_count.h"
#include "tfrt/support/string_util.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
constexpr char kDeadlineExceededMessage[] = "Deadline exceeded.";
constexpr char kTensorNameJoiningDelimiter[] = "-";
constexpr char kArgumentTypeJoiningDelimiter[] = "^";
constexpr char kFallbackInitFunction[] = "_tfrt_fallback_init";
constexpr char kResourceInitFunction[] = "_tfrt_resource_init";
StepId GetNextStepId() {
static StepIdGenerator gen;
return gen.GetNextStepId();
}
auto* graph_executor_mode = monitoring::Gauge<std::string, 2>::New(
"/tfrt/graph_executor/mode",
"Record the total number of imported savedmodel using different graph "
"executor modes (BEF vs MLRT interpreter)",
"model_name", "model_version");
}
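// Runs a single MLRT bytecode function synchronously: inputs are wrapped as
// FallbackTensors, the interpreter is enqueued on `work_queue`, and the call
// blocks until the function's exit handler signals completion. On success the
// produced FallbackTensors are moved into `outputs`.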
tensorflow::Status RunMlrtFunction(
mlrt::bc::Function function,
const mlrt::LoadedExecutable& loaded_executable,
const tsl::RCReference<tfrt::RequestContext>& request_context,
tfrt::ConcurrentWorkQueue& work_queue,
absl::Span<const tensorflow::Tensor> inputs,
std::vector<tensorflow::Tensor>* outputs,
SyncResourceState* sync_resource_state) {
DCHECK(function);
const auto* fallback_request_state =
request_context->GetDataIfExists<tfd::KernelFallbackCompatRequestState>();
DCHECK(fallback_request_state);
mlrt::ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(&work_queue);
tfrt::ExecutionContext exec_ctx(request_context);
AddSyncContext(execution_context, *request_context->host(),
sync_resource_state);
execution_context.AddUserContext(std::make_unique<tf_mlrt::Context>(
fallback_request_state, request_context->resource_context(),
request_context->cancellation_context().get()));
execution_context.AddUserErrorLogger(
[fallback_request_state](absl::Status status) {
if (fallback_request_state) {
LOG(ERROR) << "Model "
<< fallback_request_state->session_metadata().name()
<< " version "
<< fallback_request_state->session_metadata().version()
<< " has error: " << status;
}
});
absl::InlinedVector<mlrt::Value, 4> mlrt_inputs;
mlrt_inputs.reserve(inputs.size());
for (const auto& input : inputs) {
mlrt_inputs.emplace_back(FallbackTensor(input));
}
absl::InlinedVector<mlrt::Value, 4> mlrt_outputs(
function.output_regs().size());
tsl::RCReference<tsl::AsyncValue> chain =
tsl::MakeConstructedAsyncValueRef<tsl::Chain>();
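  // `chain` becomes concrete when the interpreter's exit handler fires;
  // Await(chain) below blocks until execution has fully finished, which is
  // what makes this function synchronous to its caller.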
execution_context.set_exit_handler(
[chain = chain.get()]() { chain->SetStateConcrete(); });
execution_context.CallByMove(function, absl::MakeSpan(mlrt_inputs),
absl::MakeSpan(mlrt_outputs));
work_queue.AddTask(
[&execution_context]() { mlrt::Execute(execution_context); });
work_queue.Await(chain);
if (!execution_context.status().ok()) {
outputs->resize(mlrt_outputs.size(), tensorflow::Tensor());
return execution_context.status();
}
for (auto& mlrt_output : mlrt_outputs) {
DCHECK(mlrt_output.HasValue());
outputs->push_back(std::move(mlrt_output.Get<FallbackTensor>().tensor()));
}
return absl::OkStatus();
}
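// Assembles the per-request state for one graph execution: the work queue to
// run on, a `runner` closure that schedules tasks onto it, and a TFRT request
// context carrying the kernel-fallback request state (device manager, kernel
// runner table, cost recorder, cancellation manager, and related objects).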
absl::StatusOr<std::unique_ptr<RequestInfo>> CreateRequestInfo(
const GraphExecutionOptions& options,
const GraphExecutionRunOptions& run_options,
tensorflow::tfrt_stub::WorkQueueInterface* work_queue,
tfrt::ResourceContext* resource_context,
tfrt::ResourceContext* client_graph_resource_context,
OpKernelRunnerTable* runner_table,
tfd::FallbackResourceArray* resource_array,
tensorflow::tfrt_stub::FallbackState& fallback_state,
const tensorflow::ProcessFunctionLibraryRuntime&
process_function_library_runtime,
CostRecorder* cost_recorder) {
auto request_info = std::make_unique<RequestInfo>();
DCHECK(options.runtime);
const Runtime& runtime = *options.runtime;
int64_t request_id = 0;
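  // Prefer the id carried by a caller-provided work queue; an id of 0 is
  // treated as "unset", in which case a fresh step id is generated.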
if (work_queue != nullptr) {
request_id = work_queue->id();
if (request_id == 0) request_id = GetNextStepId().id;
request_info->request_queue = work_queue;
} else {
request_id = GetNextStepId().id;
TF_ASSIGN_OR_RETURN(request_info->request_queue_owner,
runtime.CreateRequestQueue(request_id));
request_info->request_queue = request_info->request_queue_owner.get();
}
auto* request_queue = request_info->request_queue;
request_info->runner = [request_queue](std::function<void()> f) {
request_queue->AddTask(std::move(f));
};
tfrt::RequestContextBuilder request_context_builder(
runtime.core_runtime()->GetHostContext(), resource_context, request_id);
DCHECK(runner_table);
DCHECK(resource_array);
auto& fallback_request_state =
request_context_builder.context_data()
.emplace<tfd::KernelFallbackCompatRequestState>(
&request_info->runner, &fallback_state.device_manager(),
request_context_builder.id(), runner_table, resource_array,
request_queue->GetIntraOpThreadPool(), options.model_metadata,
&process_function_library_runtime);
fallback_request_state.set_cost_recorder(cost_recorder);
fallback_request_state.set_client_graph_resource_context(
client_graph_resource_context);
fallback_request_state.set_runtime_config(&options.runtime_config);
fallback_request_state.set_cancellation_manager(
&request_info->cancellation_manager);
tfrt::RequestOptions request_options;
request_options.priority = run_options.priority;
request_context_builder.set_request_options(request_options);
auto expected_req_ctx = std::move(request_context_builder).build();
if (!expected_req_ctx) {
return tensorflow::errors::Internal(
tfrt::StrCat(expected_req_ctx.takeError()));
}
request_info->tfrt_request_context = std::move(expected_req_ctx.get());
return request_info;
}
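// Executes the compiled form of a client graph for one signature: either a
// BEF `func` or an MLRT `loaded_executable`, under a freshly created request
// context. Cost measurement is active when `cost_recorder` is non-null.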
tensorflow::Status GraphExecutionRunOnFunction(
const GraphExecutionOptions& options,
const GraphExecutionRunOptions& run_options,
absl::string_view signature_name, const SymbolUids& symbol_uids,
const tfrt::Function* func, const mlrt::LoadedExecutable* loaded_executable,
absl::Span<const tensorflow::Tensor> inputs,
std::vector<tensorflow::Tensor>* outputs,
tfrt::ResourceContext* resource_context,
tfrt::ResourceContext* client_graph_resource_context,
OpKernelRunnerTable* runner_table,
tfd::FallbackResourceArray* resource_array, const Runtime& runtime,
FallbackState& fallback_state,
const tensorflow::ProcessFunctionLibraryRuntime&
process_function_library_runtime,
tfrt::RequestDeadlineTracker* req_deadline_tracker,
std::optional<StreamCallbackId> stream_callback_id,
CostRecorder* cost_recorder) {
TF_ASSIGN_OR_RETURN(
auto request_info,
CreateRequestInfo(options, run_options, run_options.work_queue,
resource_context, client_graph_resource_context,
runner_table, resource_array, fallback_state,
process_function_library_runtime, cost_recorder));
int64_t request_id = request_info->tfrt_request_context->id();
tsl::profiler::TraceMe traceme(
[request_id, signature_name, &options, symbol_uids] {
return tsl::profiler::TraceMeEncode(
"TfrtModelRun",
{{"_r", 1},
{"id", request_id},
{"signature", signature_name},
{"model_id", absl::StrCat(options.model_metadata.name(), ":",
options.model_metadata.version())},
{"tf_symbol_ | #include "tensorflow/core/tfrt/graph_executor/graph_executor.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "learning/brain/experimental/tfrt/native_lowering/kernels/math_kernels.h"
#include "learning/brain/experimental/tfrt/native_lowering/kernels/sync_fallback_kernels.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/tfrt/graph_executor/graph_execution_options.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/value.h"
#include "tensorflow/core/tfrt/mlrt/kernel/kernel.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_testutil.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tfrt/cpp_tests/test_util.h"
#include "tfrt/host_context/resource_context.h"
#include "tfrt/tensor/dense_host_tensor.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
using ::testing::status::StatusIs;
class GraphExecutorForTestingCostAnalysis : public GraphExecutor {
public:
int num_recompilations() {
tensorflow::mutex_lock lock(num_recompilations_mu_);
return num_recompilations_;
}
void AdvanceTime(absl::Duration duration) {
simulated_duration_ = simulated_duration_ + duration;
}
};
class GraphExecutorTest : public ::testing::TestWithParam<bool> {};
tensorflow::Status GetSimpleGraphDef(GraphDef& graph_def) {
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
auto input = ops::Placeholder(scope.WithOpName("input"), DT_INT32);
auto rank = ops::Rank(scope.WithOpName("rank"), input);
return scope.ToGraphDef(&graph_def);
}
std::unique_ptr<mlrt::KernelRegistry> GetKernelRegistry() {
auto kernel_registry = std::make_unique<mlrt::KernelRegistry>();
tensorflow::tf_mlrt::RegisterTfMlrtKernels(*kernel_registry);
tfrt::cpu::RegisterMlrtMathKernels(kernel_registry.get());
tfrt::cpu::RegisterMlrtFallbackCompatKernels(kernel_registry.get());
return kernel_registry;
}
TEST_P(GraphExecutorTest, Vanilla) {
GraphDef graph_def;
TF_ASSERT_OK(GetSimpleGraphDef(graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
options.enable_mlrt = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
          CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({2}));
}
TEST_P(GraphExecutorTest, OnlineCostAnalysisOptionsOverrideToOnce) {
GraphDef graph_def;
TF_ASSERT_OK(GetSimpleGraphDef(graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
options.enable_online_cost_analysis = true;
options.cost_analysis_options.version =
GraphExecutionOptions::CostAnalysisOptions::kDisabled;
options.enable_mlrt = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor_base,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
auto graph_executor = std::unique_ptr<GraphExecutorForTestingCostAnalysis>(
static_cast<GraphExecutorForTestingCostAnalysis*>(
graph_executor_base.release()));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
EXPECT_EQ(graph_executor->num_recompilations(), 0);
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({2}));
EXPECT_EQ(graph_executor->num_recompilations(), 1);
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({2}));
EXPECT_EQ(graph_executor->num_recompilations(), 1);
}
TEST_P(GraphExecutorTest, OnlineCostAnalysisEveryTime) {
GraphDef graph_def;
TF_ASSERT_OK(GetSimpleGraphDef(graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
options.cost_analysis_options.version =
GraphExecutionOptions::CostAnalysisOptions::kPeriodic;
options.cost_analysis_options.reset_interval = absl::ZeroDuration();
options.cost_analysis_options.updates_per_interval = 1;
options.enable_mlrt = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor_base,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
auto graph_executor = std::unique_ptr<GraphExecutorForTestingCostAnalysis>(
static_cast<GraphExecutorForTestingCostAnalysis*>(
graph_executor_base.release()));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
for (int i = 0; i < 10; ++i) {
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({2}));
EXPECT_EQ(graph_executor->num_recompilations(), i + 1);
}
}
TEST_P(GraphExecutorTest, OnlineCostAnalysisDisabled) {
GraphDef graph_def;
TF_ASSERT_OK(GetSimpleGraphDef(graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
options.cost_analysis_options.version =
GraphExecutionOptions::CostAnalysisOptions::kDisabled;
options.cost_analysis_options.reset_interval = absl::ZeroDuration();
options.cost_analysis_options.updates_per_interval = 1;
options.enable_mlrt = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor_base,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
auto graph_executor = std::unique_ptr<GraphExecutorForTestingCostAnalysis>(
static_cast<GraphExecutorForTestingCostAnalysis*>(
graph_executor_base.release()));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
EXPECT_EQ(graph_executor->num_recompilations(), 0);
}
TEST_P(GraphExecutorTest, OnlineCostAnalysisPeriodic) {
GraphDef graph_def;
TF_ASSERT_OK(GetSimpleGraphDef(graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
options.cost_analysis_options.version =
GraphExecutionOptions::CostAnalysisOptions::kPeriodic;
options.cost_analysis_options.reset_interval = absl::Minutes(10);
options.cost_analysis_options.updates_per_interval = 5;
options.enable_mlrt = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor_base,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
auto graph_executor = std::unique_ptr<GraphExecutorForTestingCostAnalysis>(
static_cast<GraphExecutorForTestingCostAnalysis*>(
graph_executor_base.release()));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
EXPECT_EQ(graph_executor->num_recompilations(), 1);
for (int i = 0; i < 10; ++i) {
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
EXPECT_EQ(graph_executor->num_recompilations(), 1);
}
for (int i = 0; i < 4; ++i) {
graph_executor->AdvanceTime(absl::Minutes(2));
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
EXPECT_EQ(graph_executor->num_recompilations(), 1);
}
graph_executor->AdvanceTime(absl::Minutes(2));
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
EXPECT_EQ(graph_executor->num_recompilations(), 2);
for (int i = 0; i < 4; ++i) {
graph_executor->AdvanceTime(absl::Minutes(1000));
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
EXPECT_EQ(graph_executor->num_recompilations(), 2);
}
graph_executor->AdvanceTime(absl::Minutes(1000));
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
EXPECT_EQ(graph_executor->num_recompilations(), 3);
}
REGISTER_OP("TestCancel")
.Input("x: T")
.Output("z: T")
.Attr("T: {int32}")
.SetShapeFn(::tensorflow::shape_inference::UnchangedShape);
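// A test-only kernel that cancels its own step from inside Compute(), used
// below to verify that cancellation raised by a kernel surfaces as a
// kCancelled status from GraphExecutor::Run.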
class TestCancelKernel : public OpKernel {
public:
explicit TestCancelKernel(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* ctx) override {
auto status = absl::CancelledError();
ctx->cancellation_manager()->StartCancelWithStatus(status);
ctx->SetStatus(status);
}
};
REGISTER_KERNEL_BUILDER(Name("TestCancel").Device(DEVICE_CPU),
TestCancelKernel);
REGISTER_OP("TestIsCancelled").Output("z: T").Attr("T: {bool}").SetIsStateful();
class TestIsCancelledKernel : public OpKernel {
public:
explicit TestIsCancelledKernel(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* ctx) override {
ctx->set_output(
0, tensorflow::Tensor(ctx->cancellation_manager()->IsCancelled()));
}
};
REGISTER_KERNEL_BUILDER(Name("TestIsCancelled").Device(DEVICE_CPU),
TestIsCancelledKernel);
TEST_P(GraphExecutorTest, Cancellation) {
GraphDef graph_def;
tensorflow::GraphDefBuilder builder(
tensorflow::GraphDefBuilder::kFailImmediately);
const tensorflow::TensorShape tensor_shape({10, 9});
tensorflow::Node* input = tensorflow::ops::SourceOp(
"Placeholder", builder.opts()
.WithName("input")
.WithAttr("dtype", tensorflow::DT_INT32)
.WithAttr("shape", tensor_shape));
tensorflow::ops::SourceOp("TestIsCancelled",
builder.opts()
.WithName("is_cancelled")
.WithAttr("T", tensorflow::DT_BOOL));
tensorflow::ops::UnaryOp("TestCancel", input,
builder.opts()
.WithName("test_cancel")
.WithAttr("T", tensorflow::DT_INT32));
TF_ASSERT_OK(builder.ToGraphDef(&graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
options.enable_mlrt = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
          CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
{
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
EXPECT_THAT(graph_executor->Run({}, inputs,
{"test_cancel:0"},
{}, &outputs),
StatusIs(absl::StatusCode::kCancelled));
}
{
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(graph_executor->Run({}, {},
{"is_cancelled:0"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<bool>(outputs[0]),
::testing::ElementsAreArray({false}));
}
}
INSTANTIATE_TEST_SUITE_P(GraphExecutorTestSuite, GraphExecutorTest,
::testing::Bool());
TEST_F(GraphExecutorTest, Extend) {
GraphDef graph_def;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output a = ops::Const(scope.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Const(scope.WithControlDependencies(a).WithOpName("b"),
0.0f, {10, 10});
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
}
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
auto session_options = CreateDefaultSessionOptions(options);
session_options.config.mutable_experimental()
->set_disable_optimize_for_static_graph(true);
TF_ASSERT_OK_AND_ASSIGN(auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
session_options, graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
GraphDef extension;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
auto input = ops::Placeholder(scope.WithOpName("input"), DT_INT32);
auto rank = ops::Rank(scope.WithOpName("rank"), input);
TF_ASSERT_OK(scope.ToGraphDef(&extension));
}
TF_ASSERT_OK(graph_executor->Extend(extension));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({2}));
}
TEST_F(GraphExecutorTest, DisableCompilation) {
GraphDef graph_def;
TF_ASSERT_OK(GetSimpleGraphDef(graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
GraphExecutor::RunOptions run_options;
run_options.disable_compilation = true;
auto status = graph_executor->Run(run_options, inputs,
{"rank"},
{}, &outputs);
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.ToString(),
::testing::HasSubstr("GraphExecutor: compilation is disabled in "
"execution but the compiled graph is not found"));
run_options.disable_compilation = false;
TF_ASSERT_OK(graph_executor->Run(run_options, inputs,
{"rank"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({2}));
}
TEST_F(GraphExecutorTest, SyncExecute) {
GraphDef graph_def;
TF_ASSERT_OK(GetSimpleGraphDef(graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
options.compile_options.compile_to_sync_tfrt_dialect = true;
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
std::vector<mlrt::Value> inputs;
tfrt::DenseHostTensor dht =
tfrt::CreateTensorFromValues<int32_t>({1, 3}, {1, 1, 1});
inputs.emplace_back(std::move(dht));
std::vector<mlrt::Value> results;
results.resize(1);
TF_ASSERT_OK(graph_executor->RunWithSyncInterpreter(
"test_graph", absl::Span<mlrt::Value>(inputs),
{"input"}, {DT_INT32},
{"rank"},
{}, absl::Span<mlrt::Value>(results)));
tfrt::DenseHostTensor expected =
tfrt::CreateTensorFromValues<int32_t>({}, {2});
EXPECT_EQ(expected, results[0].Get<tfrt::DenseHostTensor>());
}
}
}
} |
1,329 | cpp | tensorflow/tensorflow | ifrt_restore_tensor_registry | tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.cc | tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry_test.cc | #ifndef TENSORFLOW_CORE_TFRT_IFRT_IFRT_RESTORE_TENSOR_REGISTRY_H_
#define TENSORFLOW_CORE_TFRT_IFRT_IFRT_RESTORE_TENSOR_REGISTRY_H_
#include <string>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "xla/python/ifrt/future.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
namespace tensorflow {
namespace ifrt_serving {
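// A thread-safe registry, keyed by variable name, of tensors restored from a
// checkpoint. Restoration is asynchronous, so each entry holds a future.
// Illustrative usage (a sketch mirroring the unit tests further below;
// `restored_tensor` stands for the eventually loaded checkpoint value):
//
//   IfrtRestoreTensorRegistry registry;
//   auto promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
//   IfrtRestoreTensorRegistry::RestoredTensorInfo info = {
//       .used_by_host = false,
//       .dtype_and_shape = {.dtype = DT_INT32,
//                           .shape = tensorflow::TensorShape({2, 2})},
//       .tensor_future = xla::ifrt::Future<tensorflow::Tensor>(promise)};
//   TF_RETURN_IF_ERROR(registry.TryRegister("var_x", std::move(info)));
//   promise.Set(restored_tensor);
//
// Freeze() releases every tensor not marked as used by the host; afterwards
// GetRestoredTensor() for a released tensor reports kUnavailable.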
class IfrtRestoreTensorRegistry {
public:
struct RestoredTensorInfo {
bool used_by_host = false;
DtypeAndShape dtype_and_shape;
xla::ifrt::Future<tensorflow::Tensor> tensor_future;
};
absl::Status TryRegister(absl::string_view name,
RestoredTensorInfo restored_tensor_info)
ABSL_LOCKS_EXCLUDED(mutex_);
xla::ifrt::Future<tensorflow::Tensor> GetRestoredTensor(
absl::string_view name) const ABSL_LOCKS_EXCLUDED(mutex_);
absl::Status SetUsedByHost(absl::string_view name)
ABSL_LOCKS_EXCLUDED(mutex_);
absl::StatusOr<DtypeAndShape> GetDtypeAndShape(absl::string_view name) const
ABSL_LOCKS_EXCLUDED(mutex_);
void Freeze() ABSL_LOCKS_EXCLUDED(mutex_);
private:
mutable absl::Mutex mutex_;
absl::flat_hash_map<std::string, RestoredTensorInfo> restored_tensors_
ABSL_GUARDED_BY(mutex_);
};
}
}
#endif
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "xla/python/ifrt/future.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
namespace ifrt_serving {
absl::Status IfrtRestoreTensorRegistry::TryRegister(
absl::string_view name, RestoredTensorInfo restored_tensor_info) {
absl::MutexLock lock(&mutex_);
auto& info = restored_tensors_[name];
if (info.tensor_future.IsValid()) {
return absl::AlreadyExistsError(
absl::StrCat("Variable '", name, "' already registered."));
}
info = std::move(restored_tensor_info);
return absl::OkStatus();
}
xla::ifrt::Future<tensorflow::Tensor>
IfrtRestoreTensorRegistry::GetRestoredTensor(absl::string_view name) const {
absl::MutexLock lock(&mutex_);
auto it = restored_tensors_.find(name);
if (it == restored_tensors_.end()) {
return xla::ifrt::Future<tensorflow::Tensor>(
absl::NotFoundError(absl::StrCat("Variable '", name, "' not found.")));
}
return it->second.tensor_future;
}
absl::Status IfrtRestoreTensorRegistry::SetUsedByHost(absl::string_view name) {
absl::MutexLock lock(&mutex_);
auto it = restored_tensors_.find(name);
if (it == restored_tensors_.end()) {
return absl::NotFoundError(
absl::StrCat("Variable '", name, "' not found."));
}
it->second.used_by_host = true;
return absl::OkStatus();
}
void IfrtRestoreTensorRegistry::Freeze() {
absl::MutexLock lock(&mutex_);
xla::ifrt::Future<tensorflow::Tensor> release_tensor_future(
absl::UnavailableError("Tensor is already release."));
for (auto& [name, info] : restored_tensors_) {
if (!info.used_by_host) {
info.tensor_future = release_tensor_future;
}
}
}
absl::StatusOr<DtypeAndShape> IfrtRestoreTensorRegistry::GetDtypeAndShape(
absl::string_view name) const {
absl::MutexLock lock(&mutex_);
auto it = restored_tensors_.find(name);
if (it == restored_tensors_.end()) {
return absl::NotFoundError(
absl::StrCat("Variable '", name, "' not found."));
}
return it->second.dtype_and_shape;
}
}
} | #include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include <cstdint>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "xla/python/ifrt/future.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
using tsl::testing::IsOk;
using tsl::testing::StatusIs;
namespace tensorflow {
namespace ifrt_serving {
namespace {
TEST(IfrtRestoreTensorRegistryTest, RetrieveNonRegisteredTensorFails) {
IfrtRestoreTensorRegistry registry;
EXPECT_THAT(registry.GetRestoredTensor("input_tensor_1").Await(),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(IfrtRestoreTensorRegistryTest,
RetrieveNonRegisteredTensorDTypeAndShapeFails) {
IfrtRestoreTensorRegistry registry;
EXPECT_THAT(registry.GetDtypeAndShape("input_tensor_1"),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(IfrtRestoreTensorRegistryTest, SetNonExistedTensorAsUsedByHostFails) {
IfrtRestoreTensorRegistry registry;
EXPECT_THAT(registry.SetUsedByHost("input_tensor_1"),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(IfrtRestoreTensorRegistryTest, RegisteredExistedTensorFails) {
auto input_tensor =
test::AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({2, 2}));
auto promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future = xla::ifrt::Future<tensorflow::Tensor>(promise);
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info = {
.used_by_host = false,
.dtype_and_shape =
{
.dtype = DT_INT32,
.shape = tensorflow::TensorShape({2, 2}),
},
.tensor_future = future};
IfrtRestoreTensorRegistry registry;
EXPECT_THAT(registry.TryRegister("input_tensor_2", restored_tensor_info),
IsOk());
promise.Set(input_tensor);
EXPECT_THAT(registry.TryRegister("input_tensor_2", restored_tensor_info),
StatusIs(absl::StatusCode::kAlreadyExists));
}
TEST(IfrtRestoreTensorRegistryTest, SetTensorAsUsedByHost) {
auto promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future = xla::ifrt::Future<tensorflow::Tensor>(promise);
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info = {
.used_by_host = false,
.dtype_and_shape =
{
.dtype = DT_INT32,
.shape = tensorflow::TensorShape({2, 2}),
},
.tensor_future = future};
IfrtRestoreTensorRegistry registry;
EXPECT_THAT(registry.TryRegister("input_tensor_1", restored_tensor_info),
IsOk());
EXPECT_THAT(registry.SetUsedByHost("input_tensor_1"), IsOk());
}
TEST(IfrtRestoreTensorRegistryTest, RegisteredTensorCanBeRetrieved) {
auto input_tensor =
test::AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({2, 2}));
auto promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future = xla::ifrt::Future<tensorflow::Tensor>(promise);
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info = {
.used_by_host = false,
.dtype_and_shape =
{
.dtype = DT_INT32,
.shape = tensorflow::TensorShape({2, 2}),
},
.tensor_future = future};
IfrtRestoreTensorRegistry registry;
EXPECT_THAT(registry.TryRegister("input_tensor_1", restored_tensor_info),
IsOk());
promise.Set(input_tensor);
TF_ASSERT_OK_AND_ASSIGN(tensorflow::Tensor retrieved,
registry.GetRestoredTensor("input_tensor_1").Await());
test::ExpectEqual(retrieved, input_tensor);
TF_ASSERT_OK_AND_ASSIGN(DtypeAndShape dtype_and_shape,
registry.GetDtypeAndShape("input_tensor_1"));
EXPECT_TRUE(
dtype_and_shape.shape.IsSameSize(tensorflow::TensorShape({2, 2})));
EXPECT_EQ(dtype_and_shape.dtype, DT_INT32);
}
TEST(IfrtRestoreTensorRegistryTest,
RegisteredTensorDTypeAndShapeCanBeRetrieved) {
auto input_tensor =
test::AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({2, 2}));
auto promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future = xla::ifrt::Future<tensorflow::Tensor>(promise);
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info = {
.used_by_host = false,
.dtype_and_shape =
{
.dtype = DT_INT32,
.shape = tensorflow::TensorShape({2, 2}),
},
.tensor_future = future};
IfrtRestoreTensorRegistry registry;
EXPECT_THAT(registry.TryRegister("input_tensor_1", restored_tensor_info),
IsOk());
TF_ASSERT_OK_AND_ASSIGN(DtypeAndShape dtype_and_shape,
registry.GetDtypeAndShape("input_tensor_1"));
EXPECT_TRUE(
dtype_and_shape.shape.IsSameSize(tensorflow::TensorShape({2, 2})));
EXPECT_EQ(dtype_and_shape.dtype, DT_INT32);
}
TEST(IfrtRestoreTensorRegistryTest, FreezeTensorRegistry) {
auto input_tensor =
test::AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({2, 2}));
auto promise1 = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future1 = xla::ifrt::Future<tensorflow::Tensor>(promise1);
auto promise2 = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future2 = xla::ifrt::Future<tensorflow::Tensor>(promise2);
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info1 = {
.used_by_host = false,
.dtype_and_shape =
{
.dtype = DT_INT32,
.shape = tensorflow::TensorShape({2, 2}),
},
.tensor_future = future1};
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info2 = {
.used_by_host = true,
.dtype_and_shape =
{
.dtype = DT_INT32,
.shape = tensorflow::TensorShape({2, 2}),
},
.tensor_future = future2};
IfrtRestoreTensorRegistry registry;
TF_ASSERT_OK(registry.TryRegister("input_tensor_1", restored_tensor_info1));
TF_ASSERT_OK(registry.TryRegister("input_tensor_2", restored_tensor_info2));
promise1.Set(input_tensor);
promise2.Set(input_tensor);
registry.Freeze();
EXPECT_THAT(registry.GetRestoredTensor("input_tensor_1").Await(),
StatusIs(absl::StatusCode::kUnavailable));
TF_ASSERT_OK_AND_ASSIGN(tensorflow::Tensor retrieved,
registry.GetRestoredTensor("input_tensor_2").Await());
test::ExpectEqual(retrieved, input_tensor);
}
}
}
} |
1,330 | cpp | tensorflow/tensorflow | ifrt_serving_core_selector | tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.cc | tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector_test.cc | #ifndef TENSORFLOW_CORE_TFRT_IFRT_IFRT_SERVING_CORE_SELECTOR_H_
#define TENSORFLOW_CORE_TFRT_IFRT_IFRT_SERVING_CORE_SELECTOR_H_
#include <cstdint>
#include "absl/container/flat_hash_map.h"
#include "absl/synchronization/mutex.h"
#include "xla/tsl/framework/serving_device_selector.h"
namespace tensorflow {
namespace ifrt_serving {
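// Selects the core a program runs on. During warm-up each program is walked
// across all cores once; afterwards reservations are delegated to the
// underlying tsl::ServingDeviceSelector. Illustrative usage (a sketch; the
// device selector implementation is supplied by the caller):
//
//   IfrtServingCoreSelector core_selector(&device_selector, /*num_cores=*/2);
//   tsl::DeviceReservation r = core_selector.ReserveDevice(program_id);
//   int core = r.device_index();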
class IfrtServingCoreSelector {
public:
explicit IfrtServingCoreSelector(tsl::ServingDeviceSelector* device_selector,
int num_cores);
tsl::DeviceReservation ReserveDevice(int64_t program_id);
private:
tsl::ServingDeviceSelector* device_selector_;
absl::Mutex mu_;
absl::flat_hash_map<int64_t, int64_t> run_counter_ ABSL_GUARDED_BY(mu_);
int num_cores_;
};
}
}
#endif
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.h"
#include <cstdint>
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "xla/tsl/framework/serving_device_selector.h"
namespace tensorflow {
namespace ifrt_serving {
IfrtServingCoreSelector::IfrtServingCoreSelector(
tsl::ServingDeviceSelector* device_selector, int num_cores)
: device_selector_(device_selector), num_cores_(num_cores) {}
tsl::DeviceReservation IfrtServingCoreSelector::ReserveDevice(
int64_t program_id) {
absl::MutexLock lock(&mu_);
int64_t run_count = run_counter_[program_id]++;
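  // Warm-up: the first `num_cores_` runs of a program are pinned to cores
  // 0..num_cores_-1 in order, bypassing the selector, so every core sees the
  // program once before load balancing takes over.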
if (run_count < num_cores_) {
return tsl::DeviceReservation(run_count, nullptr);
}
return device_selector_->ReserveDevice(absl::StrCat(program_id));
}
}
} | #include "tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.h"
#include <cstdint>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "xla/tsl/framework/test_util/mock_serving_device_selector.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
class IfrtServingCoreSelectorTest : public ::testing::Test {
protected:
explicit IfrtServingCoreSelectorTest() {
core_selector_ = std::make_unique<IfrtServingCoreSelector>(
&serving_device_selector_, num_cores_);
}
tsl::test_util::MockServingDeviceSelector serving_device_selector_;
std::unique_ptr<IfrtServingCoreSelector> core_selector_;
int num_cores_ = 2;
};
TEST_F(IfrtServingCoreSelectorTest, ReservedDevicesReturns) {
int64_t program_id1 = 111111;
EXPECT_CALL(serving_device_selector_,
ReserveDevice(absl::StrCat(program_id1)))
.WillOnce([this](::testing::Unused) {
return tsl::DeviceReservation(0, &serving_device_selector_);
});
for (int i = 0; i < num_cores_; ++i) {
EXPECT_THAT(core_selector_->ReserveDevice(program_id1).device_index(), i);
}
tsl::DeviceReservation reservation =
core_selector_->ReserveDevice(program_id1);
EXPECT_THAT(reservation.device_index(), 0);
}
}
}
} |
1,331 | cpp | tensorflow/tensorflow | ifrt_loaded_variable_utils | tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.cc | tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils_test.cc | #ifndef TENSORFLOW_CORE_TFRT_IFRT_IFRT_LOADED_VARIABLE_UTILS_H_
#define TENSORFLOW_CORE_TFRT_IFRT_IFRT_LOADED_VARIABLE_UTILS_H_
#include <memory>
#include <string>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "xla/python/ifrt/client.h"
#include "tensorflow/core/framework/resource_handle.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_config.pb.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tsl/platform/threadpool.h"
#include "tfrt/host_context/concurrent_work_queue.h"
namespace tensorflow {
namespace ifrt_serving {
inline constexpr int kNoCoreSelectedIndex = -1;
absl::StatusOr<ifrt_serving::DtypeAndShape> GetDtypeAndShape(
const ResourceHandle& resource_handle);
std::string GetRuntimeNameFromVarHandle(const ResourceHandle& handle);
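// Asynchronously turns the restored tensor registered under `runtime_name`
// into an IFRT array sharded according to `sharding_config`, publishing it as
// a future in `ifrt_loaded_variable_registry`. Loading a variable that is
// already registered is a no-op; the host-to-device transfer itself runs on
// `checkpoint_loader_queue`.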
absl::Status AsyncLoadRestoredTensorAsIfrtLoadedVariable(
absl::string_view runtime_name,
std::shared_ptr<xla::ifrt::Client> ifrt_client,
const tsl::thread::ThreadPool& thread_pool,
const ifrt_serving::IfrtRestoreTensorRegistry& ifrt_restore_tensor_registry,
ifrt_serving::IfrtLoadedVariableRegistry& ifrt_loaded_variable_registry,
tfrt::ConcurrentWorkQueue* checkpoint_loader_queue,
const VariableDeviceShardingConfigProto& sharding_config);
}
}
#endif
#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/future.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tensorflow/core/framework/resource_handle.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tensorflow/core/tfrt/ifrt/sharding_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tfrt/host_context/concurrent_work_queue.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
absl::StatusOr<tsl::RCReference<xla::ifrt::Array>> LoadIfrtVariable(
std::shared_ptr<xla::ifrt::Client> ifrt_client,
const tsl::thread::ThreadPool& thread_pool,
const tensorflow::Tensor& variable,
const VariableDeviceShardingConfigProto& sharding_config) {
TF_ASSIGN_OR_RETURN(xla::HloSharding hlo_sharding,
xla::HloSharding::FromProto(sharding_config.sharding()));
return tensorflow::ifrt_serving::MakeArrayFromTensor(
*ifrt_client, variable, sharding_config.device_ids(), hlo_sharding,
thread_pool);
}
}
absl::StatusOr<ifrt_serving::DtypeAndShape> GetDtypeAndShape(
const ResourceHandle& resource_handle) {
const std::vector<DtypeAndPartialTensorShape>& dtype_and_partial_shapes =
resource_handle.dtypes_and_shapes();
if (dtype_and_partial_shapes.size() != 1) {
return absl::InvalidArgumentError(absl::StrCat(
"Expected 1 dtype and shape, got ", dtype_and_partial_shapes.size()));
}
ifrt_serving::DtypeAndShape dtype_and_shape;
if (!dtype_and_partial_shapes.front().shape.AsTensorShape(
&dtype_and_shape.shape)) {
return absl::InvalidArgumentError(
absl::StrCat("Failed to convert partial shape to full tensor shape: ",
dtype_and_partial_shapes.front().shape.DebugString()));
}
dtype_and_shape.dtype = dtype_and_partial_shapes.front().dtype;
return dtype_and_shape;
}
std::string GetRuntimeNameFromVarHandle(const ResourceHandle& handle) {
return absl::StrCat(handle.container(), "__", handle.name());
}
absl::Status AsyncLoadRestoredTensorAsIfrtLoadedVariable(
absl::string_view runtime_name,
std::shared_ptr<xla::ifrt::Client> ifrt_client,
const tsl::thread::ThreadPool& thread_pool,
const ifrt_serving::IfrtRestoreTensorRegistry& ifrt_restore_tensor_registry,
ifrt_serving::IfrtLoadedVariableRegistry& ifrt_loaded_variable_registry,
tfrt::ConcurrentWorkQueue* checkpoint_loader_queue,
const VariableDeviceShardingConfigProto& sharding_config) {
absl::flat_hash_set<int> device_ids{sharding_config.device_ids().begin(),
sharding_config.device_ids().end()};
IfrtLoadedVariableRegistry::Key loaded_variable_key{
.device_ids = std::move(device_ids),
.input_name = std::string(runtime_name),
};
if (ifrt_loaded_variable_registry.GetLoadedVariable(loaded_variable_key)
.ok()) {
VLOG(1) << "Found alread registered variable for " << runtime_name;
return absl::OkStatus();
}
xla::ifrt::Future<tensorflow::Tensor> restored_tensor_future =
ifrt_restore_tensor_registry.GetRestoredTensor(runtime_name);
if (!restored_tensor_future.IsValid()) {
return absl::InternalError(absl::StrCat(
"LoadVariableOp: failed to fetch variable tensor: ", runtime_name));
}
auto loaded_variable_promise =
xla::ifrt::Future<tsl::RCReference<xla::ifrt::Array>>::CreatePromise();
auto loaded_variable_future =
xla::ifrt::Future<tsl::RCReference<xla::ifrt::Array>>(
loaded_variable_promise);
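  // Publish the not-yet-fulfilled future first so that concurrent callers
  // observe the variable as in-flight; the promise is fulfilled below on the
  // checkpoint loader queue once the restored tensor materializes.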
  TF_RETURN_IF_ERROR(
      ifrt_restore_tensor_registry.GetDtypeAndShape(runtime_name).status());
TF_RETURN_IF_ERROR(ifrt_loaded_variable_registry.TryRegisterLoadedVariable(
loaded_variable_key,
[&]() -> absl::StatusOr<
ifrt_serving::IfrtLoadedVariableRegistry::LoadedVariable> {
return ifrt_serving::IfrtLoadedVariableRegistry::LoadedVariable(
{.array = loaded_variable_future});
}));
restored_tensor_future.OnReady(
[ifrt_client = std::move(ifrt_client), &thread_pool = thread_pool,
checkpoint_loader_queue = checkpoint_loader_queue,
sharding_config = sharding_config,
loaded_variable_promise = std::move(loaded_variable_promise)](
absl::StatusOr<tensorflow::Tensor> restored_tensor) mutable {
if (!restored_tensor.ok()) {
loaded_variable_promise.Set(restored_tensor.status());
return;
}
checkpoint_loader_queue->AddTask(
[ifrt_client = ifrt_client, &thread_pool = thread_pool,
sharding_config = std::move(sharding_config),
restored_tensor = std::move(*restored_tensor),
loaded_variable_promise =
std::move(loaded_variable_promise)]() mutable {
absl::StatusOr<tsl::RCReference<xla::ifrt::Array>>
variable_array =
LoadIfrtVariable(ifrt_client, thread_pool,
restored_tensor, sharding_config);
loaded_variable_promise.Set(std::move(variable_array));
});
});
return absl::OkStatus();
}
}
} | #include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/resource_handle.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_matcher.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_config.pb.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
#include "tfrt/host_context/concurrent_work_queue.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
using tensorflow::test::TensorEq;
using tsl::testing::StatusIs;
TEST(ShardingUtilsTest, ShardTensorToIfrtLoadedVariableNotFoundWrongName) {
auto input_tensor =
test::AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({2, 2}));
Tensor variable_handle(DT_RESOURCE, TensorShape({}));
ResourceHandle resource_handle;
resource_handle.set_name("var_x");
resource_handle.set_dtypes_and_shapes({{
DT_INT32,
TensorShape({2, 2}),
}});
variable_handle.flat<ResourceHandle>()(0) = std::move(resource_handle);
IfrtRestoreTensorRegistry restored_tensor_registry;
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
xla::ifrt::test_util::GetClient());
constexpr int kMaxParallelism = 16;
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), tsl::ThreadOptions(),
"Resharding", kMaxParallelism);
IfrtLoadedVariableRegistry loaded_variable_registry;
auto restore_work_queue = tfrt::CreateMultiThreadedWorkQueue(
4, 4);
VariableDeviceShardingConfigProto sharding_config;
sharding_config.add_device_ids(0);
auto promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future = xla::ifrt::Future<tensorflow::Tensor>(promise);
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info = {
false,
GetDtypeAndShape(variable_handle.scalar<ResourceHandle>()()).value(),
future};
TF_ASSERT_OK(restored_tensor_registry.TryRegister("var_x_wrong",
restored_tensor_info));
promise.Set(input_tensor);
EXPECT_THAT(
AsyncLoadRestoredTensorAsIfrtLoadedVariable(
"var_x", client, thread_pool, restored_tensor_registry,
loaded_variable_registry, restore_work_queue.get(), sharding_config),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(ShardingUtilsTest, ShardTensorToIfrtLoadedVariableSucceed) {
auto input_tensor =
test::AsTensor<int32_t>({1, 2, 3, 4}, TensorShape({2, 2}));
Tensor variable_handle(DT_RESOURCE, TensorShape({}));
ResourceHandle resource_handle;
resource_handle.set_name("var_x");
resource_handle.set_dtypes_and_shapes({{
DT_INT32,
TensorShape({2, 2}),
}});
variable_handle.flat<ResourceHandle>()(0) = std::move(resource_handle);
IfrtRestoreTensorRegistry restored_tensor_registry;
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
xla::ifrt::test_util::GetClient());
constexpr int kMaxParallelism = 16;
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), tsl::ThreadOptions(),
"Resharding", kMaxParallelism);
IfrtLoadedVariableRegistry loaded_variable_registry;
auto restore_work_queue = tfrt::CreateMultiThreadedWorkQueue(
4, 4);
VariableDeviceShardingConfigProto sharding_config;
sharding_config.add_device_ids(0);
auto promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future = xla::ifrt::Future<tensorflow::Tensor>(promise);
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info = {
false,
GetDtypeAndShape(variable_handle.scalar<ResourceHandle>()()).value(),
future};
TF_ASSERT_OK(
restored_tensor_registry.TryRegister("var_x", restored_tensor_info));
TF_ASSERT_OK(AsyncLoadRestoredTensorAsIfrtLoadedVariable(
"var_x", client, thread_pool, restored_tensor_registry,
loaded_variable_registry, restore_work_queue.get(), sharding_config));
promise.Set(input_tensor);
IfrtLoadedVariableRegistry::Key key{
.device_ids = {0},
.input_name = "var_x",
};
TF_ASSERT_OK_AND_ASSIGN(auto v,
loaded_variable_registry.GetLoadedVariable(key));
TF_ASSERT_OK_AND_ASSIGN(auto assembled_array, v.array.Await());
TF_ASSERT_OK_AND_ASSIGN(auto disassembled_arrays,
assembled_array->DisassembleIntoSingleDeviceArrays(
xla::ifrt::ArrayCopySemantics::kAlwaysCopy));
ASSERT_EQ(disassembled_arrays.size(), 1);
for (int i = 0; i < disassembled_arrays.size(); ++i) {
tensorflow::Tensor host_tensor(input_tensor.dtype(), input_tensor.shape());
TF_ASSERT_OK(
disassembled_arrays[i]
->CopyToHostBuffer(host_tensor.data(), {},
xla::ifrt::ArrayCopySemantics::kAlwaysCopy)
.Await());
EXPECT_THAT(host_tensor, TensorEq(input_tensor));
}
}
}
}
} |
1,332 | cpp | tensorflow/tensorflow | ifrt_executable_registry | tensorflow/core/tfrt/ifrt/ifrt_executable_registry.cc | tensorflow/core/tfrt/ifrt/ifrt_executable_registry_test.cc | #ifndef TENSORFLOW_CORE_TFRT_IFRT_IFRT_EXECUTABLE_REGISTRY_H_
#define TENSORFLOW_CORE_TFRT_IFRT_IFRT_EXECUTABLE_REGISTRY_H_
#include <cstdint>
#include <memory>
#include <optional>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h"
namespace tensorflow {
namespace ifrt_serving {
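// A process-wide registry of compiled serving executables keyed by program
// id. Register() returns an RAII Handle that unregisters the program when it
// is destroyed or released. Illustrative usage (a sketch mirroring the unit
// tests further below):
//
//   TF_ASSIGN_OR_RETURN(auto handle,
//                       ServingExecutableRegistry::Register(
//                           program_id, std::move(executable)));
//   IfrtServingExecutable* exe = ServingExecutableRegistry::Lookup(program_id);
//   TF_RETURN_IF_ERROR(handle.Freeze());  // optionally make it immutable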
class ServingExecutableRegistry {
public:
class Handle {
public:
Handle();
Handle(Handle&& other);
Handle& operator=(Handle&& other);
Handle(const Handle&) = delete;
Handle& operator=(const Handle&) = delete;
~Handle();
std::optional<int64_t> program_id() const { return program_id_; }
void Release();
absl::Status Freeze();
private:
friend class ServingExecutableRegistry;
explicit Handle(int64_t program_id);
std::optional<int64_t> program_id_;
};
static absl::StatusOr<Handle> Register(
int64_t program_id, std::unique_ptr<IfrtServingExecutable> executable);
static IfrtServingExecutable* Lookup(int64_t program_id);
private:
friend class Handle;
static absl::Mutex mu_;
static absl::flat_hash_map<int64_t,
std::unique_ptr<IfrtServingExecutable>>* const
executables_ ABSL_GUARDED_BY(&mu_);
};
}
}
#endif
#include "tensorflow/core/tfrt/ifrt/ifrt_executable_registry.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h"
namespace tensorflow {
namespace ifrt_serving {
ServingExecutableRegistry::Handle::Handle(Handle&& other) {
*this = std::move(other);
}
ServingExecutableRegistry::Handle& ServingExecutableRegistry::Handle::operator=(
Handle&& other) {
if (this != &other) {
program_id_ = std::move(other.program_id_);
other.program_id_ = std::nullopt;
}
return *this;
}
ServingExecutableRegistry::Handle::~Handle() { Release(); }
absl::Status ServingExecutableRegistry::Handle::Freeze() {
if (!program_id_.has_value()) {
return absl::FailedPreconditionError("Program is not registered");
}
absl::MutexLock l(&ServingExecutableRegistry::mu_);
const auto it = ServingExecutableRegistry::executables_->find(*program_id_);
if (it == ServingExecutableRegistry::executables_->end()) {
return absl::NotFoundError(
absl::StrCat("Program ", *program_id_, " not found in the registry"));
}
VLOG(1) << "Freeze the program " << *program_id_ << " from signature '"
<< it->second->signature_name() << "' of model '"
<< it->second->model_name() << "'";
it->second->Freeze();
return absl::OkStatus();
}
void ServingExecutableRegistry::Handle::Release() {
if (!program_id_.has_value()) {
return;
}
absl::MutexLock l(&ServingExecutableRegistry::mu_);
const auto it = ServingExecutableRegistry::executables_->find(*program_id_);
if (it == ServingExecutableRegistry::executables_->end()) {
LOG(ERROR) << "Program " << *program_id_ << " not found in the registry";
return;
}
VLOG(1) << "Unregistering program " << *program_id_ << " from signature '"
<< it->second->signature_name() << "' of model '"
<< it->second->model_name() << "'";
ServingExecutableRegistry::executables_->erase(it);
program_id_ = std::nullopt;
}
ServingExecutableRegistry::Handle::Handle(int64_t program_id)
: program_id_(program_id) {}
absl::StatusOr<ServingExecutableRegistry::Handle>
ServingExecutableRegistry::Register(
int64_t program_id, std::unique_ptr<IfrtServingExecutable> executable) {
absl::MutexLock l(&mu_);
VLOG(1) << "Registering program " << program_id << " from signature '"
<< executable->signature_name() << "' of model '"
<< executable->model_name() << "'"
<< ", address is " << executable.get();
if (!executables_->insert({program_id, std::move(executable)}).second) {
return absl::AlreadyExistsError(absl::StrCat(
"Program ", program_id, " already exists in the program registry"));
}
return Handle(program_id);
}
IfrtServingExecutable* ServingExecutableRegistry::Lookup(int64_t program_id) {
absl::ReaderMutexLock l(&mu_);
VLOG(1) << "Looking up program " << program_id;
const auto it = executables_->find(program_id);
return it != executables_->end() ? it->second.get() : nullptr;
}
ABSL_CONST_INIT absl::Mutex ServingExecutableRegistry::mu_(absl::kConstInit);
absl::flat_hash_map<int64_t, std::unique_ptr<IfrtServingExecutable>>* const
ServingExecutableRegistry::executables_ =
new absl::flat_hash_map<int64_t,
std::unique_ptr<IfrtServingExecutable>>();
}
} | #include "tensorflow/core/tfrt/ifrt/ifrt_executable_registry.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/InitAllDialects.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/test_util.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h"
#include "tensorflow/core/tfrt/ifrt/tf_host_callback.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tfrt/host_context/concurrent_work_queue.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
const tsl::thread::ThreadPool& GetThreadPool() {
constexpr int kMaxParallelism = 16;
static auto* const thread_pool =
new tsl::thread::ThreadPool(tsl::Env::Default(), tsl::ThreadOptions(),
"IfrtSharding", kMaxParallelism);
return *thread_pool;
}
absl::StatusOr<std::unique_ptr<IfrtServingExecutable>>
CreateIfrtServingExecutable(mlir::MLIRContext& context, int64_t program_id) {
constexpr absl::string_view kDataDirectory =
"tensorflow/core/tfrt/ifrt/testdata";
std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
absl::StrCat(kDataDirectory, "/executable.mlir"));
mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context);
if (!mlir_module) {
return absl::InvalidArgumentError(
absl::StrCat("Failed to parse MLIR file: ", mlir_module_path));
}
TF_ASSIGN_OR_RETURN(std::shared_ptr<xla::ifrt::Client> client,
xla::ifrt::test_util::GetClient());
IfrtLoadedVariableRegistry ifrt_loaded_variable_registry;
IfrtRestoreTensorRegistry ifrt_restore_tensor_registry;
std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue =
tfrt::CreateMultiThreadedWorkQueue(
4, 4);
TF_ASSIGN_OR_RETURN(std::unique_ptr<tensorflow::StaticDeviceMgr> device_mgr,
CreateTfStaticDeviceMgr());
return IfrtServingExecutable::Create(
program_id, "test", "main", std::move(mlir_module), client,
&GetThreadPool(), &ifrt_loaded_variable_registry,
&ifrt_restore_tensor_registry, work_queue.get(), device_mgr.get(),
tensorflow::IdentityShapeRepresentationFn(),
nullptr,
nullptr);
}
TEST(IfrtExecutableRegistry, Basic) {
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
int64_t program_id = 1234;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<IfrtServingExecutable> executable,
CreateIfrtServingExecutable(context, program_id));
IfrtServingExecutable* raw_ptr = executable.get();
TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register(
program_id, std::move(executable)));
IfrtServingExecutable* executable_ptr =
ServingExecutableRegistry::Lookup(program_id);
ASSERT_EQ(executable_ptr, raw_ptr);
}
TEST(IfrtExecutableRegistry, DuplicateRegistrationFails) {
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
int64_t program_id = 1234;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<IfrtServingExecutable> executable,
CreateIfrtServingExecutable(context, program_id));
TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register(
program_id, std::move(executable)));
EXPECT_THAT(
ServingExecutableRegistry::Register(program_id, std::move(executable)),
testing::StatusIs(absl::StatusCode::kAlreadyExists));
}
TEST(IfrtExecutableRegistry, ReleaseOk) {
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
int64_t program_id = 1234;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<IfrtServingExecutable> executable,
CreateIfrtServingExecutable(context, program_id));
TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register(
program_id, std::move(executable)));
handle.Release();
EXPECT_EQ(ServingExecutableRegistry::Lookup(program_id), nullptr);
}
TEST(IfrtExecutableRegistry, FreezeOk) {
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
int64_t program_id = 1234;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<IfrtServingExecutable> executable,
CreateIfrtServingExecutable(context, program_id));
IfrtServingExecutable* raw_ptr = executable.get();
TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register(
program_id, std::move(executable)));
ASSERT_OK(handle.Freeze());
IfrtServingExecutable* executable_ptr =
ServingExecutableRegistry::Lookup(program_id);
ASSERT_EQ(executable_ptr, raw_ptr);
}
TEST(IfrtExecutableRegistry, FreezeFailedProgramNotRegistered) {
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
int64_t program_id = 1234;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<IfrtServingExecutable> executable,
CreateIfrtServingExecutable(context, program_id));
TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register(
program_id, std::move(executable)));
handle.Release();
EXPECT_THAT(handle.Freeze(),
testing::StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST(IfrtExecutableRegistry, InvalidProgramIdShallReturnNull) {
int64_t program_id = 1234;
IfrtServingExecutable* executable_ptr =
ServingExecutableRegistry::Lookup(program_id);
ASSERT_EQ(executable_ptr, nullptr);
}
}
}
} |
1,333 | cpp | tensorflow/tensorflow | sharding_utils | tensorflow/core/tfrt/ifrt/sharding_utils.cc | tensorflow/core/tfrt/ifrt/sharding_utils_test.cc | #ifndef TENSORFLOW_CORE_TPU_KERNELS_SHARDING_UTILS_H_
#define TENSORFLOW_CORE_TPU_KERNELS_SHARDING_UTILS_H_
#include <cstdint>
#include <functional>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace sharding_internal {
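// Verifies that `input` has rank in (0, 8], that its rank matches the length
// of `num_splits`, and that each padded dimension divides evenly by its split
// count.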
absl::Status ValidateShapesForSlice(absl::string_view input_name,
const Tensor* input,
const std::vector<int32_t>& num_splits,
const std::vector<int32_t>& paddings);
template <int Rank>
Eigen::DSizes<Eigen::DenseIndex, Rank> TF_ATTRIBUTE_NOINLINE
ShapeAsEigenDSizes(const TensorShape& shape);
template <int Rank>
Eigen::DSizes<Eigen::DenseIndex, Rank> ShapeAsEigenDSizes(
const TensorShape& shape) {
return shape.AsEigenDSizes<Rank>();
}
}
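// Converts the linear slice `index` into per-dimension start offsets, given
// the per-dimension partition counts and the shape of a single slice.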
template <int Rank>
Eigen::DSizes<Eigen::DenseIndex, Rank> GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, Rank>& slice_shape, int index);
template <>
Eigen::DSizes<Eigen::DenseIndex, 1> TF_ATTRIBUTE_NOINLINE GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 1>& slice_shape, int index);
template <>
Eigen::DSizes<Eigen::DenseIndex, 2> TF_ATTRIBUTE_NOINLINE GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 2>& slice_shape, int index);
template <>
Eigen::DSizes<Eigen::DenseIndex, 3> TF_ATTRIBUTE_NOINLINE GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 3>& slice_shape, int index);
template <>
Eigen::DSizes<Eigen::DenseIndex, 4> TF_ATTRIBUTE_NOINLINE GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 4>& slice_shape, int index);
template <>
Eigen::DSizes<Eigen::DenseIndex, 5> TF_ATTRIBUTE_NOINLINE GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 5>& slice_shape, int index);
template <>
Eigen::DSizes<Eigen::DenseIndex, 6> TF_ATTRIBUTE_NOINLINE GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 6>& slice_shape, int index);
template <>
Eigen::DSizes<Eigen::DenseIndex, 7> TF_ATTRIBUTE_NOINLINE GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 7>& slice_shape, int index);
template <>
Eigen::DSizes<Eigen::DenseIndex, 8> TF_ATTRIBUTE_NOINLINE GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 8>& slice_shape, int index);
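// Fallback definition; ranks 1 through 8 are always handled by the explicit
// specializations above, so this returns a default (all-zero) index.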
template <int Rank>
Eigen::DSizes<Eigen::DenseIndex, Rank> GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, Rank>& slice_shape,
const int index) {
return Eigen::DSizes<Eigen::DenseIndex, Rank>();
}
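// Splits a tensor into `num_slices` equally shaped slices along each
// dimension, zero-filling slices that extend past the (padded) input bounds.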
template <typename Device, typename T>
class XlaNDSplitter {
public:
static absl::StatusOr<XlaNDSplitter<Device, T>> Create(
const std::vector<int32_t>& num_splits, int num_slices,
const std::vector<int32_t>& paddings, bool has_paddings) {
if (num_splits.size() != paddings.size()) {
return absl::InvalidArgumentError(
absl::StrCat("num_splits size ", num_splits.size(),
" mismatch with paddings size ", paddings.size(), "."));
}
int splits_cnt = 1;
for (auto split : num_splits) {
splits_cnt *= split;
}
if (num_slices != splits_cnt) {
return absl::InvalidArgumentError(absl::StrCat(
"Expect num_slices ", splits_cnt, " but got ", num_slices));
}
return XlaNDSplitter<Device, T>(num_splits, num_slices, paddings,
has_paddings);
}
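  // Slices `input` into outputs created by `allocate_output_fn`. When there is
  // a single slice and no padding, the input is forwarded unchanged via
  // `assign_or_copy_value_fn`.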
absl::Status Split(
const Tensor* input, absl::string_view input_name,
const std::function<Status(const Tensor&)>& assign_or_copy_value_fn,
const std::function<Status(int index, const TensorShape& shape,
Tensor** tensor)>& allocate_output_fn,
const Device& device) {
if (num_splits_.size() != paddings_.size()) {
return absl::InvalidArgumentError(
absl::StrCat("num_splits size ", num_splits_.size(),
" mismatch with paddings size ", paddings_.size(), "."));
}
const int rank = input->shape().dims();
const auto& input_shape = input->shape().dim_sizes();
TF_RETURN_IF_ERROR(sharding_internal::ValidateShapesForSlice(
input_name, input, num_splits_, paddings_));
TensorShape output_slice_shape;
for (int i = 0; i < rank; ++i) {
output_slice_shape.AddDim((input_shape[i] + paddings_[i]) /
((num_slices_ == 1) ? 1 : num_splits_[i]));
}
if (num_slices_ == 1 && !has_paddings_) {
TF_RETURN_IF_ERROR(assign_or_copy_value_fn(*input));
} else {
std::vector<Tensor*> output_slices(num_slices_);
for (int i = 0; i < num_slices_; i++) {
TF_RETURN_IF_ERROR(allocate_output_fn(
i, output_slice_shape, &output_slices[i]));
}
if (rank == 1) {
SliceAndMaybePad<1>(device, input, input_shape, output_slice_shape,
output_slices);
} else if (rank == 2) {
SliceAndMaybePad<2>(device, input, input_shape, output_slice_shape,
output_slices);
} else if (rank == 3) {
SliceAndMaybePad<3>(device, input, input_shape, output_slice_shape,
output_slices);
} else if (rank == 4) {
SliceAndMaybePad<4>(device, input, input_shape, output_slice_shape,
output_slices);
} else if (rank == 5) {
SliceAndMaybePad<5>(device, input, input_shape, output_slice_shape,
output_slices);
} else if (rank == 6) {
SliceAndMaybePad<6>(device, input, input_shape, output_slice_shape,
output_slices);
} else if (rank == 7) {
SliceAndMaybePad<7>(device, input, input_shape, output_slice_shape,
output_slices);
} else if (rank == 8) {
SliceAndMaybePad<8>(device, input, input_shape, output_slice_shape,
output_slices);
}
}
return absl::OkStatus();
}
private:
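  // Precomputes, for one output slice, its start indices in the input and how
  // much of the slice holds real input data versus padding.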
template <int Rank>
class SliceAndMaybePadState {
public:
int num_complete_pad_dims_;
int num_partial_pad_dims_;
TensorShape non_padded_slice_shape_;
Eigen::array<Eigen::IndexPair<int64_t>, Rank> slice_paddings_;
Eigen::DSizes<Eigen::DenseIndex, Rank> slice_indices_;
Eigen::DSizes<Eigen::DenseIndex, Rank> output_slice_shape_dsizes_;
Eigen::DSizes<Eigen::DenseIndex, Rank> non_padded_slice_shape_dsizes_;
TF_ATTRIBUTE_NOINLINE SliceAndMaybePadState(
absl::Span<const int32_t> num_splits,
const absl::Span<const int64_t> input_shape,
const TensorShape& output_slice_shape, int slice_index) {
output_slice_shape_dsizes_ =
sharding_internal::ShapeAsEigenDSizes<Rank>(output_slice_shape);
num_complete_pad_dims_ = 0;
num_partial_pad_dims_ = 0;
slice_indices_ = GetSliceIndices<Rank>(
num_splits, output_slice_shape_dsizes_, slice_index);
for (int dim = 0; dim < Rank; ++dim) {
const int64_t dim_size = input_shape[dim];
const int64_t out_dim = output_slice_shape_dsizes_[dim];
int64_t non_padded_dim = 0;
if (slice_indices_[dim] >= dim_size) {
slice_indices_[dim] = dim_size;
non_padded_dim = 0;
slice_paddings_[dim] = {0, out_dim};
num_complete_pad_dims_++;
} else if (slice_indices_[dim] + out_dim > dim_size) {
non_padded_dim = dim_size - slice_indices_[dim];
slice_paddings_[dim] = {0, out_dim - non_padded_dim};
num_partial_pad_dims_++;
} else {
non_padded_dim = out_dim;
}
non_padded_slice_shape_.AddDim(non_padded_dim);
}
non_padded_slice_shape_dsizes_ =
sharding_internal::ShapeAsEigenDSizes<Rank>(non_padded_slice_shape_);
}
};
std::vector<int32_t> num_splits_;
int num_slices_;
std::vector<int32_t> paddings_;
bool has_paddings_;
explicit XlaNDSplitter(const std::vector<int32_t>& num_splits, int num_slices,
const std::vector<int32_t>& paddings,
bool has_paddings)
: num_splits_(num_splits),
num_slices_(num_slices),
paddings_(paddings),
has_paddings_(has_paddings) {}
void TF_ATTRIBUTE_NOINLINE SetToConstant(Tensor* output_slice,
const Device& device) {
auto output_flat = output_slice->flat<T>();
output_flat.device(device) = output_flat.constant(T());
}
template <int Rank>
void TF_ATTRIBUTE_NOINLINE AssignFromInput(
Tensor* output_slice, const Device& device, const Tensor* input,
const Eigen::DSizes<Eigen::DenseIndex, Rank>& slice_indices,
const Eigen::DSizes<Eigen::DenseIndex, Rank>& output_slice_shape_dsizes) {
output_slice->tensor<T, Rank>().device(device) =
input->tensor<T, Rank>().slice(slice_indices,
output_slice_shape_dsizes);
}
template <int Rank>
void TF_ATTRIBUTE_NOINLINE
SliceAndMaybePad(const Device& device, const Tensor* input,
const absl::Span<const int64_t> input_shape,
const TensorShape& output_slice_shape,
const std::vector<Tensor*>& output_slices) {
const auto& input_tensor = input->tensor<T, Rank>();
for (int i = 0; i < num_slices_; ++i) {
Tensor* output_slice = output_slices[i];
SliceAndMaybePadState<Rank> r(num_splits_, input_shape,
output_slice_shape, i);
      // Zero-fill the slice whenever any part of it is padding. (The
      // `== Rank` case is subsumed by `num_complete_pad_dims_ > 0`.)
      if (r.num_complete_pad_dims_ > 0 || r.num_partial_pad_dims_ > 0) {
        SetToConstant(output_slice, device);
      }
      if (r.num_complete_pad_dims_ == Rank) {
        // The slice is entirely padding and was fully zeroed above.
      } else if (r.num_complete_pad_dims_ > 0 || r.num_partial_pad_dims_ > 0) {
output_slice->tensor<T, Rank>()
.slice(Eigen::DSizes<Eigen::DenseIndex, Rank>(),
r.non_padded_slice_shape_dsizes_)
.device(device) = input_tensor.slice(
r.slice_indices_, r.non_padded_slice_shape_dsizes_);
} else {
AssignFromInput<Rank>(output_slice, device, input, r.slice_indices_,
r.output_slice_shape_dsizes_);
}
}
}
};
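// Reassembles `num_slices` equally shaped slices into a single tensor,
// dropping any padding that falls outside the output shape.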
template <typename Device, typename T>
class XlaNDConcatenator {
public:
static absl::StatusOr<XlaNDConcatenator<Device, T>> Create(
const std::vector<int32_t>& num_concats, int num_slices,
const std::vector<int32_t>& paddings, bool has_paddings) {
if (num_concats.size() != paddings.size()) {
return absl::InvalidArgumentError(
absl::StrCat("num_concats size ", num_concats.size(),
" mismatch with paddings size ", paddings.size(), "."));
}
int concats_cnt = 1;
for (auto concat : num_concats) {
concats_cnt *= concat;
}
if (num_slices != concats_cnt) {
return absl::InvalidArgumentError(absl::StrCat(
"Expect num_slices ", concats_cnt, " but got ", num_slices));
}
return XlaNDConcatenator<Device, T>(num_concats, num_slices, paddings,
has_paddings);
}
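  // Concatenates `inputs` into the tensor returned by `get_output_fn`. When
  // there is a single slice and no padding, the input is forwarded unchanged
  // via `assign_or_copy_value_fn`.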
absl::Status ComputeInternal(
absl::Span<const Tensor> inputs,
const std::function<Status(const Tensor&)>& assign_or_copy_value_fn,
const std::function<absl::StatusOr<Tensor*>()>& get_output_fn,
const Device& device) {
const int rank = inputs[0].shape().dims();
if (rank < 1 || rank > 8) {
return absl::InvalidArgumentError(absl::StrCat(
"'inputs' tensors must have rank in range (0, 8], but got ", rank,
"."));
}
if (num_slices_ == 1 && !has_paddings_) {
return assign_or_copy_value_fn(inputs[0]);
}
TF_ASSIGN_OR_RETURN(Tensor * output, get_output_fn());
if (rank == 1) {
MaybeUnpadAndAssign<1>(device, inputs, output);
} else if (rank == 2) {
MaybeUnpadAndAssign<2>(device, inputs, output);
} else if (rank == 3) {
MaybeUnpadAndAssign<3>(device, inputs, output);
} else if (rank == 4) {
MaybeUnpadAndAssign<4>(device, inputs, output);
} else if (rank == 5) {
MaybeUnpadAndAssign<5>(device, inputs, output);
} else if (rank == 6) {
MaybeUnpadAndAssign<6>(device, inputs, output);
} else if (rank == 7) {
MaybeUnpadAndAssign<7>(device, inputs, output);
} else if (rank == 8) {
MaybeUnpadAndAssign<8>(device, inputs, output);
}
return absl::OkStatus();
}
private:
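  // Precomputes, for one input slice, where it lands in the output and how
  // much of it survives once padding is stripped.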
template <int Rank>
class MaybeUnpadAndAssignState {
public:
int num_complete_pad_dims_;
int num_partial_pad_dims_;
TensorShape non_padded_slice_shape_;
Eigen::DSizes<Eigen::DenseIndex, Rank> slice_shape_dsizes_;
Eigen::array<Eigen::IndexPair<int64_t>, Rank> slice_paddings_;
Eigen::DSizes<Eigen::DenseIndex, Rank> slice_indices_;
Eigen::DSizes<Eigen::DenseIndex, Rank> output_slice_shape_dsizes_;
Eigen::DSizes<Eigen::DenseIndex, Rank> non_padded_slice_shape_dsizes_;
TF_ATTRIBUTE_NOINLINE MaybeUnpadAndAssignState(
absl::Span<const int32_t> num_concats, const Tensor& input0,
Tensor* output, int slice_index) {
slice_shape_dsizes_ = input0.shape().AsEigenDSizes<Rank>();
slice_indices_ =
GetSliceIndices<Rank>(num_concats, slice_shape_dsizes_, slice_index);
num_complete_pad_dims_ = 0;
num_partial_pad_dims_ = 0;
for (int dim = 0; dim < Rank; ++dim) {
const int64_t dim_size = output->shape().dim_size(dim);
int64_t non_padded_dim = 0;
if (slice_indices_[dim] >= dim_size) {
slice_indices_[dim] = dim_size;
non_padded_dim = 0;
num_complete_pad_dims_++;
} else if (slice_indices_[dim] + slice_shape_dsizes_[dim] > dim_size) {
non_padded_dim = dim_size - slice_indices_[dim];
num_partial_pad_dims_++;
} else {
non_padded_dim = slice_shape_dsizes_[dim];
}
non_padded_slice_shape_.AddDim(non_padded_dim);
}
non_padded_slice_shape_dsizes_ =
non_padded_slice_shape_.AsEigenDSizes<Rank>();
}
};
std::vector<int32_t> num_concats_;
int num_slices_;
std::vector<int32_t> paddings_;
bool has_paddings_;
explicit TF_ATTRIBUTE_NOINLINE XlaNDConcatenator(
const std::vector<int32_t>& num_concats, int num_slices,
const std::vector<int32_t>& paddings, bool has_paddings)
: num_concats_(num_concats),
num_slices_(num_slices),
paddings_(paddings),
has_paddings_(has_paddings) {}
template <int Rank>
void TF_ATTRIBUTE_NOINLINE MaybeUnpadAndAssign(
const Device& device, absl::Span<const Tensor> inputs, Tensor* output) {
for (int i = 0; i < num_slices_; ++i) {
MaybeUnpadAndAssignState<Rank> r(num_concats_, inputs[0], output, i);
if (r.num_complete_pad_dims_ == Rank) {
continue;
} else if (r.num_complete_pad_dims_ > 0 || r.num_partial_pad_dims_ > 0) {
output->tensor<T, Rank>()
.slice(r.slice_indices_, r.non_padded_slice_shape_dsizes_)
.device(device) = inputs[i].tensor<T, Rank>().slice(
Eigen::DSizes<Eigen::DenseIndex, Rank>(),
r.non_padded_slice_shape_dsizes_);
} else {
output->tensor<T, Rank>()
.slice(r.slice_indices_, r.slice_shape_dsizes_)
.device(device) = inputs[i].tensor<T, Rank>();
}
}
}
};
}
#endif
#include "tensorflow/core/tpu/kernels/sharding_utils.h"
#include <cstdint>
#include <functional>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
namespace tensorflow {
namespace sharding_internal {
absl::Status ValidateShapesForSlice(absl::string_view input_name,
const Tensor* input,
const std::vector<int32_t>& num_splits,
const std::vector<int32_t>& paddings) {
const auto& ishape = input->shape();
Status s;
const int rank = ishape.dims();
const auto& input_shape = ishape.dim_sizes();
if (rank <= 0 || rank > 8) {
s = absl::InvalidArgumentError(absl::StrCat(
input_name, " must have rank in range (0, 8], but got ", rank, "."));
} else if (rank != num_splits.size()) {
s = absl::InvalidArgumentError(absl::StrCat(
input_name, " rank must be the same as 'num_splits' length ",
num_splits.size(), ", but got rank ", rank, "."));
} else {
for (int dim = 0; dim < rank; ++dim) {
const auto input_shape_dim = input_shape[dim];
const auto paddings_dim = paddings[dim];
const auto num_splits_dim = num_splits[dim];
if ((input_shape_dim + paddings_dim) % num_splits_dim != 0) {
s = absl::InvalidArgumentError(absl::StrCat(
input_name, " shape dimension ", dim, " (", input_shape_dim,
") with padding ", paddings_dim,
" must be evenly divisible by 'num_splits' ", num_splits_dim, "."));
break;
}
}
}
return s;
}
}
template <>
Eigen::DSizes<Eigen::DenseIndex, 1> GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 1>& slice_shape, const int index) {
Eigen::DSizes<Eigen::DenseIndex, 1> subscript;
subscript[0] = index * slice_shape[0];
return subscript;
}
template <>
Eigen::DSizes<Eigen::DenseIndex, 2> GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 2>& slice_shape, const int index) {
Eigen::DSizes<Eigen::DenseIndex, 2> subscript;
subscript[1] = (index % num_partitions[1]) * slice_shape[1];
subscript[0] = (index / num_partitions[1]) * slice_shape[0];
return subscript;
}
template <>
Eigen::DSizes<Eigen::DenseIndex, 3> GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 3>& slice_shape, const int index) {
Eigen::DSizes<Eigen::DenseIndex, 3> subscript;
subscript[2] = (index % num_partitions[2]) * slice_shape[2];
subscript[1] =
((index / num_partitions[2]) % num_partitions[1]) * slice_shape[1];
subscript[0] =
(index / (num_partitions[2] * num_partitions[1])) * slice_shape[0];
return subscript;
}
template <>
Eigen::DSizes<Eigen::DenseIndex, 4> GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 4>& slice_shape, const int index) {
Eigen::DSizes<Eigen::DenseIndex, 4> subscript;
subscript[3] = (index % num_partitions[3]) * slice_shape[3];
subscript[2] =
((index / num_partitions[3]) % num_partitions[2]) * slice_shape[2];
subscript[1] =
((index / (num_partitions[3] * num_partitions[2])) % num_partitions[1]) *
slice_shape[1];
subscript[0] =
(index / (num_partitions[3] * num_partitions[2] * num_partitions[1])) *
slice_shape[0];
return subscript;
}
template <>
Eigen::DSizes<Eigen::DenseIndex, 5> GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 5>& slice_shape, const int index) {
Eigen::DSizes<Eigen::DenseIndex, 5> subscript;
subscript[4] = (index % num_partitions[4]) * slice_shape[4];
subscript[3] =
((index / num_partitions[4]) % num_partitions[3]) * slice_shape[3];
subscript[2] =
((index / (num_partitions[4] * num_partitions[3])) % num_partitions[2]) *
slice_shape[2];
subscript[1] =
((index / (num_partitions[4] * num_partitions[3] * num_partitions[2])) %
num_partitions[1]) *
slice_shape[1];
subscript[0] = (index / (num_partitions[4] * num_partitions[3] *
num_partitions[2] * num_partitions[1])) *
slice_shape[0];
return subscript;
}
template <>
Eigen::DSizes<Eigen::DenseIndex, 6> GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 6>& slice_shape, const int index) {
Eigen::DSizes<Eigen::DenseIndex, 6> subscript;
subscript[5] = (index % num_partitions[5]) * slice_shape[5];
subscript[4] =
((index / num_partitions[5]) % num_partitions[4]) * slice_shape[4];
subscript[3] =
((index / (num_partitions[5] * num_partitions[4])) % num_partitions[3]) *
slice_shape[3];
subscript[2] =
((index / (num_partitions[5] * num_partitions[4] * num_partitions[3])) %
num_partitions[2]) *
slice_shape[2];
subscript[1] = ((index / (num_partitions[5] * num_partitions[4] *
num_partitions[3] * num_partitions[2])) %
num_partitions[1]) *
slice_shape[1];
subscript[0] =
(index / (num_partitions[5] * num_partitions[4] * num_partitions[3] *
num_partitions[2] * num_partitions[1])) *
slice_shape[0];
return subscript;
}
template <>
Eigen::DSizes<Eigen::DenseIndex, 7> GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 7>& slice_shape, const int index) {
Eigen::DSizes<Eigen::DenseIndex, 7> subscript;
subscript[6] = (index % num_partitions[6]) * slice_shape[6];
subscript[5] =
((index / num_partitions[6]) % num_partitions[5]) * slice_shape[5];
subscript[4] =
((index / (num_partitions[6] * num_partitions[5])) % num_partitions[4]) *
slice_shape[4];
subscript[3] =
((index / (num_partitions[6] * num_partitions[5] * num_partitions[4])) %
num_partitions[3]) *
slice_shape[3];
subscript[2] = ((index / (num_partitions[6] * num_partitions[5] *
num_partitions[4] * num_partitions[3])) %
num_partitions[2]) *
slice_shape[2];
subscript[1] =
((index / (num_partitions[6] * num_partitions[5] * num_partitions[4] *
num_partitions[3] * num_partitions[2])) %
num_partitions[1]) *
slice_shape[1];
subscript[0] =
(index / (num_partitions[6] * num_partitions[5] * num_partitions[4] *
num_partitions[3] * num_partitions[2] * num_partitions[1])) *
slice_shape[0];
return subscript;
}
template <>
Eigen::DSizes<Eigen::DenseIndex, 8> GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 8>& slice_shape, const int index) {
Eigen::DSizes<Eigen::DenseIndex, 8> subscript;
subscript[7] = (index % num_partitions[7]) * slice_shape[7];
subscript[6] =
((index / num_partitions[7]) % num_partitions[6]) * slice_shape[6];
subscript[5] =
((index / (num_partitions[7] * num_partitions[6])) % num_partitions[5]) *
slice_shape[5];
subscript[4] =
((index / (num_partitions[7] * num_partitions[6] * num_partitions[5])) %
num_partitions[4]) *
slice_shape[4];
subscript[3] = ((index / (num_partitions[7] * num_partitions[6] *
num_partitions[5] * num_partitions[4])) %
num_partitions[3]) *
slice_shape[3];
subscript[2] =
((index / (num_partitions[7] * num_partitions[6] * num_partitions[5] *
num_partitions[4] * num_partitions[3])) %
num_partitions[2]) *
slice_shape[2];
subscript[1] =
((index / (num_partitions[7] * num_partitions[6] * num_partitions[5] *
num_partitions[4] * num_partitions[3] * num_partitions[2])) %
num_partitions[1]) *
slice_shape[1];
subscript[0] =
(index / (num_partitions[7] * num_partitions[6] * num_partitions[5] *
num_partitions[4] * num_partitions[3] * num_partitions[2] *
num_partitions[1])) *
slice_shape[0];
return subscript;
}
} | #define EIGEN_USE_THREADS
#include "tensorflow/core/tpu/kernels/sharding_utils.h"
#include <cstdint>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace tensorflow {
namespace {
Eigen::ThreadPoolDevice CreateThreadPoolDevice() {
constexpr int kMaxParallelism = 16;
auto thread_pool = std::make_unique<tsl::thread::ThreadPool>(
tsl::Env::Default(), tsl::ThreadOptions(), "Resharding", kMaxParallelism);
Eigen::ThreadPoolDevice device(thread_pool->AsEigenThreadPool(),
kMaxParallelism);
return device;
}
TEST(XlaNDSplitterTest, NoSplits) {
auto device = CreateThreadPoolDevice();
const TensorShape input_shape({2, 2, 2});
const std::vector<int32_t> num_splits = {1, 1, 1};
const std::vector<int> paddings(num_splits.size(), 0);
const int num_outputs = 1;
auto input_tensor =
test::AsTensor<int32_t>({0, 1, 2, 3, 4, 5, 6, 7}, input_shape);
std::vector<Tensor> output_tensors;
output_tensors.resize(num_outputs);
auto allocate_output_fn = [&](int i, const TensorShape& output_slice_shape,
Tensor** tensor) {
if (i < 0 || i >= output_tensors.size()) {
return absl::InvalidArgumentError(absl::StrCat(
"Index ", i, " out of range [0, ", output_tensors.size(), "]"));
}
output_tensors[i] = Tensor(tensorflow::DT_INT32, output_slice_shape);
*tensor = &output_tensors[i];
return absl::OkStatus();
};
auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
output_tensors[0] = input;
return absl::OkStatus();
};
TF_ASSERT_OK_AND_ASSIGN(
auto splitter, (XlaNDSplitter<Eigen::ThreadPoolDevice, int32_t>::Create(
num_splits, num_outputs, paddings,
false)));
TF_ASSERT_OK(splitter.Split(&input_tensor, "test", assign_or_copy_value_fn,
allocate_output_fn, device));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0], test::AsTensor<int32_t>({0, 1, 2, 3, 4, 5, 6, 7},
TensorShape({2, 2, 2})));
}
TEST(XlaNDSplitterTest, NoSplitsWithPadding) {
auto device = CreateThreadPoolDevice();
const TensorShape input_shape({2, 1, 1});
const std::vector<int32_t> num_splits = {1, 1, 1};
const std::vector<int> paddings = {0, 1, 1};
const int num_outputs = 1;
auto input_tensor = test::AsTensor<int32_t>({0, 1}, input_shape);
std::vector<Tensor> output_tensors;
output_tensors.resize(num_outputs);
auto allocate_output_fn = [&](int i, const TensorShape& output_slice_shape,
Tensor** tensor) {
if (i < 0 || i >= output_tensors.size()) {
return absl::InvalidArgumentError(absl::StrCat(
"Index ", i, " out of range [0, ", output_tensors.size(), "]"));
}
output_tensors[i] = Tensor(tensorflow::DT_INT32, output_slice_shape);
*tensor = &output_tensors[i];
return absl::OkStatus();
};
auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
output_tensors[0] = input;
return absl::OkStatus();
};
TF_ASSERT_OK_AND_ASSIGN(
auto splitter, (XlaNDSplitter<Eigen::ThreadPoolDevice, int32_t>::Create(
num_splits, num_outputs, paddings,
true)));
TF_ASSERT_OK(splitter.Split(&input_tensor, "test", assign_or_copy_value_fn,
allocate_output_fn, device));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0], test::AsTensor<int32_t>({0, 0, 0, 0, 1, 0, 0, 0},
TensorShape({2, 2, 2})));
}
TEST(XlaNDSplitterTest, SplitNoPadding) {
auto device = CreateThreadPoolDevice();
const TensorShape input_shape({4, 4});
const std::vector<int32_t> num_splits = {2, 2};
const std::vector<int32_t> paddings(num_splits.size(), 0);
const int num_outputs = 4;
auto input_tensor = test::AsTensor<int32_t>(
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, input_shape);
std::vector<Tensor> output_tensors;
output_tensors.resize(num_outputs);
auto allocate_output_fn = [&](int i, const TensorShape& output_slice_shape,
Tensor** tensor) {
if (i < 0 || i >= output_tensors.size()) {
return absl::InvalidArgumentError(absl::StrCat(
"Index ", i, " out of range [0, ", output_tensors.size(), "]"));
}
output_tensors[i] = Tensor(tensorflow::DT_INT32, output_slice_shape);
*tensor = &output_tensors[i];
return absl::OkStatus();
};
auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
output_tensors[0] = input;
return absl::OkStatus();
};
TF_ASSERT_OK_AND_ASSIGN(
auto splitter, (XlaNDSplitter<Eigen::ThreadPoolDevice, int32_t>::Create(
num_splits, num_outputs, paddings,
true)));
TF_ASSERT_OK(splitter.Split(&input_tensor, "test", assign_or_copy_value_fn,
allocate_output_fn, device));
ASSERT_EQ(output_tensors.size(), num_outputs);
test::ExpectTensorEqual<int32_t>(
output_tensors[0],
test::AsTensor<int32_t>({0, 1, 4, 5}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[1],
test::AsTensor<int32_t>({2, 3, 6, 7}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[2],
test::AsTensor<int32_t>({8, 9, 12, 13}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[3],
test::AsTensor<int32_t>({10, 11, 14, 15}, TensorShape({2, 2})));
}
TEST(XlaNDSplitterTest, SplitPartialPadding) {
auto device = CreateThreadPoolDevice();
const TensorShape input_shape({3, 3});
const std::vector<int32_t> num_splits = {2, 2};
const std::vector<int32_t> paddings = {1, 1};
const int num_outputs = 4;
auto input_tensor =
test::AsTensor<int32_t>({0, 1, 2, 3, 4, 5, 6, 7, 8}, input_shape);
std::vector<Tensor> output_tensors;
output_tensors.resize(num_outputs);
auto allocate_output_fn = [&](int i, const TensorShape& output_slice_shape,
Tensor** tensor) {
if (i < 0 || i >= output_tensors.size()) {
return absl::InvalidArgumentError(absl::StrCat(
"Index ", i, " out of range [0, ", output_tensors.size(), "]"));
}
output_tensors[i] = Tensor(tensorflow::DT_INT32, output_slice_shape);
*tensor = &output_tensors[i];
return absl::OkStatus();
};
auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
output_tensors[0] = input;
return absl::OkStatus();
};
TF_ASSERT_OK_AND_ASSIGN(
auto splitter, (XlaNDSplitter<Eigen::ThreadPoolDevice, int32_t>::Create(
num_splits, num_outputs, paddings,
true)));
TF_ASSERT_OK(splitter.Split(&input_tensor, "test", assign_or_copy_value_fn,
allocate_output_fn, device));
ASSERT_EQ(output_tensors.size(), num_outputs);
test::ExpectTensorEqual<int32_t>(
output_tensors[0],
test::AsTensor<int32_t>({0, 1, 3, 4}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[1],
test::AsTensor<int32_t>({2, 0, 5, 0}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[2],
test::AsTensor<int32_t>({6, 7, 0, 0}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[3],
test::AsTensor<int32_t>({8, 0, 0, 0}, TensorShape({2, 2})));
}
TEST(XlaNDSplitterTest, SplitCompletePadding) {
auto device = CreateThreadPoolDevice();
const TensorShape input_shape({2, 1});
const std::vector<int32_t> num_splits = {2, 2};
const std::vector<int32_t> paddings = {2, 3};
const int num_outputs = 4;
auto input_tensor = test::AsTensor<int32_t>({0, 1}, input_shape);
std::vector<Tensor> output_tensors;
output_tensors.resize(num_outputs);
auto allocate_output_fn = [&](int i, const TensorShape& output_slice_shape,
Tensor** tensor) {
if (i < 0 || i >= output_tensors.size()) {
return absl::InvalidArgumentError(absl::StrCat(
"Index ", i, " out of range [0, ", output_tensors.size(), "]"));
}
output_tensors[i] = Tensor(tensorflow::DT_INT32, output_slice_shape);
*tensor = &output_tensors[i];
return absl::OkStatus();
};
auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
output_tensors[0] = input;
return absl::OkStatus();
};
TF_ASSERT_OK_AND_ASSIGN(
auto splitter, (XlaNDSplitter<Eigen::ThreadPoolDevice, int32_t>::Create(
num_splits, num_outputs, paddings,
true)));
TF_ASSERT_OK(splitter.Split(&input_tensor, "test", assign_or_copy_value_fn,
allocate_output_fn, device));
ASSERT_EQ(output_tensors.size(), num_outputs);
test::ExpectTensorEqual<int32_t>(
output_tensors[0],
test::AsTensor<int32_t>({0, 0, 1, 0}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[1],
test::AsTensor<int32_t>({0, 0, 0, 0}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[2],
test::AsTensor<int32_t>({0, 0, 0, 0}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[3],
test::AsTensor<int32_t>({0, 0, 0, 0}, TensorShape({2, 2})));
}
TEST(XlaNDConcatenatorTest, NoConcats) {
auto device = CreateThreadPoolDevice();
const TensorShape input_shape({2, 2, 2});
const TensorShape output_shape({2, 2, 2});
const std::vector<int32_t> num_concats = {1, 1, 1};
const std::vector<int> paddings(num_concats.size(), 0);
int num_slices = 1;
auto tensor0 = test::AsTensor<int32_t>({0, 1, 2, 3, 4, 5, 6, 7}, input_shape);
std::vector<Tensor> input_tensors;
input_tensors.push_back(tensor0);
std::vector<Tensor> output_tensors;
output_tensors.reserve(1);
auto get_output_fn = [&]() {
output_tensors.push_back(Tensor(tensorflow::DT_INT32, output_shape));
return &output_tensors.back();
};
auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
output_tensors.push_back(input);
return absl::OkStatus();
};
TF_ASSERT_OK_AND_ASSIGN(
auto concatenator,
(XlaNDConcatenator<Eigen::ThreadPoolDevice, int32_t>::Create(
num_concats, num_slices, paddings,
true)));
TF_ASSERT_OK(concatenator.ComputeInternal(absl::MakeSpan(input_tensors),
assign_or_copy_value_fn,
get_output_fn, device));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0], test::AsTensor<int32_t>({0, 1, 2, 3, 4, 5, 6, 7},
TensorShape({2, 2, 2})));
}
TEST(XlaNDConcatenatorTest, ConcatNoPadding) {
auto device = CreateThreadPoolDevice();
const TensorShape input_shape({2, 2});
const TensorShape output_shape({4, 4});
const std::vector<int32_t> num_concats = {2, 2};
const std::vector<int> paddings(num_concats.size(), 0);
int num_slices = 4;
auto tensor0 = test::AsTensor<int32_t>({0, 1, 2, 3}, input_shape);
auto tensor1 = test::AsTensor<int32_t>({4, 5, 6, 7}, input_shape);
auto tensor2 = test::AsTensor<int32_t>({8, 9, 10, 11}, input_shape);
auto tensor3 = test::AsTensor<int32_t>({12, 13, 14, 15}, input_shape);
std::vector<Tensor> input_tensors;
input_tensors.push_back(tensor0);
input_tensors.push_back(tensor1);
input_tensors.push_back(tensor2);
input_tensors.push_back(tensor3);
std::vector<Tensor> output_tensors;
output_tensors.reserve(1);
auto get_output_fn = [&]() {
output_tensors.push_back(Tensor(tensorflow::DT_INT32, output_shape));
return &output_tensors.back();
};
auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
output_tensors.push_back(input);
return absl::OkStatus();
};
TF_ASSERT_OK_AND_ASSIGN(
auto concatenator,
(XlaNDConcatenator<Eigen::ThreadPoolDevice, int32_t>::Create(
num_concats, num_slices, paddings,
true)));
TF_ASSERT_OK(concatenator.ComputeInternal(absl::MakeSpan(input_tensors),
assign_or_copy_value_fn,
get_output_fn, device));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0], test::AsTensor<int32_t>({0, 1, 4, 5, 2, 3, 6, 7, 8, 9,
12, 13, 10, 11, 14, 15},
TensorShape({4, 4})));
}
TEST(XlaNDConcatenatorTest, ConcatPartialPadding) {
auto device = CreateThreadPoolDevice();
const TensorShape input_shape({2, 2});
const TensorShape output_shape({3, 3});
const std::vector<int32_t> num_concats = {2, 2};
const std::vector<int> paddings = {1, 1};
int num_slices = 4;
auto tensor0 = test::AsTensor<int32_t>({0, 1, 2, 3}, input_shape);
auto tensor1 = test::AsTensor<int32_t>({4, 5, 6, 7}, input_shape);
auto tensor2 = test::AsTensor<int32_t>({8, 9, 10, 11}, input_shape);
auto tensor3 = test::AsTensor<int32_t>({12, 13, 14, 15}, input_shape);
std::vector<Tensor> input_tensors;
input_tensors.push_back(tensor0);
input_tensors.push_back(tensor1);
input_tensors.push_back(tensor2);
input_tensors.push_back(tensor3);
std::vector<Tensor> output_tensors;
output_tensors.reserve(1);
auto get_output_fn = [&]() {
output_tensors.push_back(Tensor(tensorflow::DT_INT32, output_shape));
return &output_tensors.back();
};
auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
output_tensors.push_back(input);
return absl::OkStatus();
};
TF_ASSERT_OK_AND_ASSIGN(
auto concatenator,
(XlaNDConcatenator<Eigen::ThreadPoolDevice, int32_t>::Create(
num_concats, num_slices, paddings,
true)));
TF_ASSERT_OK(concatenator.ComputeInternal(absl::MakeSpan(input_tensors),
assign_or_copy_value_fn,
get_output_fn, device));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0], test::AsTensor<int32_t>({0, 1, 4, 2, 3, 6, 8, 9, 12},
TensorShape({3, 3})));
}
TEST(XlaNDConcatenatorTest, ConcatCompletePadding) {
auto device = CreateThreadPoolDevice();
const TensorShape input_shape({2, 2});
const TensorShape output_shape({2, 2});
const std::vector<int32_t> num_concats = {2, 2};
const std::vector<int> paddings = {2, 2};
int num_slices = 4;
auto tensor0 = test::AsTensor<int32_t>({0, 1, 2, 3}, input_shape);
auto tensor1 = test::AsTensor<int32_t>({4, 5, 6, 7}, input_shape);
auto tensor2 = test::AsTensor<int32_t>({8, 9, 10, 11}, input_shape);
auto tensor3 = test::AsTensor<int32_t>({12, 13, 14, 15}, input_shape);
std::vector<Tensor> input_tensors;
input_tensors.push_back(tensor0);
input_tensors.push_back(tensor1);
input_tensors.push_back(tensor2);
input_tensors.push_back(tensor3);
std::vector<Tensor> output_tensors;
output_tensors.reserve(1);
auto get_output_fn = [&]() {
output_tensors.push_back(Tensor(tensorflow::DT_INT32, output_shape));
return &output_tensors.back();
};
auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
output_tensors.push_back(input);
return absl::OkStatus();
};
TF_ASSERT_OK_AND_ASSIGN(
auto concatenator,
(XlaNDConcatenator<Eigen::ThreadPoolDevice, int32_t>::Create(
num_concats, num_slices, paddings,
true)));
TF_ASSERT_OK(concatenator.ComputeInternal(absl::MakeSpan(input_tensors),
assign_or_copy_value_fn,
get_output_fn, device));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0],
test::AsTensor<int32_t>({0, 1, 2, 3}, TensorShape({2, 2})));
}
}
} |
1,334 | cpp | tensorflow/tensorflow | tf_host_callback | tensorflow/core/tfrt/ifrt/tf_host_callback.cc | tensorflow/core/tfrt/ifrt/tf_host_callback_test.cc | #ifndef TENSORFLOW_CORE_TFRT_IFRT_TF_HOST_CALLBACK_H_
#define TENSORFLOW_CORE_TFRT_IFRT_TF_HOST_CALLBACK_H_
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace tensorflow {
namespace ifrt_serving {
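// Executes a TensorFlow function as an XLA host callback, using a private
// eager context to run the registered function over raw host buffers.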
class TfHostCallback {
public:
static absl::StatusOr<std::unique_ptr<TfHostCallback>> Create(
absl::Span<const tensorflow::FunctionDef> functions,
absl::string_view entry_function_name,
absl::Span<const DtypeAndShape> operand_type_and_shapes,
absl::Span<const DtypeAndShape> result_type_and_shapes,
tensorflow::DeviceMgr* device_mgr);
absl::Status Call(void** inputs, void** outputs);
private:
TfHostCallback(absl::string_view entry_function_name,
absl::Span<const DtypeAndShape> operand_type_and_shapes,
absl::Span<const DtypeAndShape> result_type_and_shape,
tensorflow::EagerContextPtr ctx)
: ctx_(std::move(ctx)),
entry_function_name_(entry_function_name),
operand_type_and_shapes_(operand_type_and_shapes.begin(),
operand_type_and_shapes.end()),
result_type_and_shapes_(result_type_and_shape.begin(),
result_type_and_shape.end()) {}
tensorflow::EagerContextPtr ctx_;
std::string entry_function_name_;
std::vector<DtypeAndShape> operand_type_and_shapes_;
std::vector<DtypeAndShape> result_type_and_shapes_;
};
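// Creates a static device manager holding the local CPU devices, for use by
// host callbacks.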
absl::StatusOr<std::unique_ptr<tensorflow::StaticDeviceMgr>>
CreateTfStaticDeviceMgr();
}
}
#endif
#include "tensorflow/core/tfrt/ifrt/tf_host_callback.h"
#include <cstddef>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/cleanup/cleanup.h"
#include "absl/container/fixed_array.h"
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/immediate_execution_context.h"
#include "tensorflow/c/eager/immediate_execution_operation.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/refcount.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
using RefCountHandle = ::tsl::core::RefCountPtr<tensorflow::TensorHandle>;
size_t GetSizeInBytes(const tensorflow::Tensor& tensor) {
return tensor.shape().num_elements() * DataTypeSize(tensor.dtype());
}
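// Copies `src` (a raw host-callback buffer) into a newly allocated Tensor of
// the given dtype and shape.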
tensorflow::Tensor GetTensor(const DtypeAndShape& dtype_and_shape, void* src) {
DCHECK(DataTypeCanUseMemcpy(dtype_and_shape.dtype));
tensorflow::Tensor t(dtype_and_shape.dtype, dtype_and_shape.shape);
std::memcpy(t.data(), src, GetSizeInBytes(t));
return t;
}
void CopyToBuffer(void* dst, const tensorflow::Tensor& tensor) {
DCHECK(DataTypeCanUseMemcpy(tensor.dtype()));
std::memcpy(dst, tensor.data(), GetSizeInBytes(tensor));
}
}
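// Runs the entry function in the eager context: wraps each raw input buffer
// in a tensor handle, executes the op, and copies the results back out.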
absl::Status TfHostCallback::Call(void** inputs, void** outputs) {
tsl::profiler::TraceMe trace_me("TfHostCallback::Call");
tensorflow::ImmediateOpPtr op(ctx_->CreateOperation());
TF_RETURN_IF_ERROR(
op->Reset(entry_function_name_.c_str(), nullptr));
ctx_->StartStep();
absl::Cleanup cleanup_step = [this]() { ctx_->EndStep(); };
for (int i = 0; i < operand_type_and_shapes_.size(); ++i) {
tensorflow::Tensor t = GetTensor(operand_type_and_shapes_[i], inputs[i]);
RefCountHandle handle(tensorflow::down_cast<tensorflow::TensorHandle*>(
ctx_->CreateLocalHandleFromTFTensor(t, nullptr)));
TF_RETURN_IF_ERROR(op->AddInput(handle.get()));
}
int num_outputs = result_type_and_shapes_.size();
absl::FixedArray<tensorflow::AbstractTensorHandle*> output_raw_handles(
num_outputs);
TF_RETURN_IF_ERROR(
op->Execute(absl::MakeSpan(output_raw_handles), &num_outputs));
std::vector<RefCountHandle> output_handles;
output_handles.reserve(num_outputs);
for (auto* output_raw_handle : output_raw_handles) {
output_handles.emplace_back(
tensorflow::down_cast<tensorflow::TensorHandle*>(output_raw_handle));
}
if (result_type_and_shapes_.size() != num_outputs) {
return absl::InternalError(absl::StrCat(
"TF host callback invocation expected ", result_type_and_shapes_.size(),
" results, instead got ", num_outputs));
}
for (int i = 0; i < num_outputs; ++i) {
const tensorflow::Tensor* tensor;
TF_RETURN_IF_ERROR(output_handles[i]->Tensor(&tensor));
CopyToBuffer(outputs[i], *tensor);
}
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<TfHostCallback>> TfHostCallback::Create(
absl::Span<const tensorflow::FunctionDef> functions,
absl::string_view entry_function_name,
absl::Span<const DtypeAndShape> operand_type_and_shapes,
absl::Span<const DtypeAndShape> result_type_and_shapes,
tensorflow::DeviceMgr* device_mgr) {
tensorflow::SessionOptions options;
options.config.add_device_filters("/device:CPU:*");
DCHECK(device_mgr != nullptr);
tensorflow::EagerContextPtr ctx(new tensorflow::EagerContext(
options,
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, device_mgr,
false,
nullptr,
nullptr,
nullptr,
true));
for (const tensorflow::FunctionDef& function : functions) {
TF_RETURN_IF_ERROR(ctx->AddFunctionDef(function));
}
return absl::WrapUnique(
new TfHostCallback(entry_function_name, operand_type_and_shapes,
result_type_and_shapes, std::move(ctx)));
}
absl::StatusOr<std::unique_ptr<tensorflow::StaticDeviceMgr>>
CreateTfStaticDeviceMgr() {
std::vector<std::unique_ptr<tensorflow::Device>> devices;
TF_RETURN_IF_ERROR(tensorflow::DeviceFactory::AddCpuDevices(
tensorflow::SessionOptions(), "/job:localhost/replica:0/task:0",
&devices));
return std::make_unique<tensorflow::StaticDeviceMgr>(std::move(devices));
}
}
} | #include "tensorflow/core/tfrt/ifrt/tf_host_callback.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_matcher.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
using ::tensorflow::test::AsTensor;
using ::tensorflow::test::TensorEq;
absl::StatusOr<tensorflow::FunctionDef> ToFunctionDef(
tensorflow::Scope scope, const std::string& function_name) {
auto graph =
std::make_unique<tensorflow::Graph>(tensorflow::OpRegistry::Global());
TF_RETURN_IF_ERROR(scope.ToGraph(graph.get()));
tensorflow::FunctionDef function_def;
TF_RETURN_IF_ERROR(
tensorflow::GraphToFunctionDef(*graph, function_name, &function_def));
return function_def;
}
absl::StatusOr<tensorflow::FunctionDef> MakeAddOneFunctionDef(
const std::string& function_name) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
{
auto arg0 = tensorflow::ops::_Arg(scope.WithOpName("arg0"),
tensorflow::DT_FLOAT, 0);
auto const0_value = tensorflow::test::AsScalar<float>(1);
auto const0 =
tensorflow::ops::Const(scope.WithOpName("const0"),
tensorflow::Input::Initializer(const0_value));
auto add0 = tensorflow::ops::Add(scope.WithOpName("add0"), arg0, const0);
auto retval0 =
tensorflow::ops::_Retval(scope.WithOpName("retval0"), add0, 0);
}
return ToFunctionDef(std::move(scope), function_name);
}
absl::StatusOr<std::vector<tensorflow::FunctionDef>>
MakeAddOneWithCallFunctionDef(const std::string& function_name) {
std::vector<tensorflow::FunctionDef> function_defs;
TF_ASSIGN_OR_RETURN(function_defs.emplace_back(),
MakeAddOneFunctionDef("add"));
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
{
auto arg0 = tensorflow::ops::_Arg(scope.WithOpName("arg0"),
tensorflow::DT_FLOAT, 0);
tensorflow::NameAttrList f;
f.set_name("add");
auto call = tensorflow::ops::StatefulPartitionedCall(
scope.WithOpName("call"), {arg0.output}, {tensorflow::DT_FLOAT}, f);
auto retval0 = tensorflow::ops::_Retval(scope.WithOpName("retval0"),
call.output[0], 0);
}
TF_ASSIGN_OR_RETURN(function_defs.emplace_back(),
ToFunctionDef(std::move(scope), function_name));
return function_defs;
}
absl::StatusOr<tensorflow::FunctionDef> MakeAssignVarFunctionDef(
const std::string& function_name) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
{
auto arg0 = tensorflow::ops::_Arg(scope.WithOpName("arg0"),
tensorflow::DT_INT32, 0);
auto var = tensorflow::ops::VarHandleOp(
scope.WithOpName("var"), tensorflow::DT_INT32,
tensorflow::TensorShape(),
tensorflow::ops::VarHandleOp::Attrs().SharedName("var"));
tensorflow::ops::AssignVariableOp assign_op(scope.WithOpName("assign"), var,
arg0);
}
return ToFunctionDef(std::move(scope), function_name);
}
absl::StatusOr<tensorflow::FunctionDef> MakeAddVarFunctionDef(
const std::string& function_name) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
{
auto arg0 = tensorflow::ops::_Arg(scope.WithOpName("arg0"),
tensorflow::DT_INT32, 0);
auto var = tensorflow::ops::VarHandleOp(
scope.WithOpName("var"), tensorflow::DT_INT32,
tensorflow::TensorShape(),
tensorflow::ops::VarHandleOp::Attrs().SharedName("var"));
auto read = tensorflow::ops::ReadVariableOp(scope.WithOpName("read"), var,
tensorflow::DT_INT32);
auto add = tensorflow::ops::Add(scope.WithOpName("add"), read, arg0);
tensorflow::ops::AssignVariableOp assign_op(scope.WithOpName("assign"), var,
add);
auto retval0 =
tensorflow::ops::_Retval(scope.WithOpName("retval0"), add, 0);
}
return ToFunctionDef(std::move(scope), function_name);
}
TEST(TfHostCallbackTest, Simple) {
ASSERT_OK_AND_ASSIGN(auto function_defs,
MakeAddOneWithCallFunctionDef("main"));
auto in = AsTensor<float>({2.5f}, tensorflow::TensorShape({1}));
void* in_ptrs[1] = {in.data()};
std::vector<DtypeAndShape> in_dtype_shapes;
in_dtype_shapes.push_back({.dtype = in.dtype(), .shape = in.shape()});
auto out = AsTensor<float>({0.0f}, tensorflow::TensorShape({1}));
void* out_ptrs[1] = {out.data()};
std::vector<DtypeAndShape> out_dtype_shapes;
out_dtype_shapes.push_back({.dtype = out.dtype(), .shape = out.shape()});
ASSERT_OK_AND_ASSIGN(auto device_mgr, CreateTfStaticDeviceMgr());
ASSERT_OK_AND_ASSIGN(auto tf_host_callback,
tensorflow::ifrt_serving::TfHostCallback::Create(
function_defs, "main", in_dtype_shapes,
out_dtype_shapes, device_mgr.get()));
ASSERT_OK(tf_host_callback->Call(in_ptrs, out_ptrs));
EXPECT_THAT(out,
TensorEq(AsTensor<float>({3.5f}, tensorflow::TensorShape({1}))));
}
TEST(TfHostCallbackTest, SharedState) {
tensorflow::ConfigProto session_config;
ASSERT_OK_AND_ASSIGN(auto state, CreateTfStaticDeviceMgr());
std::unique_ptr<TfHostCallback> assign_callback;
{
ASSERT_OK_AND_ASSIGN(auto functions, MakeAssignVarFunctionDef("main"));
std::vector<DtypeAndShape> in_dtype_shapes;
in_dtype_shapes.push_back(
{.dtype = DT_INT32, .shape = tensorflow::TensorShape({1})});
std::vector<DtypeAndShape> out_dtype_shapes;
ASSERT_OK_AND_ASSIGN(
assign_callback,
TfHostCallback::Create({functions}, "main", in_dtype_shapes,
out_dtype_shapes, state.get()));
}
std::unique_ptr<TfHostCallback> incr_callback;
{
ASSERT_OK_AND_ASSIGN(auto functions, MakeAddVarFunctionDef("main"));
std::vector<DtypeAndShape> in_dtype_shapes;
in_dtype_shapes.push_back(
{.dtype = DT_INT32, .shape = tensorflow::TensorShape({1})});
std::vector<DtypeAndShape> out_dtype_shapes;
out_dtype_shapes.push_back(
{.dtype = DT_INT32, .shape = tensorflow::TensorShape({1})});
ASSERT_OK_AND_ASSIGN(
incr_callback,
TfHostCallback::Create({functions}, "main", in_dtype_shapes,
out_dtype_shapes, state.get()));
}
constexpr int32_t kInit = 2;
{
auto in = AsTensor<int32_t>({kInit}, tensorflow::TensorShape({1}));
void* in_ptrs[1] = {in.data()};
    // The assign function produces no outputs, so no output buffers are
    // needed (a zero-length array is non-standard C++).
    void** out_ptrs = nullptr;
ASSERT_OK(assign_callback->Call(in_ptrs, out_ptrs));
}
for (int i = 0; i < 3; ++i) {
auto in = AsTensor<int32_t>({1}, tensorflow::TensorShape({1}));
void* in_ptrs[1] = {in.data()};
auto out = AsTensor<int32_t>({0}, tensorflow::TensorShape({1}));
void* out_ptrs[1] = {out.data()};
ASSERT_OK(incr_callback->Call(in_ptrs, out_ptrs));
EXPECT_THAT(out, TensorEq(AsTensor<int32_t>({kInit + i + 1},
tensorflow::TensorShape({1}))));
}
}
}
}
} |
1,335 | cpp | tensorflow/tensorflow | ifrt_serving_executable | tensorflow/core/tfrt/ifrt/ifrt_serving_executable.cc | tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test.cc | #ifndef TENSORFLOW_CORE_TFRT_IFRT_IFRT_SERVING_EXECUTABLE_H_
#define TENSORFLOW_CORE_TFRT_IFRT_IFRT_SERVING_EXECUTABLE_H_
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/executable.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.h"
#include "tensorflow/core/tfrt/ifrt/tf_host_callback.h"
#include "tsl/platform/threadpool.h"
#include "tfrt/host_context/concurrent_work_queue.h"
namespace tensorflow {
namespace ifrt_serving {
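// Compiles and executes a TF MLIR module on IFRT, caching one executable
// bundle per distinct set of input shapes.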
class IfrtServingExecutable {
public:
static absl::StatusOr<std::unique_ptr<IfrtServingExecutable>> Create(
int64_t program_id, absl::string_view model_name,
absl::string_view signature_name,
mlir::OwningOpRef<mlir::ModuleOp> module,
std::shared_ptr<xla::ifrt::Client> client,
const tsl::thread::ThreadPool* thread_pool,
IfrtLoadedVariableRegistry* ifrt_loaded_variable_registry,
const IfrtRestoreTensorRegistry* ifrt_restore,
tfrt::ConcurrentWorkQueue* checkpoint_loader_queue,
tensorflow::DeviceMgr* device_mgr,
tensorflow::XlaHelpers::ShapeRepresentationFn shape_representation_fn,
IfrtServingCoreSelector* ifrt_serving_core_selector,
tsl::protobuf::Message* compilation_environment_proto);
IfrtServingExecutable(IfrtServingExecutable&& other) = default;
IfrtServingExecutable& operator=(IfrtServingExecutable&& other) = default;
IfrtServingExecutable(const IfrtServingExecutable& other) = delete;
IfrtServingExecutable& operator=(const IfrtServingExecutable& other) = delete;
absl::string_view model_name() const { return model_name_; }
absl::string_view signature_name() const { return signature_name_; }
absl::StatusOr<std::vector<tensorflow::Tensor>> Execute(
absl::Span<const tensorflow::Tensor> inputs,
absl::Span<const int> variable_arg_indices);
void Freeze();
int num_executables() const {
absl::MutexLock lock(&mutex_);
return executable_bundles_.size();
}
private:
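  // Executables are compiled and cached per concrete combination of input
  // shapes; the cache key deliberately hashes and compares only the shapes,
  // since the dtypes are fixed by the signature.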
struct Key {
std::vector<tensorflow::TensorShape> input_shapes;
template <typename H>
friend H AbslHashValue(H h, const Key& key) {
for (const auto& shape : key.input_shapes) {
for (auto size : shape.dim_sizes()) {
h = H::combine(std::move(h), size);
}
}
return h;
}
friend bool operator==(const Key& x, const Key& y) {
return x.input_shapes == y.input_shapes;
}
};
struct CachedExecutableBundle {
std::unique_ptr<xla::ifrt::LoadedExecutable> ifrt_executable;
tensorflow::tpu::TPUCompileMetadataProto compile_metadata;
std::vector<std::unique_ptr<TfHostCallback>> host_callbacks;
CachedExecutableBundle() = default;
CachedExecutableBundle(CachedExecutableBundle&& other) = default;
CachedExecutableBundle& operator=(CachedExecutableBundle&& other) = default;
CachedExecutableBundle(const CachedExecutableBundle& other) = delete;
CachedExecutableBundle& operator=(const CachedExecutableBundle& other) =
delete;
};
IfrtServingExecutable(
int64_t program_id, absl::string_view model_name,
absl::string_view signature_name,
mlir::OwningOpRef<mlir::ModuleOp> module,
std::shared_ptr<xla::ifrt::Client> client,
const tsl::thread::ThreadPool* thread_pool,
IfrtLoadedVariableRegistry* ifrt_loaded_variable_registry,
const IfrtRestoreTensorRegistry* ifrt_restore_tensor_registry,
tfrt::ConcurrentWorkQueue* checkpoint_loader_queue,
tensorflow::DeviceMgr* device_mgr,
tensorflow::XlaHelpers::ShapeRepresentationFn shape_representation_fn,
IfrtServingCoreSelector* ifrt_serving_core_selector,
tensorflow::tpu::TPUCompileMetadataProto original_compile_metadata,
tsl::protobuf::Message* compilation_environment_proto)
: program_id_(program_id),
model_name_(std::string(model_name)),
signature_name_(std::string(signature_name)),
module_(std::move(module)),
original_compile_metadata_(std::move(original_compile_metadata)),
ifrt_client_(std::move(client)),
thread_pool_(*thread_pool),
ifrt_loaded_variable_registry_(*ifrt_loaded_variable_registry),
ifrt_restore_tensor_registry_(*ifrt_restore_tensor_registry),
checkpoint_loader_queue_(checkpoint_loader_queue),
device_mgr_(device_mgr),
shape_representation_fn_(std::move(shape_representation_fn)),
ifrt_serving_core_selector_(std::move(ifrt_serving_core_selector)),
compilation_environment_proto_(compilation_environment_proto) {}
int64_t program_id_;
using SharedCachedExecutableBundle = std::shared_ptr<CachedExecutableBundle>;
std::string model_name_;
std::string signature_name_;
mlir::OwningOpRef<mlir::ModuleOp> module_ ABSL_GUARDED_BY(mutex_);
tensorflow::tpu::TPUCompileMetadataProto original_compile_metadata_;
std::shared_ptr<xla::ifrt::Client> ifrt_client_;
const tsl::thread::ThreadPool& thread_pool_;
IfrtLoadedVariableRegistry& ifrt_loaded_variable_registry_;
const IfrtRestoreTensorRegistry& ifrt_restore_tensor_registry_;
tfrt::ConcurrentWorkQueue* checkpoint_loader_queue_;
tensorflow::DeviceMgr* device_mgr_;
tensorflow::XlaHelpers::ShapeRepresentationFn shape_representation_fn_;
IfrtServingCoreSelector* ifrt_serving_core_selector_;
  tsl::protobuf::Message* compilation_environment_proto_;
mutable absl::Mutex mutex_;
absl::flat_hash_map<Key, xla::ifrt::Future<SharedCachedExecutableBundle>>
executable_bundles_ ABSL_GUARDED_BY(mutex_);
bool is_frozen_ ABSL_GUARDED_BY(mutex_) = false;
absl::Status AsyncLoadIfrtArray(
absl::Span<const tensorflow::Tensor> inputs,
absl::Span<const int> variable_arg_indices,
const CachedExecutableBundle& executable_bundle,
const std::vector<xla::ifrt::Device*>& devices);
absl::StatusOr<tsl::RCReference<xla::ifrt::Array>> ConvertTensorToArray(
const tensorflow::Tensor& tensor,
const xla::ifrt::DeviceList& device_list,
const xla::OpSharding& sharding);
xla::ifrt::Future<SharedCachedExecutableBundle> LookUpOrCreateExecutable(
const tensorflow::tpu::TPUCompileMetadataProto& compile_metadata,
absl::Span<const DtypeAndShape> dtypes_and_shapes);
absl::StatusOr<IfrtServingExecutable::SharedCachedExecutableBundle>
CreateExecutableSynchronously(
mlir::OwningOpRef<mlir::ModuleOp> module_copy,
const tensorflow::tpu::TPUCompileMetadataProto& compile_metadata,
absl::Span<const DtypeAndShape> dtypes_and_shapes);
absl::StatusOr<std::unique_ptr<xla::ifrt::Sharding>> CreateSharding(
int num_devices, const xla::ifrt::Shape& arg_xla_shape,
const xla::ifrt::Shape& sharded_shapes);
std::vector<xla::ifrt::Shape> GetArgShape(
int arg_index, const CachedExecutableBundle& entry);
bool UsePortableExecution(
const tensorflow::tpu::TPUCompileMetadataProto& compile_metadata);
};
}
}
#endif
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/extract_callback.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/tf2hlo.h"
#include "tensorflow/compiler/mlir/tfrt/utils/export.h"
#include "tensorflow/compiler/tf2xla/host_compute_metadata.pb.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/pjrt/host_callback.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/executable.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/hlo/hlo_program.h"
#include "xla/python/ifrt/host_callback.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/pjrt_ifrt/pjrt_host_callback.h"
#include "xla/python/pjrt_ifrt/xla_compiler.h"
#include "xla/service/computation_placer.h"
#include "xla/shape.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_config.pb.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_tensor_utils.h"
#include "tensorflow/core/tfrt/ifrt/sharding_utils.h"
#include "tensorflow/core/tfrt/ifrt/tf_host_callback.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/platform/tstring.h"
#include "tfrt/host_context/concurrent_work_queue.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
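// True iff the program targets a single device (one replica, one core per
// replica); such programs are eligible for portable execution.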
bool IsSingleDevice(
const tensorflow::tpu::TPUCompileMetadataProto& compile_metadata) {
return compile_metadata.num_replicas() == 1 &&
compile_metadata.num_cores_per_replica() == 1;
}
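// Collects the dtype/shape of every argument. Regular inputs contribute their
// own dtype and shape; inputs at `variable_arg_indices` are scalar string
// keys whose dtype/shape is looked up in the restore-tensor registry.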
absl::StatusOr<std::vector<DtypeAndShape>> BuildDtypeAndShape(
absl::Span<const tensorflow::Tensor> inputs,
absl::Span<const int> variable_arg_indices,
const IfrtRestoreTensorRegistry& ifrt_restore_tensor_registry) {
std::vector<DtypeAndShape> dtypes_and_shapes;
dtypes_and_shapes.reserve(inputs.size());
int variable_index = 0;
for (int i = 0; i < inputs.size(); i++) {
if (variable_index < variable_arg_indices.size() &&
i == variable_arg_indices[variable_index]) {
TF_ASSIGN_OR_RETURN(auto dtype_and_shape,
ifrt_restore_tensor_registry.GetDtypeAndShape(
inputs[i].scalar<tsl::tstring>()()));
dtypes_and_shapes.push_back(std::move(dtype_and_shape));
variable_index++;
} else {
dtypes_and_shapes.push_back(DtypeAndShape{.dtype = inputs[i].dtype(),
.shape = inputs[i].shape()});
}
}
return dtypes_and_shapes;
}
absl::StatusOr<xla::DeviceAssignment> GetXlaDeviceAssignment(
const tensorflow::tpu::TPUCompileMetadataProto& compile_metadata) {
if (!compile_metadata.has_device_assignment()) {
return absl::InternalError("No device assignment found.");
}
TF_ASSIGN_OR_RETURN(
std::unique_ptr<xla::DeviceAssignment> da,
xla::DeviceAssignment::Deserialize(compile_metadata.device_assignment()));
return *da;
}
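// Resolves the XLA device assignment in the compile metadata to the concrete
// IFRT devices, in (replica, computation) order.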
absl::StatusOr<std::vector<xla::ifrt::Device*>> GetAssignedDevices(
const xla::ifrt::Client& ifrt_client,
const tensorflow::tpu::TPUCompileMetadataProto& compile_metadata) {
TF_ASSIGN_OR_RETURN(auto device_assignment,
GetXlaDeviceAssignment(compile_metadata));
const int num_devices =
device_assignment.replica_count() * device_assignment.computation_count();
std::vector<xla::ifrt::Device*> devices;
devices.reserve(num_devices);
for (int replica_idx = 0; replica_idx < device_assignment.replica_count();
replica_idx++) {
for (int computation_idx = 0;
computation_idx < device_assignment.computation_count();
computation_idx++) {
auto device_id = device_assignment(replica_idx, computation_idx);
TF_ASSIGN_OR_RETURN(
xla::ifrt::Device * device,
ifrt_client.LookupDevice(xla::ifrt::DeviceId(device_id)));
devices.push_back(device);
}
}
return devices;
}
}
absl::StatusOr<std::unique_ptr<IfrtServingExecutable>>
IfrtServingExecutable::Create(
int64_t program_id, absl::string_view model_name,
absl::string_view signature_name, mlir::OwningOpRef<mlir::ModuleOp> module,
std::shared_ptr<xla::ifrt::Client> client,
const tsl::thread::ThreadPool* thread_pool,
IfrtLoadedVariableRegistry* ifrt_loaded_variable_registry,
const IfrtRestoreTensorRegistry* ifrt_restore,
tfrt::ConcurrentWorkQueue* checkpoint_loader_queue,
tensorflow::DeviceMgr* device_mgr,
tensorflow::XlaHelpers::ShapeRepresentationFn shape_representation_fn,
IfrtServingCoreSelector* ifrt_serving_core_selector,
    tsl::protobuf::Message* compilation_environment_proto) {
TF_ASSIGN_OR_RETURN(
tensorflow::tpu::TPUCompileMetadataProto original_compile_metadata,
GetCompileMetadata(*module, *client));
auto executable = absl::WrapUnique(new IfrtServingExecutable(
program_id, model_name, signature_name, std::move(module),
std::move(client), thread_pool, ifrt_loaded_variable_registry,
ifrt_restore, checkpoint_loader_queue, device_mgr,
std::move(shape_representation_fn), ifrt_serving_core_selector,
      std::move(original_compile_metadata), compilation_environment_proto));
return executable;
}
absl::StatusOr<tsl::RCReference<xla::ifrt::Array>>
IfrtServingExecutable::ConvertTensorToArray(
const tensorflow::Tensor& tensor, const xla::ifrt::DeviceList& device_list,
const xla::OpSharding& sharding) {
xla::ifrt::Shape input_shape = ToIfrtShape(tensor.shape());
VLOG(2) << "Converting tensor of shape " << input_shape;
TF_ASSIGN_OR_RETURN(auto hlo_sharding, xla::HloSharding::FromProto(sharding));
return MakeArrayFromTensor(*ifrt_client_, tensor, device_list,
std::move(hlo_sharding), thread_pool_);
}
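// Exports every function in `module` as a tensorflow::FunctionDef.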
absl::StatusOr<std::vector<tensorflow::FunctionDef>> BuildFunctionDef(
mlir::ModuleOp module) {
std::vector<tensorflow::FunctionDef> function_defs;
TF_RETURN_IF_ERROR(ExportFunctionDefs(
module,
[&](tensorflow::FunctionDef function_def) {
function_defs.push_back(std::move(function_def));
return absl::OkStatus();
},
false));
return function_defs;
}
struct HostCallbackBuilderInfo {
tensorflow::tf2xla::HostTransferMetadata device_to_host;
tensorflow::tf2xla::HostTransferMetadata host_to_device;
};
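// Groups the device-to-host and host-to-device transfer metadata by channel
// key, so that each key produces exactly one host callback.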
absl::StatusOr<absl::flat_hash_map<std::string, HostCallbackBuilderInfo>>
GroupHostCallbackByKey(const Tf2HloResult& tf2hlo_result) {
absl::flat_hash_map<std::string, HostCallbackBuilderInfo> host_callbacks;
for (const auto& device_to_host :
tf2hlo_result.host_compute_metadata.device_to_host()) {
auto& host_callback = host_callbacks[device_to_host.key()];
host_callback.device_to_host = device_to_host;
}
for (const auto& host_to_device :
tf2hlo_result.host_compute_metadata.host_to_device()) {
auto& host_callback = host_callbacks[host_to_device.key()];
host_callback.host_to_device = host_to_device;
}
return host_callbacks;
}
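// Builds the xla::HostCallback for one channel key: converts the transfer
// metadata to XLA shapes and channel ids, extracts the callback's MLIR
// module, and wires the resulting TfHostCallback into the callback function.
// The TfHostCallback is appended to `tf_host_callbacks`, which must outlive
// the returned xla::HostCallback.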
absl::StatusOr<xla::HostCallback> BuildHostCallback(
absl::string_view key, const HostCallbackBuilderInfo& builder_info,
mlir::ModuleOp module, tensorflow::DeviceMgr* device_mgr,
std::vector<std::unique_ptr<TfHostCallback>>& tf_host_callbacks) {
VLOG(2) << "BuildHostCallback for key: " << key;
DCHECK(device_mgr);
xla::HostCallback host_callback;
std::vector<DtypeAndShape> operand_type_and_shapes;
std::vector<DtypeAndShape> result_type_and_shapes;
auto to_xla_shape = [](tensorflow::DataType data_type,
const tensorflow::TensorShapeProto& shape)
-> absl::StatusOr<xla::Shape> {
xla::Shape xla_shape;
TF_ASSIGN_OR_RETURN(tensorflow::TensorShape tensor_shape,
tensorflow::TensorShape::BuildTensorShape(shape));
if (absl::Status status = tensorflow::TensorShapeToXLAShape(
data_type, tensor_shape, &xla_shape);
status.ok()) {
return xla_shape;
} else {
return status;
}
};
operand_type_and_shapes.reserve(builder_info.device_to_host.metadata_size());
result_type_and_shapes.reserve(builder_info.host_to_device.metadata_size());
for (const auto& metadata : builder_info.device_to_host.metadata()) {
TF_ASSIGN_OR_RETURN(xla::Shape shape,
to_xla_shape(metadata.type(), metadata.shape()));
uint16_t channel_id = static_cast<uint16_t>(metadata.channel_id());
VLOG(2) << "Channel id: " << channel_id;
host_callback.operands.push_back(
{.channel_id = channel_id, .shape = shape});
operand_type_and_shapes.push_back(
DtypeAndShape{.dtype = metadata.type(), .shape = metadata.shape()});
}
for (const auto& metadata : builder_info.host_to_device.metadata()) {
TF_ASSIGN_OR_RETURN(xla::Shape shape,
to_xla_shape(metadata.type(), metadata.shape()));
uint16_t channel_id = static_cast<uint16_t>(metadata.channel_id());
VLOG(2) << "Channel id: " << channel_id;
host_callback.results.push_back(
{.channel_id = channel_id, .shape = std::move(shape)});
result_type_and_shapes.push_back(
DtypeAndShape{.dtype = metadata.type(), .shape = metadata.shape()});
}
TF_ASSIGN_OR_RETURN(mlir::OwningOpRef<mlir::ModuleOp> callback_module,
ExtractCallbackModule(module, key));
TF_ASSIGN_OR_RETURN(std::vector<tensorflow::FunctionDef> function_defs,
BuildFunctionDef(*callback_module));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<TfHostCallback> tf_host_callback,
TfHostCallback::Create(function_defs, key, operand_type_and_shapes,
result_type_and_shapes, device_mgr));
host_callback.callback = [tf_host_callback = tf_host_callback.get()](
void** output, void** input) {
return tf_host_callback->Call(input, output);
};
tf_host_callbacks.push_back(std::move(tf_host_callback));
return host_callback;
}
absl::StatusOr<std::vector<xla::HostCallback>> BuildHostCallbacks(
const Tf2HloResult& tf2hlo_result, mlir::ModuleOp module,
tensorflow::DeviceMgr* device_mgr,
std::vector<std::unique_ptr<TfHostCallback>>& tf_host_callbacks) {
TF_ASSIGN_OR_RETURN(auto host_callback_maps,
GroupHostCallbackByKey(tf2hlo_result));
std::vector<xla::HostCallback> host_callbacks;
host_callbacks.reserve(host_callback_maps.size());
for (const auto& [entry_function, builder_info] : host_callback_maps) {
TF_ASSIGN_OR_RETURN(auto host_callback,
BuildHostCallback(entry_function, builder_info, module,
device_mgr, tf_host_callbacks));
host_callbacks.push_back(std::move(host_callback));
}
return host_callbacks;
}
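// Compiles the TF MLIR module to HLO, builds the host callbacks, and invokes
// the IFRT compiler. Only single-replica (possibly multi-partition SPMD)
// programs are supported.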
absl::StatusOr<IfrtServingExecutable::SharedCachedExecutableBundle>
IfrtServingExecutable::CreateExecutableSynchronously(
mlir::OwningOpRef<mlir::ModuleOp> module_copy,
const tensorflow::tpu::TPUCompileMetadataProto& compile_metadata,
absl::Span<const DtypeAndShape> dtypes_and_shapes) {
TF_ASSIGN_OR_RETURN(
Tf2HloResult tf2hlo_result,
CompileTfToHlo(*module_copy, dtypes_and_shapes, signature_name(),
*ifrt_client_, compile_metadata,
shape_representation_fn_));
const int num_replicas = tf2hlo_result.compile_metadata.num_replicas();
const int num_partitions =
tf2hlo_result.compile_metadata.num_cores_per_replica();
VLOG(2) << " Number of replcas is " << num_replicas
<< " and num_partitions is " << num_partitions;
if (num_replicas > 1) {
return absl::UnimplementedError(
absl::StrCat("Only support single replica, but replica number is ",
num_replicas, " and num_partitions is ", num_partitions));
}
xla::CompileOptions xla_compile_options;
if (compilation_environment_proto_) {
tsl::protobuf::Message* comp_env_copy =
compilation_environment_proto_->New();
comp_env_copy->CopyFrom(*compilation_environment_proto_);
TF_RETURN_IF_ERROR(
xla_compile_options.executable_build_options.mutable_comp_envs()
->AddEnv(absl::WrapUnique<tsl::protobuf::Message>(comp_env_copy)));
}
xla_compile_options.executable_build_options.set_num_replicas(num_replicas);
xla_compile_options.executable_build_options.set_num_partitions(
num_partitions);
xla_compile_options.executable_build_options.set_use_spmd_partitioning(true);
xla_compile_options.parameter_is_tupled_arguments = false;
if (UsePortableExecution(compile_metadata)) {
xla_compile_options.compile_portable_executable = true;
} else {
TF_ASSIGN_OR_RETURN(xla::DeviceAssignment da,
GetXlaDeviceAssignment(tf2hlo_result.compile_metadata));
VLOG(2) << "Device assignment :" << da.ToString();
xla_compile_options.executable_build_options.set_device_assignment(da);
}
std::vector<std::unique_ptr<TfHostCallback>> tf_host_callbacks;
TF_ASSIGN_OR_RETURN(auto host_callbacks,
BuildHostCallbacks(tf2hlo_result, *module_copy,
device_mgr_, tf_host_callbacks));
std::vector<tsl::RCReference<xla::ifrt::LoadedHostCallback>>
loaded_host_callbacks;
loaded_host_callbacks.reserve(host_callbacks.size());
for (const auto& host_callback : host_callbacks) {
loaded_host_callbacks.push_back(
tsl::MakeRef<xla::ifrt::PjRtHostSendAndRecvLoadedHostCallback>(
ifrt_client_.get(),
std::make_unique<xla::HostCallback>(host_callback)));
}
TF_ASSIGN_OR_RETURN(
std::unique_ptr<xla::ifrt::LoadedExecutable> ifrt_executable,
ifrt_client_->GetDefaultCompiler()->Compile(
std::make_unique<xla::ifrt::HloProgram>(
tf2hlo_result.mlir_hlo_module.get()),
std::make_unique<xla::ifrt::XlaCompileOptions>(
xla_compile_options, loaded_host_callbacks)));
SharedCachedExecutableBundle executable_bundle =
std::make_shared<CachedExecutableBundle>();
executable_bundle->ifrt_executable = std::move(ifrt_executable);
executable_bundle->compile_metadata =
std::move(tf2hlo_result.compile_metadata);
executable_bundle->host_callbacks = std::move(tf_host_callbacks);
return executable_bundle;
}
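// Returns the cached executable future for this input-shape key if present.
// Otherwise registers a new promise under the lock, clones the module, and
// compiles outside the lock, so concurrent callers for the same key all
// await the same future.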
xla::ifrt::Future<IfrtServingExecutable::SharedCachedExecutableBundle>
IfrtServingExecutable::LookUpOrCreateExecutable(
const tensorflow::tpu::TPUCompileMetadataProto& compile_metadata,
absl::Span<const DtypeAndShape> dtypes_and_shapes) {
std::vector<tensorflow::TensorShape> input_shapes;
for (const auto& dtype_and_shape : dtypes_and_shapes) {
input_shapes.push_back(dtype_and_shape.shape);
}
Key key = {.input_shapes = std::move(input_shapes)};
xla::ifrt::Promise<SharedCachedExecutableBundle> promise;
xla::ifrt::Future<SharedCachedExecutableBundle> future;
mlir::OwningOpRef<mlir::ModuleOp> module_copy;
{
absl::MutexLock lock(&mutex_);
const auto it = executable_bundles_.find(key);
if (it != executable_bundles_.end()) {
return it->second;
}
if (is_frozen_) {
xla::ifrt::Future<SharedCachedExecutableBundle> frozen_future(
absl::FailedPreconditionError(
"Cannot compile for new input shapes after the executable is "
"already frozen."));
return frozen_future;
}
promise = xla::ifrt::Future<SharedCachedExecutableBundle>::CreatePromise();
future = xla::ifrt::Future<SharedCachedExecutableBundle>(promise);
executable_bundles_.emplace(key, future);
module_copy = mlir::OwningOpRef<mlir::ModuleOp>(module_->clone());
}
LOG(INFO) << "Cache missed. Building executable";
absl::StatusOr<SharedCachedExecutableBundle> executable_bundle =
CreateExecutableSynchronously(std::move(module_copy), compile_metadata,
dtypes_and_shapes);
promise.Set(std::move(executable_bundle));
return future;
}
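// After freezing, the MLIR module is released and requests with input shapes
// that were not compiled earlier fail with FailedPrecondition.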
void IfrtServingExecutable::Freeze() {
LOG(INFO) << "Freezing executable. Program id: " << program_id_;
absl::MutexLock lock(&mutex_);
is_frozen_ = true;
module_ = nullptr;
}
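// Portable execution (per-request core selection) is used only for
// single-device programs and only when a core selector was provided.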
bool IfrtServingExecutable::UsePortableExecution(
const tensorflow::tpu::TPUCompileMetadataProto& compile_metadata) {
return IsSingleDevice(compile_metadata) && ifrt_serving_core_selector_;
}
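// Validates that `variable_arg_indices` is strictly ascending, in range, and
// that each referenced input is a scalar string key; then picks devices
// (reserving a core when running portably), fetches or compiles the
// executable bundle, and loads the argument arrays asynchronously.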
absl::StatusOr<std::vector<tensorflow::Tensor>> IfrtServingExecutable::Execute(
absl::Span<const tensorflow::Tensor> inputs,
absl::Span<const int> variable_arg_indices) {
for (int i = 1; i < variable_arg_indices.size(); i++) {
if (variable_arg_indices[i] <= variable_arg_indices[i - 1]) {
return absl::FailedPreconditionError(absl::StrCat(
"Expected variable_arg_indices in ascending order. But subsequence "
"starting at ",
i - 1, ": (", variable_arg_indices[i - 1], ", ",
variable_arg_indices[i], ")", " is not in ascending order"));
}
}
if (!variable_arg_indices.empty() &&
inputs.size() <= variable_arg_indices.back()) {
return absl::FailedPreconditionError(absl::StrCat(
"Expected at most ", inputs.size(), " inputs, but got up to ",
variable_arg_indices.back(), " variables."));
}
for (const int i : variable_arg_indices) {
if (inputs[i].dtype() != tensorflow::DT_STRING ||
!tensorflow::TensorShapeUtils::IsScalar(inputs[i].shape())) {
return absl::FailedPreconditionError(
absl::StrCat("Expected a scalar tensor as loaded variable array key, "
"but got type ",
inputs[i].dtype(), " and shape ",
inputs[i].shape().DebugString(), " at index ", i));
}
}
TF_ASSIGN_OR_RETURN(std::vector<DtypeAndShape> dtypes_and_shapes,
BuildDtypeAndShape(inputs, variable_arg_indices,
ifrt_restore_tensor_registry_));
tensorflow::tpu::TPUCompileMetadataProto compile_metadata =
original_compile_metadata_;
TF_RETURN_IF_ERROR(
UpdateCompileMetadata(compile_metadata, dtypes_and_shapes));
tsl::DeviceReservation device_reservation(kNoCoreSelectedIndex, nullptr);
  std::vector<xla::ifrt::Device*> devices;
if (UsePortableExecution(compile_metadata)) {
device_reservation =
ifrt_serving_core_selector_->ReserveDevice(program_id_);
compile_metadata.clear_device_assignment();
TF_ASSIGN_OR_RETURN(xla::ifrt::Device * device,
ifrt_client_->LookupDevice(xla::ifrt::DeviceId(
device_reservation.device_index())));
devices.push_back(device);
} else {
TF_ASSIGN_OR_RETURN(devices,
GetAssignedDevices(*ifrt_client_, compile_metadata));
}
TF_ASSIGN_OR_RETURN(SharedCachedExecutableBundle executable_bundle,
LookUpOrCreateExecutable(
compile_metadata, absl::MakeSpan(dtypes_and_shapes))
.Await());
xla::ifrt::DeviceList device_list(
xla::ifrt::DeviceList::Devices(devices.begin(), devices.end()));
if (executable_bundle->compile_metadata.args().size() !=
dtypes_and_shapes.size()) {
return absl::InternalError(absl::StrCat(
"Expected ", executable_bundle->compile_metadata.args().size(),
" but got ", dtypes_and_shapes.size(), " arguments"));
}
TF_RETURN_IF_ERROR(AsyncLoadIfrtArray(inputs, variable_arg_indices,
*executable_bundle, devices));
std::vector<tsl::RCReference<xla::ifrt::Array>> args;
args.reserve(inputs.size());
int variable_index = 0;
for (int i = 0; i < inputs. | #include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "xla/tsl/framework/test_util/mock_serving_device_selector.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_matcher.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test_util.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/tstring.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
using tensorflow::ifrt_serving::test_utils::GetMlirModulePath;
using ::tensorflow::test::AsTensor;
using ::tensorflow::test::TensorEq;
using ::testing::ElementsAre;
using ::testing::Return;
using ::tsl::testing::StatusIs;
struct VariableInputTestParam {
std::vector<tensorflow::Tensor> in_tensors;
  std::vector<bool> is_variable;
std::vector<tensorflow::Tensor> expected_out_tensors;
};
using VariableInputTest = ::testing::TestWithParam<VariableInputTestParam>;
class IfrtServingExecutableTest : public ::testing::Test {
protected:
explicit IfrtServingExecutableTest() {
helper_ = std::make_unique<test_utils::IfrtServingExecutableTestHelper>(
&selector_);
}
tsl::test_util::MockServingDeviceSelector selector_;
std::unique_ptr<test_utils::IfrtServingExecutableTestHelper> helper_;
};
TEST_F(IfrtServingExecutableTest, Basic) {
int64_t program_id = 123456;
EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id)))
.Times(1)
.WillOnce(Return(tsl::DeviceReservation(0, nullptr)));
auto executable =
helper_->MakeExecutable(program_id, GetMlirModulePath("executable.mlir"));
auto x = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({1, 3}));
auto y = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({3, 1}));
std::vector<tensorflow::Tensor> inputs{x, y};
for (int i = 0; i < helper_->num_cores(); i++) {
TF_ASSERT_OK(executable->Execute(absl::MakeSpan(inputs), {}).status());
}
TF_ASSERT_OK_AND_ASSIGN(auto result,
executable->Execute(absl::MakeSpan(inputs), {}));
const auto expected_out =
AsTensor<int32_t>({14}, tensorflow::TensorShape({1, 1}));
EXPECT_THAT(result, ElementsAre(TensorEq(expected_out)));
}
TEST_F(IfrtServingExecutableTest, MultipleShapes) {
int64_t program_id = 123456;
EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id)))
.Times(6)
.WillRepeatedly(
[](::testing::Unused) { return tsl::DeviceReservation(0, nullptr); });
auto executable =
helper_->MakeExecutable(program_id, GetMlirModulePath("executable.mlir"));
auto x1 = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({1, 3}));
auto y1 = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({3, 1}));
const auto expected_out1 =
AsTensor<int32_t>({14}, tensorflow::TensorShape({1, 1}));
std::vector<tensorflow::Tensor> inputs1{x1, y1};
auto x2 = AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({1, 4}));
auto y2 = AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({4, 1}));
const auto expected_out2 =
AsTensor<int32_t>({30}, tensorflow::TensorShape({1, 1}));
std::vector<tensorflow::Tensor> inputs2{x2, y2};
std::vector<tensorflow::Tensor> outputs1, outputs2;
for (int i = 0; i < helper_->num_cores(); i++) {
TF_ASSERT_OK(executable->Execute(absl::MakeSpan(inputs1), {}).status());
}
for (int i = 0; i < 3; i++) {
TF_ASSERT_OK_AND_ASSIGN(outputs1,
executable->Execute(absl::MakeSpan(inputs1), {}));
TF_ASSERT_OK_AND_ASSIGN(outputs2,
executable->Execute(absl::MakeSpan(inputs2), {}));
}
ASSERT_EQ(executable->num_executables(), 2);
EXPECT_THAT(outputs1, ElementsAre(TensorEq(expected_out1)));
EXPECT_THAT(outputs2, ElementsAre(TensorEq(expected_out2)));
}
TEST_F(IfrtServingExecutableTest, ReturnFailOnUncompiledShapeAfterFrozen) {
int64_t program_id = 123456;
EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id)))
.Times(3)
.WillRepeatedly(
[](::testing::Unused) { return tsl::DeviceReservation(0, nullptr); });
auto executable =
helper_->MakeExecutable(program_id, GetMlirModulePath("executable.mlir"));
auto x1 = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({1, 3}));
auto y1 = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({3, 1}));
const auto expected_out1 =
AsTensor<int32_t>({14}, tensorflow::TensorShape({1, 1}));
std::vector<tensorflow::Tensor> inputs1{x1, y1};
std::vector<tensorflow::Tensor> outputs1;
for (int i = 0; i < helper_->num_cores(); i++) {
TF_ASSERT_OK(executable->Execute(absl::MakeSpan(inputs1), {}).status());
}
TF_ASSERT_OK_AND_ASSIGN(outputs1,
executable->Execute(absl::MakeSpan(inputs1), {}));
executable->Freeze();
outputs1.clear();
TF_ASSERT_OK_AND_ASSIGN(outputs1,
executable->Execute(absl::MakeSpan(inputs1), {}));
EXPECT_THAT(outputs1, ElementsAre(TensorEq(expected_out1)));
auto x2 = AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({1, 4}));
auto y2 = AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({4, 1}));
std::vector<tensorflow::Tensor> inputs2{x2, y2};
std::vector<tensorflow::Tensor> outputs2;
auto status = executable->Execute(absl::MakeSpan(inputs2), {});
EXPECT_THAT(status, StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST_F(IfrtServingExecutableTest, Spmd) {
int64_t program_id = 111111;
EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id))).Times(0);
auto executable = helper_->MakeExecutable(
program_id, GetMlirModulePath("spmd_executable.mlir"));
auto x = AsTensor<int32_t>({1, 2, 3, 4, 5, 6, 7, 8},
tensorflow::TensorShape({4, 2}));
auto y = AsTensor<int32_t>({11, 12, 13, 14, 15, 16, 17, 18},
tensorflow::TensorShape({4, 2}));
auto z = AsTensor<int32_t>({21, 22, 23, 24, 25, 26, 27, 28},
tensorflow::TensorShape({4, 2}));
const auto expected_out = AsTensor<int32_t>({33, 36, 39, 42, 45, 48, 51, 54},
tensorflow::TensorShape({4, 2}));
std::vector<tensorflow::Tensor> inputs{x, y, z};
TF_ASSERT_OK_AND_ASSIGN(auto result,
executable->Execute(absl::MakeSpan(inputs), {}));
EXPECT_THAT(result, ElementsAre(TensorEq(expected_out)));
}
TEST_F(IfrtServingExecutableTest, SpmdTwoReturns) {
int64_t program_id = 111111;
EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id))).Times(0);
auto executable = helper_->MakeExecutable(
program_id, GetMlirModulePath("spmd_executable_two_returns.mlir"));
auto x = AsTensor<int32_t>({1, 2, 3, 4, 5, 6, 7, 8},
tensorflow::TensorShape({4, 2}));
auto y = AsTensor<int32_t>({11, 12, 13, 14, 15, 16, 17, 18},
tensorflow::TensorShape({4, 2}));
auto z = AsTensor<int32_t>({21, 22, 23, 24, 25, 26, 27, 28},
tensorflow::TensorShape({4, 2}));
const auto expected_out0 = AsTensor<int32_t>({33, 36, 39, 42, 45, 48, 51, 54},
tensorflow::TensorShape({4, 2}));
const auto expected_out1 = AsTensor<int32_t>({20, 20, 20, 20, 20, 20, 20, 20},
tensorflow::TensorShape({4, 2}));
std::vector<tensorflow::Tensor> inputs{x, y, z};
TF_ASSERT_OK_AND_ASSIGN(auto result,
executable->Execute(absl::MakeSpan(inputs), {}));
EXPECT_THAT(result,
ElementsAre(TensorEq(expected_out0), TensorEq(expected_out1)));
}
TEST_F(IfrtServingExecutableTest, NoReturn) {
int64_t program_id = 111111;
EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id)))
.Times(1)
.WillRepeatedly(
[](::testing::Unused) { return tsl::DeviceReservation(0, nullptr); });
auto executable = helper_->MakeExecutable(
program_id, GetMlirModulePath("executable_no_return.mlir"));
auto x = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({1, 3}));
auto y = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({3, 1}));
std::vector<tensorflow::Tensor> inputs{x, y};
for (int i = 0; i < helper_->num_cores(); i++) {
TF_ASSERT_OK(executable->Execute(absl::MakeSpan(inputs), {}).status());
}
TF_ASSERT_OK_AND_ASSIGN(auto result,
executable->Execute(absl::MakeSpan(inputs), {}));
ASSERT_EQ(result.size(), 0);
}
TEST_P(VariableInputTest, InterleaveVariable) {
tsl::test_util::MockServingDeviceSelector device_selector;
test_utils::IfrtServingExecutableTestHelper helper(&device_selector);
int64_t program_id = 111111;
EXPECT_CALL(device_selector, ReserveDevice(absl::StrCat(program_id)))
.Times(1)
.WillRepeatedly(
[](::testing::Unused) { return tsl::DeviceReservation(0, nullptr); });
auto executable = helper.MakeExecutable(
program_id, GetMlirModulePath("executable_long_inputs.mlir"));
IfrtRestoreTensorRegistry* ifrt_restore_tensor_registry =
helper.ifrt_restore_tensor_registry();
std::vector<tensorflow::Tensor> inputs;
std::vector<int> loaded_variable_indices;
for (int i = 0; i < GetParam().in_tensors.size(); i++) {
if (GetParam().is_variable[i]) {
auto input_tensor_promise =
xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto input_tensor_future =
xla::ifrt::Future<tensorflow::Tensor>(input_tensor_promise);
IfrtRestoreTensorRegistry::RestoredTensorInfo restore_tensor_info = {
.dtype_and_shape{.dtype = GetParam().in_tensors[i].dtype(),
.shape = GetParam().in_tensors[i].shape()},
.tensor_future = input_tensor_future};
std::string variable_name = absl::StrCat("variable_", i);
ASSERT_OK(ifrt_restore_tensor_registry->TryRegister(variable_name,
restore_tensor_info));
loaded_variable_indices.push_back(i);
input_tensor_promise.Set(GetParam().in_tensors[i]);
tensorflow::Tensor key_tensor(tensorflow::DT_STRING, {});
key_tensor.scalar<tsl::tstring>()() = variable_name;
inputs.push_back(key_tensor);
} else {
inputs.push_back(GetParam().in_tensors[i]);
}
}
ASSERT_EQ(inputs.size(), GetParam().is_variable.size());
for (int i = 0; i < helper.num_cores(); i++) {
TF_ASSERT_OK(executable
->Execute(absl::MakeSpan(inputs),
absl::MakeSpan(loaded_variable_indices))
.status());
}
TF_ASSERT_OK_AND_ASSIGN(
auto result,
executable->Execute(absl::MakeSpan(inputs),
absl::MakeSpan(loaded_variable_indices)));
EXPECT_THAT(result,
ElementsAre(TensorEq(GetParam().expected_out_tensors[0]),
TensorEq(GetParam().expected_out_tensors[1]),
TensorEq(GetParam().expected_out_tensors[2])));
}
INSTANTIATE_TEST_SUITE_P(
VariableInputTests, VariableInputTest,
::testing::ValuesIn<VariableInputTestParam>(
{
{
.in_tensors =
{
AsTensor<int32_t>({2, 2}, TensorShape({1, 2})),
AsTensor<int32_t>({3, 3}, TensorShape({2, 1})),
AsTensor<int32_t>({4, 4}, TensorShape({1, 2})),
AsTensor<int32_t>({5, 5}, TensorShape({2, 1})),
AsTensor<int32_t>({10, 10}, TensorShape({1, 2})),
},
.is_variable = {true, true, true, true, true},
.expected_out_tensors =
{
AsTensor<int32_t>({12}, TensorShape({1, 1})),
AsTensor<int32_t>({40}, TensorShape({1, 1})),
AsTensor<int32_t>({100}, TensorShape({1, 1})),
},
},
{
.in_tensors =
{
AsTensor<int32_t>({2, 2}, TensorShape({1, 2})),
AsTensor<int32_t>({3, 3}, TensorShape({2, 1})),
AsTensor<int32_t>({4, 4}, TensorShape({1, 2})),
AsTensor<int32_t>({5, 5}, TensorShape({2, 1})),
AsTensor<int32_t>({10, 10}, TensorShape({1, 2})),
},
.is_variable = {false, false, false, false, false},
.expected_out_tensors =
{
AsTensor<int32_t>({12}, TensorShape({1, 1})),
AsTensor<int32_t>({40}, TensorShape({1, 1})),
AsTensor<int32_t>({100}, TensorShape({1, 1})),
},
},
{
.in_tensors =
{
AsTensor<int32_t>({2, 2}, TensorShape({1, 2})),
AsTensor<int32_t>({3, 3}, TensorShape({2, 1})),
AsTensor<int32_t>({4, 4}, TensorShape({1, 2})),
AsTensor<int32_t>({5, 5}, TensorShape({2, 1})),
AsTensor<int32_t>({10, 10}, TensorShape({1, 2})),
},
.is_variable = {false, false, false, true, true},
.expected_out_tensors =
{
AsTensor<int32_t>({12}, TensorShape({1, 1})),
AsTensor<int32_t>({40}, TensorShape({1, 1})),
AsTensor<int32_t>({100}, TensorShape({1, 1})),
},
},
{
.in_tensors =
{
AsTensor<int32_t>({2, 2}, TensorShape({1, 2})),
AsTensor<int32_t>({3, 3}, TensorShape({2, 1})),
AsTensor<int32_t>({4, 4}, TensorShape({1, 2})),
AsTensor<int32_t>({5, 5}, TensorShape({2, 1})),
AsTensor<int32_t>({10, 10}, TensorShape({1, 2})),
},
.is_variable = {true, true, false, false, false},
.expected_out_tensors =
{
AsTensor<int32_t>({12}, TensorShape({1, 1})),
AsTensor<int32_t>({40}, TensorShape({1, 1})),
AsTensor<int32_t>({100}, TensorShape({1, 1})),
},
},
{
.in_tensors =
{
AsTensor<int32_t>({2, 2}, TensorShape({1, 2})),
AsTensor<int32_t>({3, 3}, TensorShape({2, 1})),
AsTensor<int32_t>({4, 4}, TensorShape({1, 2})),
AsTensor<int32_t>({5, 5}, TensorShape({2, 1})),
AsTensor<int32_t>({10, 10}, TensorShape({1, 2})),
},
.is_variable = {true, false, false, true, false},
.expected_out_tensors =
{
AsTensor<int32_t>({12}, TensorShape({1, 1})),
AsTensor<int32_t>({40}, TensorShape({1, 1})),
AsTensor<int32_t>({100}, TensorShape({1, 1})),
},
},
{
.in_tensors =
{
AsTensor<int32_t>({2, 2}, TensorShape({1, 2})),
AsTensor<int32_t>({3, 3}, TensorShape({2, 1})),
AsTensor<int32_t>({4, 4}, TensorShape({1, 2})),
AsTensor<int32_t>({5, 5}, TensorShape({2, 1})),
AsTensor<int32_t>({10, 10}, TensorShape({1, 2})),
},
.is_variable = {false, true, true, false, true},
.expected_out_tensors =
{
AsTensor<int32_t>({12}, TensorShape({1, 1})),
AsTensor<int32_t>({40}, TensorShape({1, 1})),
AsTensor<int32_t>({100}, TensorShape({1, 1})),
},
},
}));
}
}
} |
1,336 | cpp | tensorflow/tensorflow | run_handler_util | tensorflow/core/tfrt/run_handler_thread_pool/run_handler_util.cc | tensorflow/core/tfrt/run_handler_thread_pool/run_handler_util_test.cc | #ifndef TENSORFLOW_CORE_FRAMEWORK_RUN_HANDLER_UTIL_H_
#define TENSORFLOW_CORE_FRAMEWORK_RUN_HANDLER_UTIL_H_
#include <cstdint>
#include <string>
#include <vector>
namespace tensorflow {
void ComputeInterOpSchedulingRanges(int num_active_requests, int num_threads,
int min_threads_per_request,
std::vector<std::uint_fast32_t>* start_vec,
std::vector<std::uint_fast32_t>* end_vec);
void ComputeInterOpStealingRanges(int num_threads, int min_threads_per_domain,
std::vector<std::uint_fast32_t>* start_vec,
std::vector<std::uint_fast32_t>* end_vec);
std::vector<int> ChooseRequestsWithExponentialDistribution(
int num_active_requests, int num_threads);
double ParamFromEnvWithDefault(const char* var_name, double default_value);
std::vector<double> ParamFromEnvWithDefault(const char* var_name,
std::vector<double> default_value);
std::vector<int> ParamFromEnvWithDefault(const char* var_name,
std::vector<int> default_value);
bool ParamFromEnvBoolWithDefault(const char* var_name, bool default_value);
}
#endif
#include "tensorflow/core/framework/run_handler_util.h"
#include <cmath>
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/str_util.h"
namespace tensorflow {
double ParamFromEnvWithDefault(const char* var_name, double default_value) {
const char* val = std::getenv(var_name);
double num;
return (val && strings::safe_strtod(val, &num)) ? num : default_value;
}
std::vector<double> ParamFromEnvWithDefault(const char* var_name,
std::vector<double> default_value) {
const char* val = std::getenv(var_name);
if (!val) {
return default_value;
}
std::vector<string> splits = str_util::Split(val, ",");
std::vector<double> result;
result.reserve(splits.size());
for (auto& split : splits) {
double num;
if (strings::safe_strtod(split, &num)) {
result.push_back(num);
} else {
LOG(ERROR) << "Wrong format for " << var_name << ". Use default value.";
return default_value;
}
}
return result;
}
std::vector<int> ParamFromEnvWithDefault(const char* var_name,
std::vector<int> default_value) {
const char* val = std::getenv(var_name);
if (!val) {
return default_value;
}
std::vector<string> splits = str_util::Split(val, ",");
std::vector<int> result;
result.reserve(splits.size());
for (auto& split : splits) {
int num;
if (strings::safe_strto32(split, &num)) {
result.push_back(num);
} else {
LOG(ERROR) << "Wrong format for " << var_name << ". Use default value.";
return default_value;
}
}
return result;
}
bool ParamFromEnvBoolWithDefault(const char* var_name, bool default_value) {
const char* val = std::getenv(var_name);
return (val) ? str_util::Lowercase(val) == "true" : default_value;
}
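// Partitions [0, num_threads) into per-request ranges whose widths decay
// linearly with request index, so requests earlier in the list get wider,
// possibly overlapping ranges. For example, num_active_requests=2,
// num_threads=10, min_threads_per_request=1 yields [0, 7) and [6, 10).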
void ComputeInterOpSchedulingRanges(int num_active_requests, int num_threads,
int min_threads_per_request,
std::vector<std::uint_fast32_t>* start_vec,
std::vector<std::uint_fast32_t>* end_vec) {
float total_weight = 0.5f * num_active_requests * (num_active_requests + 1);
float demand_factor = static_cast<float>(num_threads) / total_weight;
float last_cumulative_weight = 0.0;
min_threads_per_request = std::max(1, min_threads_per_request);
for (int i = 0; i != num_active_requests; i++) {
float cumulative_weight =
static_cast<float>(i + 1) *
(num_active_requests - static_cast<float>(i) * 0.5f);
float weight = cumulative_weight - last_cumulative_weight;
int demand = std::max(
min_threads_per_request,
static_cast<int>(std::ceil(weight * demand_factor - 0.00001f)));
int start = last_cumulative_weight * demand_factor;
int end = std::min(num_threads, start + demand);
start = std::max(0, std::min(start, end - demand));
start_vec->at(i) = start;
end_vec->at(i) = end;
last_cumulative_weight = cumulative_weight;
}
}
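// Assigns each thread a stealing domain of min_threads_per_domain threads;
// the last domain is right-aligned so it may overlap the previous one. E.g.
// num_threads=9, min_threads_per_domain=6 gives threads 0-5 the range [0, 6)
// and threads 6-8 the range [3, 9).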
void ComputeInterOpStealingRanges(int num_threads, int min_threads_per_domain,
std::vector<std::uint_fast32_t>* start_vec,
std::vector<std::uint_fast32_t>* end_vec) {
int steal_domain_size = std::min(min_threads_per_domain, num_threads);
unsigned steal_start = 0, steal_end = steal_domain_size;
for (int i = 0; i < num_threads; ++i) {
if (i >= steal_end) {
if (steal_end + steal_domain_size < num_threads) {
steal_start = steal_end;
steal_end += steal_domain_size;
} else {
steal_end = num_threads;
steal_start = steal_end - steal_domain_size;
}
}
start_vec->at(i) = steal_start;
end_vec->at(i) = steal_end;
}
}
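// Maps each thread to a request index: every request first gets an even
// share (bounded by the *_EVEN_THREADS env knobs), and the remaining threads
// are dealt out geometrically, each request taking a 1/kPowerBase-decaying
// share of what is left. E.g. with the defaults, 3 active requests and 10
// threads produce {0,0,0,0,0,1,1,1,2,2}.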
std::vector<int> ChooseRequestsWithExponentialDistribution(
int num_active_requests, int num_threads) {
static const double kCapacityFractionForEvenDistribution =
ParamFromEnvWithDefault("TF_RUN_HANDLER_EXP_DIST_EVEN_FRACTION", 0.5);
static const double kPowerBase =
ParamFromEnvWithDefault("TF_RUN_HANDLER_EXP_DIST_POWER_BASE", 2.0);
static const int kMinEvenThreadsFromEnv = static_cast<int>(
ParamFromEnvWithDefault("TF_RUN_HANDLER_EXP_DIST_MIN_EVEN_THREADS", 1));
static const int kMaxEvenThreadsFromEnv = static_cast<int>(
ParamFromEnvWithDefault("TF_RUN_HANDLER_EXP_DIST_MAX_EVEN_THREADS", 3));
std::vector<int> request_idx_list;
request_idx_list.resize(num_threads);
int min_threads_per_request =
num_threads * kCapacityFractionForEvenDistribution / num_active_requests;
min_threads_per_request =
std::max(kMinEvenThreadsFromEnv, min_threads_per_request);
min_threads_per_request =
std::min(kMaxEvenThreadsFromEnv, min_threads_per_request);
int num_remaining_threads =
std::max(0, num_threads - num_active_requests * min_threads_per_request);
int request_idx = -1;
int num_threads_next_request = 0;
for (int tid = 0; tid < num_threads; ++tid) {
if (num_threads_next_request <= 0) {
request_idx = std::min(num_active_requests - 1, request_idx + 1);
int num_extra_threads_next_request =
std::ceil(num_remaining_threads * (kPowerBase - 1.0) / kPowerBase);
num_remaining_threads -= num_extra_threads_next_request;
num_threads_next_request =
num_extra_threads_next_request + min_threads_per_request;
}
num_threads_next_request--;
request_idx_list[tid] = request_idx;
}
return request_idx_list;
}
} | #include "tensorflow/core/framework/run_handler_util.h"
#include <vector>
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
void VerifySchedulingRanges(int num_active_requests, int num_threads,
int min_threads_per_request,
bool print_stats = false) {
if (print_stats) {
LOG(INFO) << "Test case# num_active_requests: " << num_active_requests
<< " num_threads: " << num_threads
<< " min_threads: " << min_threads_per_request;
}
std::vector<std::uint_fast32_t> start(num_active_requests);
std::vector<std::uint_fast32_t> end(num_active_requests);
ComputeInterOpSchedulingRanges(num_active_requests, num_threads,
min_threads_per_request, &start, &end);
string range_str = "";
for (int i = 0; i < num_active_requests; ++i) {
if (i > 0) range_str += " ";
range_str += strings::StrCat("[", start[i], ", ", end[i], ")");
ASSERT_GE(start[i], 0) << range_str;
ASSERT_LE(end[i], num_threads) << range_str;
if (i > 0) {
ASSERT_GE(end[i - 1] - start[i - 1], end[i] - start[i]) << range_str;
ASSERT_GE(end[i - 1], start[i]) << range_str;
}
ASSERT_GE((end[i] - start[i]), min_threads_per_request) << range_str;
float entry_weight = num_active_requests - i;
float total_weight = 0.5f * num_active_requests * (num_active_requests + 1);
float thread_demand = (entry_weight * num_threads) / total_weight;
if (thread_demand > min_threads_per_request) {
ASSERT_NEAR(end[i] - start[i], thread_demand, 1.0)
<< "Ranges: " << range_str << " thread_demand: " << thread_demand
<< " i: " << i;
}
}
ASSERT_EQ(end[num_active_requests - 1], num_threads);
ASSERT_EQ(start[0], 0);
if (print_stats) {
LOG(INFO) << "Assigned ranges: " << range_str;
}
}
TEST(RunHandlerUtilTest, TestComputeInterOpSchedulingRanges) {
const int kMinThreadsPerRequestBound = 12;
const int kMaxActiveRequests = 128;
const int kMaxThreads = 128;
for (int min_threads_per_request = 1;
min_threads_per_request <= kMinThreadsPerRequestBound;
++min_threads_per_request) {
for (int num_active_requests = 1; num_active_requests <= kMaxActiveRequests;
++num_active_requests) {
for (int num_threads = min_threads_per_request;
num_threads <= kMaxThreads; ++num_threads) {
VerifySchedulingRanges(num_active_requests, num_threads,
min_threads_per_request);
}
}
}
}
TEST(RunHandlerUtilTest, TestComputeInterOpStealingRanges) {
int num_inter_op_threads = 9;
std::vector<std::uint_fast32_t> start_vec(num_inter_op_threads);
std::vector<std::uint_fast32_t> end_vec(num_inter_op_threads);
ComputeInterOpStealingRanges(num_inter_op_threads, 6, &start_vec, &end_vec);
int stealing_ranges[2][2] = {{0, 6}, {3, 9}};
for (int i = 0; i < num_inter_op_threads; ++i) {
int expected_start = stealing_ranges[i / 6][0];
int expected_end = stealing_ranges[i / 6][1];
string message =
strings::StrCat("Stealing range of thread ", i, " should be [",
expected_start, ", ", expected_end, "]");
ASSERT_EQ(start_vec[i], expected_start) << message;
ASSERT_EQ(end_vec[i], expected_end) << message;
}
}
TEST(RunHandlerUtilTest, TestExponentialRequestDistribution) {
int num_active_requests = 3;
int num_threads = 10;
std::vector<int> actual_distribution =
ChooseRequestsWithExponentialDistribution(num_active_requests,
num_threads);
std::vector<int> expected_distribution{0, 0, 0, 0, 0, 1, 1, 1, 2, 2};
ASSERT_EQ(actual_distribution, expected_distribution);
}
TEST(RunHandlerUtilTest, TestParamFromEnvWithDefault) {
std::vector<double> result = ParamFromEnvWithDefault(
"RUN_HANDLER_TEST_ENV", std::vector<double>{0, 0, 0});
EXPECT_EQ(result.size(), 3);
EXPECT_EQ(result[0], 0);
EXPECT_EQ(result[1], 0);
EXPECT_EQ(result[2], 0);
std::vector<int> result2 = ParamFromEnvWithDefault("RUN_HANDLER_TEST_ENV",
std::vector<int>{0, 0, 0});
EXPECT_EQ(result2.size(), 3);
EXPECT_EQ(result2[0], 0);
EXPECT_EQ(result2[1], 0);
EXPECT_EQ(result2[2], 0);
bool result3 =
ParamFromEnvBoolWithDefault("RUN_HANDLER_TEST_ENV_BOOL", false);
EXPECT_EQ(result3, false);
EXPECT_EQ(setenv("RUN_HANDLER_TEST_ENV", "1,2,3", true), 0);
result = ParamFromEnvWithDefault("RUN_HANDLER_TEST_ENV",
std::vector<double>{0, 0, 0});
EXPECT_EQ(result.size(), 3);
EXPECT_EQ(result[0], 1);
EXPECT_EQ(result[1], 2);
EXPECT_EQ(result[2], 3);
result2 = ParamFromEnvWithDefault("RUN_HANDLER_TEST_ENV",
std::vector<int>{0, 0, 0});
EXPECT_EQ(result.size(), 3);
EXPECT_EQ(result2[0], 1);
EXPECT_EQ(result2[1], 2);
EXPECT_EQ(result2[2], 3);
EXPECT_EQ(setenv("RUN_HANDLER_TEST_ENV_BOOL", "true", true), 0);
result3 = ParamFromEnvBoolWithDefault("RUN_HANDLER_TEST_ENV_BOOL", false);
EXPECT_EQ(result3, true);
}
}
} |
1,337 | cpp | tensorflow/tensorflow | run_handler_concurrent_work_queue | tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue.cc | tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue_test.cc | #ifndef TENSORFLOW_CORE_TFRT_RUN_HANDLER_THREAD_POOL_RUN_HANDLER_CONCURRENT_WORK_QUEUE_H_
#define TENSORFLOW_CORE_TFRT_RUN_HANDLER_THREAD_POOL_RUN_HANDLER_CONCURRENT_WORK_QUEUE_H_
#include <atomic>
#include <memory>
#include <optional>
#include <ostream>
#include <string>
#include <vector>
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler.h"
#include "tensorflow/core/tfrt/runtime/work_queue_interface.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/support/thread_environment.h"
#include "third_party/concurrent_work_queue/lib/blocking_work_queue.h"
#include "third_party/concurrent_work_queue/lib/non_blocking_work_queue.h"
namespace tfrt {
namespace tf {
class RunHandlerThreadWorkQueue
: public tensorflow::tfrt_stub::WorkQueueInterface {
public:
struct Options {
int num_main_threads;
int num_complementary_threads;
int64_t init_timeout_ms;
int max_concurrent_handler = 128;
int num_sub_thread_pool = 1;
std::vector<int> num_threads_in_sub_thread_pool = {1};
std::vector<double> sub_thread_request_percentage = {1.0};
int non_blocking_threads_sleep_time_micro_sec = 1000;
int blocking_threads_max_sleep_time_micro_sec = 1000;
bool use_adaptive_waiting_time = true;
bool wait_if_no_active_request = true;
bool enable_wake_up = true;
};
explicit RunHandlerThreadWorkQueue(const Options& options);
~RunHandlerThreadWorkQueue() override = default;
std::string name() const override {
return tensorflow::strings::StrCat(
"RunHandlerThreadWorkQueue C++ work queue (", options_.num_main_threads,
" main threads, ", options_.num_complementary_threads,
" complementary threads)");
}
absl::StatusOr<std::unique_ptr<tensorflow::tfrt_stub::WorkQueueInterface>>
InitializeRequest(int64_t request_id) const override;
int GetParallelismLevel() const override {
return options_.num_main_threads + options_.num_complementary_threads;
}
void AddTask(TaskFunction work) override;
std::optional<TaskFunction> AddBlockingTask(TaskFunction work,
bool allow_queuing) override;
void Quiesce() override;
void Await(ArrayRef<RCReference<AsyncValue>> values) override;
bool IsInWorkerThread() const override;
private:
Options options_;
std::unique_ptr<RunHandlerPool> handler_pool_;
static std::atomic_int_fast64_t step_id_counter_;
std::unique_ptr<::tfrt::internal::QuiescingState> quiescing_state_;
::tfrt::internal::NonBlockingWorkQueue<ThreadingEnvironment>
non_blocking_work_queue_;
::tfrt::internal::BlockingWorkQueue<ThreadingEnvironment>
blocking_work_queue_;
};
std::ostream& operator<<(std::ostream& strm,
const RunHandlerThreadWorkQueue::Options& options);
}
}
#endif
#include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue.h"
#include <memory>
#include <optional>
#include <ostream>
#include <utility>
#include "absl/strings/str_join.h"
#include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler.h"
#include "tfrt/host_context/async_dispatch.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/host_context/execution_context.h"
namespace tfrt {
namespace tf {
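// The constructor checks that the per-sub-thread-pool option vectors match
// num_sub_thread_pool and forwards all tuning knobs to the underlying
// RunHandlerPool. The single-threaded non-blocking and blocking work queues
// back the queue-level AddTask/AddBlockingTask entry points.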
RunHandlerThreadWorkQueue::RunHandlerThreadWorkQueue(const Options& options)
: options_(options),
quiescing_state_(std::make_unique<::tfrt::internal::QuiescingState>()),
non_blocking_work_queue_(quiescing_state_.get(),
1),
blocking_work_queue_(quiescing_state_.get(),
1) {
CHECK(options.num_threads_in_sub_thread_pool.size() ==
options.num_sub_thread_pool);
CHECK(options.sub_thread_request_percentage.size() ==
options.num_sub_thread_pool);
RunHandlerPool::Options pool_options;
pool_options.num_inter_op_threads = options.num_main_threads;
pool_options.num_intra_op_threads = options.num_complementary_threads;
pool_options.max_concurrent_handler = options.max_concurrent_handler;
pool_options.blocking_threads_max_sleep_time_micro_sec =
options.blocking_threads_max_sleep_time_micro_sec;
pool_options.non_blocking_threads_sleep_time_micro_sec =
options.non_blocking_threads_sleep_time_micro_sec;
pool_options.num_sub_thread_pool = options.num_sub_thread_pool;
pool_options.num_threads_in_sub_thread_pool =
options.num_threads_in_sub_thread_pool;
pool_options.sub_thread_request_percentage =
options.sub_thread_request_percentage;
pool_options.enable_wake_up = options.enable_wake_up;
pool_options.wait_if_no_active_request = options.wait_if_no_active_request;
pool_options.use_adaptive_waiting_time = options.use_adaptive_waiting_time;
handler_pool_ = std::make_unique<RunHandlerPool>(pool_options);
}
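// Waits up to init_timeout_ms for a free RunHandler; on success the returned
// work queue dispatches this request's work through that handler, otherwise
// an Internal error is returned.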
absl::StatusOr<std::unique_ptr<tensorflow::tfrt_stub::WorkQueueInterface>>
RunHandlerThreadWorkQueue::InitializeRequest(int64_t request_id) const {
RunHandlerOptions options;
std::unique_ptr<RunHandler> handler =
handler_pool_->Get(request_id, options_.init_timeout_ms, options);
if (!handler) {
return tensorflow::errors::Internal(absl::StrCat(
"Could not obtain RunHandler for request after waiting for ",
options_.init_timeout_ms, " ms."));
}
return {std::make_unique<RunHandlerWorkQueue>(std::move(handler))};
}
void RunHandlerThreadWorkQueue::AddTask(TaskFunction work) {
non_blocking_work_queue_.AddTask(std::move(work));
}
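// With allow_queuing the task may be added to the blocking queue; otherwise
// it is handed to the blocking work queue to run without queuing. Per the
// TFRT work-queue convention, a returned TaskFunction is work the queue
// could not accept and that the caller must run itself.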
std::optional<TaskFunction> RunHandlerThreadWorkQueue::AddBlockingTask(
TaskFunction work, bool allow_queuing) {
if (allow_queuing) {
return blocking_work_queue_.EnqueueBlockingTask(std::move(work));
} else {
return blocking_work_queue_.RunBlockingTask(std::move(work));
}
}
void RunHandlerThreadWorkQueue::Quiesce() {
handler_pool_->Quiesce();
non_blocking_work_queue_.Quiesce();
blocking_work_queue_.Quiesce();
}
void RunHandlerThreadWorkQueue::Await(
ArrayRef<RCReference<AsyncValue>> values) {
tfrt::Await(values);
}
bool RunHandlerThreadWorkQueue::IsInWorkerThread() const {
return true;
}
std::ostream& operator<<(std::ostream& strm,
const RunHandlerThreadWorkQueue::Options& options) {
return strm << "{"
<< "num_main_threads = " << options.num_main_threads
<< ", num_complementary_threads = "
<< options.num_complementary_threads
<< ", init_timeout_ms = " << options.init_timeout_ms
<< ", max_concurrent_handler = " << options.max_concurrent_handler
<< ", num_sub_thread_pool = " << options.num_sub_thread_pool
<< ", num_threads_in_sub_thread_pool = ["
<< absl::StrJoin(options.num_threads_in_sub_thread_pool, ",")
<< "]"
<< ", sub_thread_request_percentage = ["
<< absl::StrJoin(options.sub_thread_request_percentage, ",")
<< "]"
<< ", non_blocking_threads_sleep_time_micro_sec = "
<< options.non_blocking_threads_sleep_time_micro_sec
<< ", blocking_threads_max_sleep_time_micro_sec = "
<< options.blocking_threads_max_sleep_time_micro_sec
<< ", use_adaptive_waiting_time = "
<< options.use_adaptive_waiting_time
<< ", wait_if_no_active_request = "
<< options.wait_if_no_active_request
<< ", enable_wake_up = " << options.enable_wake_up << "}";
}
}
} | #include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue.h"
#include <cstdio>
#include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/diagnostic.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/host_allocator.h"
#include "tfrt/host_context/host_context.h"
#include "tfrt/host_context/task_function.h"
#include "tfrt/support/mutex.h"
namespace tfrt {
namespace tf {
namespace {
const int kNumMainThreads = 1;
const int kNumComplementaryThreads = 1;
class RunHandlerThreadWorkQueueTest : public ::testing::Test {
protected:
void SetUp() override {
RunHandlerThreadWorkQueue::Options options;
options.num_complementary_threads = kNumComplementaryThreads;
options.num_main_threads = kNumMainThreads;
options.init_timeout_ms = 100;
pool_ = std::make_unique<RunHandlerThreadWorkQueue>(options);
auto decoded_diagnostic_handler = [&](const DecodedDiagnostic& diag) {};
std::unique_ptr<ConcurrentWorkQueue> work_queue =
CreateSingleThreadedWorkQueue();
std::unique_ptr<HostAllocator> host_allocator = CreateMallocAllocator();
host_ = std::make_unique<HostContext>(decoded_diagnostic_handler,
std::move(host_allocator),
std::move(work_queue));
RequestContextBuilder req_ctx_builder{host_.get(),
nullptr};
auto queue = pool_->InitializeRequest(100);
TF_CHECK_OK(queue.status());
queue_ = std::move(*queue);
auto req_ctx = std::move(req_ctx_builder).build();
ASSERT_TRUE(static_cast<bool>(req_ctx));
exec_ctx_ = std::make_unique<ExecutionContext>(std::move(*req_ctx));
}
std::unique_ptr<RunHandlerThreadWorkQueue> pool_;
std::unique_ptr<tensorflow::tfrt_stub::WorkQueueInterface> queue_;
std::unique_ptr<HostContext> host_;
std::unique_ptr<ExecutionContext> exec_ctx_;
};
TEST_F(RunHandlerThreadWorkQueueTest, RunningBlockingTask) {
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
ASSERT_FALSE(pool_->AddBlockingTask(TaskFunction([&n, &m] {
tensorflow::mutex_lock lock(m);
++n;
}),
true));
}
pool_->Quiesce();
EXPECT_EQ(n, 10);
}
TEST_F(RunHandlerThreadWorkQueueTest, RunningBlockingTaskNoExecCtx) {
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
pool_->AddBlockingTask(TaskFunction([&n, &m] {
tensorflow::mutex_lock lock(m);
++n;
}),
true);
}
pool_->Quiesce();
EXPECT_EQ(n, 10);
}
TEST_F(RunHandlerThreadWorkQueueTest, RunningBlockingTaskNoQueueing) {
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
ASSERT_FALSE(pool_->AddBlockingTask(TaskFunction([&n, &m] {
tensorflow::mutex_lock lock(m);
++n;
}),
false));
}
pool_->Quiesce();
EXPECT_EQ(n, 10);
}
TEST_F(RunHandlerThreadWorkQueueTest, RunningNonBlockingTask) {
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
queue_->AddTask(TaskFunction([&n, &m] {
tensorflow::mutex_lock lock(m);
++n;
}));
}
pool_->Quiesce();
EXPECT_EQ(n, 10);
}
TEST_F(RunHandlerThreadWorkQueueTest, RunningNonBlockingTaskWithNoExecCtx) {
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
pool_->AddTask(TaskFunction([&n, &m] {
tensorflow::mutex_lock lock(m);
++n;
}));
}
pool_->Quiesce();
EXPECT_EQ(n, 10);
}
TEST_F(RunHandlerThreadWorkQueueTest, RunningMixedTask) {
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
queue_->AddTask(TaskFunction([&n, &m] {
tensorflow::mutex_lock lock(m);
++n;
}));
ASSERT_FALSE(pool_->AddBlockingTask(TaskFunction([&n, &m] {
tensorflow::mutex_lock lock(m);
++n;
}),
true));
}
pool_->Quiesce();
EXPECT_EQ(n, 20);
}
TEST_F(RunHandlerThreadWorkQueueTest, NameReturnsValidString) {
EXPECT_TRUE(absl::StrContains(pool_->name(), "RunHandlerThreadWorkQueue"));
}
TEST_F(RunHandlerThreadWorkQueueTest, GetParallelismLevelOk) {
EXPECT_EQ(pool_->GetParallelismLevel(),
kNumComplementaryThreads + kNumMainThreads);
}
TEST_F(RunHandlerThreadWorkQueueTest, IsWorkerThreadOk) {
EXPECT_TRUE(pool_->IsInWorkerThread());
}
TEST_F(RunHandlerThreadWorkQueueTest, NoHandlerReturnsError) {
RunHandlerThreadWorkQueue::Options options;
options.num_complementary_threads = 0;
options.num_main_threads = 0;
options.init_timeout_ms = 1;
options.max_concurrent_handler = 0;
auto queue = std::make_unique<RunHandlerThreadWorkQueue>(options);
tfrt::RequestContextBuilder ctx_builder(nullptr, nullptr);
EXPECT_THAT(
queue->InitializeRequest(100),
tensorflow::testing::StatusIs(
tensorflow::error::INTERNAL,
"Could not obtain RunHandler for request after waiting for 1 ms."));
}
}
}
} |
1,338 | cpp | tensorflow/tensorflow | run_handler | tensorflow/core/tfrt/run_handler_thread_pool/run_handler.cc | tensorflow/core/tfrt/run_handler_thread_pool/run_handler_test.cc | #ifndef TENSORFLOW_CORE_FRAMEWORK_RUN_HANDLER_H_
#define TENSORFLOW_CORE_FRAMEWORK_RUN_HANDLER_H_
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/histogram/histogram.h"
#include "tensorflow/core/platform/context.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace Eigen {
struct ThreadPoolDevice;
}
namespace tensorflow {
class RunHandler;
class RunHandlerPool {
public:
explicit RunHandlerPool(int num_inter_op_threads);
RunHandlerPool(int num_inter_op_threads, int num_intra_op_threads);
~RunHandlerPool();
std::unique_ptr<RunHandler> Get(
int64_t step_id = 0, int64_t timeout_in_ms = 0,
const RunOptions::Experimental::RunHandlerPoolOptions& options =
RunOptions::Experimental::RunHandlerPoolOptions());
std::vector<int64_t> GetActiveHandlerPrioritiesForTesting() const;
private:
class Impl;
friend class RunHandler;
std::unique_ptr<Impl> impl_;
};
class RunHandler {
public:
void ScheduleInterOpClosure(std::function<void()> fn);
thread::ThreadPoolInterface* AsIntraThreadPoolInterface();
~RunHandler();
private:
class Impl;
friend class RunHandlerPool::Impl;
explicit RunHandler(Impl* impl);
Impl* impl_;
};
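// Typical usage (illustrative; the thread count and step id are assumptions,
// not values taken from this file):
//
//   RunHandlerPool pool(/*num_inter_op_threads=*/16);
//   std::unique_ptr<RunHandler> handler = pool.Get(/*step_id=*/1);
//   handler->ScheduleInterOpClosure([] { /* inter-op work */ });
//   // Intra-op work can then be routed through
//   // handler->AsIntraThreadPoolInterface().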
namespace internal {
class RunHandlerEnvironment {
typedef Thread EnvThread;
struct TaskImpl {
std::function<void()> f;
Context context;
uint64 trace_id;
};
Env* const env_;
const ThreadOptions thread_options_;
const string name_;
public:
struct Task {
std::unique_ptr<TaskImpl> f;
};
RunHandlerEnvironment(Env* env, const ThreadOptions& thread_options,
const string& name);
EnvThread* CreateThread(std::function<void()> f,
const std::string& thread_name);
Task CreateTask(std::function<void()> f);
void ExecuteTask(const Task& t);
};
typedef typename RunHandlerEnvironment::Task Task;
typedef Eigen::RunQueue<Task, 1024> Queue;
struct Waiter {
Waiter() {
next = this;
prev = this;
}
condition_variable cv;
mutex mu;
Waiter* next;
Waiter* prev;
};
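// Note: Waiter forms an intrusive circular doubly-linked list. A waiter whose
// next/prev point back to itself (the freshly constructed state) is not
// enqueued; the list head is a dummy Waiter owned by the queue.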
class ThreadWorkSource {
public:
ThreadWorkSource();
~ThreadWorkSource();
Task EnqueueTask(Task t, bool is_blocking);
Task PopBlockingTask();
Task PopNonBlockingTask(int start_index, bool search_from_all_queue);
void WaitForWork(int max_sleep_micros);
int TaskQueueSize(bool is_blocking);
int64_t GetTracemeId();
void SetTracemeId(int64_t value);
void SetWaiter(uint64 version, Waiter* waiter, mutex* mutex);
int64_t GetInflightTaskCount(bool is_blocking);
void IncrementInflightTaskCount(bool is_blocking);
void DecrementInflightTaskCount(bool is_blocking);
unsigned NonBlockingWorkShardingFactor();
std::string ToString();
private:
struct NonBlockingQueue {
mutex queue_op_mu;
char pad[128];
Queue queue;
};
int32 non_blocking_work_sharding_factor_;
Eigen::MaxSizeVector<NonBlockingQueue*> non_blocking_work_queues_;
std::atomic<int64_t> blocking_inflight_;
std::atomic<int64_t> non_blocking_inflight_;
Queue blocking_work_queue_;
mutex blocking_queue_op_mu_;
char pad_[128];
mutex waiters_mu_;
Waiter queue_waiters_ TF_GUARDED_BY(waiters_mu_);
std::atomic<int64_t> traceme_id_;
mutex run_handler_waiter_mu_;
uint64 version_ TF_GUARDED_BY(run_handler_waiter_mu_);
mutex* sub_thread_pool_waiter_mu_ TF_GUARDED_BY(run_handler_waiter_mu_);
Waiter* sub_thread_pool_waiter_ TF_GUARDED_BY(run_handler_waiter_mu_);
};
class RunHandlerThreadPool {
public:
struct PerThread {
constexpr PerThread() : pool(nullptr), thread_id(-1) {}
RunHandlerThreadPool* pool;
int thread_id;
};
RunHandlerThreadPool(int num_blocking_threads, int num_non_blocking_threads,
Env* env, const ThreadOptions& thread_options,
const string& name,
Eigen::MaxSizeVector<mutex>* waiters_mu,
Eigen::MaxSizeVector<Waiter>* queue_waiters);
~RunHandlerThreadPool();
void Start();
void StartOneThreadForTesting();
void AddWorkToQueue(ThreadWorkSource* tws, bool is_blocking,
std::function<void()> fn);
void SetThreadWorkSources(
int tid, int start_request_idx, uint64 version,
const Eigen::MaxSizeVector<ThreadWorkSource*>& thread_work_sources);
PerThread* GetPerThread();
int CurrentThreadId() const;
int NumThreads() const;
int NumBlockingThreads() const;
int NumNonBlockingThreads() const;
void WorkerLoop(int thread_id, bool may_steal_blocking_work);
Task FindTask(
int searching_range_start, int searching_range_end, int thread_id,
int sub_thread_pool_id, int max_blocking_inflight,
bool may_steal_blocking_work,
const Eigen::MaxSizeVector<ThreadWorkSource*>& thread_work_sources,
bool* task_from_blocking_queue, ThreadWorkSource** tws);
void WaitForWork(bool is_blocking, int thread_id,
int32_t max_blocking_inflight);
void WaitForWorkInSubThreadPool(bool is_blocking, int sub_thread_pool_id);
private:
struct ThreadData {
ThreadData();
mutex mu;
uint64 new_version;
condition_variable sources_not_empty;
std::unique_ptr<Thread> thread;
int current_index;
std::unique_ptr<Eigen::MaxSizeVector<ThreadWorkSource*>>
new_thread_work_sources TF_GUARDED_BY(mu);
uint64 current_version;
std::unique_ptr<Eigen::MaxSizeVector<ThreadWorkSource*>>
current_thread_work_sources;
int sub_thread_pool_id;
};
const int num_threads_;
const int num_blocking_threads_;
const int num_non_blocking_threads_;
Eigen::MaxSizeVector<ThreadData> thread_data_;
internal::RunHandlerEnvironment env_;
std::atomic<bool> cancelled_;
string name_;
Eigen::MaxSizeVector<mutex>* waiters_mu_;
Eigen::MaxSizeVector<Waiter>* queue_waiters_;
bool use_sub_thread_pool_;
std::vector<int> num_threads_in_sub_thread_pool_;
std::vector<double> sub_thread_pool_start_request_percentage_;
std::vector<double> sub_thread_pool_end_request_percentage_;
};
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/framework/run_handler.h"
#include <algorithm>
#include <cmath>
#include <list>
#include <memory>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/run_handler_util.h"
#include "tensorflow/core/lib/core/threadpool_interface.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/context.h"
#include "tensorflow/core/platform/denormal.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/numa.h"
#include "tensorflow/core/platform/setround.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tsl/platform/tracing.h"
namespace tensorflow {
namespace {
static constexpr int32_t kMaxConcurrentHandlers = 128;
typedef typename internal::RunHandlerEnvironment::Task Task;
typedef Eigen::RunQueue<Task, 1024> Queue;
}
namespace internal {
RunHandlerEnvironment::RunHandlerEnvironment(
Env* env, const ThreadOptions& thread_options, const string& name)
: env_(env), thread_options_(thread_options), name_(name) {}
RunHandlerEnvironment::EnvThread* RunHandlerEnvironment::CreateThread(
std::function<void()> f, const std::string& thread_name) {
return env_->StartThread(thread_options_, thread_name, [=]() {
port::ScopedFlushDenormal flush;
port::ScopedSetRound round(FE_TONEAREST);
if (thread_options_.numa_node != port::kNUMANoAffinity) {
port::NUMASetThreadNodeAffinity(thread_options_.numa_node);
}
f();
});
}
RunHandlerEnvironment::Task RunHandlerEnvironment::CreateTask(
std::function<void()> f) {
uint64 id = 0;
if (tsl::tracing::EventCollector::IsEnabled()) {
id = tsl::tracing::GetUniqueArg();
tsl::tracing::RecordEvent(tsl::tracing::EventCategory::kScheduleClosure,
id);
}
return Task{
std::unique_ptr<TaskImpl>(new TaskImpl{
std::move(f),
Context(ContextKind::kThread),
id,
}),
};
}
void RunHandlerEnvironment::ExecuteTask(const Task& t) {
WithContext wc(t.f->context);
tsl::tracing::ScopedRegion region(tsl::tracing::EventCategory::kRunClosure,
t.f->trace_id);
t.f->f();
}
void WaitOnWaiter(Waiter* waiter, Waiter* queue_head, mutex* mutex,
int max_sleep_micros) {
{
mutex_lock l(*mutex);
CHECK_EQ(waiter->next, waiter);
CHECK_EQ(waiter->prev, waiter);
waiter->prev = queue_head;
waiter->next = queue_head->next;
waiter->next->prev = waiter;
waiter->prev->next = waiter;
}
{
mutex_lock l(waiter->mu);
waiter->cv.wait_for(l, std::chrono::microseconds(max_sleep_micros));
}
mutex_lock l(*mutex);
if (waiter->next != waiter) {
CHECK(waiter->prev != waiter);
waiter->next->prev = waiter->prev;
waiter->prev->next = waiter->next;
waiter->next = waiter;
waiter->prev = waiter;
} else {
CHECK_EQ(waiter->prev, waiter);
}
}
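// WaitOnWaiter links the waiter into the circular list, sleeps on its
// condition variable for at most max_sleep_micros, then unlinks itself unless
// a notifier already removed it; the self-linked check distinguishes the two
// cases.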
ThreadWorkSource::ThreadWorkSource()
: non_blocking_work_sharding_factor_(
static_cast<int32>(ParamFromEnvWithDefault(
"TF_RUN_HANDLER_NUM_OF_NON_BLOCKING_QUEUES", 1))),
non_blocking_work_queues_(non_blocking_work_sharding_factor_),
blocking_inflight_(0),
non_blocking_inflight_(0),
traceme_id_(0),
version_(0),
sub_thread_pool_waiter_(nullptr) {
queue_waiters_.next = &queue_waiters_;
queue_waiters_.prev = &queue_waiters_;
for (int i = 0; i < NonBlockingWorkShardingFactor(); ++i) {
non_blocking_work_queues_.emplace_back(new NonBlockingQueue());
}
}
ThreadWorkSource::~ThreadWorkSource() {
for (int i = 0; i < non_blocking_work_queues_.size(); ++i) {
delete non_blocking_work_queues_[i];
}
}
Task ThreadWorkSource::EnqueueTask(Task t, bool is_blocking) {
mutex* mu = nullptr;
Queue* task_queue = nullptr;
thread_local int64_t closure_counter = 0;
if (!is_blocking) {
int queue_index = ++closure_counter % non_blocking_work_sharding_factor_;
task_queue = &(non_blocking_work_queues_[queue_index]->queue);
mu = &non_blocking_work_queues_[queue_index]->queue_op_mu;
} else {
task_queue = &blocking_work_queue_;
mu = &blocking_queue_op_mu_;
}
{
mutex_lock l(*mu);
t = task_queue->PushFront(std::move(t));
}
Waiter* w = nullptr;
static const bool use_sub_thread_pool =
ParamFromEnvBoolWithDefault("TF_RUN_HANDLER_USE_SUB_THREAD_POOL", false);
Waiter* waiter_queue;
mutex* waiter_queue_mu;
if (use_sub_thread_pool) {
tf_shared_lock lock(run_handler_waiter_mu_);
waiter_queue = sub_thread_pool_waiter_;
waiter_queue_mu = sub_thread_pool_waiter_mu_;
} else {
waiter_queue = &queue_waiters_;
waiter_queue_mu = &waiters_mu_;
}
{
mutex_lock l(*waiter_queue_mu);
if (waiter_queue->next != waiter_queue) {
w = waiter_queue->next;
CHECK(w->prev != w);
CHECK(w->next != w);
w->next->prev = w->prev;
w->prev->next = w->next;
w->next = w;
w->prev = w;
}
}
if (w != nullptr) {
w->cv.notify_one();
}
VLOG(3) << "Added " << (is_blocking ? "inter" : "intra") << " work from "
<< traceme_id_.load(std::memory_order_relaxed);
return t;
}
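// After pushing the task, EnqueueTask unlinks at most one waiter from the
// waiter list while holding the list mutex and notifies it after releasing
// the lock, so a sleeping worker is woken without the notifier holding the
// queue mutex.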
Task ThreadWorkSource::PopBlockingTask() {
return blocking_work_queue_.PopBack();
}
Task ThreadWorkSource::PopNonBlockingTask(int start_index,
bool search_from_all_queue) {
Task t;
unsigned sharding_factor = NonBlockingWorkShardingFactor();
for (unsigned j = 0; j < sharding_factor; ++j) {
t = non_blocking_work_queues_[(start_index + j) % sharding_factor]
->queue.PopBack();
if (t.f) {
return t;
}
if (!search_from_all_queue) {
break;
}
}
return t;
}
void ThreadWorkSource::WaitForWork(int max_sleep_micros) {
thread_local Waiter waiter;
WaitOnWaiter(&waiter, &queue_waiters_, &waiters_mu_, max_sleep_micros);
}
int ThreadWorkSource::TaskQueueSize(bool is_blocking) {
if (is_blocking) {
return blocking_work_queue_.Size();
} else {
unsigned total_size = 0;
for (int i = 0; i < non_blocking_work_sharding_factor_; ++i) {
total_size += non_blocking_work_queues_[i]->queue.Size();
}
return total_size;
}
}
int64_t ThreadWorkSource::GetTracemeId() {
return traceme_id_.load(std::memory_order_relaxed);
}
void ThreadWorkSource::SetTracemeId(int64_t value) { traceme_id_ = value; }
void ThreadWorkSource::SetWaiter(uint64 version, Waiter* waiter, mutex* mutex) {
{
tf_shared_lock lock(run_handler_waiter_mu_);
if (sub_thread_pool_waiter_ == waiter) {
return;
}
if (version_ > version) {
return;
}
}
mutex_lock l(run_handler_waiter_mu_);
sub_thread_pool_waiter_ = waiter;
sub_thread_pool_waiter_mu_ = mutex;
version_ = version;
}
int64_t ThreadWorkSource::GetInflightTaskCount(bool is_blocking) {
std::atomic<int64_t>* counter =
is_blocking ? &blocking_inflight_ : &non_blocking_inflight_;
return counter->load(std::memory_order_relaxed);
}
void ThreadWorkSource::IncrementInflightTaskCount(bool is_blocking) {
std::atomic<int64_t>* counter =
is_blocking ? &blocking_inflight_ : &non_blocking_inflight_;
counter->fetch_add(1, std::memory_order_relaxed);
}
void ThreadWorkSource::DecrementInflightTaskCount(bool is_blocking) {
std::atomic<int64_t>* counter =
is_blocking ? &blocking_inflight_ : &non_blocking_inflight_;
counter->fetch_sub(1, std::memory_order_relaxed);
}
unsigned ThreadWorkSource::NonBlockingWorkShardingFactor() {
return non_blocking_work_sharding_factor_;
}
std::string ThreadWorkSource::ToString() {
return strings::StrCat("traceme_id = ", GetTracemeId(),
", inter queue size = ", TaskQueueSize(true),
", inter inflight = ", GetInflightTaskCount(true),
", intra queue size = ", TaskQueueSize(false),
", intra inflight = ", GetInflightTaskCount(false));
}
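// Example rendering (values assumed for illustration): a source with one
// queued inter-op task would print roughly
//   traceme_id = 42, inter queue size = 1, inter inflight = 0,
//   intra queue size = 0, intra inflight = 0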
RunHandlerThreadPool::RunHandlerThreadPool(
int num_blocking_threads, int num_non_blocking_threads, Env* env,
const ThreadOptions& thread_options, const string& name,
Eigen::MaxSizeVector<mutex>* waiters_mu,
Eigen::MaxSizeVector<Waiter>* queue_waiters)
: num_threads_(num_blocking_threads + num_non_blocking_threads),
num_blocking_threads_(num_blocking_threads),
num_non_blocking_threads_(num_non_blocking_threads),
thread_data_(num_threads_),
env_(env, thread_options, name),
name_(name),
waiters_mu_(waiters_mu),
queue_waiters_(queue_waiters),
use_sub_thread_pool_(ParamFromEnvBoolWithDefault(
"TF_RUN_HANDLER_USE_SUB_THREAD_POOL", false)),
num_threads_in_sub_thread_pool_(ParamFromEnvWithDefault(
"TF_RUN_HANDLER_NUM_THREADS_IN_SUB_THREAD_POOL",
std::vector<int>({num_blocking_threads / 2,
num_blocking_threads - num_blocking_threads / 2}))),
sub_thread_pool_start_request_percentage_(ParamFromEnvWithDefault(
"TF_RUN_HANDLER_SUB_THREAD_POOL_START_REQUEST_PERCENTAGE",
std::vector<double>({0, 0.4}))),
sub_thread_pool_end_request_percentage_(ParamFromEnvWithDefault(
"TF_RUN_HANDLER_SUB_THREAD_POOL_END_REQUEST_PERCENTAGE",
std::vector<double>({0.4, 1}))) {
thread_data_.resize(num_threads_);
VLOG(1) << "Creating RunHandlerThreadPool " << name << " with "
<< num_blocking_threads_ << " blocking threads and "
<< num_non_blocking_threads_ << " non-blocking threads.";
}
RunHandlerThreadPool::~RunHandlerThreadPool() {
VLOG(1) << "Exiting RunHandlerThreadPool " << name_;
cancelled_ = true;
for (size_t i = 0; i < thread_data_.size(); ++i) {
{
mutex_lock l(thread_data_[i].mu);
thread_data_[i].sources_not_empty.notify_all();
}
thread_data_[i].thread.reset();
}
}
void RunHandlerThreadPool::Start() {
cancelled_ = false;
int num_blocking_threads = num_blocking_threads_;
for (int i = 0; i < num_threads_; i++) {
int sub_thread_pool_id = num_threads_in_sub_thread_pool_.size() - 1;
for (int j = 0; j < num_threads_in_sub_thread_pool_.size(); ++j) {
if (i < num_threads_in_sub_thread_pool_[j]) {
sub_thread_pool_id = j;
break;
}
}
thread_data_[i].sub_thread_pool_id = sub_thread_pool_id;
    const bool is_blocking_thread = (i < num_blocking_threads);
thread_data_[i].thread.reset(env_.CreateThread(
[this, is_blocking_thread, i, sub_thread_pool_id]() {
WorkerLoop(i, is_blocking_thread);
},
is_blocking_thread
? strings::StrCat(name_, "_blocking_thread_", sub_thread_pool_id)
: strings::StrCat(name_, "_non_blocking_thread")));
}
}
void RunHandlerThreadPool::StartOneThreadForTesting() {
cancelled_ = false;
thread_data_[0].sub_thread_pool_id = 0;
thread_data_[0].thread.reset(
env_.CreateThread([this]() { WorkerLoop(0, true); }, name_));
}
void RunHandlerThreadPool::AddWorkToQueue(ThreadWorkSource* tws,
bool is_blocking,
std::function<void()> fn) {
Task t = env_.CreateTask(std::move(fn));
t = tws->EnqueueTask(std::move(t), is_blocking);
if (t.f) {
VLOG(3) << "Running " << (is_blocking ? "inter" : "intra") << " work for "
<< tws->GetTracemeId();
env_.ExecuteTask(t);
}
}
void RunHandlerThreadPool::SetThreadWorkSources(
int tid, int start_request_idx, uint64 version,
const Eigen::MaxSizeVector<ThreadWorkSource*>& thread_work_sources) {
mutex_lock l(thread_data_[tid].mu);
if (version > thread_data_[tid].new_version) {
thread_data_[tid].new_version = version;
} else {
return;
}
thread_data_[tid].new_thread_work_sources->resize(0);
if (use_sub_thread_pool_) {
for (int i = 0; i < thread_work_sources.size(); ++i) {
thread_data_[tid].new_thread_work_sources->emplace_back(
thread_work_sources[i]);
}
} else {
thread_data_[tid].new_thread_work_sources->emplace_back(
thread_work_sources[start_request_idx]);
static const int num_shards =
ParamFromEnvWithDefault("TF_RUN_HANDLER_QUEUE_SHARDS", 1);
int token = tid % num_shards;
for (int i = 0; i < num_shards; ++i) {
for (int j = token; j < thread_work_sources.size(); j += num_shards) {
if (j != start_request_idx) {
thread_data_[tid].new_thread_work_sources->emplace_back(
thread_work_sources[j]);
}
}
token = (token + 1) % num_shards;
}
thread_data_[tid].sources_not_empty.notify_all();
}
}
RunHandlerThreadPool::PerThread* RunHandlerThreadPool::GetPerThread() {
thread_local RunHandlerThreadPool::PerThread per_thread_;
RunHandlerThreadPool::PerThread* pt = &per_thread_;
return pt;
}
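// GetPerThread returns a function-local thread_local slot. WorkerLoop below
// stamps it with this pool and the worker's thread id, which is how
// CurrentThreadId distinguishes pool worker threads from external callers.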
int RunHandlerThreadPool::CurrentThreadId() const {
const PerThread* pt = const_cast<RunHandlerThreadPool*>(this)->GetPerThread();
if (pt->pool == this) {
return pt->thread_id;
} else {
return -1;
}
}
int RunHandlerThreadPool::NumThreads() const { return num_threads_; }
int RunHandlerThreadPool::NumBlockingThreads() const {
return num_blocking_threads_;
}
int RunHandlerThreadPool::NumNonBlockingThreads() const {
return num_non_blocking_threads_;
}
RunHandlerThreadPool::ThreadData::ThreadData()
: new_version(0),
current_index(0),
new_thread_work_sources(
new Eigen::MaxSizeVector<ThreadWorkSource*>(static_cast<int32>(
ParamFromEnvWithDefault("TF_RUN_HANDLER_MAX_CONCURRENT_HANDLERS",
kMaxConcurrentHandlers)))),
current_version(0),
current_thread_work_sources(
new Eigen::MaxSizeVector<ThreadWorkSource*>(static_cast<int32>(
ParamFromEnvWithDefault("TF_RUN_HANDLER_MAX_CONCURRENT_HANDLERS",
kMaxConcurrentHandlers)))) {}
Task RunHandlerThreadPool::FindTask(
int searching_range_start, int searching_range_end, int thread_id,
int sub_thread_pool_id, int max_blocking_inflight,
bool may_steal_blocking_work,
const Eigen::MaxSizeVector<ThreadWorkSource*>& thread_work_sources,
bool* task_from_blocking_queue, ThreadWorkSource** tws) {
Task t;
int current_index = thread_data_[thread_id].current_index;
*task_from_blocking_queue = false;
for (int i = 0; i < searching_range_end - searching_range_start; ++i) {
if (current_index >= searching_range_end ||
current_index < searching_range_start) {
current_index = searching_range_start;
}
*tws = thread_work_sources[current_index];
++current_index;
if (may_steal_blocking_work &&
(*tws)->GetInflightTaskCount(true) < max_blocking_inflight) {
t = (*tws)->PopBlockingTask();
if (t.f) {
*task_from_blocking_queue = true;
break;
}
}
t = (*tws)->PopNonBlockingTask(thread_id, true);
if (t.f) {
break;
}
}
thread_data_[thread_id].current_index = current_index;
return t;
}
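// Worked example (state assumed for illustration): with
// searching_range_start = 3, searching_range_end = 5, and a persisted
// current_index of 7, the first iteration wraps current_index back to 3, so
// the scan visits sources 3 and then 4. Because current_index is saved per
// thread, the next call resumes after the last source served, approximating
// round-robin fairness across active requests.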
void RunHandlerThreadPool::WorkerLoop(int thread_id,
bool may_steal_blocking_work) {
PerThread* pt = GetPerThread();
pt->pool = this;
pt->thread_id = thread_id;
static constexpr int32_t kMaxBlockingInflight = 10;
while (!cancelled_) {
Task t;
ThreadWorkSource* tws = nullptr;
bool task_from_blocking_queue = true;
int sub_thread_pool_id;
{
mutex_lock l(thread_data_[thread_id].mu);
if (thread_data_[thread_id].current_version <
thread_data_[thread_id].new_version) {
thread_data_[thread_id].current_version =
thread_data_[thread_id].new_version;
thread_data_[thread_id].current_thread_work_sources.swap(
thread_data_[thread_id].new_thread_work_sources);
}
}
Eigen::MaxSizeVector<ThreadWorkSource*>* thread_work_sources =
thread_data_[thread_id].current_thread_work_sources.get();
if (use_sub_thread_pool_) {
sub_thread_pool_id = thread_data_[thread_id].sub_thread_pool_id;
int active_requests = thread_work_sources->size();
if (may_steal_blocking_work) {
int search_range_start =
active_requests *
sub_thread_pool_start_request_percentage_[sub_thread_pool_id];
int search_range_end =
active_requests *
sub_thread_pool_end_request_percentage_[sub_thread_pool_id];
search_range_end =
std::min(active_requests,
std::max(search_range_end, search_range_start + 1));
t = FindTask(search_range_start, search_range_end, thread_id,
sub_thread_pool_id, kMaxBlockingInflight,
true, *thread_work_sources,
&task_from_blocking_queue, &tws);
if (!t.f) {
t = FindTask(0, active_requests, thread_id, sub_thread_pool_id,
kMaxBlockingInflight,
true, *thread_work_sources,
&task_from_blocking_queue, &tws);
}
} else {
t = FindTask(0, active_requests, thread_id, sub_thread_pool_id,
kMaxBlockingInflight,
false, *thread_work_sources,
&task_from_blocking_queue, &tws);
}
} else {
for (int i = 0; i < thread_work_sources->size(); ++i) {
tws = (*thread_work_sources)[i];
if (may_steal_blocking_work &&
tws->GetInflightTaskCount(true) < kMaxBlockingInflight) {
t = tws->PopBlockingTask();
if (t.f) {
break;
}
}
if (i == 0) {
t = tws->PopNonBlockingTask(thread_id, true);
if (t.f) {
task_from_blocking_queue = false;
break;
}
} else {
t = tws->PopNonBlockingTask(thread_id, false);
if (t.f) {
task_from_blocking_queue = false;
break;
}
}
}
}
if (t.f) {
tsl::profiler::TraceMe activity(
[=] {
return strings::StrCat(task_from_blocking_queue ? "inter" : "intra",
" #id = ", tws->GetTracemeId(), " ",
thread_id, "#");
}, | #define EIGEN_USE_THREADS
#include "tensorflow/core/framework/run_handler.h"
#include <memory>
#include <vector>
#define EIGEN_USE_THREADS
#include "absl/memory/memory.h"
#include "absl/synchronization/barrier.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
TEST(RunHandlerUtilTest, TestBasicScheduling) {
int num_threads = 2;
int num_handlers = 10;
std::unique_ptr<RunHandlerPool> pool(
new RunHandlerPool(num_threads, num_threads));
absl::Barrier barrier(num_threads);
BlockingCounter counter(2 * num_handlers * num_threads);
thread::ThreadPool test_pool(Env::Default(), "test", num_handlers);
for (int i = 0; i < num_handlers; ++i) {
test_pool.Schedule([&counter, &barrier, &pool, i, num_threads]() {
auto handler = pool->Get(i);
BlockingCounter local_counter(2 * num_threads);
auto intra_thread_pool = handler->AsIntraThreadPoolInterface();
for (int j = 0; j < num_threads; ++j) {
handler->ScheduleInterOpClosure(
[&local_counter, &counter, &barrier, i]() {
if (i == 2) {
barrier.Block();
}
counter.DecrementCount();
local_counter.DecrementCount();
});
intra_thread_pool->Schedule([&local_counter, &counter]() {
counter.DecrementCount();
local_counter.DecrementCount();
});
}
local_counter.Wait();
});
}
counter.Wait();
}
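// Minimal additional sketch (not part of the original suite): a single
// handler scheduling one inter-op closure through the same public API
// exercised by the test above.
TEST(RunHandlerUtilTest, SingleClosureSketch) {
  RunHandlerPool pool(/*num_inter_op_threads=*/1, /*num_intra_op_threads=*/1);
  auto handler = pool.Get(/*step_id=*/0);
  BlockingCounter counter(1);
  handler->ScheduleInterOpClosure([&counter] { counter.DecrementCount(); });
  counter.Wait();
}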
TEST(RunHandlerUtilTest, PrioritySchedulingTest) {
int num_threads = 2;
std::unique_ptr<RunHandlerPool> pool(
new RunHandlerPool(num_threads, num_threads));
RunOptions::Experimental::RunHandlerPoolOptions options =
RunOptions::Experimental::RunHandlerPoolOptions();
options.set_priority(2);
auto handler1 = pool->Get(1, 0, options);
options.set_priority(1);
auto handler2 = pool->Get(2, 0, options);
options.set_priority(3);
auto handler3 = pool->Get(3, 0, options);
std::vector<int64_t> sorted_active_list =
pool->GetActiveHandlerPrioritiesForTesting();
EXPECT_EQ(sorted_active_list.size(), 3);
EXPECT_EQ(sorted_active_list[0], 3);
EXPECT_EQ(sorted_active_list[1], 2);
EXPECT_EQ(sorted_active_list[2], 1);
handler1.reset();
options.set_priority(5);
auto handler4 = pool->Get(4, 0, options);
options.set_priority(4);
auto handler5 = pool->Get(5, 0, options);
sorted_active_list = pool->GetActiveHandlerPrioritiesForTesting();
EXPECT_EQ(sorted_active_list.size(), 4);
EXPECT_EQ(sorted_active_list[0], 5);
EXPECT_EQ(sorted_active_list[1], 4);
EXPECT_EQ(sorted_active_list[2], 3);
EXPECT_EQ(sorted_active_list[3], 1);
}
TEST(RunHandlerThreadPool, EnqueueTask) {
Eigen::MaxSizeVector<mutex> waiters_mu(2);
waiters_mu.resize(2);
Eigen::MaxSizeVector<internal::Waiter> waiters(2);
waiters.resize(2);
internal::RunHandlerThreadPool run_handler_thread_pool(
0, 0,
Env::Default(), ThreadOptions(), "tf_run_handler_pool", &waiters_mu,
&waiters);
internal::ThreadWorkSource tws;
int result = 0;
std::function<void()> fn = [&result] { result = 1; };
std::function<void()> fn2 = [&result] { result = 2; };
run_handler_thread_pool.AddWorkToQueue(&tws, true, fn);
EXPECT_EQ(tws.TaskQueueSize(true), 1);
run_handler_thread_pool.AddWorkToQueue(&tws, true, fn2);
EXPECT_EQ(tws.TaskQueueSize(true), 2);
tws.PopBlockingTask().f->f();
EXPECT_EQ(result, 1);
tws.PopBlockingTask().f->f();
EXPECT_EQ(result, 2);
run_handler_thread_pool.AddWorkToQueue(&tws, false, fn);
EXPECT_EQ(tws.TaskQueueSize(false), 1);
run_handler_thread_pool.AddWorkToQueue(&tws, false, fn2);
EXPECT_EQ(tws.TaskQueueSize(false), 2);
tws.PopNonBlockingTask(0, true).f->f();
EXPECT_EQ(result, 1);
tws.PopNonBlockingTask(0, true).f->f();
EXPECT_EQ(result, 2);
}
TEST(RunHandlerThreadPool, FindTask) {
Eigen::MaxSizeVector<mutex> waiters_mu(2);
waiters_mu.resize(2);
Eigen::MaxSizeVector<internal::Waiter> waiters(2);
waiters.resize(2);
internal::RunHandlerThreadPool run_handler_thread_pool(
1, 0,
Env::Default(), ThreadOptions(), "tf_run_handler_pool", &waiters_mu,
&waiters);
Eigen::MaxSizeVector<internal::ThreadWorkSource*> thread_work_sources(5);
thread_work_sources.resize(5);
for (int i = 0; i < 5; ++i) {
thread_work_sources[i] = new internal::ThreadWorkSource();
}
{
int result = -1;
run_handler_thread_pool.AddWorkToQueue(thread_work_sources[2],
true,
[&result] { result = 2; });
run_handler_thread_pool.AddWorkToQueue(thread_work_sources[2],
true,
[&result] { result = 2; });
run_handler_thread_pool.AddWorkToQueue(thread_work_sources[3],
true,
[&result] { result = 3; });
run_handler_thread_pool.AddWorkToQueue(thread_work_sources[3],
true,
[&result] { result = 3; });
const auto find_blocking_task_from_all_handlers =
[&](bool* task_from_blocking_queue, internal::Task* t) {
internal::ThreadWorkSource* tws;
*t = run_handler_thread_pool.FindTask(
0, 5,
0,
0, 10,
true, thread_work_sources,
task_from_blocking_queue, &tws);
};
bool task_from_blocking_queue;
internal::Task t;
find_blocking_task_from_all_handlers(&task_from_blocking_queue, &t);
EXPECT_EQ(task_from_blocking_queue, true);
t.f->f();
EXPECT_EQ(result, 2);
find_blocking_task_from_all_handlers(&task_from_blocking_queue, &t);
EXPECT_EQ(task_from_blocking_queue, true);
t.f->f();
EXPECT_EQ(result, 3);
find_blocking_task_from_all_handlers(&task_from_blocking_queue, &t);
EXPECT_EQ(task_from_blocking_queue, true);
t.f->f();
EXPECT_EQ(result, 2);
find_blocking_task_from_all_handlers(&task_from_blocking_queue, &t);
EXPECT_EQ(task_from_blocking_queue, true);
t.f->f();
EXPECT_EQ(result, 3);
}
{
int result = -1;
run_handler_thread_pool.AddWorkToQueue(thread_work_sources[3],
true,
[&result] { result = 3; });
const auto find_blocking_task_from_range =
[&](bool* task_from_blocking_queue, internal::Task* t, int range_start,
int range_end) {
internal::ThreadWorkSource* tws;
*t = run_handler_thread_pool.FindTask(
range_start, range_end,
0,
0, 10,
true, thread_work_sources,
task_from_blocking_queue, &tws);
};
bool task_from_blocking_queue;
internal::Task t;
find_blocking_task_from_range(&task_from_blocking_queue, &t, 0, 3);
EXPECT_EQ(t.f, nullptr);
find_blocking_task_from_range(&task_from_blocking_queue, &t, 0, 5);
}
{
int result = -1;
run_handler_thread_pool.AddWorkToQueue(thread_work_sources[2],
true,
[&result] { result = 2; });
run_handler_thread_pool.AddWorkToQueue(thread_work_sources[3],
true,
[&result] { result = 3; });
const auto find_blocking_task_from_range =
[&](bool* task_from_blocking_queue, internal::Task* t, int range_start,
int range_end) {
internal::ThreadWorkSource* tws;
*t = run_handler_thread_pool.FindTask(
range_start, range_end,
0,
0, 10,
true, thread_work_sources,
task_from_blocking_queue, &tws);
};
bool task_from_blocking_queue;
internal::Task t;
find_blocking_task_from_range(&task_from_blocking_queue, &t, 3, 5);
EXPECT_EQ(task_from_blocking_queue, true);
t.f->f();
EXPECT_EQ(result, 3);
find_blocking_task_from_range(&task_from_blocking_queue, &t, 0, 5);
EXPECT_EQ(task_from_blocking_queue, true);
t.f->f();
EXPECT_EQ(result, 2);
}
{
int result = -1;
run_handler_thread_pool.AddWorkToQueue(thread_work_sources[2],
true,
[&result] { result = 2; });
const auto find_blocking_task_from_range =
[&](bool* task_from_blocking_queue, internal::Task* t, int range_start,
int range_end) {
internal::ThreadWorkSource* tws;
*t = run_handler_thread_pool.FindTask(
range_start, range_end,
0,
0, 10,
true, thread_work_sources,
task_from_blocking_queue, &tws);
};
bool task_from_blocking_queue;
internal::Task t;
find_blocking_task_from_range(&task_from_blocking_queue, &t, 0, 5);
EXPECT_EQ(task_from_blocking_queue, true);
t.f->f();
EXPECT_EQ(result, 2);
run_handler_thread_pool.AddWorkToQueue(thread_work_sources[2],
true,
[&result] { result = 2; });
run_handler_thread_pool.AddWorkToQueue(thread_work_sources[3],
true,
[&result] { result = 3; });
find_blocking_task_from_range(&task_from_blocking_queue, &t, 0, 3);
EXPECT_EQ(task_from_blocking_queue, true);
t.f->f();
EXPECT_EQ(result, 2);
find_blocking_task_from_range(&task_from_blocking_queue, &t, 0, 5);
EXPECT_EQ(task_from_blocking_queue, true);
t.f->f();
EXPECT_EQ(result, 3);
}
{
int result = -1;
run_handler_thread_pool.AddWorkToQueue(thread_work_sources[2],
false,
[&result] { result = 2; });
run_handler_thread_pool.AddWorkToQueue(thread_work_sources[2],
true,
[&result] { result = 2; });
const auto blocking_thread_find_task_from_all_handler =
[&](bool* task_from_blocking_queue, internal::Task* t) {
internal::ThreadWorkSource* tws;
*t = run_handler_thread_pool.FindTask(
0, 5,
0,
0, 10,
true, thread_work_sources,
task_from_blocking_queue, &tws);
};
bool task_from_blocking_queue;
internal::Task t;
blocking_thread_find_task_from_all_handler(&task_from_blocking_queue, &t);
EXPECT_EQ(task_from_blocking_queue, true);
t.f->f();
EXPECT_EQ(result, 2);
blocking_thread_find_task_from_all_handler(&task_from_blocking_queue, &t);
EXPECT_EQ(task_from_blocking_queue, false);
t.f->f();
EXPECT_EQ(result, 2);
}
{
int result = -1;
run_handler_thread_pool.AddWorkToQueue(thread_work_sources[2],
false,
[&result] { result = 2; });
run_handler_thread_pool.AddWorkToQueue(thread_work_sources[2],
true,
[&result] { result = 2; });
const auto find_task_from_all_handler = [&](bool* task_from_blocking_queue,
internal::Task* t,
bool is_blocking_thread) {
internal::ThreadWorkSource* tws;
*t = run_handler_thread_pool.FindTask(
0, 5,
0,
0, 10,
is_blocking_thread, thread_work_sources, task_from_blocking_queue,
&tws);
};
bool task_from_blocking_queue;
internal::Task t;
find_task_from_all_handler(&task_from_blocking_queue, &t,
false);
EXPECT_EQ(task_from_blocking_queue, false);
t.f->f();
EXPECT_EQ(result, 2);
find_task_from_all_handler(&task_from_blocking_queue, &t,
false);
EXPECT_EQ(t.f, nullptr);
find_task_from_all_handler(&task_from_blocking_queue, &t,
true);
}
{
int result = -1;
run_handler_thread_pool.AddWorkToQueue(thread_work_sources[2],
true,
[&result] { result = 2; });
const auto find_task_from_all_handler = [&](bool* task_from_blocking_queue,
internal::Task* t,
bool is_blocking_thread) {
internal::ThreadWorkSource* tws;
*t = run_handler_thread_pool.FindTask(
0, 5,
0,
0, 10,
is_blocking_thread, thread_work_sources, task_from_blocking_queue,
&tws);
};
bool task_from_blocking_queue;
internal::Task t;
find_task_from_all_handler(&task_from_blocking_queue, &t,
false);
EXPECT_EQ(task_from_blocking_queue, false);
EXPECT_EQ(t.f, nullptr);
find_task_from_all_handler(&task_from_blocking_queue, &t,
true);
}
for (int i = 0; i < 5; ++i) {
delete thread_work_sources[i];
}
}
TEST(RunHandlerThreadPool, RoundRobinExecution) {
setenv("TF_RUN_HANDLER_USE_SUB_THREAD_POOL", "true", true);
setenv("TF_RUN_HANDLER_NUM_THREADS_IN_SUB_THREAD_POOL", "1", true);
setenv("TF_RUN_HANDLER_SUB_THREAD_POOL_START_REQUEST_PERCENTAGE", "0", true);
setenv("TF_RUN_HANDLER_SUB_THREAD_POOL_END_REQUEST_PERCENTAGE", "1", true);
Eigen::MaxSizeVector<mutex> waiters_mu(1);
waiters_mu.resize(1);
Eigen::MaxSizeVector<internal::Waiter> waiters(1);
waiters.resize(1);
internal::RunHandlerThreadPool* run_handler_thread_pool =
new internal::RunHandlerThreadPool(
1, 0,
Env::Default(), ThreadOptions(), "tf_run_handler_pool", &waiters_mu,
&waiters);
Eigen::MaxSizeVector<internal::ThreadWorkSource*> thread_work_sources(3);
thread_work_sources.resize(3);
internal::ThreadWorkSource tws[3];
for (int i = 0; i < 3; ++i) {
tws[i].SetWaiter(1, &waiters[0], &waiters_mu[0]);
thread_work_sources[i] = &tws[i];
}
int result = 0;
mutex mu;
bool ok_to_execute = false;
bool ok_to_validate = false;
condition_variable function_start;
condition_variable function_end;
std::vector<std::function<void()>> fns;
for (int i = 0; i < 3; ++i) {
fns.push_back([&result, &mu, &function_start, &function_end, &ok_to_execute,
&ok_to_validate, i] {
mutex_lock l(mu);
while (!ok_to_execute) {
function_start.wait(l);
}
result = i;
ok_to_execute = false;
ok_to_validate = true;
function_end.notify_one();
});
run_handler_thread_pool->AddWorkToQueue(&tws[i], true,
fns[i]);
run_handler_thread_pool->AddWorkToQueue(&tws[i], true,
fns[i]);
}
run_handler_thread_pool->Start();
run_handler_thread_pool->SetThreadWorkSources(
0, 0, 1, thread_work_sources);
mutex_lock l(mu);
for (int round = 0; round < 2; ++round) {
for (int i = 0; i < 3; ++i) {
ok_to_execute = true;
function_start.notify_one();
while (!ok_to_validate) {
function_end.wait(l);
}
ok_to_validate = false;
EXPECT_EQ(result, i);
}
}
delete run_handler_thread_pool;
}
TEST(RunHandlerThreadPool, MultipleSubThreadPool) {
setenv("TF_RUN_HANDLER_USE_SUB_THREAD_POOL", "true", true);
setenv("TF_RUN_HANDLER_NUM_THREADS_IN_SUB_THREAD_POOL", "2", true);
setenv("TF_RUN_HANDLER_SUB_THREAD_POOL_START_REQUEST_PERCENTAGE", "0,0.5",
true);
setenv("TF_RUN_HANDLER_SUB_THREAD_POOL_END_REQUEST_PERCENTAGE", "0.5,1",
true);
Eigen::MaxSizeVector<mutex> waiters_mu(2);
waiters_mu.resize(2);
Eigen::MaxSizeVector<internal::Waiter> waiters(2);
waiters.resize(2);
internal::RunHandlerThreadPool* run_handler_thread_pool =
new internal::RunHandlerThreadPool(
2, 0,
Env::Default(), ThreadOptions(), "tf_run_handler_pool", &waiters_mu,
&waiters);
Eigen::MaxSizeVector<internal::ThreadWorkSource*> thread_work_sources(4);
thread_work_sources.resize(4);
internal::ThreadWorkSource tws[4];
for (int i = 0; i < 4; ++i) {
tws[i].SetWaiter(1, &waiters[i / 2], &waiters_mu[i / 2]);
thread_work_sources[i] = &tws[i];
}
int result = 0;
mutex mu;
bool ok_to_execute = false;
bool ok_to_validate = false;
condition_variable function_start;
condition_variable function_end;
std::vector<std::function<void()>> fns;
for (int i = 0; i < 4; ++i) {
fns.push_back([&result, &mu, &function_start, &function_end, &ok_to_execute,
&ok_to_validate, i] {
mutex_lock l(mu);
while (!ok_to_execute) {
function_start.wait(l);
}
result = i;
ok_to_execute = false;
ok_to_validate = true;
function_end.notify_one();
});
run_handler_thread_pool->AddWorkToQueue(&tws[i], true,
fns[i]);
run_handler_thread_pool->AddWorkToQueue(&tws[i], true,
fns[i]);
}
run_handler_thread_pool->StartOneThreadForTesting();
run_handler_thread_pool->SetThreadWorkSources(
0, 0, 1, thread_work_sources);
run_handler_thread_pool->SetThreadWorkSources(
1, 0, 1, thread_work_sources);
mutex_lock l(mu);
for (int round = 0; round < 2; ++round) {
for (int i = 0; i < 2; ++i) {
ok_to_execute = true;
function_start.notify_one();
while (!ok_to_validate) {
function_end.wait(l);
}
ok_to_validate = false;
EXPECT_EQ(result, i);
}
}
for (int i = 0; i < 2; ++i) {
for (int round = 0; round < 2; ++round) {
ok_to_execute = true;
function_start.notify_one();
while (!ok_to_validate) {
function_end.wait(l);
}
ok_to_validate = false;
EXPECT_EQ(result, i + 2);
}
}
delete run_handler_thread_pool;
}
SessionOptions DefaultSessionOptions() {
SessionOptions options;
(*options.config.mutable_device_count())["CPU"] = 2;
return options;
}
std::unique_ptr<Session> CreateSession() {
return std::unique_ptr<Session>(NewSession(DefaultSessionOptions()));
}
class RunHandlerTest : public ::testing::Test {
public:
void Initialize(std::initializer_list<float> a_values) {
Graph graph(OpRegistry::Global());
Tensor a_tensor(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&a_tensor, a_values);
Node* a = test::graph::Constant(&graph, a_tensor);
a->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:0");
a_ = a->name();
Tensor x_tensor(DT_FLOAT, TensorShape({2, 1}));
test::FillValues<float>(&x_tensor, {1, 1});
Node* x = test::graph::Constant(&graph, x_tensor);
x->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:1");
x_ = x->name();
Node* y = test::graph::Matmul(&graph, a, x, false, false);
y->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:0");
y_ = y->name();
Node* y_neg = test::graph::Unary(&graph, "Neg", y);
y_neg_ = y_neg->name();
y_neg->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:1");
Node* z = test::graph::Unary(&graph, "Identity", y_neg);
z_ = z->name();
z->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:1");
graph.ToGraphDef(&def_);
ASSERT_EQ(setenv("TF_RUN_HANDLER_NUM_SUB_THREAD_POOL", "2", true), 0);
ASSERT_EQ(
setenv("TF_RUN_HANDLER_NUM_THREADS_IN_SUB_THREAD_POOL", "8,8", true),
0);
ASSERT_EQ(setenv("TF_RUN_HANDLER_SUB_THREAD_POOL_START_REQUEST_PERCENTAGE",
"0,0.4", true),
0);
ASSERT_EQ(setenv("TF_RUN_HANDLER_SUB_THREAD_POOL_END_REQUEST_PERCENTAGE",
"0.4,1", true),
0);
ASSERT_EQ(setenv("TF_NUM_INTEROP_THREADS", "16", true), 0);
}
string a_;
string x_;
string y_;
string y_neg_;
string z_;
GraphDef def_;
};
TEST_F(RunHandlerTest, UseRunHandlerPoolEnableSubPool) {
Initialize({3, 2, -1, 0});
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
EXPECT_EQ(absl::OkStatus(), session->Create(def_));
std::vector<std::pair<string, Tensor>> inputs;
std::vector<string> output_names = {y_ + ":0"};
std::vector<string> target_nodes = {y_neg_};
std::vector<Tensor> outputs;
RunOptions run_options;
run_options.mutable_experimental()->set_use_run_handler_pool(true);
Status s = session->Run(run_options, inputs, output_names, target_nodes,
&outputs, nullptr);
EXPECT_EQ(absl::OkStatus(), s);
ASSERT_EQ(1, outputs.size());
auto mat = outputs[0].matrix<float>();
ASSERT_TRUE(outputs[0].IsInitialized());
EXPECT_FLOAT_EQ(5.0, mat(0, 0));
}
TEST_F(RunHandlerTest, TestConcurrencyUseRunHandlerPool) {
Initialize({1, 2, 3, 4});
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
EXPECT_EQ(absl::OkStatus(), session->Create(def_));
RunOptions run_options;
run_options.mutable_experimental()->set_use_run_handler_pool(true);
thread::ThreadPool* tp = new thread::ThreadPool(Env::Default(), "test", 4);
std::vector<string> output_names = {y_ + ":0"};
auto fn = [&session, output_names, run_options]() {
for (int i = 0; i < 1000; ++i) {
std::vector<std::pair<string, Tensor>> inputs;
std::vector<Tensor> outputs;
Status s = session->Run(run_options, inputs, output_names, {}, &outputs,
nullptr);
EXPECT_EQ(absl::OkStatus(), s);
ASSERT_EQ(1, outputs.size());
auto mat = outputs[0].matrix<float>();
EXPECT_FLOAT_EQ(3.0, mat(0, 0));
}
};
for (int i = 0; i < 4; ++i) {
tp->Schedule(fn);
}
delete tp;
}
TEST_F(RunHandlerTest, UseRunHandlerPoolEnableSubPoolWithPriority) {
Initialize({3, 2, -1, 0});
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
EXPECT_EQ(absl::OkStatus(), session->Create(def_));
std::vector<std::pair<string, Tensor>> inputs;
std::vector<string> output_names = {y_ + ":0"};
std::vector<string> target_nodes = {y_neg_};
std::vector<Tensor> outputs;
RunOptions run_options;
run_options.mutable_experimental()->set_use_run_handler_pool(true);
run_options.mutable_experimental()
->mutable_run_handler_pool_options()
->set_priority(1);
Status s = session->Run(run_options, inputs, output_names, target_nodes,
&outputs, nullptr);
EXPECT_EQ(absl::OkStatus(), s);
ASSERT_EQ(1, outputs.size());
auto mat = outputs[0].matrix<float>();
ASSERT_TRUE(outputs[0].IsInitialized());
EXPECT_FLOAT_EQ(5.0, mat(0, 0));
}
TEST_F(RunHandlerTest, TestConcurrencyUseRunHandlerPoolWithPriority) {
Initialize({1, 2, 3, 4});
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
EXPECT_EQ(absl::OkStatus(), session->Create(def_));
thread::ThreadPool* tp = new thread::ThreadPool(Env::Default(), "test", 4);
std::vector<string> output_names = {y_ + ":0"};
auto fn = [&session, output_names]() {
for (int i = 0; i < 1000; ++i) {
RunOptions run_options;
run_options.mutable_experimental()->set_use_run_handler_pool(true);
run_options.mutable_experimental()
->mutable_run_handler_pool_options()
->set_priority(i % 4);
std::vector<std::pair<string, Tensor>> inputs;
std::vector<Tensor> outputs;
Status s = session->Run(run_options, inputs, output_names, {}, &outputs,
nullptr);
EXPECT_EQ(absl::OkStatus(), s);
ASSERT_EQ(1, outputs.size());
auto mat = outputs[0].matrix<float>();
EXPECT_FLOAT_EQ(3.0, mat(0, 0));
}
};
for (int i = 0; i < 4; ++i) {
tp->Schedule(fn);
}
delete tp;
}
TEST_F(RunHandlerTest, TestWaitTimeout) {
std::unique_ptr<RunHandlerPool> pool(new RunHandlerPool(1, 1));
std::vector<std::unique_ptr<RunHandler>> blocking_handles;
const int32_t kMaxConcurrentHandlers = 128;
blocking_handles.reserve(kMaxConcurrentHandlers);
for (int i = 0; i < kMaxConcurrentHandlers; ++i) {
blocking_handles.push_back(pool->Get(i));
}
auto null_handle = pool->Get(128, 1);
EXPECT_EQ(null_handle.get(), nullptr);
auto tp = std::make_unique<thread::ThreadPool>(Env::Default(), "test", 4);
  std::atomic<int64_t> release_time{0};
tp->Schedule([&blocking_handles, &release_time]() {
Env::Default()->SleepForMicroseconds(5000);
release_time = EnvTime::NowNanos();
blocking_handles[0].reset();
});
auto next_handle = pool->Get(129, 0);
EXPECT_GT(EnvTime::NowNanos(), release_time);
EXPECT_NE(next_handle.get(), nullptr);
}
}
} |
1,339 | cpp | tensorflow/tensorflow | tf_threadpool_concurrent_work_queue | tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue.cc | tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue_test.cc | #ifndef TENSORFLOW_CORE_TFRT_RUNTIME_TF_THREADPOOL_CONCURRENT_WORK_QUEUE_H_
#define TENSORFLOW_CORE_TFRT_RUNTIME_TF_THREADPOOL_CONCURRENT_WORK_QUEUE_H_
#include <memory>
#include <optional>
#include <string>
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/threadpool_interface.h"
#include "tensorflow/core/tfrt/runtime/work_queue_interface.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/task_function.h"
#include "tfrt/support/forward_decls.h"
namespace tensorflow {
namespace tfrt_stub {
class TfThreadPoolWorkQueue : public WorkQueueInterface {
public:
TfThreadPoolWorkQueue(
tensorflow::thread::ThreadPoolInterface* intra_op_threadpool,
tensorflow::thread::ThreadPoolInterface* inter_op_threadpool)
: TfThreadPoolWorkQueue(0, intra_op_threadpool,
inter_op_threadpool) {}
TfThreadPoolWorkQueue(
int64_t id, tensorflow::thread::ThreadPoolInterface* intra_op_threadpool,
tensorflow::thread::ThreadPoolInterface* inter_op_threadpool)
: WorkQueueInterface(id, intra_op_threadpool),
intra_op_threadpool_(intra_op_threadpool),
inter_op_threadpool_(inter_op_threadpool) {}
absl::StatusOr<std::unique_ptr<WorkQueueInterface>> InitializeRequest(
int64_t request_id) const override;
int GetParallelismLevel() const override {
return inter_op_threadpool_->NumThreads();
}
std::string name() const override { return "TfThreadPoolWorkQueue"; }
void AddTask(tfrt::TaskFunction work) override;
std::optional<tfrt::TaskFunction> AddBlockingTask(
tfrt::TaskFunction work, bool allow_queuing) override;
ABSL_DEPRECATED("Use the destructor instead.")
void Quiesce() override;
void Await(
tfrt::ArrayRef<::tfrt::RCReference<::tfrt::AsyncValue>> values) override;
bool IsInWorkerThread() const override;
private:
tensorflow::thread::ThreadPoolInterface* intra_op_threadpool_ = nullptr;
tensorflow::thread::ThreadPoolInterface* inter_op_threadpool_ = nullptr;
};
std::unique_ptr<TfThreadPoolWorkQueue> CreateDefaultTfThreadPoolWorkQueue(
int num_inter_op_threads, int num_intra_op_threads);
}
}
#endif
#include "tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue.h"
#include <memory>
#include <optional>
#include <utility>
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/threadpool.h"
#include "tensorflow/core/platform/threadpool_interface.h"
#include "tensorflow/core/tfrt/utils/thread_pool.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/task_function.h"
#include "tfrt/support/forward_decls.h"
#include "tfrt/support/latch.h"
namespace tensorflow {
namespace tfrt_stub {
using ::tensorflow::thread::ThreadPoolInterface;
absl::StatusOr<std::unique_ptr<WorkQueueInterface>>
TfThreadPoolWorkQueue::InitializeRequest(int64_t request_id) const {
return {std::make_unique<TfThreadPoolWorkQueue>(
request_id, intra_op_threadpool_, inter_op_threadpool_)};
}
void TfThreadPoolWorkQueue::AddTask(tfrt::TaskFunction work) {
auto* copy = new tfrt::TaskFunction(
tensorflow::tfrt_stub::WrapWork(id(), "inter", std::move(work)));
inter_op_threadpool_->Schedule([copy] {
(*copy)();
delete copy;
});
}
std::optional<tfrt::TaskFunction> TfThreadPoolWorkQueue::AddBlockingTask(
tfrt::TaskFunction work, bool allow_queuing) {
AddTask(std::move(work));
return std::nullopt;
}
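// Note: this implementation forwards blocking work to the same inter-op pool
// and ignores allow_queuing; returning std::nullopt signals that the task was
// accepted rather than handed back to the caller.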
void TfThreadPoolWorkQueue::Quiesce() {
}
void TfThreadPoolWorkQueue::Await(
tfrt::ArrayRef<tfrt::RCReference<tfrt::AsyncValue>> values) {
tfrt::latch values_remaining(values.size());
for (auto& value : values) {
value->AndThen([&values_remaining]() { values_remaining.count_down(); });
}
values_remaining.wait();
}
bool TfThreadPoolWorkQueue::IsInWorkerThread() const {
return true;
}
std::unique_ptr<TfThreadPoolWorkQueue> CreateDefaultTfThreadPoolWorkQueue(
int num_inter_op_threads, int num_intra_op_threads) {
struct ThreadPools {
TfThreadPool inter_op_threadpool;
TfThreadPool intra_op_threadpool;
ThreadPools(int num_inter_op_threads, int num_intra_op_threads)
: inter_op_threadpool("default_work_queue_inter", num_inter_op_threads),
intra_op_threadpool("default_work_queue_intra",
num_intra_op_threads) {}
};
class Wrapper : public TfThreadPoolWorkQueue {
public:
explicit Wrapper(std::unique_ptr<ThreadPools> thread_pools)
: TfThreadPoolWorkQueue(
&thread_pools->intra_op_threadpool,
&thread_pools->inter_op_threadpool),
thread_pools_(std::move(thread_pools)) {}
~Wrapper() override = default;
private:
std::unique_ptr<ThreadPools> thread_pools_;
};
return std::make_unique<Wrapper>(std::make_unique<ThreadPools>(
num_inter_op_threads, num_intra_op_threads));
}
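// Illustrative usage sketch (not part of the original source): creating the
// default queue and dispatching one inter-op task. The thread counts are
// assumptions for demonstration only.
[[maybe_unused]] static void ExampleDefaultQueueUsage() {
  auto queue = CreateDefaultTfThreadPoolWorkQueue(/*num_inter_op_threads=*/4,
                                                  /*num_intra_op_threads=*/4);
  tfrt::latch done(1);
  queue->AddTask(tfrt::TaskFunction([&done] { done.count_down(); }));
  done.wait();
}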
}
} | #include "tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/tfrt/utils/thread_pool.h"
#include "tfrt/host_context/host_allocator.h"
#include "tfrt/host_context/host_context.h"
#include "tfrt/support/latch.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
const int32_t kNumThreads = 2;
class TfThreadpoolWorkQueueTest : public ::testing::Test {
protected:
TfThreadpoolWorkQueueTest()
: tf_threadpool_cwq_(CreateDefaultTfThreadPoolWorkQueue(
kNumThreads,
kNumThreads)) {}
std::unique_ptr<TfThreadPoolWorkQueue> tf_threadpool_cwq_;
};
TEST_F(TfThreadpoolWorkQueueTest, GetParallelismLevelOk) {
EXPECT_GT(tf_threadpool_cwq_->GetParallelismLevel(), 0);
}
TEST_F(TfThreadpoolWorkQueueTest, GetNameOk) {
EXPECT_EQ(tf_threadpool_cwq_->name(), "TfThreadPoolWorkQueue");
}
TEST_F(TfThreadpoolWorkQueueTest, InitializeRequestOk) {
tfrt::RequestContextBuilder ctx_builder(nullptr,
nullptr);
auto queue = tf_threadpool_cwq_->InitializeRequest(0);
TF_ASSERT_OK(queue.status());
EXPECT_NE(*queue, nullptr);
EXPECT_NE((*queue)->GetIntraOpThreadPool(), nullptr);
}
TEST_F(TfThreadpoolWorkQueueTest, IsInWorkerThreadOk) {
EXPECT_TRUE(tf_threadpool_cwq_->IsInWorkerThread());
}
TEST_F(TfThreadpoolWorkQueueTest, RunningBlockingTask) {
tfrt::latch latch(10);
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
tf_threadpool_cwq_->AddBlockingTask(tfrt::TaskFunction([&n, &m, &latch] {
{
tensorflow::mutex_lock lock(m);
++n;
}
latch.count_down();
}),
true);
}
latch.wait();
EXPECT_EQ(n, 10);
}
TEST_F(TfThreadpoolWorkQueueTest, RunningNonBlockingTask) {
tfrt::latch latch(10);
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
tf_threadpool_cwq_->AddTask(tfrt::TaskFunction([&n, &m, &latch] {
{
tensorflow::mutex_lock lock(m);
++n;
}
latch.count_down();
}));
}
latch.wait();
EXPECT_EQ(n, 10);
}
TEST_F(TfThreadpoolWorkQueueTest, RunningMixedTask) {
tfrt::latch latch(20);
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
tf_threadpool_cwq_->AddTask(tfrt::TaskFunction([&n, &m, &latch] {
{
tensorflow::mutex_lock lock(m);
++n;
}
latch.count_down();
}));
tf_threadpool_cwq_->AddBlockingTask(tfrt::TaskFunction([&n, &m, &latch] {
{
tensorflow::mutex_lock lock(m);
++n;
}
latch.count_down();
}),
true);
}
latch.wait();
EXPECT_EQ(n, 20);
}
}
}
} |
1,340 | cpp | tensorflow/tensorflow | work_queue_interface | tensorflow/core/tfrt/runtime/work_queue_interface.cc | tensorflow/core/tfrt/runtime/work_queue_interface_test.cc | #ifndef TENSORFLOW_CORE_TFRT_RUNTIME_WORK_QUEUE_INTERFACE_H_
#define TENSORFLOW_CORE_TFRT_RUNTIME_WORK_QUEUE_INTERFACE_H_
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "tensorflow/core/platform/context.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/threadpool_interface.h"
#include "tensorflow/core/profiler/lib/connected_traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/support/error_util.h"
namespace tensorflow {
namespace tfrt_stub {
class WorkQueueInterface : public tfrt::ConcurrentWorkQueue {
public:
WorkQueueInterface() = default;
explicit WorkQueueInterface(int64_t id) : id_(id) {}
explicit WorkQueueInterface(int64_t id,
thread::ThreadPoolInterface* intra_op_threadpool)
: id_(id), intra_op_threadpool_(intra_op_threadpool) {}
~WorkQueueInterface() override = 0;
int64_t id() const { return id_; }
thread::ThreadPoolInterface* GetIntraOpThreadPool() const {
return intra_op_threadpool_;
}
ABSL_DEPRECATED("Create the instance directly instead.")
virtual absl::StatusOr<std::unique_ptr<WorkQueueInterface>> InitializeRequest(
int64_t request_id) const {
return {nullptr};
}
private:
int64_t id_ = 0;
thread::ThreadPoolInterface* intra_op_threadpool_ = nullptr;
};
inline WorkQueueInterface::~WorkQueueInterface() = default;
std::unique_ptr<WorkQueueInterface> WrapDefaultWorkQueue(
std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue);
std::unique_ptr<WorkQueueInterface> WrapDefaultWorkQueue(
std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue,
thread::ThreadPoolInterface* intra_thread_pool);
template <typename Callable>
tfrt::TaskFunction WrapWork(int64_t id, absl::string_view name,
Callable&& work) {
tensorflow::Context context(tensorflow::ContextKind::kThread);
tsl::profiler::TraceMeProducer producer(
[&]() { return absl::StrCat("producer_", name); },
tsl::profiler::ContextType::kTfrtExecutor);
return tfrt::TaskFunction([traceme_id = producer.GetContextId(),
name = std::string(name),
context = std::move(context),
work = std::forward<Callable>(work)]() mutable {
tsl::profiler::TraceMeConsumer consumer(
[&]() { return absl::StrCat("consumer_", name); },
tsl::profiler::ContextType::kTfrtExecutor, traceme_id,
tsl::profiler::TraceMeLevel::kInfo);
tensorflow::WithContext wc(context);
std::forward<Callable>(work)();
});
}
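// Example (illustrative; `request_id` and `work_queue` are assumed names, not
// declarations from this header): wrapping a closure so the producer/consumer
// TraceMe pair and the TF thread context follow the task onto a worker:
//
//   tfrt::TaskFunction task = WrapWork(request_id, "inter", [] { /* work */ });
//   work_queue->AddTask(std::move(task));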
}
}
#endif
#include "tensorflow/core/tfrt/runtime/work_queue_interface.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "tfrt/host_context/execution_context.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
class DefaultWorkQueueWrapper : public WorkQueueInterface {
public:
explicit DefaultWorkQueueWrapper(
std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue)
: WorkQueueInterface(0),
work_queue_owner_(std::move(work_queue)),
work_queue_(work_queue_owner_.get()) {}
DefaultWorkQueueWrapper(std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue,
thread::ThreadPoolInterface* intra_thread_pool)
: WorkQueueInterface(0, intra_thread_pool),
work_queue_owner_(std::move(work_queue)),
work_queue_(work_queue_owner_.get()) {}
DefaultWorkQueueWrapper(int64_t request_id,
tfrt::ConcurrentWorkQueue* work_queue,
thread::ThreadPoolInterface* intra_thread_pool)
: WorkQueueInterface(request_id, intra_thread_pool),
work_queue_(work_queue) {}
~DefaultWorkQueueWrapper() override = default;
private:
std::string name() const override { return work_queue_->name(); }
void AddTask(tfrt::TaskFunction work) override {
work_queue_->AddTask(WrapWork(id(), "inter", std::move(work)));
}
std::optional<tfrt::TaskFunction> AddBlockingTask(
tfrt::TaskFunction work, bool allow_queuing) override {
return work_queue_->AddBlockingTask(
WrapWork(id(), "blocking", std::move(work)), allow_queuing);
}
void Await(
llvm::ArrayRef<tfrt::RCReference<tfrt::AsyncValue>> values) override {
work_queue_->Await(values);
}
void Quiesce() override { work_queue_->Quiesce(); }
int GetParallelismLevel() const override {
return work_queue_->GetParallelismLevel();
}
bool IsInWorkerThread() const override {
return work_queue_->IsInWorkerThread();
}
absl::StatusOr<std::unique_ptr<WorkQueueInterface>> InitializeRequest(
int64_t request_id) const override {
return {std::make_unique<DefaultWorkQueueWrapper>(request_id, work_queue_,
GetIntraOpThreadPool())};
}
private:
std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue_owner_;
tfrt::ConcurrentWorkQueue* work_queue_ = nullptr;
};
}
std::unique_ptr<WorkQueueInterface> WrapDefaultWorkQueue(
std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue) {
return std::make_unique<DefaultWorkQueueWrapper>(std::move(work_queue));
}
std::unique_ptr<WorkQueueInterface> WrapDefaultWorkQueue(
std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue,
thread::ThreadPoolInterface* intra_thread_pool) {
return std::make_unique<DefaultWorkQueueWrapper>(std::move(work_queue),
intra_thread_pool);
}
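// Example sketch (added commentary, not part of the original source): wrapping
// a plain TFRT work queue so every task is traced and tagged with request id
// 0. tfrt::CreateSingleThreadedWorkQueue() is the same factory the unit tests
// below use.
//
//   auto wrapped = WrapDefaultWorkQueue(tfrt::CreateSingleThreadedWorkQueue());
//   wrapped->AddTask(tfrt::TaskFunction([] { /* work */ }));
//   wrapped->Quiesce();  // Blocks until all queued tasks have run.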
}
} | #include "tensorflow/core/tfrt/runtime/work_queue_interface.h"
#include <thread>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/tfrt/utils/thread_pool.h"
#include "tfrt/cpp_tests/test_util.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/task_function.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
TEST(DefaultWorkQueueWrapperTest, Name) {
auto work_queue = tfrt::CreateSingleThreadedWorkQueue();
auto work_queue_ptr = work_queue.get();
auto work_queue_wrapper = WrapDefaultWorkQueue(std::move(work_queue));
EXPECT_EQ(work_queue_wrapper->name(), work_queue_ptr->name());
}
TEST(DefaultWorkQueueWrapperTest, AddTask_OnlyTask) {
auto work_queue = tfrt::CreateSingleThreadedWorkQueue();
auto work_queue_wrapper = WrapDefaultWorkQueue(std::move(work_queue));
auto av = tfrt::MakeUnconstructedAsyncValueRef<int>().ReleaseRCRef();
work_queue_wrapper->AddTask(
tfrt::TaskFunction([av] { av->emplace<int>(0); }));
work_queue_wrapper->Await(std::move(av));
}
TEST(DefaultWorkQueueWrapperTest, AddBlockingTask_TaskAndAllowQueueing) {
auto work_queue = tfrt::CreateSingleThreadedWorkQueue();
auto work_queue_wrapper = WrapDefaultWorkQueue(std::move(work_queue));
auto av = tfrt::MakeUnconstructedAsyncValueRef<int>().ReleaseRCRef();
std::thread thread{[&] {
auto work = work_queue_wrapper->AddBlockingTask(
tfrt::TaskFunction([&] { av->emplace<int>(0); }),
true);
}};
work_queue_wrapper->Await(std::move(av));
thread.join();
}
TEST(DefaultWorkQueueWrapperTest, GetParallelismLevel) {
auto work_queue = tfrt::CreateSingleThreadedWorkQueue();
auto work_queue_ptr = work_queue.get();
auto work_queue_wrapper = WrapDefaultWorkQueue(std::move(work_queue));
EXPECT_EQ(work_queue_wrapper->GetParallelismLevel(),
work_queue_ptr->GetParallelismLevel());
}
TEST(DefaultWorkQueueWrapperTest, IsInWorkerThread) {
auto work_queue = tfrt::CreateSingleThreadedWorkQueue();
auto work_queue_ptr = work_queue.get();
auto work_queue_wrapper = WrapDefaultWorkQueue(std::move(work_queue));
EXPECT_EQ(work_queue_wrapper->IsInWorkerThread(),
work_queue_ptr->IsInWorkerThread());
}
TEST(DefaultWorkQueueWrapperTest, IntraOpThreadPool) {
auto work_queue = tfrt::CreateSingleThreadedWorkQueue();
TfThreadPool intra_op_thread_pool("tf_intra",
1);
auto work_queue_wrapper =
WrapDefaultWorkQueue(std::move(work_queue), &intra_op_thread_pool);
TF_ASSERT_OK_AND_ASSIGN(auto queue, work_queue_wrapper->InitializeRequest(
0));
EXPECT_NE(queue, nullptr);
EXPECT_EQ(queue->GetIntraOpThreadPool(), &intra_op_thread_pool);
}
}
}
} |
1,341 | cpp | tensorflow/tensorflow | stream | tensorflow/core/tfrt/runtime/stream.cc | third_party/xla/xla/stream_executor/stream_test.cc | #ifndef XLA_STREAM_EXECUTOR_STREAM_H_
#define XLA_STREAM_EXECUTOR_STREAM_H_
#include <cstdint>
#include <variant>
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/event.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/platform.h"
namespace stream_executor {
class StreamExecutor;
class Stream {
public:
struct PlatformSpecificHandle {
void *stream = nullptr;
};
virtual ~Stream() = default;
virtual PlatformSpecificHandle platform_specific_handle() const = 0;
virtual bool ok() const = 0;
virtual absl::Status RefreshStatus() {
return absl::UnimplementedError(
"RefreshStatus is not supported on this stream.");
}
virtual absl::StatusOr<Stream *> GetOrCreateSubStream() = 0;
virtual void ReturnSubStream(Stream *sub_stream) = 0;
template <typename... Params, typename... Args>
absl::Status ThenLaunch(ThreadDim thread_dims, BlockDim block_dims,
const TypedKernel<Params...> &kernel, Args... args);
template <typename... Params, typename... Args>
absl::Status ThenLaunch(ThreadDim thread_dims, BlockDim block_dims,
int32_t shmem_bytes,
const TypedKernel<Params...> &kernel, Args... args);
virtual absl::Status WaitFor(Stream *other) = 0;
virtual absl::Status WaitFor(Event *event) = 0;
virtual absl::Status RecordEvent(Event *event) = 0;
virtual absl::Status Memcpy(void *host_dst, const DeviceMemoryBase &gpu_src,
uint64_t size) = 0;
virtual absl::Status Memcpy(DeviceMemoryBase *gpu_dst, const void *host_src,
uint64_t size) = 0;
template <typename T>
absl::Status MemcpyD2H(const DeviceMemory<T> &gpu_src,
absl::Span<T> host_dst) {
auto host_size = host_dst.size() * sizeof(T);
if (gpu_src.size() == 0 || host_size >= gpu_src.size()) {
return Memcpy(host_dst.begin(), gpu_src, host_size);
}
return absl::InternalError("Bad source size.");
}
template <typename T>
absl::Status MemcpyH2D(absl::Span<const T> host_src,
DeviceMemory<T> *gpu_dst) {
auto host_size = host_src.size() * sizeof(T);
if (gpu_dst->size() == 0 || gpu_dst->size() >= host_size) {
return Memcpy(gpu_dst, host_src.begin(), host_size);
}
return absl::InternalError("Bad destination size.");
}
virtual absl::Status Memcpy(DeviceMemoryBase *gpu_dst,
const DeviceMemoryBase &gpu_src, uint64_t size) {
return absl::UnimplementedError(
"Memcpy from device to device is not implemented for this "
"stream.");
}
absl::Status MemcpyD2D(DeviceMemoryBase *gpu_dst,
const DeviceMemoryBase &gpu_src, uint64_t size) {
return Memcpy(gpu_dst, gpu_src, size);
}
virtual absl::Status MemZero(DeviceMemoryBase *location, uint64_t size) {
return absl::UnimplementedError("MemZero is not supported on this stream.");
}
virtual absl::Status Memset32(DeviceMemoryBase *location, uint32_t pattern,
uint64_t size) {
return absl::UnimplementedError(
"Memset32 is not supported on this stream.");
}
virtual absl::Status BlockHostUntilDone() = 0;
virtual absl::Status DoHostCallback(
absl::AnyInvocable<void() &&> callback) = 0;
virtual absl::Status DoHostCallbackWithStatus(
absl::AnyInvocable<absl::Status() &&> callback) = 0;
virtual StreamExecutor *parent() const = 0;
virtual CudaComputeCapability GetCudaComputeCapability() const = 0;
virtual RocmComputeCapability GetRocmComputeCapability() const = 0;
virtual std::variant<StreamPriority, int> priority() const = 0;
virtual absl::Status Launch(const ThreadDim &thread_dims,
const BlockDim &block_dims, const Kernel &k,
const KernelArgs &args) = 0;
virtual absl::Status Launch(const ThreadDim &thread_dims,
const BlockDim &block_dims,
const ClusterDim &cluster_dims, const Kernel &k,
const KernelArgs &args) = 0;
};
template <typename... Params, typename... Args>
inline absl::Status Stream::ThenLaunch(ThreadDim thread_dims,
BlockDim block_dims,
const TypedKernel<Params...> &kernel,
Args... args) {
auto kernel_args = PackKernelArgs(kernel, args...);
return Launch(thread_dims, block_dims, *kernel, *kernel_args);
}
template <typename... Params, typename... Args>
inline absl::Status Stream::ThenLaunch(ThreadDim thread_dims,
BlockDim block_dims, int32_t shmem_bytes,
const TypedKernel<Params...> &kernel,
Args... args) {
auto kernel_args = PackKernelArgs(shmem_bytes, args...);
return Launch(thread_dims, block_dims, *kernel, *kernel_args);
}
}
#endif
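// Example sketch (added commentary, not part of the original source): a
// typical host/device round trip with the Stream interface declared above.
// `executor` is assumed to be a StreamExecutor (see the unit test at the end
// of this entry) and `d_x` an already-allocated DeviceMemory<float>.
//
//   TF_ASSIGN_OR_RETURN(auto stream, executor->CreateStream());
//   std::vector<float> host(16, 1.0f);
//   TF_RETURN_IF_ERROR(
//       stream->MemcpyH2D<float>(absl::MakeConstSpan(host), &d_x));
//   TF_RETURN_IF_ERROR(stream->MemcpyD2H<float>(d_x, absl::MakeSpan(host)));
//   TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());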
#include "tensorflow/core/tfrt/runtime/stream.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/utility/utility.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tsl/platform/random.h"
#include "tsl/platform/threadpool_interface.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
namespace tfrt_stub {
absl::StatusOr<std::optional<StreamCallbackId>> CreateStreamCallbackId(
absl::string_view model_name, mlir::ModuleOp module) {
mlir::Builder builder(module.getContext());
std::vector<mlir::TF::PwStreamResultsOp> ops;
module->walk([&](mlir::TF::PwStreamResultsOp op) { ops.push_back(op); });
if (ops.empty()) {
return std::nullopt;
}
auto& stream_interface = GetGlobalStreamCallbackRegistry().stream_interface();
auto controller_address = stream_interface.controller_address();
auto controller_address_attr = builder.getStringAttr(controller_address);
auto model_name_attr = builder.getStringAttr(model_name);
const StreamCallbackId callback_id(
static_cast<int64_t>(tsl::random::New64()));
auto callback_id_attr = builder.getI64IntegerAttr(callback_id.id);
for (auto op : ops) {
op->setAttr("_controller_address", controller_address_attr);
op->setAttr("_model_name", model_name_attr);
op->setAttr("_callback_id", callback_id_attr);
}
return callback_id;
}
absl::Status StreamCallbackRegistry::CallbackState::Invoke(
tsl::thread::ThreadPoolInterface* thread_pool, StreamedResult result) {
{
absl::MutexLock lock(&mu_);
if (closed_) {
      return absl::InternalError(
          "Failed to invoke a callback that has been closed.");
}
++num_outstanding_;
}
thread_pool->Schedule([this, result = std::move(result)]() mutable {
InvokeCallback(std::move(result));
absl::MutexLock lock(&mu_);
--num_outstanding_;
});
return absl::OkStatus();
}
void StreamCallbackRegistry::CallbackState::Close() {
{
absl::MutexLock lock(&mu_);
closed_ = true;
auto not_running = [this]() ABSL_SHARED_LOCKS_REQUIRED(mu_) {
return num_outstanding_ == 0;
};
mu_.Await(absl::Condition(¬_running));
}
}
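// Note (added commentary, not part of the original source): Close() first
// marks the state closed under the mutex so no further Invoke() can enqueue
// work, then blocks on the condition until num_outstanding_ drops to zero,
// i.e. until every already-scheduled callback has finished running.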
void StreamCallbackRegistry::CallbackState::InvokeCallback(
StreamedResult result) {
absl::Duration dequeue_latency = absl::Now() - result.enqueued_time;
interface().RecordDequeueLatency(model_name_, dequeue_latency);
tsl::profiler::TraceMe trace_me("StreamCallbackInvocation");
trace_me.AppendMetadata([&]() {
return tsl::profiler::TraceMeEncode({
{"callback_id", callback_id_.id},
{"step_id", step_id_.id},
});
});
absl::Time start_time = absl::Now();
callback_(std::move(result.tensors));
interface().RecordCallbackLatency(model_name_, absl::Now() - start_time);
}
absl::StatusOr<ScopedStreamCallback> StreamCallbackRegistry::Register(
absl::string_view model_name, StreamCallbackId callback_id, StepId step_id,
absl::AnyInvocable<
void(absl::flat_hash_map<std::string, tensorflow::Tensor>)>
callback) {
absl::MutexLock l(&mu_);
const auto [it, inserted] =
stream_callbacks_.insert({std::make_pair(callback_id, step_id), nullptr});
if (!inserted) {
return absl::AlreadyExistsError(absl::StrCat(
"Stream callback ", callback_id, " @ ", step_id, " already exists"));
}
it->second = std::make_unique<CallbackState>(this, model_name, callback_id,
step_id, std::move(callback));
return ScopedStreamCallback(this, callback_id, step_id);
}
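// Example sketch (added commentary, not part of the original source): the
// intended register -> invoke -> unregister flow. `callback_id`, `step_id`,
// `thread_pool`, and `result` are placeholders supplied by the runtime.
//
//   auto& registry = GetGlobalStreamCallbackRegistry();
//   TF_ASSIGN_OR_RETURN(
//       ScopedStreamCallback scoped,
//       registry.Register("model", callback_id, step_id,
//                         [](absl::flat_hash_map<std::string, Tensor> t) {
//                           /* consume streamed tensors */
//                         }));
//   TF_RETURN_IF_ERROR(
//       registry.Invoke(thread_pool, callback_id, step_id, std::move(result)));
//   // `scoped` unregisters and drains outstanding callbacks on destruction.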
absl::Status StreamCallbackRegistry::Invoke(
tsl::thread::ThreadPoolInterface* thread_pool, StreamCallbackId callback_id,
StepId step_id, StreamedResult result) {
absl::MutexLock lock(&mu_);
auto iter = stream_callbacks_.find({callback_id, step_id});
if (iter == stream_callbacks_.end()) {
return absl::NotFoundError(absl::StrCat(
"Stream callback ", callback_id, " @ ", step_id,
" does not exist; this usually indicates that a streaming signature "
"was called by a non-streaming request"));
}
auto* state = iter->second.get();
DCHECK(state);
return state->Invoke(thread_pool, std::move(result));
}
std::unique_ptr<StreamCallbackRegistry::CallbackState>
StreamCallbackRegistry::Unregister(StreamCallbackId callback_id,
StepId step_id) {
absl::MutexLock l(&mu_);
const auto it = stream_callbacks_.find({callback_id, step_id});
if (it == stream_callbacks_.end()) {
return nullptr;
}
auto state = std::move(it->second);
stream_callbacks_.erase(it);
return state;
}
ScopedStreamCallback::ScopedStreamCallback(ScopedStreamCallback&& other)
: registry_(other.registry_),
callback_id_(other.callback_id_),
step_id_(other.step_id_) {
other.callback_id_ = std::nullopt;
other.step_id_ = StepId::GetInvalidStepId();
}
ScopedStreamCallback& ScopedStreamCallback::operator=(
ScopedStreamCallback&& other) {
Unregister();
registry_ = other.registry_;
callback_id_ = other.callback_id_;
step_id_ = other.step_id_;
other.callback_id_ = std::nullopt;
other.step_id_ = StepId::GetInvalidStepId();
return *this;
}
void ScopedStreamCallback::Unregister() {
if (!callback_id_.has_value()) {
return;
}
tsl::profiler::TraceMe trace_me("ScopedStreamCallback::Unregister");
trace_me.AppendMetadata([&]() {
return tsl::profiler::TraceMeEncode({
{"callback_id", callback_id_->id},
{"step_id", step_id_.id},
});
});
DCHECK(registry_);
auto state = registry_->Unregister(*callback_id_, step_id_);
DCHECK(state);
state->Close();
callback_id_.reset();
}
StreamInterfaceFactory& GetGlobalStreamInterfaceFactory() {
static auto* stream_interface_factory = new StreamInterfaceFactory;
return *stream_interface_factory;
}
StreamCallbackRegistry& GetGlobalStreamCallbackRegistry() {
static auto* stream_callback_registry =
new StreamCallbackRegistry(GetGlobalStreamInterfaceFactory()
.CreateControllerStreamInterface()
.value());
return *stream_callback_registry;
}
}
} | #include <memory>
#include "absl/log/check.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace stream_executor {
namespace {
class StreamTest : public ::testing::Test {
protected:
std::unique_ptr<StreamExecutor> NewStreamExecutor() {
Platform* platform = PlatformManager::PlatformWithName("Host").value();
StreamExecutorConfig config(0);
return platform->GetUncachedExecutor(config).value();
}
};
TEST_F(StreamTest, InitOk) {
std::unique_ptr<StreamExecutor> executor = NewStreamExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
}
TEST_F(StreamTest, InitWithIntPriorityOk) {
std::unique_ptr<StreamExecutor> executor = NewStreamExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream(1));
}
TEST_F(StreamTest, InitWithStreamPriorityOk) {
std::unique_ptr<StreamExecutor> executor = NewStreamExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream,
executor->CreateStream(StreamPriority::Highest));
}
TEST_F(StreamTest, OneSubStream) {
std::unique_ptr<StreamExecutor> executor = NewStreamExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
TF_ASSERT_OK_AND_ASSIGN(Stream * sub_stream1, stream->GetOrCreateSubStream());
EXPECT_TRUE(sub_stream1->ok());
stream->ReturnSubStream(sub_stream1);
TF_ASSERT_OK_AND_ASSIGN(Stream * sub_stream2, stream->GetOrCreateSubStream());
EXPECT_TRUE(sub_stream2->ok());
stream->ReturnSubStream(sub_stream1);
EXPECT_EQ(sub_stream1, sub_stream2);
}
TEST_F(StreamTest, TwoSubStreams) {
std::unique_ptr<StreamExecutor> executor = NewStreamExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
TF_ASSERT_OK_AND_ASSIGN(Stream * sub_stream1, stream->GetOrCreateSubStream());
EXPECT_TRUE(sub_stream1->ok());
TF_ASSERT_OK_AND_ASSIGN(Stream * sub_stream2, stream->GetOrCreateSubStream());
EXPECT_TRUE(sub_stream2->ok());
EXPECT_NE(sub_stream1, sub_stream2);
stream->ReturnSubStream(sub_stream1);
TF_ASSERT_OK_AND_ASSIGN(Stream * sub_stream3, stream->GetOrCreateSubStream());
EXPECT_TRUE(sub_stream3->ok());
EXPECT_EQ(sub_stream1, sub_stream3);
EXPECT_NE(sub_stream2, sub_stream3);
stream->ReturnSubStream(sub_stream2);
TF_ASSERT_OK_AND_ASSIGN(Stream * sub_stream4, stream->GetOrCreateSubStream());
EXPECT_TRUE(sub_stream4->ok());
EXPECT_EQ(sub_stream2, sub_stream4);
EXPECT_NE(sub_stream3, sub_stream4);
}
}
} |
1,342 | cpp | tensorflow/tensorflow | gpu_runner | tensorflow/core/tfrt/gpu/kernel/gpu_runner.cc | tensorflow/core/tfrt/gpu/kernel/gpu_runner_test.cc | #ifndef TENSORFLOW_CORE_TFRT_GPU_KERNEL_GPU_RUNNER_H_
#define TENSORFLOW_CORE_TFRT_GPU_KERNEL_GPU_RUNNER_H_
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include "tensorflow/core/tfrt/utils/gpu_variables_table.h"
#include "tfrt/host_context/async_value_ref.h"
#include "tfrt/host_context/execution_context.h"
namespace tensorflow {
namespace gpu {
constexpr char kGpuRunnerResourceName[] = "GpuRunnerResource";
struct GpuRunInputs {
llvm::SmallVector<tfrt_stub::FallbackTensor>* args;
int num_outputs;
tfrt::ArrayRef<int64_t> resource_indices;
tfrt::ArrayRef<int64_t> used_output_indices;
std::string func_name;
Device* cpu_device;
absl::flat_hash_map<int, Device*>* gpu_devices;
const tfd::KernelFallbackCompatRequestState* fallback_request_state;
const tfrt::ExecutionContext* exec_ctx;
};
class GpuRunner {
public:
explicit GpuRunner(tsl::ServingDeviceSelector* serving_device_selector)
: serving_device_selector_(serving_device_selector) {}
absl::StatusOr<
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>>
Run(const GpuRunInputs& run_inputs);
private:
tsl::ServingDeviceSelector* serving_device_selector_;
tfrt::gpu::GpuVariablesTable vars_table_;
};
}
}
#endif
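// Example sketch (added commentary, not part of the original source): wiring a
// GpuRunner to a round-robin serving device selector, mirroring the unit test
// at the end of this entry. kNumVirtualGpuDevices is an assumed constant.
//
//   auto policy = std::make_unique<tsl::RoundRobinPolicy>();
//   auto selector = std::make_unique<GpuServingDeviceSelector>(
//       kNumVirtualGpuDevices, std::move(policy));
//   GpuRunner runner(selector.get());
//   // Populate a GpuRunInputs struct, then call runner.Run(run_inputs).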
#include "tensorflow/core/tfrt/gpu/kernel/gpu_runner.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "llvm/ADT/SmallVector.h"
#include "tensorflow/compiler/jit/pjrt_compile_util.h"
#include "tensorflow/compiler/jit/pjrt_tensor_buffer_util.h"
#include "tensorflow/compiler/jit/xla_compile_util.h"
#include "tensorflow/compiler/jit/xla_launch_util.h"
#include "tensorflow/compiler/jit/xla_platform_info.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/tsl/framework/device_id.h"
#include "xla/tsl/framework/device_id_manager.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include "tensorflow/core/tfrt/common/global_state.h"
#include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include "tensorflow/core/tfrt/utils/gpu_variables_table.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/fingerprint.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
#include "tfrt/host_context/async_dispatch.h"
#include "tfrt/host_context/async_value_ref.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/kernel_registry.h"
#include "tfrt/support/forward_decls.h"
namespace tensorflow {
namespace gpu {
namespace {
tfrt::AsyncValueRef<tfrt_stub::FallbackTensor> TransferTensorToDevice(
const tfrt::ExecutionContext& exec_ctx,
const tfrt_stub::FallbackTensor& tensor, Device* gpu_device) {
const tensorflow::Tensor& src = tensor.tensor();
tensorflow::AllocatorAttributes attr;
attr.set_use_pjrt_allocator(true);
tensorflow::Tensor dst(gpu_device->GetAllocator(attr), src.dtype(),
src.shape());
if (src.shape().num_elements() == 0) {
return tfrt::MakeAvailableAsyncValueRef<tfrt_stub::FallbackTensor>(dst);
}
auto result =
tfrt::MakeUnconstructedAsyncValueRef<tfrt_stub::FallbackTensor>();
DeviceContext* pjrt_device_context =
gpu_device->tensorflow_accelerator_device_info()->pjrt_context;
bool enqueued = tfrt::EnqueueBlockingWork(
exec_ctx.host(),
[result = result.CopyRef(), gpu_device, pjrt_device_context, src,
dst = std::move(dst)]() mutable {
tensorflow::Notification n;
tensorflow::Status status;
pjrt_device_context->CopyCPUTensorToDevice(
&src, gpu_device, &dst, [&status, &n](Status s) mutable {
status = s;
n.Notify();
});
n.WaitForNotification();
if (!status.ok()) {
result.SetError(absl::InternalError(status.message()));
} else {
result.emplace(std::move(dst));
}
});
if (!enqueued) {
return tfrt::MakeErrorAsyncValueRef(absl::InternalError(
"Failed to enqueue blocking task to transfer tensor."));
}
return result;
}
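// Note (added commentary, not part of the original source):
// TransferTensorToDevice returns an unconstructed AsyncValueRef immediately
// and fulfills it from a blocking task: the task issues the host-to-device
// copy, parks on a Notification until the device context's completion
// callback fires, then either emplaces the destination tensor or sets an
// error on the async value. TransferTensorFromDevice below mirrors this for
// the device-to-host direction.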
tfrt::AsyncValueRef<tfrt_stub::FallbackTensor> TransferTensorFromDevice(
const tfrt::ExecutionContext& exec_ctx,
const tfrt_stub::FallbackTensor& tensor, Device* cpu_device,
Device* gpu_device) {
const tensorflow::Tensor& src = tensor.tensor();
tensorflow::AllocatorAttributes attr;
tensorflow::Tensor dst(cpu_device->GetAllocator(attr), src.dtype(),
src.shape());
if (src.shape().num_elements() == 0) {
return tfrt::MakeAvailableAsyncValueRef<tfrt_stub::FallbackTensor>(dst);
}
auto result =
tfrt::MakeUnconstructedAsyncValueRef<tfrt_stub::FallbackTensor>();
DeviceContext* pjrt_device_context =
gpu_device->tensorflow_accelerator_device_info()->pjrt_context;
bool enqueued = tfrt::EnqueueBlockingWork(
exec_ctx.host(),
[result = result.CopyRef(), gpu_device, pjrt_device_context, src,
dst = std::move(dst)]() mutable {
tensorflow::Notification n;
tensorflow::Status status;
pjrt_device_context->CopyDeviceTensorToCPU(
&src, "tensor_name", gpu_device, &dst,
[&status, &n](Status s) mutable {
status = s;
n.Notify();
});
n.WaitForNotification();
if (!status.ok()) {
result.SetError(absl::InternalError(status.message()));
} else {
result.emplace(std::move(dst));
}
});
if (!enqueued) {
return tfrt::MakeErrorAsyncValueRef(absl::InternalError(
"Failed to enqueue blocking task to transfer tensor."));
}
return result;
}
absl::StatusOr<
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>>
PopulateResultsFromPjRtExecutableOutputs(
const XlaCompiler::CompilationResult& compilation_result,
std::vector<std::unique_ptr<xla::PjRtBuffer>>& executable_outputs,
Device* device, int num_outputs) {
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>
fallback_tensor_results;
for (int i = 0; i < num_outputs; ++i) {
const DataType& dtype = compilation_result.outputs[i].type;
CHECK(!compilation_result.outputs[i].is_constant);
CHECK(dtype != DT_RESOURCE);
xla::PjRtBuffer* output_buffer = executable_outputs[i].get();
if (output_buffer->IsTuple()) {
return absl::InvalidArgumentError(
"Tuple PJRT buffer output is not supported.");
}
absl::Span<const int64_t> dims;
std::optional<std::vector<int64_t>> logical_dims_storage;
if (output_buffer->has_dynamic_dimensions()) {
TF_ASSIGN_OR_RETURN(std::vector<int64_t> logical_dims,
output_buffer->logical_dimensions());
logical_dims_storage.emplace(std::move(logical_dims));
dims = *logical_dims_storage;
} else {
dims = output_buffer->dimensions();
}
TensorShape tensor_shape;
for (int i = 0; i < dims.size(); ++i) {
TF_RETURN_IF_ERROR(tensor_shape.AddDimWithStatus(dims[i]));
}
TF_ASSIGN_OR_RETURN(
Tensor output_tensor,
MakeTensorFromPjRtBuffer(dtype, tensor_shape,
std::move(executable_outputs[i])));
auto result = tfrt::MakeAvailableAsyncValueRef<tfrt_stub::FallbackTensor>(
output_tensor);
fallback_tensor_results.emplace_back(std::move(result));
}
return fallback_tensor_results;
}
absl::StatusOr<
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>>
TransferOutputsToHostIfNeeded(
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>> outputs,
tfrt::ArrayRef<int64_t> used_output_indices, Device* cpu_device,
Device* gpu_device, const tfrt::ExecutionContext& exec_ctx) {
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>> results;
for (int i = 0, j = 0; i < outputs.size(); ++i) {
if (j < used_output_indices.size() && i == used_output_indices[j]) {
CHECK(outputs[i].IsAvailable());
tfrt::AsyncValueRef<tfrt_stub::FallbackTensor> output_on_cpu =
TransferTensorFromDevice(exec_ctx, outputs[i].get(), cpu_device,
gpu_device);
results.push_back(std::move(output_on_cpu));
++j;
} else {
results.push_back(std::move(outputs[i]));
}
}
return results;
}
absl::StatusOr<
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>>
TransferVariablesAndInputs(
int device_idx, const llvm::SmallVector<tfrt_stub::FallbackTensor>& args,
tfrt::ArrayRef<int64_t> resource_indices, Device* cpu_device,
absl::flat_hash_map<int, Device*> gpu_devices,
tfrt::gpu::GpuVariablesTable& vars_table,
const tfrt::ExecutionContext& exec_ctx) {
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>> results;
tsl::PlatformDeviceId platform_device_id;
DeviceType device_type(DEVICE_GPU);
TF_RETURN_IF_ERROR(tsl::DeviceIdManager::TfToPlatformDeviceId(
device_type, tsl::TfDeviceId(device_idx), &platform_device_id));
TF_ASSIGN_OR_RETURN(const std::vector<tsl::TfDeviceId> devices_on_platform,
tsl::DeviceIdManager::GetTfDevicesOnPlatform(
device_type, platform_device_id));
const int platform_idx = platform_device_id.value();
absl::flat_hash_set<int64_t> resource_indices_set(resource_indices.begin(),
resource_indices.end());
for (int i = 0, resource_idx = 0; i < args.size(); ++i) {
if (resource_indices_set.contains(i)) {
tfrt::AsyncValueRef<tfrt_stub::FallbackTensor> device_tensor;
auto cached_device_variable =
vars_table.GetDeviceVariable(args[i], platform_idx);
if (cached_device_variable) {
VLOG(2) << "Cache hit for resource arg[" << i << "]";
device_tensor = cached_device_variable.CopyRef();
} else {
VLOG(2) << "Cache miss for resource arg[" << i << "]";
const int idx = resource_idx % devices_on_platform.size();
const int gpu_device_idx = devices_on_platform[idx].value();
device_tensor = TransferTensorToDevice(exec_ctx, args[i],
gpu_devices.at(gpu_device_idx));
vars_table.AddOrUpdateDeviceVariable(args[i], platform_idx,
std::move(device_tensor));
device_tensor =
vars_table.GetDeviceVariable(args[i], platform_idx).CopyRef();
}
results.push_back(device_tensor);
++resource_idx;
} else {
tfrt::AsyncValueRef<tfrt_stub::FallbackTensor> device_tensor =
TransferTensorToDevice(exec_ctx, args[i], gpu_devices.at(device_idx));
results.push_back(device_tensor);
}
}
return results;
}
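// Note (added commentary, not part of the original source): resource
// (variable) arguments are cached in vars_table per platform and sharded
// across the TF devices sharing that platform via
// resource_idx % devices_on_platform.size(), so repeated executions reuse the
// on-device copies; non-resource inputs are re-transferred to the selected
// device on every call.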
absl::StatusOr<uint64_t> GenerateFingerprint(
const std::string& function_name,
const tfd::KernelFallbackCompatRequestState* fallback_request_state) {
const FunctionLibraryDefinition* flib_def =
fallback_request_state->cpu_function_library_runtime()
->GetFunctionLibraryDefinition();
const FunctionDef* fdef = flib_def->Find(function_name);
if (!fdef) {
return absl::InternalError(
absl::StrCat("Failed to find the function ", function_name));
}
return tsl::Fingerprint64(
absl::StrCat(fallback_request_state->session_metadata().name(),
fallback_request_state->session_metadata().version(),
tsl::LegacyUnredactedDebugString(fdef->signature())));
}
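// Note (added commentary, not part of the original source): the fingerprint
// hashes the session metadata (name and version) together with the function's
// signature, and Run() below uses it as the key passed to
// ServingDeviceSelector::ReserveDevice.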
std::vector<XlaCompiler::Argument> BuildXlaCompilerArguments(
const llvm::SmallVector<tfrt_stub::FallbackTensor>& inputs) {
std::vector<XlaCompiler::Argument> out;
out.resize(inputs.size());
for (int input_num = 0; input_num < inputs.size(); ++input_num) {
const tensorflow::Tensor& input = inputs[input_num].tensor();
CHECK_GT(input.NumElements(), 0);
CHECK(input.dtype() != DT_RESOURCE);
XlaCompiler::Argument& arg = out[input_num];
arg.kind = XlaCompiler::Argument::kParameter;
arg.type = input.dtype();
arg.shape = input.shape();
}
return out;
}
Status CompileProgram(const GpuRunInputs& run_inputs, int device_idx,
const XlaCompiler::CompilationResult** compilation_result,
xla::PjRtClient** pjrt_client,
xla::PjRtLoadedExecutable** pjrt_executable) {
std::vector<XlaCompiler::Argument> xla_compiler_args =
BuildXlaCompilerArguments(*run_inputs.args);
DeviceBase* device = run_inputs.gpu_devices->at(device_idx);
FunctionLibraryRuntime* flr =
run_inputs.fallback_request_state->process_function_library_runtime()
.GetFLR(run_inputs.gpu_devices->at(device_idx)->name());
XlaPlatformInfo platform_info =
XlaPlatformInfoFromDevice(run_inputs.gpu_devices->at(device_idx));
NameAttrList function;
function.set_name(run_inputs.func_name);
ResourceMgr* rm = tfrt_global::GetTFGlobalResourceMgr();
return CompileToPjRtLoadedExecutable(
device, platform_info, function, xla_compiler_args,
DeviceCompileMode::kStrict,
false,
false, flr, rm, compilation_result,
pjrt_client, pjrt_executable);
}
}
absl::StatusOr<
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>>
GpuRunner::Run(const GpuRunInputs& run_inputs) {
TF_ASSIGN_OR_RETURN(uint64_t fingerprint,
GenerateFingerprint(run_inputs.func_name,
run_inputs.fallback_request_state));
tsl::DeviceReservation device_reservation =
serving_device_selector_->ReserveDevice(absl::StrCat(fingerprint));
const int device_idx = device_reservation.device_index();
const XlaCompiler::CompilationResult* compilation_result;
xla::PjRtClient* pjrt_client;
xla::PjRtLoadedExecutable* pjrt_executable;
TF_RETURN_IF_ERROR(CompileProgram(run_inputs, device_idx, &compilation_result,
&pjrt_client, &pjrt_executable));
TF_ASSIGN_OR_RETURN(
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>
transferred_args,
TransferVariablesAndInputs(device_idx, *run_inputs.args,
run_inputs.resource_indices,
run_inputs.cpu_device, *run_inputs.gpu_devices,
vars_table_, *run_inputs.exec_ctx));
llvm::SmallVector<tfrt::RCReference<tfrt::AsyncValue>, 4>
transferred_args_to_wait;
for (const auto& arg : transferred_args) {
if (!arg.IsAvailable()) {
transferred_args_to_wait.push_back(arg.CopyRCRef());
}
}
run_inputs.exec_ctx->host()->Await(transferred_args_to_wait);
std::vector<const Tensor*> inputs;
for (const auto& arg : transferred_args) {
if (arg.IsError()) {
return absl::InternalError(
absl::StrCat("Data transfer failed: ", arg.GetError().message()));
}
inputs.push_back(&arg->tensor());
}
if (compilation_result->collective_info.has_value()) {
return absl::UnimplementedError(
"Execution with collectives is not supported yet.");
}
TF_ASSIGN_OR_RETURN(
xla::PjRtDevice * pjrt_device,
pjrt_client->LookupAddressableDevice(xla::PjRtLocalDeviceId(device_idx)));
TF_ASSIGN_OR_RETURN(
std::vector<std::unique_ptr<xla::PjRtBuffer>> executable_outputs,
RunPjRtExecutable(0, inputs,
{}, {},
DeviceType(DEVICE_GPU),
true, *compilation_result,
pjrt_device, pjrt_client, pjrt_executable));
TF_ASSIGN_OR_RETURN(
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>> results,
PopulateResultsFromPjRtExecutableOutputs(
*compilation_result, executable_outputs,
run_inputs.gpu_devices->at(device_idx), run_inputs.num_outputs));
return TransferOutputsToHostIfNeeded(
results, run_inputs.used_output_indices, run_inputs.cpu_device,
run_inputs.gpu_devices->at(device_idx), *run_inputs.exec_ctx);
}
}
} | #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "tensorflow/core/tfrt/gpu/kernel/gpu_runner.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "xla/tsl/framework/serving_device_selector_policies.h"
#include "tensorflow/core/common_runtime/gpu/gpu_serving_device_selector.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/diagnostic.h"
#include "tfrt/host_context/function.h"
#include "tfrt/host_context/host_allocator.h"
#include "tfrt/host_context/host_context.h"
namespace tensorflow {
namespace gpu {
namespace {
constexpr int kNumVirtualGpuDevices = 1;
constexpr char kFunctionName[] = "foo";
StatusOr<std::unique_ptr<Graph>> SampleGraphAddXY() {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto b = ops::_Arg(scope.WithOpName("B"), DT_INT32, 1);
auto c = ops::Add(scope.WithOpName("C"), a, b);
auto d = ops::_Retval(scope.WithOpName("D"), c, 0);
TF_RETURN_IF_ERROR(scope.ToGraph(graph.get()));
return graph;
}
StatusOr<FunctionDef> SampleFunctionAddXY(const std::string& name) {
TF_ASSIGN_OR_RETURN(auto graph, SampleGraphAddXY());
FunctionDef fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*graph, name, &fdef));
return fdef;
}
Status GetDevices(const tensorflow::tfd::KernelFallbackCompatRequestState*
fallback_request_state,
Device** cpu_device,
absl::flat_hash_map<int, Device*>& gpu_devices) {
*cpu_device = fallback_request_state->device_manager().HostCPU();
if (!*cpu_device) {
return absl::InternalError(
"Fallback request state must have a valid host cpu device.");
}
for (Device* device :
fallback_request_state->device_manager().ListDevices()) {
if (device->device_type() != DEVICE_GPU) continue;
if (!gpu_devices.try_emplace(device->parsed_name().id, device).second) {
return absl::InternalError(absl::StrCat(
"A device with the same device ID already exists when adding ",
device->name()));
}
}
if (gpu_devices.empty()) {
return absl::InternalError("No GPU device is found.");
}
for (const auto& [id, device] : gpu_devices) {
if (id >= gpu_devices.size()) {
return absl::InternalError("Device IDs are not consecutive.");
}
}
return OkStatus();
}
template <typename T>
Tensor CreateTensor(const TensorShape& input_shape,
gtl::ArraySlice<T> input_data,
Allocator* allocator = nullptr) {
Tensor tensor(DataTypeToEnum<T>::value, input_shape);
test::FillValues<T>(&tensor, input_data);
return tensor;
}
class GpuRunnerTest : public ::testing::Test {
protected:
void SetUp() override {
tensorflow::SessionOptions session_options;
TF_ASSERT_OK_AND_ASSIGN(FunctionDef fdef,
SampleFunctionAddXY(kFunctionName));
tensorflow::FunctionDefLibrary fdef_lib;
*fdef_lib.add_function() = fdef;
TF_ASSERT_OK_AND_ASSIGN(fallback_state_, tfrt_stub::FallbackState::Create(
session_options, fdef_lib));
std::function<void(std::function<void()>)> runner =
[](const std::function<void()>& f) { f(); };
tfrt_stub::OpKernelRunnerTable runner_table;
tfd::FallbackResourceArray resource_array;
fallback_request_state_ =
std::make_unique<tfd::KernelFallbackCompatRequestState>(
&runner, &fallback_state_->device_manager(), 0,
&runner_table, &resource_array,
nullptr,
std::nullopt,
&fallback_state_->process_function_library_runtime());
auto host_allocator = tfrt::CreateMallocAllocator();
auto work_queue = tfrt::CreateMultiThreadedWorkQueue(
2, 2);
host_context_ = std::make_unique<tfrt::HostContext>(
[&](const tfrt::DecodedDiagnostic& diag) {}, std::move(host_allocator),
std::move(work_queue));
tfrt::RequestContextBuilder req_ctx_builder =
tfrt::RequestContextBuilder(host_context_.get(), nullptr);
tfrt::Expected<tfrt::RCReference<tfrt::RequestContext>> req_ctx(
std::move(req_ctx_builder).build());
ASSERT_TRUE(!!req_ctx);
exec_ctx_ = std::make_unique<tfrt::ExecutionContext>(std::move(*req_ctx));
auto policy = std::make_unique<tsl::RoundRobinPolicy>();
serving_device_selector_ = std::make_unique<GpuServingDeviceSelector>(
kNumVirtualGpuDevices, std::move(policy));
gpu_runner_ = std::make_unique<GpuRunner>(serving_device_selector_.get());
}
std::unique_ptr<tfrt_stub::FallbackState> fallback_state_;
std::unique_ptr<tfd::KernelFallbackCompatRequestState>
fallback_request_state_;
std::unique_ptr<tfrt::HostContext> host_context_;
std::unique_ptr<tfrt::ExecutionContext> exec_ctx_;
std::unique_ptr<GpuServingDeviceSelector> serving_device_selector_;
std::unique_ptr<GpuRunner> gpu_runner_;
};
TEST_F(GpuRunnerTest, Basic) {
GpuRunInputs run_inputs;
llvm::SmallVector<tfrt_stub::FallbackTensor> args;
Tensor tensor1 = CreateTensor<int32>(TensorShape({1, 2}), {1, 2});
Tensor tensor2 = CreateTensor<int32>(TensorShape({1, 2}), {3, 4});
args.push_back(tfrt_stub::FallbackTensor(tensor1));
args.push_back(tfrt_stub::FallbackTensor(tensor2));
run_inputs.args = &args;
run_inputs.num_outputs = 1;
run_inputs.resource_indices = tfrt::ArrayRef<int64_t>(0);
run_inputs.used_output_indices = tfrt::ArrayRef<int64_t>(0);
run_inputs.func_name = kFunctionName;
absl::flat_hash_map<int, Device*> gpu_devices;
ASSERT_OK(GetDevices(fallback_request_state_.get(), &run_inputs.cpu_device,
gpu_devices));
run_inputs.gpu_devices = &gpu_devices;
run_inputs.fallback_request_state = fallback_request_state_.get();
run_inputs.exec_ctx = exec_ctx_.get();
TF_ASSERT_OK_AND_ASSIGN(
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>> outputs,
gpu_runner_->Run(run_inputs));
llvm::SmallVector<tfrt::RCReference<tfrt::AsyncValue>, 4> outputs_to_wait;
for (const auto& output : outputs) {
if (!output.IsAvailable()) {
outputs_to_wait.push_back(output.CopyRCRef());
}
}
exec_ctx_->host()->Await(outputs_to_wait);
ASSERT_EQ(outputs.size(), 1);
auto expected = CreateTensor<int32>(TensorShape({1, 2}), {4, 6});
test::ExpectTensorEqual<int32>(expected, outputs[0].get().tensor());
}
}
}
}
#endif |
1,343 | cpp | tensorflow/tensorflow | example_parser_configuration | tensorflow/core/example/example_parser_configuration.cc | tensorflow/core/example/example_parser_configuration_test.cc | #ifndef TENSORFLOW_CORE_EXAMPLE_EXAMPLE_PARSER_CONFIGURATION_H_
#define TENSORFLOW_CORE_EXAMPLE_EXAMPLE_PARSER_CONFIGURATION_H_
#include <string>
#include <vector>
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/example/example_parser_configuration.pb.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/example_proto_helper.h"
#include "tensorflow/core/util/sparse/sparse_tensor.h"
namespace tensorflow {
Status ExtractExampleParserConfiguration(
const tensorflow::GraphDef& graph, const string& node_name,
tensorflow::Session* session,
std::vector<FixedLenFeature>* fixed_len_features,
std::vector<VarLenFeature>* var_len_features);
Status ExampleParserConfigurationProtoToFeatureVectors(
const ExampleParserConfiguration& config_proto,
std::vector<FixedLenFeature>* fixed_len_features,
std::vector<VarLenFeature>* var_len_features);
}
#endif
#include "tensorflow/core/example/example_parser_configuration.h"
#include <vector>
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
Status FindNodeIndexByName(const tensorflow::GraphDef& graph,
const string& node_name, int* node_idx) {
for (int i = 0; i < graph.node_size(); ++i) {
const auto& node = graph.node(i);
if (node.name() == node_name) {
*node_idx = i;
return absl::OkStatus();
}
}
return errors::InvalidArgument(node_name, " not found in GraphDef");
}
Status ExtractExampleParserConfiguration(
const tensorflow::GraphDef& graph, const string& node_name,
tensorflow::Session* session,
std::vector<FixedLenFeature>* fixed_len_features,
std::vector<VarLenFeature>* var_len_features) {
int node_idx;
TF_RETURN_IF_ERROR(FindNodeIndexByName(graph, node_name, &node_idx));
const auto& node = graph.node(node_idx);
if (node.op() != "ParseExample") {
return errors::InvalidArgument(node_name, " node is not a ParseExample op");
}
auto& attr_map = node.attr();
auto num_sparse = attr_map.at("Nsparse").i();
auto num_dense = attr_map.at("Ndense").i();
fixed_len_features->resize(num_dense);
var_len_features->resize(num_sparse);
auto tdense = attr_map.at("Tdense");
auto dense_shapes = attr_map.at("dense_shapes");
auto sparse_types = attr_map.at("sparse_types");
if (tdense.list().type_size() != num_dense) {
return errors::InvalidArgument("Node attr Tdense has ",
tdense.list().type_size(),
" elements != Ndense attr: ", num_dense);
}
if (dense_shapes.list().shape_size() != num_dense) {
return errors::InvalidArgument("Node attr dense_shapes has ",
dense_shapes.list().shape_size(),
" elements != Ndense attr: ", num_dense);
}
if (sparse_types.list().type_size() != num_sparse) {
return errors::InvalidArgument("Node attr sparse_types has ",
sparse_types.list().type_size(),
" elements != NSparse attr: ", num_sparse);
}
for (int i = 0; i < tdense.list().type_size(); ++i) {
(*fixed_len_features)[i].dtype = tdense.list().type(i);
(*fixed_len_features)[i].shape = TensorShape(dense_shapes.list().shape(i));
}
for (int i = 0; i < sparse_types.list().type_size(); ++i) {
(*var_len_features)[i].dtype = sparse_types.list().type(i);
}
std::vector<string> fetch_names(node.input_size() - 1);
for (int i = 1; i < node.input_size(); ++i) {
fetch_names[i - 1] = node.input(i);
}
std::vector<Tensor> op_input_tensors;
TF_RETURN_IF_ERROR(session->Run({},
fetch_names, {},
&op_input_tensors));
int sparse_keys_start = 1;
int dense_keys_start = sparse_keys_start + num_sparse;
int dense_defaults_start = dense_keys_start + num_dense;
for (int i = 0; i < num_sparse; ++i) {
int input_idx = sparse_keys_start + i;
(*var_len_features)[i].key =
op_input_tensors[input_idx].scalar<tstring>()();
}
for (int i = 0; i < num_dense; ++i) {
FixedLenFeature& config = (*fixed_len_features)[i];
int dense_keys_offset = dense_keys_start + i;
config.key = op_input_tensors[dense_keys_offset].scalar<tstring>()();
int defaults_offset = dense_defaults_start + i;
config.default_value = op_input_tensors[defaults_offset];
}
int sparse_indices_output_start = 0;
int sparse_values_output_start = sparse_indices_output_start + num_sparse;
int sparse_shapes_output_start = sparse_values_output_start + num_sparse;
int dense_values_output_start = sparse_shapes_output_start + num_sparse;
string node_output_prefix = strings::StrCat(node_name, ":");
for (int i = 0; i < num_sparse; ++i) {
VarLenFeature& config = (*var_len_features)[i];
int indices_offset = sparse_indices_output_start + i;
config.indices_output_tensor_name =
strings::StrCat(node_output_prefix, indices_offset);
int values_offset = sparse_values_output_start + i;
config.values_output_tensor_name =
strings::StrCat(node_output_prefix, values_offset);
int shapes_offset = sparse_shapes_output_start + i;
config.shapes_output_tensor_name =
strings::StrCat(node_output_prefix, shapes_offset);
}
for (int i = 0; i < num_dense; ++i) {
int output_idx = dense_values_output_start + i;
(*fixed_len_features)[i].values_output_tensor_name =
strings::StrCat(node_output_prefix, output_idx);
}
return absl::OkStatus();
}
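// Note (added commentary, not part of the original source): the index
// arithmetic above follows the ParseExample op's fixed signature. The fetched
// inputs (everything after the serialized input) are laid out as
//   names, sparse_keys x Nsparse, dense_keys x Ndense, dense_defaults x Ndense
// and the outputs as
//   sparse_indices x Nsparse, sparse_values x Nsparse,
//   sparse_shapes x Nsparse, dense_values x Ndense
// which is why sparse keys start at offset 1 and dense values start at output
// index 3 * Nsparse.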
Status ExampleParserConfigurationProtoToFeatureVectors(
const ExampleParserConfiguration& config_proto,
std::vector<FixedLenFeature>* fixed_len_features,
std::vector<VarLenFeature>* var_len_features) {
const auto& feature_map = config_proto.feature_map();
for (auto it = feature_map.cbegin(); it != feature_map.cend(); ++it) {
string key = it->first;
const auto& config = it->second;
if (config.has_fixed_len_feature()) {
const auto& fixed_config = config.fixed_len_feature();
FixedLenFeature f;
f.key = key;
f.dtype = fixed_config.dtype();
f.shape = TensorShape(fixed_config.shape());
Tensor default_value(f.dtype, f.shape);
if (!default_value.FromProto(fixed_config.default_value())) {
return errors::InvalidArgument(
"Invalid default_value in config proto ",
fixed_config.default_value().DebugString());
}
f.default_value = default_value;
f.values_output_tensor_name = fixed_config.values_output_tensor_name();
fixed_len_features->push_back(f);
} else {
const auto& var_len_config = config.var_len_feature();
VarLenFeature v;
v.key = key;
v.dtype = var_len_config.dtype();
v.values_output_tensor_name = var_len_config.values_output_tensor_name();
v.indices_output_tensor_name =
var_len_config.indices_output_tensor_name();
v.shapes_output_tensor_name = var_len_config.shapes_output_tensor_name();
var_len_features->push_back(v);
}
}
return absl::OkStatus();
}
} | #include "tensorflow/core/example/example_parser_configuration.h"
#include <memory>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/example_proto_helper.h"
namespace tensorflow {
namespace {
void ReadFileToStringOrDie(Env* env, const string& filename, string* output) {
TF_CHECK_OK(ReadFileToString(env, filename, output));
}
std::unique_ptr<Session> CreateSession() {
SessionOptions options;
(*options.config.mutable_device_count())["CPU"] = 2;
return std::unique_ptr<Session>(NewSession(options));
}
class ExtractExampleParserConfigurationTest : public ::testing::Test {
protected:
void SetUp() override {
string proto_string;
string filename =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/example/testdata/parse_example_graph_def.pbtxt");
ReadFileToStringOrDie(Env::Default(), filename, &proto_string);
protobuf::TextFormat::ParseFromString(proto_string, &graph_def_);
session_ = CreateSession();
TF_CHECK_OK(session_->Create(graph_def_));
}
NodeDef* parse_example_node() {
for (auto& node : *graph_def_.mutable_node()) {
if (node.name() == "ParseExample/ParseExample") {
return &node;
}
}
return nullptr;
}
GraphDef graph_def_;
std::unique_ptr<Session> session_;
};
TEST_F(ExtractExampleParserConfigurationTest, OpNotFound) {
std::vector<FixedLenFeature> dense_vec;
std::vector<VarLenFeature> sparse_vec;
Status status = ExtractExampleParserConfiguration(
graph_def_, "BlarseExample/ParseExample", session_.get(), &dense_vec,
&sparse_vec);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
}
TEST_F(ExtractExampleParserConfigurationTest, InconsistentAttrNsparse) {
std::vector<FixedLenFeature> dense_vec;
std::vector<VarLenFeature> sparse_vec;
NodeDef* node = parse_example_node();
auto mutable_attr = node->mutable_attr();
(*mutable_attr)["Nsparse"].set_i(3);
Status status = ExtractExampleParserConfiguration(
graph_def_, "ParseExample/ParseExample", session_.get(), &dense_vec,
&sparse_vec);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
}
TEST_F(ExtractExampleParserConfigurationTest, InconsistentAttrNdense) {
std::vector<FixedLenFeature> dense_vec;
std::vector<VarLenFeature> sparse_vec;
NodeDef* node = parse_example_node();
auto mutable_attr = node->mutable_attr();
(*mutable_attr)["Ndense"].set_i(2);
Status status = ExtractExampleParserConfiguration(
graph_def_, "ParseExample/ParseExample", session_.get(), &dense_vec,
&sparse_vec);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
}
TEST_F(ExtractExampleParserConfigurationTest, Basic) {
std::vector<FixedLenFeature> dense_vec;
std::vector<VarLenFeature> sparse_vec;
Status status = ExtractExampleParserConfiguration(
graph_def_, "ParseExample/ParseExample", session_.get(), &dense_vec,
&sparse_vec);
EXPECT_EQ(absl::OkStatus(), status);
EXPECT_EQ(2, sparse_vec.size());
EXPECT_EQ(3, dense_vec.size());
EXPECT_EQ("sf0", sparse_vec[0].key);
EXPECT_EQ(DT_STRING, sparse_vec[0].dtype);
EXPECT_EQ("ParseExample/ParseExample:0",
sparse_vec[0].indices_output_tensor_name);
EXPECT_EQ("ParseExample/ParseExample:2",
sparse_vec[0].values_output_tensor_name);
EXPECT_EQ("ParseExample/ParseExample:4",
sparse_vec[0].shapes_output_tensor_name);
EXPECT_EQ("sf1", sparse_vec[1].key);
EXPECT_EQ(DT_STRING, sparse_vec[1].dtype);
EXPECT_EQ("ParseExample/ParseExample:1",
sparse_vec[1].indices_output_tensor_name);
EXPECT_EQ("ParseExample/ParseExample:3",
sparse_vec[1].values_output_tensor_name);
EXPECT_EQ("ParseExample/ParseExample:5",
sparse_vec[1].shapes_output_tensor_name);
EXPECT_EQ("x", dense_vec[0].key);
EXPECT_EQ(DT_FLOAT, dense_vec[0].dtype);
EXPECT_EQ("ParseExample/ParseExample:6",
dense_vec[0].values_output_tensor_name);
EXPECT_EQ("y", dense_vec[1].key);
EXPECT_EQ(DT_FLOAT, dense_vec[1].dtype);
EXPECT_EQ("ParseExample/ParseExample:7",
dense_vec[1].values_output_tensor_name);
EXPECT_EQ("z", dense_vec[2].key);
EXPECT_EQ(DT_FLOAT, dense_vec[2].dtype);
EXPECT_EQ("ParseExample/ParseExample:8",
dense_vec[2].values_output_tensor_name);
}
static const char kExampleParseConfigurationProto[] = R"( feature_map {
key: "x"
value {
fixed_len_feature {
dtype: DT_FLOAT
shape {
dim {
size: 1
}
}
default_value {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 1
}
}
float_val: 33.0
}
values_output_tensor_name: "ParseExample/ParseExample:3"
}
}
}
feature_map {
key: "y"
value {
var_len_feature {
dtype: DT_STRING
values_output_tensor_name: "ParseExample/ParseExample:1"
indices_output_tensor_name: "ParseExample/ParseExample:0"
shapes_output_tensor_name: "ParseExample/ParseExample:2"
}
}
}
)";
class ExampleParserConfigurationProtoToFeatureVectorsTest
: public ::testing::Test {
protected:
void SetUp() override {
CHECK(protobuf::TextFormat::ParseFromString(kExampleParseConfigurationProto,
&config_proto_));
}
ExampleParserConfiguration config_proto_;
};
TEST_F(ExampleParserConfigurationProtoToFeatureVectorsTest, Basic) {
std::vector<FixedLenFeature> fixed_len_features;
std::vector<VarLenFeature> var_len_features;
TF_ASSERT_OK(ExampleParserConfigurationProtoToFeatureVectors(
config_proto_, &fixed_len_features, &var_len_features));
ASSERT_EQ(1, fixed_len_features.size());
ASSERT_EQ(1, var_len_features.size());
const FixedLenFeature& f = fixed_len_features[0];
ASSERT_EQ(DT_FLOAT, f.dtype);
ASSERT_EQ("x", f.key);
ASSERT_EQ("ParseExample/ParseExample:3", f.values_output_tensor_name);
TensorShape expected_shape({1});
ASSERT_EQ(expected_shape.dims(), f.shape.dims());
ASSERT_EQ(1, f.shape.dim_size(0));
Tensor expected_default(DT_FLOAT, TensorShape({1}));
test::FillIota<float>(&expected_default, 33.0);
test::ExpectTensorEqual<float>(expected_default, f.default_value);
const VarLenFeature& v = var_len_features[0];
ASSERT_EQ(DT_STRING, v.dtype);
ASSERT_EQ("ParseExample/ParseExample:0", v.indices_output_tensor_name);
ASSERT_EQ("ParseExample/ParseExample:1", v.values_output_tensor_name);
ASSERT_EQ("ParseExample/ParseExample:2", v.shapes_output_tensor_name);
}
}
} |
1,344 | cpp | tensorflow/tensorflow | feature_util | tensorflow/core/example/feature_util.cc | tensorflow/core/example/feature_util_test.cc | #ifndef TENSORFLOW_CORE_EXAMPLE_FEATURE_UTIL_H_
#define TENSORFLOW_CORE_EXAMPLE_FEATURE_UTIL_H_
#include <algorithm>
#include <iterator>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/strings/string_view.h"
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/stringpiece.h"
#ifdef ABSL_HAVE_STD_STRING_VIEW
#include <string_view>
#endif
namespace tensorflow {
namespace internal {
ABSL_DEPRECATED("Use GetFeature instead.")
Feature& ExampleFeature(absl::string_view name, Example* example);
template <typename FeatureType>
struct RepeatedFieldTrait;
template <>
struct RepeatedFieldTrait<protobuf_int64> {
using Type = protobuf::RepeatedField<protobuf_int64>;
};
template <>
struct RepeatedFieldTrait<float> {
using Type = protobuf::RepeatedField<float>;
};
template <>
struct RepeatedFieldTrait<tstring> {
using Type = protobuf::RepeatedPtrField<std::string>;
};
template <>
struct RepeatedFieldTrait<std::string> {
using Type = protobuf::RepeatedPtrField<std::string>;
};
template <typename ValueType, class Enable = void>
struct FeatureTrait;
template <typename ValueType>
struct FeatureTrait<ValueType, typename std::enable_if<
std::is_integral<ValueType>::value>::type> {
using Type = protobuf_int64;
};
template <typename ValueType>
struct FeatureTrait<
ValueType,
typename std::enable_if<std::is_floating_point<ValueType>::value>::type> {
using Type = float;
};
template <typename T>
struct is_string
: public std::integral_constant<
bool,
std::is_same<char*, typename std::decay<T>::type>::value ||
std::is_same<const char*, typename std::decay<T>::type>::value> {
};
template <>
struct is_string<std::string> : std::true_type {};
template <>
struct is_string<::tensorflow::StringPiece> : std::true_type {};
template <>
struct is_string<tstring> : std::true_type {};
template <typename ValueType>
struct FeatureTrait<
ValueType, typename std::enable_if<is_string<ValueType>::value>::type> {
using Type = std::string;
};
template <typename... T, typename F>
constexpr bool Requires(F) {
return std::is_invocable<F, T...>::value;
}
struct NoneSuch {};
inline constexpr bool kFeatureMapHasHeterogeneousLookup =
Requires<decltype(Features::default_instance().feature())>(
[](auto&& c) -> decltype(c.find(NoneSuch{})) {});
inline auto ProtoMapKey(absl::string_view str) {
if constexpr (kFeatureMapHasHeterogeneousLookup) {
#ifdef ABSL_USES_STD_STRING_VIEW
return str;
#else
#ifdef ABSL_HAVE_STD_STRING_VIEW
return std::string_view(str.data(), str.size());
#else
return std::string(str);
#endif
#endif
} else {
return std::string(str);
}
}
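// Note (added commentary, not part of the original source): ProtoMapKey
// avoids materializing a std::string when the protobuf map supports
// heterogeneous lookup. kFeatureMapHasHeterogeneousLookup probes, at compile
// time, whether Map::find accepts an arbitrary type (NoneSuch); when it does,
// the string_view is passed through directly, otherwise the key is copied
// into a std::string.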
}
bool HasFeatureList(absl::string_view key,
const SequenceExample& sequence_example);
template <typename T>
struct TypeHasFeatures : std::false_type {};
template <>
struct TypeHasFeatures<SequenceExample> : std::true_type {};
template <>
struct TypeHasFeatures<Example> : std::true_type {};
template <>
struct TypeHasFeatures<Features> : std::true_type {};
template <typename ProtoType>
typename std::enable_if<TypeHasFeatures<ProtoType>::value, Features*>::type
GetFeatures(ProtoType* proto);
template <>
Features* GetFeatures<Features>(Features* proto);
template <>
Features* GetFeatures<Example>(Example* proto);
template <>
Features* GetFeatures<SequenceExample>(SequenceExample* proto);
template <typename ProtoType>
typename std::enable_if<TypeHasFeatures<ProtoType>::value,
const Features&>::type
GetFeatures(const ProtoType& proto);
template <>
const Features& GetFeatures<Features>(const Features& proto);
template <>
const Features& GetFeatures<Example>(const Example& proto);
template <>
const Features& GetFeatures<SequenceExample>(const SequenceExample& proto);
template <typename FeatureType>
const typename internal::RepeatedFieldTrait<FeatureType>::Type&
GetFeatureValues(const Feature& feature);
template <>
const protobuf::RepeatedField<protobuf_int64>& GetFeatureValues<protobuf_int64>(
const Feature& feature);
template <>
const protobuf::RepeatedField<float>& GetFeatureValues<float>(
const Feature& feature);
template <>
const protobuf::RepeatedPtrField<std::string>& GetFeatureValues<tstring>(
const Feature& feature);
template <>
const protobuf::RepeatedPtrField<std::string>& GetFeatureValues<std::string>(
const Feature& feature);
template <typename FeatureType, typename ProtoType>
const typename internal::RepeatedFieldTrait<FeatureType>::Type&
GetFeatureValues(absl::string_view key, const ProtoType& proto) {
return GetFeatureValues<FeatureType>(
GetFeatures(proto).feature().at(internal::ProtoMapKey(key)));
}
template <typename FeatureType>
typename internal::RepeatedFieldTrait<FeatureType>::Type* GetFeatureValues(
Feature* feature);
template <>
protobuf::RepeatedField<protobuf_int64>* GetFeatureValues<protobuf_int64>(
Feature* feature);
template <>
protobuf::RepeatedField<float>* GetFeatureValues<float>(Feature* feature);
template <>
protobuf::RepeatedPtrField<std::string>* GetFeatureValues<tstring>(
Feature* feature);
template <>
protobuf::RepeatedPtrField<std::string>* GetFeatureValues<std::string>(
Feature* feature);
template <typename FeatureType, typename ProtoType>
typename internal::RepeatedFieldTrait<FeatureType>::Type* GetFeatureValues(
absl::string_view key, ProtoType* proto) {
::tensorflow::Feature& feature =
(*GetFeatures(proto)->mutable_feature())[internal::ProtoMapKey(key)];
return GetFeatureValues<FeatureType>(&feature);
}
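// Illustrative usage sketch (mirrors the unit tests; not part of the API):
// writing and then reading int64 values stored under the key "tag".
//
//   Example example;
//   GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
//   auto tag = GetFeatureValues<protobuf_int64>("tag", example);
//   // tag.size() == 1 && tag.Get(0) == 42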
template <typename ProtoType>
const Feature& GetFeature(absl::string_view key, const ProtoType& proto) {
return GetFeatures(proto).feature().at(internal::ProtoMapKey(key));
}
template <typename ProtoType>
const Feature* MaybeGetFeature(absl::string_view key, const ProtoType& proto) {
const protobuf::Map<std::string, Feature>& feature_map =
GetFeatures(proto).feature();
auto it = feature_map.find(internal::ProtoMapKey(key));
if (it == feature_map.end()) {
return nullptr;
}
return &it->second;
}
template <typename FeatureType>
const typename internal::RepeatedFieldTrait<FeatureType>::Type*
MaybeGetFeatureValues(const Feature& feature);
template <>
const protobuf::RepeatedField<protobuf_int64>*
MaybeGetFeatureValues<protobuf_int64>(const Feature& feature);
template <>
const protobuf::RepeatedField<float>* MaybeGetFeatureValues<float>(
const Feature& feature);
template <>
const protobuf::RepeatedPtrField<std::string>* MaybeGetFeatureValues<tstring>(
const Feature& feature);
template <>
const protobuf::RepeatedPtrField<std::string>*
MaybeGetFeatureValues<std::string>(const Feature& feature);
template <typename FeatureType, typename ProtoType>
const typename internal::RepeatedFieldTrait<FeatureType>::Type*
MaybeGetFeatureValues(absl::string_view key, const ProtoType& proto) {
const Feature* feature = MaybeGetFeature(key, proto);
if (feature == nullptr) {
return nullptr;
}
return &GetFeatureValues<FeatureType>(*feature);
}
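// Illustrative sketch: unlike GetFeatureValues, the Maybe* variant returns
// nullptr rather than failing when the key is absent.
//
//   const Example example;
//   auto* tag = MaybeGetFeatureValues<protobuf_int64>("tag", example);
//   // tag == nullptr until a "tag" feature is added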
template <typename ProtoType>
Feature* GetFeature(absl::string_view key, ProtoType* proto) {
return &(*GetFeatures(proto)->mutable_feature())[internal::ProtoMapKey(key)];
}
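// Illustrative sketch (mirrors the unit tests): obtaining a mutable Feature
// and filling it in place.
//
//   Example example;
//   Feature* feature = GetFeature("tag", &example);
//   AppendFeatureValues<float>({1.1, 2.2, 3.3}, feature);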
const protobuf::RepeatedPtrField<Feature>& GetFeatureList(
absl::string_view key, const SequenceExample& sequence_example);
protobuf::RepeatedPtrField<Feature>* GetFeatureList(
absl::string_view feature_list_key, SequenceExample* sequence_example);
template <typename IteratorType>
void AppendFeatureValues(IteratorType first, IteratorType last,
Feature* feature) {
using FeatureType = typename internal::FeatureTrait<
typename std::iterator_traits<IteratorType>::value_type>::Type;
auto& values = *GetFeatureValues<FeatureType>(feature);
values.Reserve(std::distance(first, last));
for (auto it = first; it != last; ++it) {
*values.Add() = *it;
}
}
// Overload for initializer lists. Note that std::initializer_list elements
// are const, so the std::move below effectively copies; it is harmless but
// cannot actually move.
template <typename ValueType>
void AppendFeatureValues(std::initializer_list<ValueType> container,
                         Feature* feature) {
  using FeatureType = typename internal::FeatureTrait<ValueType>::Type;
  auto& values = *GetFeatureValues<FeatureType>(feature);
  values.Reserve(container.size());
  for (auto& elt : container) {
    *values.Add() = std::move(elt);
  }
}
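// Illustrative sketch: appending directly to a Feature; the feature kind
// (here float_list) is deduced from the element type.
//
//   Feature feature;
//   AppendFeatureValues({1.1, 2.2, 3.3}, &feature);
//   // feature.float_list().value_size() == 3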
namespace internal {
template <typename T, typename = void>
struct HasSize : std::false_type {};
template <typename T>
struct HasSize<T, absl::void_t<decltype(std::declval<T>().size())>>
: std::true_type {};
template <typename ContainerType, typename RepeatedFieldType>
auto ReserveIfSizeAvailable(const ContainerType& container,
RepeatedFieldType& values) ->
typename std::enable_if_t<HasSize<ContainerType>::value, void> {
values.Reserve(container.size());
}
template <typename ContainerType, typename RepeatedFieldType>
auto ReserveIfSizeAvailable(const ContainerType& container,
RepeatedFieldType& values) ->
typename std::enable_if_t<!HasSize<ContainerType>::value, void> {}
}
template <typename ContainerType>
void AppendFeatureValues(const ContainerType& container, Feature* feature) {
using IteratorType = typename ContainerType::const_iterator;
using FeatureType = typename internal::FeatureTrait<
typename std::iterator_traits<IteratorType>::value_type>::Type;
auto* values = GetFeatureValues<FeatureType>(feature);
internal::ReserveIfSizeAvailable(container, *values);
for (const auto& elt : container) {
if constexpr (internal::is_string<FeatureType>::value) {
*values->Add() = std::string(elt);
} else {
*values->Add() = elt;
}
}
}
template <typename IteratorType, typename ProtoType>
void AppendFeatureValues(IteratorType first, IteratorType last,
absl::string_view key, ProtoType* proto) {
AppendFeatureValues(first, last, GetFeature(key, GetFeatures(proto)));
}
template <typename ContainerType, typename ProtoType>
void AppendFeatureValues(const ContainerType& container, absl::string_view key,
ProtoType* proto) {
AppendFeatureValues<ContainerType>(container,
GetFeature(key, GetFeatures(proto)));
}
template <typename ValueType, typename ProtoType>
void AppendFeatureValues(std::initializer_list<ValueType> container,
absl::string_view key, ProtoType* proto) {
AppendFeatureValues<ValueType>(container,
GetFeature(key, GetFeatures(proto)));
}
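// Illustrative sketch (key names are arbitrary examples): the keyed
// overloads create the feature under the key on demand.
//
//   Example example;
//   AppendFeatureValues({"FOO", "BAR", "BAZ"}, "tag", &example);
//   std::vector<double> more{1.1, 2.2};
//   AppendFeatureValues(more, "weights", &example);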
template <typename... FeatureType>
void ClearFeatureValues(Feature* feature);
template <>
void ClearFeatureValues<protobuf_int64>(Feature* feature);
template <>
void ClearFeatureValues<float>(Feature* feature);
template <>
void ClearFeatureValues<std::string>(Feature* feature);
template <>
void ClearFeatureValues<tstring>(Feature* feature);
template <typename IteratorType>
void SetFeatureValues(IteratorType first, IteratorType last, Feature* feature) {
using FeatureType = typename internal::FeatureTrait<
typename std::iterator_traits<IteratorType>::value_type>::Type;
ClearFeatureValues<FeatureType>(feature);
AppendFeatureValues(first, last, feature);
}
template <typename ValueType>
void SetFeatureValues(std::initializer_list<ValueType> container,
Feature* feature) {
using FeatureType = typename internal::FeatureTrait<ValueType>::Type;
ClearFeatureValues<FeatureType>(feature);
AppendFeatureValues(container, feature);
}
template <typename ContainerType>
void SetFeatureValues(const ContainerType& container, Feature* feature) {
using IteratorType = typename ContainerType::const_iterator;
using FeatureType = typename internal::FeatureTrait<
typename std::iterator_traits<IteratorType>::value_type>::Type;
ClearFeatureValues<FeatureType>(feature);
AppendFeatureValues(container, feature);
}
template <typename IteratorType, typename ProtoType>
void SetFeatureValues(IteratorType first, IteratorType last,
absl::string_view key, ProtoType* proto) {
SetFeatureValues(first, last, GetFeature(key, GetFeatures(proto)));
}
template <typename ContainerType, typename ProtoType>
void SetFeatureValues(const ContainerType& container, absl::string_view key,
ProtoType* proto) {
SetFeatureValues<ContainerType>(container,
GetFeature(key, GetFeatures(proto)));
}
template <typename ValueType, typename ProtoType>
void SetFeatureValues(std::initializer_list<ValueType> container,
absl::string_view key, ProtoType* proto) {
SetFeatureValues<ValueType>(container, GetFeature(key, GetFeatures(proto)));
}
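// Illustrative sketch (mirrors the unit tests): SetFeatureValues clears any
// existing values of the deduced kind before appending.
//
//   Example example;
//   AppendFeatureValues({1.1, 2.2, 3.3}, "tag", &example);
//   SetFeatureValues({10.1, 20.2, 30.3}, "tag", &example);
//   // GetFeatureValues<float>("tag", example).size() == 3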
template <typename... FeatureType>
bool HasFeature(absl::string_view key, const Features& features);
template <>
bool HasFeature<>(absl::string_view key, const Features& features);
template <>
bool HasFeature<protobuf_int64>(absl::string_view key,
const Features& features);
template <>
bool HasFeature<float>(absl::string_view key, const Features& features);
template <>
bool HasFeature<std::string>(absl::string_view key, const Features& features);
template <>
bool HasFeature<tstring>(absl::string_view key, const Features& features);
template <typename... FeatureType>
bool HasFeature(absl::string_view key, const Example& example) {
return HasFeature<FeatureType...>(key, GetFeatures(example));
}
template <typename... FeatureType>
bool HasFeature(absl::string_view key,
const SequenceExample& sequence_example) {
return HasFeature<FeatureType...>(key, GetFeatures(sequence_example));
}
template <typename... FeatureType>
ABSL_DEPRECATED("Use HasFeature instead.")
bool ExampleHasFeature(absl::string_view key, const Example& example) {
return HasFeature<FeatureType...>(key, example);
}
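// Illustrative sketch: HasFeature<> checks for any feature under the key,
// while HasFeature<T> additionally requires the matching kind.
//
//   Example example;
//   GetFeatureValues<float>("tag", &example)->Add(3.14);
//   // HasFeature("tag", example) == true
//   // HasFeature<protobuf_int64>("tag", example) == false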
}
#endif
#include "tensorflow/core/example/feature_util.h"
#include <string>
#include "absl/strings/string_view.h"
namespace tensorflow {
namespace internal {
Feature& ExampleFeature(absl::string_view name, Example* example) {
return *GetFeature(name, example);
}
}
template <>
bool HasFeature<>(absl::string_view key, const Features& features) {
return features.feature().contains(internal::ProtoMapKey(key));
}
template <>
bool HasFeature<protobuf_int64>(absl::string_view key,
const Features& features) {
auto it = features.feature().find(internal::ProtoMapKey(key));
return (it != features.feature().end()) &&
(it->second.kind_case() == Feature::KindCase::kInt64List);
}
template <>
bool HasFeature<float>(absl::string_view key, const Features& features) {
auto it = features.feature().find(internal::ProtoMapKey(key));
return (it != features.feature().end()) &&
(it->second.kind_case() == Feature::KindCase::kFloatList);
}
template <>
bool HasFeature<std::string>(absl::string_view key, const Features& features) {
auto it = features.feature().find(internal::ProtoMapKey(key));
return (it != features.feature().end()) &&
(it->second.kind_case() == Feature::KindCase::kBytesList);
}
template <>
bool HasFeature<tstring>(absl::string_view key, const Features& features) {
auto it = features.feature().find(internal::ProtoMapKey(key));
return (it != features.feature().end()) &&
(it->second.kind_case() == Feature::KindCase::kBytesList);
}
bool HasFeatureList(absl::string_view key,
const SequenceExample& sequence_example) {
return sequence_example.feature_lists().feature_list().contains(
internal::ProtoMapKey(key));
}
template <>
const protobuf::RepeatedField<protobuf_int64>& GetFeatureValues<protobuf_int64>(
const Feature& feature) {
return feature.int64_list().value();
}
template <>
protobuf::RepeatedField<protobuf_int64>* GetFeatureValues<protobuf_int64>(
Feature* feature) {
return feature->mutable_int64_list()->mutable_value();
}
template <>
const protobuf::RepeatedField<float>& GetFeatureValues<float>(
const Feature& feature) {
return feature.float_list().value();
}
template <>
protobuf::RepeatedField<float>* GetFeatureValues<float>(Feature* feature) {
return feature->mutable_float_list()->mutable_value();
}
template <>
const protobuf::RepeatedPtrField<std::string>& GetFeatureValues<tstring>(
const Feature& feature) {
return feature.bytes_list().value();
}
template <>
const protobuf::RepeatedPtrField<std::string>& GetFeatureValues<std::string>(
const Feature& feature) {
return feature.bytes_list().value();
}
template <>
protobuf::RepeatedPtrField<std::string>* GetFeatureValues<tstring>(
Feature* feature) {
return feature->mutable_bytes_list()->mutable_value();
}
template <>
protobuf::RepeatedPtrField<std::string>* GetFeatureValues<std::string>(
Feature* feature) {
return feature->mutable_bytes_list()->mutable_value();
}
const protobuf::RepeatedPtrField<Feature>& GetFeatureList(
absl::string_view key, const SequenceExample& sequence_example) {
return sequence_example.feature_lists()
.feature_list()
.at(internal::ProtoMapKey(key))
.feature();
}
protobuf::RepeatedPtrField<Feature>* GetFeatureList(
absl::string_view feature_list_key, SequenceExample* sequence_example) {
return (*sequence_example->mutable_feature_lists()
->mutable_feature_list())[internal::ProtoMapKey(
feature_list_key)]
.mutable_feature();
}
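// Illustrative usage sketch (mirrors the unit tests): appending two frames
// to the "images" feature list of a SequenceExample.
//
//   SequenceExample se;
//   AppendFeatureValues({"cam1-0", "cam2-0"},
//                       GetFeatureList("images", &se)->Add());
//   AppendFeatureValues({"cam1-1", "cam2-2"},
//                       GetFeatureList("images", &se)->Add());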
template <>
void ClearFeatureValues<protobuf_int64>(Feature* feature) {
feature->mutable_int64_list()->Clear();
}
template <>
void ClearFeatureValues<float>(Feature* feature) {
feature->mutable_float_list()->Clear();
}
template <>
void ClearFeatureValues<std::string>(Feature* feature) {
feature->mutable_bytes_list()->Clear();
}
template <>
void ClearFeatureValues<tstring>(Feature* feature) {
feature->mutable_bytes_list()->Clear();
}
template <>
Features* GetFeatures<Features>(Features* proto) {
return proto;
}
template <>
Features* GetFeatures<Example>(Example* proto) {
return proto->mutable_features();
}
template <>
Features* GetFeatures<SequenceExample>(SequenceExample* proto) {
return proto->mutable_context();
}
template <>
const Features& GetFeatures<Features>(const Features& proto) {
return proto;
}
template <>
const Features& GetFeatures<Example>(const Example& proto) {
return proto.features();
}
template <>
const Features& GetFeatures<SequenceExample>(const SequenceExample& proto) {
return proto.context();
}
} | #include "tensorflow/core/example/feature_util.h"
#include <algorithm>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
const float kTolerance = 1e-5;
TEST(GetFeatureValuesInt64Test, ReadsASingleValue) {
Example example;
(*example.mutable_features()->mutable_feature())["tag"]
.mutable_int64_list()
->add_value(42);
auto tag = GetFeatureValues<protobuf_int64>("tag", example);
ASSERT_EQ(1, tag.size());
EXPECT_EQ(42, tag.Get(0));
}
TEST(GetFeatureValuesInt64Test, ReadsASingleValueFromFeature) {
Feature feature;
feature.mutable_int64_list()->add_value(42);
auto values = GetFeatureValues<protobuf_int64>(feature);
ASSERT_EQ(1, values.size());
EXPECT_EQ(42, values.Get(0));
}
TEST(GetFeatureValuesInt64Test, ReadsASingleValueFromSequenceExampleContext) {
SequenceExample example;
(*example.mutable_context()->mutable_feature())["tag"]
.mutable_int64_list()
->add_value(42);
auto tag = GetFeatureValues<protobuf_int64>("tag", example);
ASSERT_EQ(1, tag.size());
EXPECT_EQ(42, tag.Get(0));
}
TEST(GetFeatureValuesInt64Test, WritesASingleValue) {
Example example;
GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
ASSERT_EQ(1,
example.features().feature().at("tag").int64_list().value_size());
EXPECT_EQ(42, example.features().feature().at("tag").int64_list().value(0));
}
TEST(GetFeatureValuesInt64Test, WritesASingleValueToFeature) {
Feature feature;
GetFeatureValues<protobuf_int64>(&feature)->Add(42);
ASSERT_EQ(1, feature.int64_list().value_size());
EXPECT_EQ(42, feature.int64_list().value(0));
}
TEST(GetFeatureValuesInt64Test, WritesASingleValueToSequenceExample) {
SequenceExample example;
GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
ASSERT_EQ(1, example.context().feature().at("tag").int64_list().value_size());
EXPECT_EQ(42, example.context().feature().at("tag").int64_list().value(0));
}
TEST(GetFeatureValuesInt64Test, CheckUntypedFieldExistence) {
Example example;
ASSERT_FALSE(HasFeature("tag", example));
GetFeatureValues<protobuf_int64>("tag", &example)->Add(0);
EXPECT_TRUE(HasFeature("tag", example));
}
TEST(GetFeatureValuesInt64Test, CheckUntypedFieldExistenceForSequenceExample) {
SequenceExample seq_example;
ASSERT_FALSE(HasFeature("tag", seq_example));
GetFeatureValues<protobuf_int64>("tag", &seq_example)->Add(0);
EXPECT_TRUE(HasFeature("tag", seq_example));
}
TEST(GetFeatureValuesInt64Test, CheckTypedFieldExistence) {
Example example;
GetFeatureValues<float>("tag", &example)->Add(3.14);
ASSERT_FALSE(HasFeature<protobuf_int64>("tag", example));
GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
EXPECT_TRUE(HasFeature<protobuf_int64>("tag", example));
auto tag_ro = GetFeatureValues<protobuf_int64>("tag", example);
ASSERT_EQ(1, tag_ro.size());
EXPECT_EQ(42, tag_ro.Get(0));
}
TEST(GetFeatureValuesInt64Test, CheckTypedFieldExistenceForSequenceExample) {
SequenceExample sequence_example;
GetFeatureValues<float>("tag", &sequence_example)->Add(3.14);
ASSERT_FALSE(HasFeature<protobuf_int64>("tag", sequence_example));
GetFeatureValues<protobuf_int64>("tag", &sequence_example)->Add(42);
EXPECT_TRUE(HasFeature<protobuf_int64>("tag", sequence_example));
auto tag_ro = GetFeatureValues<protobuf_int64>("tag", sequence_example);
ASSERT_EQ(1, tag_ro.size());
EXPECT_EQ(42, tag_ro.Get(0));
}
TEST(GetFeatureValuesInt64Test, CopyIterableToAField) {
Example example;
std::vector<int> values{1, 2, 3};
std::copy(values.begin(), values.end(),
protobuf::RepeatedFieldBackInserter(
GetFeatureValues<protobuf_int64>("tag", &example)));
auto tag_ro = GetFeatureValues<protobuf_int64>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_EQ(1, tag_ro.Get(0));
EXPECT_EQ(2, tag_ro.Get(1));
EXPECT_EQ(3, tag_ro.Get(2));
}
TEST(GetFeatureValuesFloatTest, ReadsASingleValueFromFeature) {
Feature feature;
feature.mutable_float_list()->add_value(3.14);
auto values = GetFeatureValues<float>(feature);
ASSERT_EQ(1, values.size());
EXPECT_NEAR(3.14, values.Get(0), kTolerance);
}
TEST(GetFeatureValuesFloatTest, ReadsASingleValue) {
Example example;
(*example.mutable_features()->mutable_feature())["tag"]
.mutable_float_list()
->add_value(3.14);
auto tag = GetFeatureValues<float>("tag", example);
ASSERT_EQ(1, tag.size());
EXPECT_NEAR(3.14, tag.Get(0), kTolerance);
}
TEST(GetFeatureValuesFloatTest, ReadsASingleValueFromSequenceExample) {
SequenceExample example;
(*example.mutable_context()->mutable_feature())["tag"]
.mutable_float_list()
->add_value(3.14);
auto tag = GetFeatureValues<float>("tag", example);
ASSERT_EQ(1, tag.size());
EXPECT_NEAR(3.14, tag.Get(0), kTolerance);
}
TEST(GetFeatureValuesFloatTest, WritesASingleValueToFeature) {
Feature feature;
GetFeatureValues<float>(&feature)->Add(3.14);
ASSERT_EQ(1, feature.float_list().value_size());
EXPECT_NEAR(3.14, feature.float_list().value(0), kTolerance);
}
TEST(GetFeatureValuesFloatTest, WritesASingleValue) {
Example example;
GetFeatureValues<float>("tag", &example)->Add(3.14);
ASSERT_EQ(1,
example.features().feature().at("tag").float_list().value_size());
EXPECT_NEAR(3.14,
example.features().feature().at("tag").float_list().value(0),
kTolerance);
}
TEST(GetFeatureValuesFloatTest, WritesASingleValueToSequenceExample) {
SequenceExample example;
GetFeatureValues<float>("tag", &example)->Add(3.14);
ASSERT_EQ(1, example.context().feature().at("tag").float_list().value_size());
EXPECT_NEAR(3.14, example.context().feature().at("tag").float_list().value(0),
kTolerance);
}
TEST(GetFeatureValuesFloatTest, CheckTypedFieldExistence) {
Example example;
GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
ASSERT_FALSE(HasFeature<float>("tag", example));
GetFeatureValues<float>("tag", &example)->Add(3.14);
EXPECT_TRUE(HasFeature<float>("tag", example));
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(1, tag_ro.size());
EXPECT_NEAR(3.14, tag_ro.Get(0), kTolerance);
}
TEST(GetFeatureValuesFloatTest, CheckTypedFieldExistenceForDeprecatedMethod) {
Example example;
GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
ASSERT_FALSE(ExampleHasFeature<float>("tag", example));
GetFeatureValues<float>("tag", &example)->Add(3.14);
EXPECT_TRUE(ExampleHasFeature<float>("tag", example));
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(1, tag_ro.size());
EXPECT_NEAR(3.14, tag_ro.Get(0), kTolerance);
}
TEST(GetFeatureValuesStringTest, ReadsASingleValueFromFeature) {
Feature feature;
feature.mutable_bytes_list()->add_value("FOO");
auto values = GetFeatureValues<std::string>(feature);
ASSERT_EQ(1, values.size());
EXPECT_EQ("FOO", values.Get(0));
}
TEST(GetFeatureValuesStringTest, ReadsASingleValue) {
Example example;
(*example.mutable_features()->mutable_feature())["tag"]
.mutable_bytes_list()
->add_value("FOO");
auto tag = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(1, tag.size());
EXPECT_EQ("FOO", tag.Get(0));
}
TEST(GetFeatureValuesStringTest, ReadsASingleValueFromSequenceExample) {
SequenceExample example;
(*example.mutable_context()->mutable_feature())["tag"]
.mutable_bytes_list()
->add_value("FOO");
auto tag = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(1, tag.size());
EXPECT_EQ("FOO", tag.Get(0));
}
TEST(GetFeatureValuesStringTest, WritesASingleValueToFeature) {
Feature feature;
*GetFeatureValues<std::string>(&feature)->Add() = "FOO";
ASSERT_EQ(1, feature.bytes_list().value_size());
EXPECT_EQ("FOO", feature.bytes_list().value(0));
}
TEST(GetFeatureValuesStringTest, WritesASingleValue) {
Example example;
*GetFeatureValues<std::string>("tag", &example)->Add() = "FOO";
ASSERT_EQ(1,
example.features().feature().at("tag").bytes_list().value_size());
EXPECT_EQ("FOO",
example.features().feature().at("tag").bytes_list().value(0));
}
TEST(GetFeatureValuesStringTest, WritesASingleValueSequenceExample) {
SequenceExample example;
*GetFeatureValues<std::string>("tag", &example)->Add() = "FOO";
ASSERT_EQ(1, example.context().feature().at("tag").bytes_list().value_size());
EXPECT_EQ("FOO", example.context().feature().at("tag").bytes_list().value(0));
}
TEST(GetFeatureValuesStringTest, CheckTypedFieldExistence) {
Example example;
GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
ASSERT_FALSE(HasFeature<std::string>("tag", example));
*GetFeatureValues<std::string>("tag", &example)->Add() = "FOO";
EXPECT_TRUE(HasFeature<std::string>("tag", example));
auto tag_ro = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(1, tag_ro.size());
EXPECT_EQ("FOO", tag_ro.Get(0));
}
TEST(AppendFeatureValuesTest, FloatValuesFromContainer) {
Example example;
std::vector<double> values{1.1, 2.2, 3.3};
AppendFeatureValues(values, "tag", &example);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(AppendFeatureValuesTest, FloatValuesFromContainerWithStringViewKey) {
Example example;
std::vector<double> values{1.1, 2.2, 3.3};
absl::string_view key("tag");
AppendFeatureValues(values, key, &example);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(AppendFeatureValuesTest, FloatValuesUsingInitializerList) {
Example example;
AppendFeatureValues({1.1, 2.2, 3.3}, "tag", &example);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(AppendFeatureValuesTest,
FloatValuesUsingInitializerListWithStringViewKey) {
Example example;
absl::string_view key("tag");
AppendFeatureValues({1.1, 2.2, 3.3}, key, &example);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(AppendFeatureValuesTest, FloatValuesUsingIterators) {
Example example;
std::vector<double> values{1.1, 2.2, 3.3};
AppendFeatureValues(values.begin(), values.end(), "tag", &example);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(AppendFeatureValuesTest, FloatValuesUsingIteratorsWithStringViewKey) {
Example example;
absl::string_view key("tag");
std::vector<double> values{1.1, 2.2, 3.3};
AppendFeatureValues(values.begin(), values.end(), key, &example);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(SetFeatureValuesTest, FloatValuesUsingInitializerList) {
Example example;
AppendFeatureValues({1.1, 2.2, 3.3}, "tag", &example);
SetFeatureValues({10.1, 20.2, 30.3}, "tag", &example);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(10.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(20.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(30.3, tag_ro.Get(2), kTolerance);
}
TEST(SetFeatureValuesTest, ContainerOfStringView) {
Example example;
std::vector<std::string> values = {"hello", "world"};
std::vector<absl::string_view> values_string_view(values.begin(),
values.end());
SetFeatureValues(values_string_view, "tag", &example);
auto tag_ro = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(tag_ro.size(), 2);
EXPECT_EQ(tag_ro.Get(0), "hello");
EXPECT_EQ(tag_ro.Get(1), "world");
}
TEST(AppendFeatureValuesTest, Int64ValuesUsingInitializerList) {
Example example;
std::vector<protobuf_int64> values{1, 2, 3};
AppendFeatureValues(values, "tag", &example);
auto tag_ro = GetFeatureValues<protobuf_int64>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_EQ(1, tag_ro.Get(0));
EXPECT_EQ(2, tag_ro.Get(1));
EXPECT_EQ(3, tag_ro.Get(2));
}
TEST(AppendFeatureValuesTest, StringValuesUsingInitializerList) {
Example example;
AppendFeatureValues({"FOO", "BAR", "BAZ"}, "tag", &example);
auto tag_ro = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_EQ("FOO", tag_ro.Get(0));
EXPECT_EQ("BAR", tag_ro.Get(1));
EXPECT_EQ("BAZ", tag_ro.Get(2));
}
TEST(AppendFeatureValuesTest, StringVariablesUsingInitializerList) {
Example example;
string string1("FOO");
string string2("BAR");
string string3("BAZ");
AppendFeatureValues({string1, string2, string3}, "tag", &example);
auto tag_ro = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_EQ("FOO", tag_ro.Get(0));
EXPECT_EQ("BAR", tag_ro.Get(1));
EXPECT_EQ("BAZ", tag_ro.Get(2));
}
TEST(AppendFeatureValuesTest, StringViewVariablesUsingInitializerList) {
Example example;
AppendFeatureValues({absl::string_view("FOO"), absl::string_view("BAR"),
absl::string_view("BAZ")},
"tag", &example);
auto tag_ro = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_EQ("FOO", tag_ro.Get(0));
EXPECT_EQ("BAR", tag_ro.Get(1));
EXPECT_EQ("BAZ", tag_ro.Get(2));
}
TEST(AppendFeatureValuesTest, StringViewVariablesUsingIterators) {
Example example;
std::vector<absl::string_view> strings;
strings.push_back("FOO");
strings.push_back("BAR");
strings.push_back("BAZ");
AppendFeatureValues(strings.begin(), strings.end(), "tag", &example);
auto tag_ro = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_EQ("FOO", tag_ro.Get(0));
EXPECT_EQ("BAR", tag_ro.Get(1));
EXPECT_EQ("BAZ", tag_ro.Get(2));
}
TEST(GetFeatureTest, WritesAVectorToFeature) {
Example example;
Feature* feature = GetFeature("tag", &example);
AppendFeatureValues<float>({1.1, 2.2, 3.3}, feature);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(GetFeatureTest, ReadsAVectorFromFeature) {
Example example;
AppendFeatureValues<float>({1.1, 2.2, 3.3}, "tag", &example);
const Feature& feature = GetFeature("tag", example);
auto tag_ro = GetFeatureValues<float>(feature);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(SequenceExampleTest, ReadsASingleValueFromContext) {
SequenceExample se;
(*se.mutable_context()->mutable_feature())["tag"]
.mutable_int64_list()
->add_value(42);
auto values = GetFeatureValues<protobuf_int64>("tag", se.context());
ASSERT_EQ(1, values.size());
EXPECT_EQ(42, values.Get(0));
}
TEST(SequenceExampleTest, WritesASingleValueToContext) {
SequenceExample se;
GetFeatureValues<protobuf_int64>("tag", se.mutable_context())->Add(42);
ASSERT_EQ(1, se.context().feature().at("tag").int64_list().value_size());
EXPECT_EQ(42, se.context().feature().at("tag").int64_list().value(0));
}
TEST(SequenceExampleTest, AppendFeatureValuesToContextSingleArg) {
SequenceExample se;
AppendFeatureValues({1.1, 2.2, 3.3}, "tag", se.mutable_context());
auto tag_ro = GetFeatureValues<float>("tag", se.context());
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(SequenceExampleTest, CheckTypedFieldExistence) {
SequenceExample se;
GetFeatureValues<float>("tag", se.mutable_context())->Add(3.14);
ASSERT_FALSE(HasFeature<protobuf_int64>("tag", se.context()));
GetFeatureValues<protobuf_int64>("tag", se.mutable_context())->Add(42);
EXPECT_TRUE(HasFeature<protobuf_int64>("tag", se.context()));
auto tag_ro = GetFeatureValues<protobuf_int64>("tag", se.context());
ASSERT_EQ(1, tag_ro.size());
EXPECT_EQ(42, tag_ro.Get(0));
}
TEST(SequenceExampleTest, ReturnsExistingFeatureLists) {
SequenceExample se;
(*se.mutable_feature_lists()->mutable_feature_list())["tag"]
.mutable_feature()
->Add();
auto feature = GetFeatureList("tag", se);
ASSERT_EQ(1, feature.size());
}
TEST(SequenceExampleTest, CreatesNewFeatureLists) {
SequenceExample se;
GetFeatureList("tag", &se)->Add();
EXPECT_EQ(1, se.feature_lists().feature_list().at("tag").feature_size());
}
TEST(SequenceExampleTest, CheckFeatureListExistence) {
SequenceExample se;
ASSERT_FALSE(HasFeatureList("tag", se));
GetFeatureList("tag", &se)->Add();
ASSERT_TRUE(HasFeatureList("tag", se));
}
TEST(SequenceExampleTest, AppendFeatureValuesWithInitializerList) {
SequenceExample se;
AppendFeatureValues({1, 2, 3}, "ids", se.mutable_context());
AppendFeatureValues({"cam1-0", "cam2-0"},
GetFeatureList("images", &se)->Add());
AppendFeatureValues({"cam1-1", "cam2-2"},
GetFeatureList("images", &se)->Add());
SequenceExample expected_proto;
protobuf::TextFormat::ParseFromString(
"context {\n"
" feature {\n"
" key: \"ids\"\n"
" value {\n"
" int64_list {\n"
" value: 1\n"
" value: 2\n"
" value: 3\n"
" }\n"
" }\n"
" }\n"
"}\n"
"feature_lists {\n"
" feature_list {\n"
" key: \"images\"\n"
" value {\n"
" feature {\n"
" bytes_list {\n"
" value: \"cam1-0\"\n"
" value: \"cam2-0\"\n"
" }\n"
" }\n"
" feature {\n"
" bytes_list {\n"
" value: \"cam1-1\"\n"
" value: \"cam2-2\"\n"
" }\n"
" }\n"
" }\n"
" }\n"
"}\n",
&expected_proto);
EXPECT_EQ(se.DebugString(), expected_proto.DebugString());
}
TEST(SequenceExampleTest, AppendFeatureValuesWithVectors) {
SequenceExample se;
std::vector<float> readings{1.0, 2.5, 5.0};
AppendFeatureValues(readings, GetFeatureList("movie_ratings", &se)->Add());
SequenceExample expected_proto;
protobuf::TextFormat::ParseFromString(
"feature_lists {\n"
" feature_list {\n"
" key: \"movie_ratings\"\n"
" value {\n"
" feature {\n"
" float_list {\n"
" value: 1\n"
" value: 2.5\n"
" value: 5\n"
" }\n"
" }\n"
" }\n"
" }\n"
"}\n",
&expected_proto);
EXPECT_EQ(se.DebugString(), expected_proto.DebugString());
}
TEST(SequenceExampleTest, SetContextFeatureValuesWithInitializerList) {
SequenceExample se;
SetFeatureValues({101, 102, 103}, "ids", se.mutable_context());
SetFeatureValues({1, 2, 3}, "ids", se.mutable_context());
AppendFeatureValues({4, 5, 6}, "ids", se.mutable_context());
SequenceExample expected_proto;
protobuf::TextFormat::ParseFromString(
"context {\n"
" feature {\n"
" key: \"ids\"\n"
" value {\n"
" int64_list {\n"
" value: 1\n"
" value: 2\n"
" value: 3\n"
" value: 4\n"
" value: 5\n"
" value: 6\n"
" }\n"
" }\n"
" }\n"
"}\n",
&expected_proto);
EXPECT_EQ(se.DebugString(), expected_proto.DebugString());
}
TEST(SequenceExampleTest, SetFeatureValuesWithInitializerList) {
SequenceExample se;
AppendFeatureValues({1, 2, 3}, "ids", se.mutable_context());
SetFeatureValues({4, 5, 6}, "ids", se.mutable_context());
AppendFeatureValues({"cam1-0", "cam2-0"},
GetFeatureList("images", &se)->Add());
SetFeatureValues({"cam1-1", "cam2-1"}, GetFeatureList("images", &se)->Add());
AppendFeatureValues({"cam1-0", "cam2-0"},
GetFeatureList("more-images", &se)->Add());
SetFeatureValues({"cam1-1", "cam2-1"},
GetFeatureList("more-images", &se)->Mutable(0));
SequenceExample expected_proto;
protobuf::TextFormat::ParseFromString(
"context {\n"
" feature {\n"
" key: \"ids\"\n"
" value {\n"
" int64_list {\n"
" value: 4\n"
" value: 5\n"
" value: 6\n"
" }\n"
" }\n"
" }\n"
"}\n"
"feature_lists {\n"
" feature_list {\n"
" key: \"images\"\n"
" value {\n"
" feature {\n"
" bytes_list {\n"
" value: \"cam1-0\"\n"
" value: \"cam2-0\"\n"
" }\n"
" }\n"
" feature {\n"
" bytes_list {\n"
" value: \"cam1-1\"\n"
" value: \"cam2-1\"\n"
" }\n"
" }\n"
" }\n"
" }\n"
" feature_list {\n"
" key: \"more-images\"\n"
" value {\n"
" feature {\n"
" bytes_list {\n"
" value: \"cam1-1\"\n"
" value: \"cam2-1\"\n"
" }\n"
" }\n"
" }\n"
" }\n"
"}\n",
&expected_proto);
EXPECT_EQ(se.DebugString(), expected_proto.DebugString());
}
TEST(MaybeGetFeatureValuesTest, ReturnsNullPtr) {
const Example example;
auto tag = MaybeGetFeatureValues<protobuf_int64>("tag", example);
ASSERT_EQ(tag, nullptr);
}
TEST(MaybeGetFeatureValuesTest, ReadsASingleInt) {
Example example;
(*example.mutable_features()->mutable_feature())["tag"]
.mutable_int64_list()
->add_value(42);
auto tag = MaybeGetFeatureValues<protobuf_int64>("tag", example);
ASSERT_EQ(1, tag->size());
EXPECT_EQ(42, tag->Get(0));
}
TEST(MaybeGetFeatureValuesTest, ReadsASingleFloat) {
Example example;
(*example.mutable_features()->mutable_feature())["tag"]
.mutable_float_list()
->add_value(0.3);
auto tag = MaybeGetFeatureValues<float>("tag", example);
ASSERT_EQ(1, tag->size());
EXPECT_FLOAT_EQ(0.3, tag->Get(0));
}
TEST(MaybeGetFeatureValuesTest, ReadsASingleString) {
Example example;
(*example.mutable_features()->mutable_feature())["tag"]
.mutable_bytes_list()
->add_value("entry");
auto tag = MaybeGetFeatureValues<std::string>("tag", example);
ASSERT_EQ(1, tag->size());
EXPECT_EQ("entry", tag->Get(0));
}
}
} |
1,345 | cpp | tensorflow/tensorflow | platform_strings | tensorflow/core/platform/platform_strings.cc | tensorflow/core/platform/platform_strings_test.cc | #ifndef TENSORFLOW_CORE_PLATFORM_PLATFORM_STRINGS_H_
#define TENSORFLOW_CORE_PLATFORM_PLATFORM_STRINGS_H_
#include <stdio.h>
#include <string>
#include <vector>
#define TF_PLAT_STR_VERSION_ "1.0"
#define TF_PLAT_STR_MAGIC_PREFIX_ "\0S\\s\":^p*L}"
#define TF_PLAT_STR_STR_1_(x) #x
#define TF_PLAT_STR_AS_STR_(x) TF_PLAT_STR_STR_1_(x)
#define TF_PLAT_STR_TERMINATOR_
#define TF_PLAT_STR_(x) TF_PLAT_STR_MAGIC_PREFIX_ #x "=" TF_PLAT_STR_AS_STR_(x)
#include "tensorflow/core/platform/platform_strings_computed.h"
#define TF_PLAT_STR_LIST___x86_64__() \
TF_PLAT_STR__M_IX86_FP \
TF_PLAT_STR__NO_PREFETCHW \
TF_PLAT_STR___3dNOW_A__ \
TF_PLAT_STR___3dNOW__ \
TF_PLAT_STR___ABM__ \
TF_PLAT_STR___ADX__ \
TF_PLAT_STR___AES__ \
TF_PLAT_STR___AVX2__ \
TF_PLAT_STR___AVX512BW__ \
TF_PLAT_STR___AVX512CD__ \
TF_PLAT_STR___AVX512DQ__ \
TF_PLAT_STR___AVX512ER__ \
TF_PLAT_STR___AVX512F__ \
TF_PLAT_STR___AVX512IFMA__ \
TF_PLAT_STR___AVX512PF__ \
TF_PLAT_STR___AVX512VBMI__ \
TF_PLAT_STR___AVX512VL__ \
TF_PLAT_STR___AVX__ \
TF_PLAT_STR___BMI2__ \
TF_PLAT_STR___BMI__ \
TF_PLAT_STR___CLFLUSHOPT__ \
TF_PLAT_STR___CLZERO__ \
TF_PLAT_STR___F16C__ \
TF_PLAT_STR___FMA4__ \
TF_PLAT_STR___FMA__ \
TF_PLAT_STR___FP_FAST_FMA \
TF_PLAT_STR___FP_FAST_FMAF \
TF_PLAT_STR___FSGSBASE__ \
TF_PLAT_STR___FXSR__ \
TF_PLAT_STR___LWP__ \
TF_PLAT_STR___LZCNT__ \
TF_PLAT_STR___MMX__ \
TF_PLAT_STR___MWAITX__ \
TF_PLAT_STR___PCLMUL__ \
TF_PLAT_STR___PKU__ \
TF_PLAT_STR___POPCNT__ \
TF_PLAT_STR___PRFCHW__ \
TF_PLAT_STR___RDRND__ \
TF_PLAT_STR___RDSEED__ \
TF_PLAT_STR___RTM__ \
TF_PLAT_STR___SHA__ \
TF_PLAT_STR___SSE2_MATH__ \
TF_PLAT_STR___SSE2__ \
TF_PLAT_STR___SSE_MATH__ \
TF_PLAT_STR___SSE__ \
TF_PLAT_STR___SSE3__ \
TF_PLAT_STR___SSE4A__ \
TF_PLAT_STR___SSE4_1__ \
TF_PLAT_STR___SSE4_2__ \
TF_PLAT_STR___SSSE3__ \
TF_PLAT_STR___TBM__ \
TF_PLAT_STR___XOP__ \
TF_PLAT_STR___XSAVEC__ \
TF_PLAT_STR___XSAVEOPT__ \
TF_PLAT_STR___XSAVES__ \
TF_PLAT_STR___XSAVE__ \
TF_PLAT_STR_TERMINATOR_
#define TF_PLAT_STR_LIST___powerpc64__() \
TF_PLAT_STR__SOFT_DOUBLE \
TF_PLAT_STR__SOFT_FLOAT \
TF_PLAT_STR___ALTIVEC__ \
TF_PLAT_STR___APPLE_ALTIVEC__ \
TF_PLAT_STR___CRYPTO__ \
TF_PLAT_STR___FLOAT128_HARDWARE__ \
TF_PLAT_STR___FLOAT128_TYPE__ \
TF_PLAT_STR___FP_FAST_FMA \
TF_PLAT_STR___FP_FAST_FMAF \
TF_PLAT_STR___HTM__ \
TF_PLAT_STR___NO_FPRS__ \
TF_PLAT_STR___NO_LWSYNC__ \
TF_PLAT_STR___POWER8_VECTOR__ \
TF_PLAT_STR___POWER9_VECTOR__ \
TF_PLAT_STR___PPC405__ \
TF_PLAT_STR___QUAD_MEMORY_ATOMIC__ \
TF_PLAT_STR___RECIPF__ \
TF_PLAT_STR___RECIP_PRECISION__ \
TF_PLAT_STR___RECIP__ \
TF_PLAT_STR___RSQRTEF__ \
TF_PLAT_STR___RSQRTE__ \
TF_PLAT_STR___TM_FENCE__ \
TF_PLAT_STR___UPPER_REGS_DF__ \
TF_PLAT_STR___UPPER_REGS_SF__ \
TF_PLAT_STR___VEC__ \
TF_PLAT_STR___VSX__ \
TF_PLAT_STR_TERMINATOR_
#define TF_PLAT_STR_LIST___aarch64__() \
TF_PLAT_STR___ARM_ARCH \
TF_PLAT_STR___ARM_FEATURE_CLZ \
TF_PLAT_STR___ARM_FEATURE_CRC32 \
TF_PLAT_STR___ARM_FEATURE_CRC32 \
TF_PLAT_STR___ARM_FEATURE_CRYPTO \
TF_PLAT_STR___ARM_FEATURE_DIRECTED_ROUNDING \
TF_PLAT_STR___ARM_FEATURE_DSP \
TF_PLAT_STR___ARM_FEATURE_FMA \
TF_PLAT_STR___ARM_FEATURE_IDIV \
TF_PLAT_STR___ARM_FEATURE_LDREX \
TF_PLAT_STR___ARM_FEATURE_NUMERIC_MAXMIN \
TF_PLAT_STR___ARM_FEATURE_QBIT \
TF_PLAT_STR___ARM_FEATURE_QRDMX \
TF_PLAT_STR___ARM_FEATURE_SAT \
TF_PLAT_STR___ARM_FEATURE_SIMD32 \
TF_PLAT_STR___ARM_FEATURE_UNALIGNED \
TF_PLAT_STR___ARM_FP \
TF_PLAT_STR___ARM_NEON_FP \
TF_PLAT_STR___ARM_NEON__ \
TF_PLAT_STR___ARM_WMMX \
TF_PLAT_STR___IWMMXT2__ \
TF_PLAT_STR___IWMMXT__ \
TF_PLAT_STR___VFP_FP__ \
TF_PLAT_STR_TERMINATOR_
#define TF_PLAT_STR_LIST___generic__() \
TF_PLAT_STR_TARGET_IPHONE_SIMULATOR \
TF_PLAT_STR_TARGET_OS_IOS \
TF_PLAT_STR_TARGET_OS_IPHONE \
TF_PLAT_STR__MSC_VER \
TF_PLAT_STR__M_ARM \
TF_PLAT_STR__M_ARM64 \
TF_PLAT_STR__M_ARM_ARMV7VE \
TF_PLAT_STR__M_ARM_FP \
TF_PLAT_STR__M_IX86 \
TF_PLAT_STR__M_X64 \
TF_PLAT_STR__WIN32 \
TF_PLAT_STR__WIN64 \
TF_PLAT_STR___ANDROID__ \
TF_PLAT_STR___APPLE__ \
TF_PLAT_STR___BYTE_ORDER__ \
TF_PLAT_STR___CYGWIN__ \
TF_PLAT_STR___FreeBSD__ \
TF_PLAT_STR___LITTLE_ENDIAN__ \
TF_PLAT_STR___NetBSD__ \
TF_PLAT_STR___OpenBSD__ \
TF_PLAT_STR_____MSYS__ \
TF_PLAT_STR___aarch64__ \
TF_PLAT_STR___alpha__ \
TF_PLAT_STR___arm__ \
TF_PLAT_STR___i386__ \
TF_PLAT_STR___i686__ \
TF_PLAT_STR___ia64__ \
TF_PLAT_STR___linux__ \
TF_PLAT_STR___mips32__ \
TF_PLAT_STR___mips64__ \
TF_PLAT_STR___powerpc64__ \
TF_PLAT_STR___powerpc__ \
TF_PLAT_STR___riscv___ \
TF_PLAT_STR___s390x__ \
TF_PLAT_STR___sparc64__ \
TF_PLAT_STR___sparc__ \
TF_PLAT_STR___x86_64__ \
TF_PLAT_STR_TERMINATOR_
#if !defined(__x86_64__) && !defined(_M_X64) && \
!defined(__i386__) && !defined(_M_IX86)
#undef TF_PLAT_STR_LIST___x86_64__
#define TF_PLAT_STR_LIST___x86_64__()
#endif
#if !defined(__powerpc64__) && !defined(__powerpc__)
#undef TF_PLAT_STR_LIST___powerpc64__
#define TF_PLAT_STR_LIST___powerpc64__()
#endif
#if !defined(__aarch64__) && !defined(_M_ARM64) && \
!defined(__arm__) && !defined(_M_ARM)
#undef TF_PLAT_STR_LIST___aarch64__
#define TF_PLAT_STR_LIST___aarch64__()
#endif
#define TF_PLATFORM_STRINGS() \
static const char tf_cpu_option[] = \
TF_PLAT_STR_MAGIC_PREFIX_ "TF_PLAT_STR_VERSION=" TF_PLAT_STR_VERSION_ \
TF_PLAT_STR_LIST___x86_64__() \
TF_PLAT_STR_LIST___powerpc64__() \
TF_PLAT_STR_LIST___aarch64__() \
TF_PLAT_STR_LIST___generic__() \
; \
const char *tf_cpu_option_global; \
namespace { \
class TFCPUOptionHelper { \
public: \
    TFCPUOptionHelper() {                              \
      /* Compilers and linkers aggressively discard */ \
      /* unused data; the uses below keep the       */ \
      /* embedded option string alive.              */ \
      tf_cpu_option_global = tf_cpu_option;            \
      /* Reference the string at runtime as well.   */ \
      printf("%s%s", tf_cpu_option, "");               \
} \
} tf_cpu_option_avoid_omit_class; \
}
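// Usage sketch (mirrors the test binary below): a translation unit embeds
// the strings simply by invoking the macro at file scope, after which
// GetPlatformStrings() can recover them from the built binary. Each
// TF_PLAT_STR_* entry expands to the magic prefix followed by "MACRO=value".
//
//   #include "tensorflow/core/platform/platform_strings.h"
//   TF_PLATFORM_STRINGS()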
namespace tensorflow {
int GetPlatformStrings(const std::string& path,
std::vector<std::string>* found);
}
#endif
#include "tensorflow/core/platform/platform_strings.h"
#include <cerrno>
#include <cstdio>
#include <cstring>
#include <string>
#include <vector>
namespace tensorflow {
int GetPlatformStrings(const std::string& path,
std::vector<std::string>* found) {
int result;
FILE* ifp = fopen(path.c_str(), "rb");
if (ifp != nullptr) {
    static const char prefix[] = TF_PLAT_STR_MAGIC_PREFIX_;
    int first_char = prefix[1];  // prefix[0] is the leading NUL byte.
    int last_char = -1;
    int c;
    while ((c = getc(ifp)) != EOF) {
      if (c == first_char && last_char == 0) {
        // Saw a NUL followed by the first visible prefix byte: try to match
        // the remainder of the magic prefix.
        int i = 2;
        while (prefix[i] != 0 && (c = getc(ifp)) == prefix[i]) {
          i++;
        }
        if (prefix[i] == 0) {
          // Full prefix matched; collect the NUL-terminated payload string.
          std::string str;
          while ((c = getc(ifp)) != EOF && c != 0) {
            str.push_back(c);
          }
          if (!str.empty()) {
            found->push_back(str);
          }
        }
      }
      last_char = c;
    }
result = (ferror(ifp) == 0) ? 0 : errno;
if (fclose(ifp) != 0) {
result = errno;
}
} else {
result = errno;
}
return result;
}
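// Illustrative sketch: scanning an executable at `path` (an assumed
// std::string variable) for embedded platform strings.
//
//   std::vector<std::string> found;
//   int rc = tensorflow::GetPlatformStrings(path, &found);  // 0 or an errno
//   for (const std::string& s : found) printf("%s\n", s.c_str());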
} | #include "tensorflow/core/platform/platform_strings.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifndef _WIN32
#include <unistd.h>
#endif
#include <string>
#include <vector>
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/str_util.h"
TF_PLATFORM_STRINGS()
typedef std::vector<std::string> string_vec;
static int PrintStrings(const std::string& file_name) {
int rc = 0;
string_vec str;
if (!tensorflow::GetPlatformStrings(file_name, &str)) {
for (int i = 0; i != str.size(); i++) {
printf("%s\n", str[i].c_str());
}
} else {
perror(file_name.c_str());
rc = 2;
}
return rc;
}
static bool GetValue(const string_vec &str, const std::string ¯o_name,
std::string *pvalue) {
std::string nam_eq = macro_name + "=";
int i = 0;
while (i != str.size() && !absl::StartsWith(str[i], nam_eq)) {
i++;
}
bool found = (i != str.size());
if (found) {
*pvalue = str[i].substr(nam_eq.size());
}
return found;
}
static void CheckStr(const string_vec &str, const std::string ¯o_name,
const std::string &value) {
std::string value_from_str;
if (GetValue(str, macro_name, &value_from_str)) {
if (value != value_from_str) {
LOG(ERROR) << "===== value=" << value
<< " value_from_str=" << value_from_str;
for (int i = 0; i != str.size(); i++) {
LOG(ERROR) << "% " << str[i];
}
LOG(ERROR) << "=====";
}
CHECK_EQ(value, value_from_str) << " " << macro_name << ": bad value";
} else {
if (value != macro_name) {
LOG(ERROR) << "===== value=" << value << " macro_name=" << macro_name;
for (int i = 0; i != str.size(); i++) {
LOG(ERROR) << "% " << str[i];
}
LOG(ERROR) << "=====";
}
CHECK_EQ(value, macro_name) << " " << macro_name << ": not found in binary";
}
}
#define AS_STR_1_(x) #x
#define AS_STR(x) AS_STR_1_(x)
static int RunTest(const std::string &binary_name) {
int rc = 0;
string_vec str;
if (!tensorflow::GetPlatformStrings(binary_name, &str)) {
CheckStr(str, "__linux__", AS_STR(__linux__));
CheckStr(str, "_WIN32", AS_STR(_WIN32));
CheckStr(str, "__APPLE__", AS_STR(__APPLE__));
CheckStr(str, "__x86_64__", AS_STR(__x86_64__));
CheckStr(str, "__aarch64__", AS_STR(__aarch64__));
CheckStr(str, "__powerpc64__", AS_STR(__powerpc64__));
CheckStr(str, "TF_PLAT_STR_VERSION", TF_PLAT_STR_VERSION_);
} else {
perror(binary_name.c_str());
rc = 2;
}
return rc;
}
int main(int argc, char *argv[]) {
tensorflow::Env *env = tensorflow::Env::Default();
static const char usage[] = "usage: platform_strings_test [file...]";
int rc = 0;
tensorflow::port::InitMain(usage, &argc, &argv);
if (argc == 1) {
printf("rc=%d\n", PrintStrings(env->GetExecutablePath()));
rc = RunTest(env->GetExecutablePath());
} else {
for (int argn = 1; argn != argc; argn++) {
rc |= PrintStrings(argv[argn]);
}
}
return rc;
} |
1,346 | cpp | tensorflow/tensorflow | enable_tf2_utils | tensorflow/core/platform/enable_tf2_utils.cc | tensorflow/core/platform/enable_tf2_utils_test.cc | #ifndef TENSORFLOW_CORE_PLATFORM_ENABLE_TF2_UTILS_H_
#define TENSORFLOW_CORE_PLATFORM_ENABLE_TF2_UTILS_H_
namespace tensorflow {
void set_tf2_execution(bool enabled);
bool tf2_execution_enabled();
}
#endif
#include "tensorflow/core/platform/enable_tf2_utils.h"
#include <atomic>
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
enum Enablement : uint8 { kFalse = 0, kTrue = 1, undefined = 2 };
static std::atomic<Enablement> tf2_enabled{undefined};
void set_tf2_execution(bool enabled) {
tf2_enabled = (enabled) ? Enablement::kTrue : Enablement::kFalse;
}
bool tf2_execution_enabled() {
if (tf2_enabled == Enablement::undefined) {
static bool tf2_behavior_env_enabled = [] {
string tf2_env;
TF_CHECK_OK(ReadStringFromEnvVar("TF2_BEHAVIOR", "0", &tf2_env));
return tf2_env != "0";
}();
tf2_enabled =
(tf2_behavior_env_enabled) ? Enablement::kTrue : Enablement::kFalse;
}
return tf2_enabled;
}
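// Illustrative sketch (mirrors the unit test): an explicit setting takes
// precedence over whatever TF2_BEHAVIOR in the environment implies.
//
//   tensorflow::set_tf2_execution(true);
//   CHECK(tensorflow::tf2_execution_enabled());
//   tensorflow::set_tf2_execution(false);
//   CHECK(!tensorflow::tf2_execution_enabled());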
} | #include "tensorflow/core/platform/enable_tf2_utils.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
TEST(TF2EnabledTest, enabled_behavior) {
string tf2_env;
TF_CHECK_OK(ReadStringFromEnvVar("TF2_BEHAVIOR", "0", &tf2_env));
bool expected = (tf2_env != "0");
EXPECT_EQ(tensorflow::tf2_execution_enabled(), expected);
tensorflow::set_tf2_execution(true);
EXPECT_TRUE(tensorflow::tf2_execution_enabled());
tensorflow::set_tf2_execution(false);
EXPECT_FALSE(tensorflow::tf2_execution_enabled());
}
} |
1,347 | cpp | tensorflow/tensorflow | graph_view | tensorflow/core/grappler/utils/graph_view.cc | tensorflow/core/grappler/utils/graph_view_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_GRAPH_VIEW_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_GRAPH_VIEW_H_
#include <memory>
#include <vector>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
class Device;
class Graph;
class Node;
class OpKernel;
class Tensor;
struct EdgeInfo {
int dst_id;
int output_slot : 31;
bool is_last : 1;
int input_slot;
};
struct ControlEdgeInfo {
int dst_id;
};
struct NodeItem {
int node_id = -1;
  // Cached properties of the node and its kernel, packed as bitfields; each
  // mirrors a predicate on the underlying Node (or its consumers/inputs).
  bool kernel_is_async : 1;
  bool is_merge : 1;
  bool is_enter : 1;
  bool is_constant_enter : 1;
  bool is_exit : 1;
  bool is_control_trigger : 1;
  bool is_source : 1;
  bool is_enter_exit_or_next_iter : 1;
  bool is_transfer_node : 1;
  bool is_initialization_op : 1;
  bool is_recv_or_switch : 1;
  bool is_next_iteration : 1;
  bool is_noop : 1;
  bool is_any_consumer_merge_or_control_trigger : 1;
  bool is_any_input_ref_typed : 1;
  bool is_distributed_communication : 1;
OpKernel* kernel = nullptr;
const Tensor* const_tensor = nullptr;
int num_inputs;
int num_outputs;
int input_start = 0;
int32 num_output_edges;
int32 num_output_control_edges;
std::unique_ptr<bool[]> outputs_required;
absl::Span<EdgeInfo> mutable_output_edges() {
return absl::Span<EdgeInfo>(output_edge_base(), num_output_edges);
}
gtl::ArraySlice<EdgeInfo> output_edges() const {
return gtl::ArraySlice<EdgeInfo>(output_edge_base(), num_output_edges);
}
gtl::ArraySlice<ControlEdgeInfo> output_control_edges() const {
return gtl::ArraySlice<const ControlEdgeInfo>(output_control_edge_base(),
num_output_control_edges);
}
DataType input_type(int i) const {
DCHECK_LT(i, num_inputs);
return static_cast<DataType>(input_type_base()[i]);
}
DataType output_type(int i) const {
DCHECK_LT(i, num_outputs);
return static_cast<DataType>(output_type_base()[i]);
}
const AllocatorAttributes* output_attrs() const { return output_attr_base(); }
const int* forward_from() const { return forward_from_base(); }
string DebugString() const;
private:
friend class GraphView;
NodeItem() {}
char* var() const {
return const_cast<char*>(reinterpret_cast<const char*>(this) +
sizeof(NodeItem));
}
EdgeInfo* output_edge_base() const {
return reinterpret_cast<EdgeInfo*>(var());
}
ControlEdgeInfo* output_control_edge_base() const {
return reinterpret_cast<ControlEdgeInfo*>(var() + sizeof(EdgeInfo) *
num_output_edges);
}
AllocatorAttributes* output_attr_base() const {
return reinterpret_cast<AllocatorAttributes*>(
var() + sizeof(EdgeInfo) * num_output_edges +
sizeof(ControlEdgeInfo) * num_output_control_edges);
}
int* forward_from_base() const {
return reinterpret_cast<int*>(var() + sizeof(EdgeInfo) * num_output_edges +
sizeof(ControlEdgeInfo) *
num_output_control_edges +
sizeof(AllocatorAttributes) * num_outputs);
}
uint8* input_type_base() const {
return reinterpret_cast<uint8*>(
var() + sizeof(EdgeInfo) * num_output_edges +
sizeof(ControlEdgeInfo) * num_output_control_edges +
sizeof(AllocatorAttributes) * num_outputs + sizeof(int) * num_outputs);
}
uint8* output_type_base() const {
return reinterpret_cast<uint8*>(
var() + sizeof(EdgeInfo) * num_output_edges +
sizeof(ControlEdgeInfo) * num_output_control_edges +
sizeof(AllocatorAttributes) * num_outputs + sizeof(int) * num_outputs +
sizeof(uint8) * num_inputs);
}
NodeItem(const NodeItem&) = delete;
void operator=(const NodeItem&) = delete;
};
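// Memory layout note (inferred from the accessors above): each NodeItem is
// immediately followed, in the same allocation, by variable-length arrays in
// this order:
//
//   EdgeInfo            x num_output_edges
//   ControlEdgeInfo     x num_output_control_edges
//   AllocatorAttributes x num_outputs
//   int (forward_from)  x num_outputs
//   uint8 (input type)  x num_inputs
//   uint8 (output type) x num_outputs
//
// The *_base() helpers compute offsets into this trailing region, which is
// why NodeItem is constructed only by GraphView via placement new.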
class GraphView {
public:
GraphView() : space_(nullptr) {}
~GraphView();
Status Initialize(const Graph* g);
Status SetAllocAttrs(const Graph* g, const Device* device);
void SetScopedAllocatorAttrs(const std::vector<const Node*>& sa_nodes);
NodeItem* node(int32_t id) const {
DCHECK_GE(id, 0);
DCHECK_LT(id, num_nodes_);
uint32 offset = node_offsets_[id];
return ((offset == kuint32max)
? nullptr
: reinterpret_cast<NodeItem*>(space_ + node_offsets_[id]));
}
const NodeItem& node_ref(int32_t id) const {
DCHECK_GE(id, 0);
DCHECK_LT(id, num_nodes_);
uint32 offset = node_offsets_[id];
DCHECK_NE(offset, kuint32max);
return *reinterpret_cast<NodeItem*>(space_ + node_offsets_[id]);
}
int32 num_nodes() const { return num_nodes_; }
private:
char* InitializeNode(char* ptr, const Node* n);
size_t NodeItemBytes(const Node* n);
int32 num_nodes_ = 0;
uint32* node_offsets_ = nullptr;
char* space_;
GraphView(const GraphView&) = delete;
void operator=(const GraphView&) = delete;
};
}
#endif
#include "tensorflow/core/common_runtime/graph_view.h"
#include <atomic>
#include <deque>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/edgeset.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/util/determinism.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
string NodeItem::DebugString() const {
string ret = strings::StrCat("{name:'", kernel->name(), "' id:", node_id);
if (is_source) {
strings::StrAppend(&ret, " source}");
} else {
strings::StrAppend(&ret, " def:{", SummarizeNodeDef(kernel->def()), "}}");
}
return ret;
}
GraphView::~GraphView() {
static_assert(std::is_trivially_destructible<AllocatorAttributes>::value,
"Update code if AllocatorAttributes gains a destructor");
static_assert(std::is_trivially_destructible<EdgeInfo>::value,
"Update code if EdgeInfo gains a destructor");
for (int i = 0; i < num_nodes_; i++) {
NodeItem* n = node(i);
if (n != nullptr) {
n->NodeItem::~NodeItem();
}
}
delete[] node_offsets_;
delete[] space_;
}
namespace {
typedef std::tuple<int32, int32> OutputAndControlEdges;
OutputAndControlEdges CountOutputEdges(const Node* n) {
DCHECK_LE(n->out_edges().size(), kint32max);
int32_t num_output_edges = 0;
int32_t num_output_control_edges = 0;
for (auto e : n->out_edges()) {
if (IsSink(e->dst())) continue;
if (e->IsControlEdge()) {
++num_output_control_edges;
} else {
++num_output_edges;
}
}
return OutputAndControlEdges(num_output_edges, num_output_control_edges);
}
}
size_t GraphView::NodeItemBytes(const Node* n) {
int32_t num_output_edges;
int32_t num_output_control_edges;
std::tie(num_output_edges, num_output_control_edges) = CountOutputEdges(n);
const int num_inputs = n->num_inputs();
const int num_outputs = n->num_outputs();
const size_t raw_bytes =
sizeof(NodeItem)
+ num_output_edges * sizeof(EdgeInfo)
+ num_output_control_edges *
sizeof(ControlEdgeInfo)
+ num_outputs * sizeof(AllocatorAttributes)
+ num_outputs * sizeof(int)
+ num_inputs * sizeof(uint8)
+ num_outputs * sizeof(uint8);
static constexpr size_t kItemAlignment = sizeof(NodeItem*);
static_assert(kItemAlignment % alignof(NodeItem) == 0,
"NodeItem must be aligned with kItemAlignment");
static_assert(kItemAlignment % alignof(EdgeInfo) == 0,
"EdgeInfo must be aligned with kItemAlignment");
static_assert(kItemAlignment % alignof(ControlEdgeInfo) == 0,
"ControlEdgeInfo must be aligned with kItemAlignment");
static_assert(kItemAlignment % alignof(AllocatorAttributes) == 0,
"AllocatorAttributes must be aligned with kItemAlignment");
static_assert(sizeof(NodeItem) % alignof(EdgeInfo) == 0,
"NodeItem must be aligned with EdgeInfo");
static_assert(sizeof(NodeItem) % alignof(AllocatorAttributes) == 0,
"NodeItem must be aligned with AllocatorAttributes");
static_assert(sizeof(EdgeInfo) % alignof(AllocatorAttributes) == 0,
"EdgeInfo must be aligned with AllocatorAttributes");
const size_t bytes =
((raw_bytes + kItemAlignment - 1) / kItemAlignment) * kItemAlignment;
return bytes;
}
char* GraphView::InitializeNode(char* ptr, const Node* n) {
const int id = n->id();
CHECK(node_offsets_[id] == kuint32max);
const size_t bytes = NodeItemBytes(n);
constexpr size_t kItemAlignment = sizeof(NodeItem*);
CHECK_EQ(reinterpret_cast<uintptr_t>(ptr) % kItemAlignment, 0);
NodeItem* item = reinterpret_cast<NodeItem*>(ptr);
CHECK_LE(static_cast<int64_t>(ptr - space_), kuint32max);
const uint32 offset = static_cast<uint32>(ptr - space_);
node_offsets_[id] = offset;
ptr += bytes;
int32_t num_output_edges;
int32_t num_output_control_edges;
std::tie(num_output_edges, num_output_control_edges) = CountOutputEdges(n);
const int num_inputs = n->num_inputs();
const int num_outputs = n->num_outputs();
new (item) NodeItem();
item->num_inputs = num_inputs;
item->num_outputs = num_outputs;
item->num_output_edges = num_output_edges;
item->num_output_control_edges = num_output_control_edges;
  // Track the last EdgeInfo emitted for each output slot so it can be
  // flagged via is_last below.
  gtl::InlinedVector<EdgeInfo*, 4> last_indices(num_outputs, nullptr);
EdgeInfo* dst_edge = item->output_edge_base();
for (auto e : n->out_edges()) {
if (e->IsControlEdge()) continue;
dst_edge->dst_id = e->dst()->id();
    CHECK_LE(e->src_output(), 0x3FFFFFFF);  // Must fit in the signed 31-bit
                                            // output_slot field.
dst_edge->output_slot = e->src_output();
dst_edge->is_last = false;
const int output_slot = dst_edge->output_slot;
if (output_slot >= 0) {
last_indices[output_slot] = dst_edge;
}
dst_edge->input_slot = e->dst_input();
dst_edge++;
}
for (EdgeInfo* edge_info : last_indices) {
if (edge_info != nullptr) {
edge_info->is_last = true;
}
}
ControlEdgeInfo* dst_control_edge = item->output_control_edge_base();
for (auto e : n->out_edges()) {
if (!e->IsControlEdge() || IsSink(e->dst())) continue;
dst_control_edge->dst_id = e->dst()->id();
dst_control_edge++;
}
AllocatorAttributes* output_attrs = item->output_attr_base();
for (int i = 0; i < num_outputs; i++) {
new (&output_attrs[i]) AllocatorAttributes();
}
DCHECK_LT(DataType_MAX, 255);
uint8* input_types = item->input_type_base();
item->is_any_input_ref_typed = false;
for (int i = 0; i < num_inputs; i++) {
input_types[i] = static_cast<uint8>(n->input_type(i));
DCHECK_EQ(item->input_type(i), n->input_type(i));
item->is_any_input_ref_typed |= IsRefType(n->input_type(i));
}
{
std::vector<int> forward_input;
Status fwd_status =
GetNodeAttr(n->attrs(), "_forward_input", &forward_input);
std::vector<int> scoped_allocator_attrs;
Status sa_status =
GetNodeAttr(n->attrs(), "_scoped_allocator", &scoped_allocator_attrs);
int* forward_from = item->forward_from_base();
uint8* output_types = item->output_type_base();
for (int i = 0; i < num_outputs; ++i) {
output_types[i] = static_cast<uint8>(n->output_type(i));
DCHECK_EQ(item->output_type(i), n->output_type(i));
forward_from[i] = OpKernelContext::Params::kNoReservation;
if (sa_status.ok()) {
for (int j = 0; j < scoped_allocator_attrs.size(); j += 2) {
if (scoped_allocator_attrs[j] == i) {
forward_from[i] = OpKernelContext::Params::kNeverForward;
DCHECK_EQ(output_attrs[i].scope_id, 0);
output_attrs[i].scope_id = scoped_allocator_attrs[j + 1];
}
}
}
if (fwd_status.ok() &&
forward_from[i] == OpKernelContext::Params::kNoReservation) {
DCHECK_EQ(forward_input.size() % 2, 0);
for (int j = 0; j < forward_input.size(); j += 2) {
if (forward_input[j + 1] == i) {
DCHECK_EQ(forward_from[i], OpKernelContext::Params::kNoReservation);
forward_from[i] = forward_input[j];
break;
}
}
}
}
}
return ptr;
}
Status GraphView::Initialize(const Graph* g) {
CHECK(node_offsets_ == nullptr);
const int num_nodes = g->num_node_ids();
num_nodes_ = num_nodes;
size_t total_bytes = 0;
for (const Node* n : g->nodes()) {
if (n->out_edges().size() > kint32max) {
return errors::InvalidArgument(
"The executor cannot handle nodes with more than ", kint32max,
" output edges. Node ", n->name(), " had ", n->out_edges().size(),
" output edges.");
}
total_bytes += NodeItemBytes(n);
}
node_offsets_ = new uint32[num_nodes];
for (int i = 0; i < num_nodes; i++) {
node_offsets_[i] = kuint32max;
}
space_ = new char[total_bytes];
char* ptr = space_;
auto it = g->nodes();
if (OpOrderDeterminismRequired()) {
std::vector<Node*> nodes(it.begin(), it.end());
std::sort(nodes.begin(), nodes.end(), NodeComparatorName());
for (const Node* n : nodes) {
ptr = InitializeNode(ptr, n);
}
} else {
for (const Node* n : it) {
ptr = InitializeNode(ptr, n);
}
}
CHECK_EQ(ptr, space_ + total_bytes);
return absl::OkStatus();
}
namespace {
bool ExtractScopedAllocatorAttr(const std::vector<int>& sc_attr,
int output_index,
AllocatorAttributes* alloc_attr) {
DCHECK_LE(2, sc_attr.size());
for (int i = 0; i < sc_attr.size(); i += 2) {
if (sc_attr[i] == output_index) {
CHECK_EQ(alloc_attr->scope_id, 0);
alloc_attr->scope_id = sc_attr[i + 1];
return true;
}
}
return false;
}
}
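// Propagates "_scoped_allocator" attrs from each ScopedAllocator's
// control-dependent use nodes back onto the allocator itself: the matching
// output attrs of every use node are merged in place, and the same attrs
// (with scope_id cleared) are merged into the ScopedAllocator's output.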
void GraphView::SetScopedAllocatorAttrs(
const std::vector<const Node*>& sa_nodes) {
for (const Node* sa : sa_nodes) {
NodeItem* sa_item = node(sa->id());
AllocatorAttributes* sa_attrs = sa_item->output_attr_base();
for (const auto& e : sa->out_edges()) {
if (IsSink(e->dst()) || !e->IsControlEdge()) {
continue;
}
Node* use_node = e->dst();
NodeItem* item = node(use_node->id());
AllocatorAttributes* use_attrs = item->output_attr_base();
std::vector<int> scoped_allocator_attrs;
Status s = GetNodeAttr(use_node->attrs(), "_scoped_allocator",
&scoped_allocator_attrs);
if (!s.ok()) {
VLOG(2) << "Failed to find expected ScopedAllocator attr on "
<< use_node->name();
continue;
}
for (const auto& e : use_node->out_edges()) {
if (IsSink(e->dst()) || !e->IsControlEdge()) {
AllocatorAttributes attr;
if (ExtractScopedAllocatorAttr(scoped_allocator_attrs,
e->src_output(), &attr)) {
(use_attrs + e->src_output())->Merge(attr);
attr = *(use_attrs + e->src_output());
attr.scope_id = 0;
sa_attrs->Merge(attr);
}
}
}
}
}
}
namespace {
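// Infers allocator attributes for the tensor flowing from `n` to `dst`:
// tensors received from or sent to a different address space must be
// nic-compatible, CPU-resident tensors on the host side of a cross-device
// copy are marked gpu-compatible, and collective nodes are nic-compatible.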
Status InferAllocAttr(const Node* n, const Node* dst,
const DeviceNameUtils::ParsedName& local_dev_name,
AllocatorAttributes* attr) {
Status s;
if (IsRecv(n)) {
string src_name;
s = GetNodeAttr(n->attrs(), "send_device", &src_name);
if (!s.ok()) return s;
DeviceNameUtils::ParsedName parsed_src_name;
if (!DeviceNameUtils::ParseFullName(src_name, &parsed_src_name)) {
s = errors::Internal("Bad send_device attr '", src_name, "' in node ",
n->name());
return s;
}
if (!DeviceNameUtils::IsSameAddressSpace(parsed_src_name, local_dev_name)) {
attr->set_nic_compatible(true);
VLOG(2) << "node " << n->name() << " is the sink of an RPC in";
} else if ((local_dev_name.type == "CPU" || n->IsHostRecv()) &&
parsed_src_name.type != "CPU") {
attr->set_gpu_compatible(true);
VLOG(2) << "node " << n->name() << " is the sink of a gpu->cpu copy";
} else {
VLOG(2) << "default alloc case local type " << local_dev_name.type
<< " remote type " << parsed_src_name.type;
}
}
if (IsSend(dst)) {
string dst_name;
s = GetNodeAttr(dst->attrs(), "recv_device", &dst_name);
if (!s.ok()) return s;
DeviceNameUtils::ParsedName parsed_dst_name;
if (!DeviceNameUtils::ParseFullName(dst_name, &parsed_dst_name)) {
s = errors::Internal("Bad recv_device attr '", dst_name, "' in node ",
n->name());
return s;
}
if (!DeviceNameUtils::IsSameAddressSpace(parsed_dst_name, local_dev_name)) {
attr->set_nic_compatible(true);
VLOG(2) << "node " << n->name() << " is the source of an RPC out";
} else if ((local_dev_name.type == "CPU" || dst->IsHostSend()) &&
parsed_dst_name.type != "CPU") {
attr->set_gpu_compatible(true);
VLOG(2) << "node " << n->name() << " is the source of a cpu->gpu copy";
} else {
VLOG(2) << "default alloc case local type " << local_dev_name.type
<< " remote type " << parsed_dst_name.type;
}
}
if (n->IsCollective()) {
attr->set_nic_compatible(true);
}
return s;
}
}
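// Fills in the output allocator attributes of every node: attributes
// inferred from its cross-device edges, HOST_MEMORY constraints taken from
// the kernel's output memory types, and finally the ScopedAllocator
// attributes collected from all ScopedAllocator instances.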
Status GraphView::SetAllocAttrs(const Graph* g, const Device* device) {
Status s;
const DeviceNameUtils::ParsedName& local_dev_name = device->parsed_name();
std::vector<const Node*> scoped_allocator_instances;
for (const Node* n : g->nodes()) {
NodeItem* item = node(n->id());
AllocatorAttributes* attrs = item->output_attr_base();
if (IsScopedAllocator(n)) {
scoped_allocator_instances.push_back(n);
}
for (const auto& e : n->out_edges()) {
if (!e->IsControlEdge()) {
AllocatorAttributes attr;
s = InferAllocAttr(n, e->dst(), local_dev_name, &attr);
if (!s.ok()) return s;
if (attr.value != 0 || attr.scope_id != 0) {
attrs[e->src_output()].Merge(attr);
}
}
}
for (int out = 0; out < n->num_outputs(); out++) {
const OpKernel* op_kernel = item->kernel;
DCHECK_LT(out, op_kernel->output_memory_types().size());
bool on_host = op_kernel->output_memory_types()[out] == HOST_MEMORY;
if (on_host) {
AllocatorAttributes h;
h.set_on_host(on_host);
attrs[out].Merge(h);
}
}
}
SetScopedAllocatorAttrs(scoped_allocator_instances);
return s;
}
} | #include "tensorflow/core/grappler/utils/graph_view.h"
#include <type_traits>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/substitute.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/benchmark_testlib.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace grappler {
namespace utils {
namespace {
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
constexpr char kNoOp[] = "NoOp";
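// A small graph with deliberately duplicated fanins: "a" reads b:2 and d:3
// twice each and is controlled by "c", while "c" carries a duplicated
// control dependency on "d". The tests below rely on these duplicates.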
GraphDef SimpleTestGraph() {
return GDef({NDef("a", kNoOp, {"b:2", "d:3", "b:2", "d:3", "^c"}),
NDef("b", kNoOp, {"d:2", "c:5", "^c"}),
NDef("c", kNoOp, {"^d", "^d"}), NDef("d", kNoOp, {})},
{});
}
template <typename T>
const string GetGraphViewTypeAsString() {
return std::is_same<T, class GraphView>::value ? "GraphView"
: "MutableGraphView";
}
using GraphViewTypes = ::testing::Types<GraphView, MutableGraphView>;
template <typename T>
class TypedGraphViewTest : public ::testing::Test {};
TYPED_TEST_SUITE(TypedGraphViewTest, GraphViewTypes);
TYPED_TEST(TypedGraphViewTest, GraphWithDuplicateNodeNames) {
GraphDef graph =
GDef({NDef("a", kNoOp, {}), NDef("a", kNoOp, {})}, {});
Status s;
TypeParam graph_view(&graph, &s);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
absl::Substitute(
"$0::$0 error: graph has multiple nodes with the name 'a'.",
GetGraphViewTypeAsString<TypeParam>()));
}
TYPED_TEST(TypedGraphViewTest, GraphWithMissingFanins) {
GraphDef graph = GDef({NDef("a", kNoOp, {"b:3"})}, {});
Status s;
TypeParam graph_view(&graph, &s);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
absl::Substitute("$0::$0 error: node 'a' has missing fanin 'b:3'.",
GetGraphViewTypeAsString<TypeParam>()));
}
TYPED_TEST(TypedGraphViewTest, GraphWithSelfCycles) {
GraphDef graph = GDef({NDef("a", kNoOp, {"a:4"})}, {});
Status s;
TypeParam graph_view(&graph, &s);
EXPECT_FALSE(s.ok());
EXPECT_EQ(
s.message(),
absl::Substitute("$0::$0 error: node 'a' has self cycle fanin 'a:4'.",
GetGraphViewTypeAsString<TypeParam>()));
}
TYPED_TEST(TypedGraphViewTest, GraphWithMisorderedFanins) {
GraphDef graph = GDef({NDef("a", kNoOp, {"^b", "b:4"}), NDef("b", kNoOp, {})},
{});
Status s;
TypeParam graph_view(&graph, &s);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
absl::Substitute("$0::$0 error: node 'a' has regular fanin 'b:4' "
"after controlling fanins.",
GetGraphViewTypeAsString<TypeParam>()));
}
TYPED_TEST(TypedGraphViewTest, GetNodeWithIndex) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
const int num_nodes = graph_view.NumNodes();
ASSERT_EQ(graph_view.NumNodes(), graph.node_size());
for (int i = 0; i < num_nodes; ++i) {
const auto* node = graph_view.GetNode(i);
ASSERT_NE(node, nullptr);
EXPECT_EQ(node->node(), graph.mutable_node(i));
}
const auto* bad_node = graph_view.GetNode(-1);
ASSERT_EQ(bad_node, nullptr);
bad_node = graph_view.GetNode(num_nodes);
ASSERT_EQ(bad_node, nullptr);
}
TYPED_TEST(TypedGraphViewTest, GetNodeWithName) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
std::vector<string> node_names = {"a", "b", "c", "d"};
for (int i = 0; i < node_names.size(); ++i) {
const string& node_name = node_names[i];
const auto* node = graph_view.GetNode(node_name);
ASSERT_NE(node, nullptr);
EXPECT_EQ(node->node(), graph.mutable_node(i));
}
const auto* bad_node = graph_view.GetNode("e");
ASSERT_EQ(bad_node, nullptr);
}
TYPED_TEST(TypedGraphViewTest, GetNodes) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
const auto& nodes = graph_view.GetNodes();
const int num_nodes = nodes.size();
EXPECT_EQ(num_nodes, 4);
ASSERT_EQ(num_nodes, graph.node_size());
for (int i = 0; i < num_nodes; ++i) {
EXPECT_EQ(nodes[i].node(), graph.mutable_node(i));
}
}
TYPED_TEST(TypedGraphViewTest, HasNode) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
for (const string& node_name : {"a", "b", "c", "d"}) {
EXPECT_TRUE(graph_view.HasNode(node_name));
}
EXPECT_FALSE(graph_view.HasNode("e"));
}
TYPED_TEST(TypedGraphViewTest, NumNodes) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
EXPECT_EQ(graph_view.NumNodes(), 4);
}
TYPED_TEST(TypedGraphViewTest, NumNodesEmptyGraph) {
GraphDef graph;
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
EXPECT_EQ(graph_view.NumNodes(), 0);
}
TEST(MutableGraphViewTest, DedupControlDependencies) {
GraphDef graph = GDef(
{NDef("a", kNoOp, {}), NDef("b", kNoOp, {}), NDef("c", kNoOp, {}),
NDef("d", kNoOp, {"a:2", "b:1", "^c", "^c", "^a", "^a", "^b", "^c"})},
{});
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
EXPECT_EQ(graph_view.NumNodes(), 4);
const auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
const auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
const auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
const auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
EXPECT_EQ(d_node->NumRegularFanins(), 2);
ASSERT_NE(d_node->node(), nullptr);
ASSERT_EQ(d_node->node()->input_size(), 5);
EXPECT_EQ(d_node->node()->input(0), "a:2");
EXPECT_EQ(d_node->node()->input(1), "b:1");
EXPECT_EQ(d_node->node()->input(2), "^c");
EXPECT_EQ(d_node->node()->input(3), "^b");
EXPECT_EQ(d_node->node()->input(4), "^a");
ASSERT_EQ(d_node->NumControllingFanins(), 3);
const auto& d_control_fanins = d_node->GetControllingFanins();
ASSERT_EQ(d_control_fanins.size(), 3);
ASSERT_NE(d_control_fanins[0].node_view(), nullptr);
EXPECT_EQ(d_control_fanins[0].node_view()->GetName(), "c");
ASSERT_NE(d_control_fanins[1].node_view(), nullptr);
EXPECT_EQ(d_control_fanins[1].node_view()->GetName(), "b");
ASSERT_NE(d_control_fanins[2].node_view(), nullptr);
EXPECT_EQ(d_control_fanins[2].node_view()->GetName(), "a");
}
template <typename T>
class TypedNodeViewTest : public ::testing::Test {};
TYPED_TEST_SUITE(TypedNodeViewTest, GraphViewTypes);
TYPED_TEST(TypedNodeViewTest, GetName) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
for (const NodeDef& node : graph.node()) {
const auto* node_view = graph_view.GetNode(node.name());
ASSERT_NE(node_view, nullptr);
EXPECT_EQ(node_view->GetName(), node.name());
EXPECT_EQ(node_view->GetName(), node_view->node()->name());
}
}
TYPED_TEST(TypedNodeViewTest, GetOp) {
GraphDef graph = GDef({NDef("a", "op_a", {}), NDef("b", "op_b", {}),
NDef("c", "op_c", {}), NDef("d", "op_d", {})},
{});
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
const auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
EXPECT_EQ(a_node->GetOp(), "op_a");
EXPECT_EQ(a_node->node()->op(), "op_a");
const auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
EXPECT_EQ(b_node->GetOp(), "op_b");
EXPECT_EQ(b_node->node()->op(), "op_b");
const auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
EXPECT_EQ(c_node->GetOp(), "op_c");
EXPECT_EQ(c_node->node()->op(), "op_c");
const auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
EXPECT_EQ(d_node->GetOp(), "op_d");
EXPECT_EQ(d_node->node()->op(), "op_d");
}
TYPED_TEST(TypedNodeViewTest, GetDevice) {
GraphDef graph = GDef(
{NDef("a", "", {}, {}, "device_a"), NDef("b", "", {}, {}, "device_b"),
NDef("c", "", {}, {}, "device_c"), NDef("d", "", {}, {})},
{});
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
const auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
EXPECT_EQ(a_node->GetDevice(), "device_a");
EXPECT_EQ(a_node->node()->device(), "device_a");
const auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
EXPECT_EQ(b_node->GetDevice(), "device_b");
EXPECT_EQ(b_node->node()->device(), "device_b");
const auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
EXPECT_EQ(c_node->GetDevice(), "device_c");
EXPECT_EQ(c_node->node()->device(), "device_c");
const auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
EXPECT_EQ(d_node->GetDevice(), "");
EXPECT_EQ(d_node->node()->device(), "");
}
template <typename T>
class TypedFaninTest : public ::testing::Test {};
using FaninTypes =
::testing::Types<std::pair<FanoutView, GraphView>,
std::pair<MutableFanoutView, MutableGraphView>>;
TYPED_TEST_SUITE(TypedFaninTest, FaninTypes);
TYPED_TEST(TypedFaninTest, GetRegularFanins) {
using FanoutViewType = typename TypeParam::first_type;
using GraphViewType = typename TypeParam::second_type;
GraphDef graph = SimpleTestGraph();
Status s;
GraphViewType graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
const auto& a_fanins = a_node->GetRegularFanins();
ASSERT_EQ(a_fanins.size(), 4);
EXPECT_EQ(a_fanins[0], FanoutViewType(&graph_view, b_node->node_index(), 2));
EXPECT_EQ(a_fanins[1], FanoutViewType(&graph_view, d_node->node_index(), 3));
EXPECT_EQ(a_fanins[2], FanoutViewType(&graph_view, b_node->node_index(), 2));
EXPECT_EQ(a_fanins[3], FanoutViewType(&graph_view, d_node->node_index(), 3));
const auto& d_fanins = d_node->GetRegularFanins();
EXPECT_EQ(d_fanins.size(), 0);
}
TYPED_TEST(TypedFaninTest, GetRegularFanin) {
using FanoutViewType = typename TypeParam::first_type;
using GraphViewType = typename TypeParam::second_type;
GraphDef graph = SimpleTestGraph();
Status s;
GraphViewType graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
const auto& a_fanin_0 = a_node->GetRegularFanin(0);
EXPECT_EQ(a_fanin_0, FanoutViewType(&graph_view, b_node->node_index(), 2));
const auto& a_fanin_1 = a_node->GetRegularFanin(1);
EXPECT_EQ(a_fanin_1, FanoutViewType(&graph_view, d_node->node_index(), 3));
const auto& a_fanin_2 = a_node->GetRegularFanin(2);
EXPECT_EQ(a_fanin_2, FanoutViewType(&graph_view, b_node->node_index(), 2));
const auto& a_fanin_3 = a_node->GetRegularFanin(3);
EXPECT_EQ(a_fanin_3, FanoutViewType(&graph_view, d_node->node_index(), 3));
const FanoutViewType missing_fanin;
EXPECT_EQ(missing_fanin, FanoutViewType(nullptr, -1, -2));
EXPECT_EQ(missing_fanin.node_view(), nullptr);
const auto& a_fanin_4 = a_node->GetRegularFanin(4);
EXPECT_EQ(a_fanin_4, missing_fanin);
const auto& a_fanin_5 = a_node->GetRegularFanin(5);
EXPECT_EQ(a_fanin_5, missing_fanin);
const auto& a_fanin_control = a_node->GetRegularFanin(Graph::kControlSlot);
EXPECT_EQ(a_fanin_control, missing_fanin);
const auto& a_fanin_bad = a_node->GetRegularFanin(-2);
EXPECT_EQ(a_fanin_bad, missing_fanin);
}
TYPED_TEST(TypedFaninTest, GetControllingFanins) {
using FanoutViewType = typename TypeParam::first_type;
using GraphViewType = typename TypeParam::second_type;
GraphDef graph = SimpleTestGraph();
Status s;
GraphViewType graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
const auto& a_fanins = a_node->GetControllingFanins();
ASSERT_EQ(a_fanins.size(), 1);
EXPECT_EQ(a_fanins[0], FanoutViewType(&graph_view, c_node->node_index(),
Graph::kControlSlot));
const auto& c_fanins = c_node->GetControllingFanins();
FanoutViewType d_control_fanin(&graph_view, d_node->node_index(),
Graph::kControlSlot);
if (std::is_same<GraphViewType, GraphView>::value) {
ASSERT_EQ(c_fanins.size(), 2);
EXPECT_EQ(c_fanins[0], d_control_fanin);
EXPECT_EQ(c_fanins[1], d_control_fanin);
} else {
ASSERT_EQ(c_fanins.size(), 1);
EXPECT_EQ(c_fanins[0], d_control_fanin);
}
const auto& d_fanins = d_node->GetControllingFanins();
EXPECT_EQ(d_fanins.size(), 0);
}
template <typename T>
class TypedFanoutTest : public ::testing::Test {};
using FanoutTypes =
::testing::Types<std::pair<FaninView, GraphView>,
std::pair<MutableFaninView, MutableGraphView>>;
TYPED_TEST_SUITE(TypedFanoutTest, FanoutTypes);
TYPED_TEST(TypedFanoutTest, GetRegularFanouts) {
using FaninViewType = typename TypeParam::first_type;
using GraphViewType = typename TypeParam::second_type;
GraphDef graph = SimpleTestGraph();
Status s;
GraphViewType graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
const auto& d_fanouts = d_node->GetRegularFanouts();
ASSERT_EQ(d_fanouts.size(), 4);
for (int i = 0; i < d_fanouts.size(); ++i) {
if (i == 2) {
ASSERT_EQ(d_fanouts[i].size(), 1);
EXPECT_EQ(d_fanouts[i][0],
FaninViewType(&graph_view, b_node->node_index(), 0));
} else if (i == 3) {
ASSERT_EQ(d_fanouts[i].size(), 2);
absl::flat_hash_set<FaninViewType> fanouts(d_fanouts[i].begin(),
d_fanouts[i].end());
EXPECT_TRUE(fanouts.contains(
FaninViewType(&graph_view, a_node->node_index(), 1)));
EXPECT_TRUE(fanouts.contains(
FaninViewType(&graph_view, a_node->node_index(), 3)));
} else {
EXPECT_EQ(d_fanouts[i].size(), 0);
}
}
const auto& a_fanouts = a_node->GetRegularFanouts();
EXPECT_EQ(a_fanouts.size(), 0);
}
TYPED_TEST(TypedFanoutTest, GetRegularFanout) {
using FaninViewType = typename TypeParam::first_type;
using GraphViewType = typename TypeParam::second_type;
GraphDef graph = SimpleTestGraph();
Status s;
GraphViewType graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
const auto& d_fanouts_2 = d_node->GetRegularFanout(2);
ASSERT_EQ(d_fanouts_2.size(), 1);
EXPECT_EQ(d_fanouts_2.at(0),
FaninViewType(&graph_view, b_node->node_index(), 0));
const auto& d_fanouts_3 = d_node->GetRegularFanout(3);
EXPECT_EQ(d_fanouts_3.size(), 2);
absl::flat_hash_set<FaninViewType> d_fanouts_3_set(d_fanouts_3.begin(),
d_fanouts_3.end());
EXPECT_TRUE(d_fanouts_3_set.contains(
FaninViewType(&graph_view, a_node->node_index(), 1)));
EXPECT_TRUE(d_fanouts_3_set.contains(
FaninViewType(&graph_view, a_node->node_index(), 3)));
const std::vector<FaninViewType> no_fanouts;
EXPECT_EQ(d_node->GetRegularFanout(-2), no_fanouts);
EXPECT_EQ(d_node->GetRegularFanout(Graph::kControlSlot), no_fanouts);
EXPECT_EQ(d_node->GetRegularFanout(0), no_fanouts);
EXPECT_EQ(d_node->GetRegularFanout(1), no_fanouts);
EXPECT_EQ(d_node->GetRegularFanout(4), no_fanouts);
EXPECT_EQ(d_node->GetRegularFanout(5), no_fanouts);
}
TYPED_TEST(TypedFanoutTest, GetControlledFanouts) {
using FaninViewType = typename TypeParam::first_type;
using GraphViewType = typename TypeParam::second_type;
GraphDef graph = SimpleTestGraph();
Status s;
GraphViewType graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
const auto& c_fanouts = c_node->GetControlledFanouts();
EXPECT_EQ(c_fanouts.size(), 2);
absl::flat_hash_set<FaninViewType> c_fanouts_set(c_fanouts.begin(),
c_fanouts.end());
EXPECT_TRUE(c_fanouts_set.contains(
FaninViewType(&graph_view, b_node->node_index(), Graph::kControlSlot)));
EXPECT_TRUE(c_fanouts_set.contains(
FaninViewType(&graph_view, a_node->node_index(), Graph::kControlSlot)));
const auto& d_fanouts = d_node->GetControlledFanouts();
FaninViewType c_control_fanout(&graph_view, c_node->node_index(),
Graph::kControlSlot);
if (std::is_same<GraphViewType, GraphView>::value) {
ASSERT_EQ(d_fanouts.size(), 2);
EXPECT_EQ(d_fanouts[0], c_control_fanout);
EXPECT_EQ(d_fanouts[1], c_control_fanout);
} else {
ASSERT_EQ(d_fanouts.size(), 1);
EXPECT_EQ(d_fanouts[0], c_control_fanout);
}
const auto& a_fanouts = a_node->GetControlledFanouts();
EXPECT_EQ(a_fanouts.size(), 0);
}
TYPED_TEST(TypedNodeViewTest, NumRegularFanins) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
EXPECT_EQ(a_node->NumRegularFanins(), 4);
EXPECT_EQ(b_node->NumRegularFanins(), 2);
EXPECT_EQ(c_node->NumRegularFanins(), 0);
EXPECT_EQ(d_node->NumRegularFanins(), 0);
}
TYPED_TEST(TypedNodeViewTest, NumControllingFanins) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
EXPECT_EQ(a_node->NumControllingFanins(), 1);
EXPECT_EQ(b_node->NumControllingFanins(), 1);
if (std::is_same<TypeParam, GraphView>::value) {
EXPECT_EQ(c_node->NumControllingFanins(), 2);
} else {
EXPECT_EQ(c_node->NumControllingFanins(), 1);
}
EXPECT_EQ(d_node->NumControllingFanins(), 0);
}
TYPED_TEST(TypedNodeViewTest, NumRegularFanouts) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
EXPECT_EQ(a_node->NumRegularFanouts(), 0);
EXPECT_EQ(b_node->NumRegularFanouts(), 2);
EXPECT_EQ(c_node->NumRegularFanouts(), 1);
EXPECT_EQ(d_node->NumRegularFanouts(), 3);
}
TYPED_TEST(TypedNodeViewTest, NumControlledFanouts) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
EXPECT_EQ(a_node->NumControlledFanouts(), 0);
EXPECT_EQ(b_node->NumControlledFanouts(), 0);
EXPECT_EQ(c_node->NumControlledFanouts(), 2);
if (std::is_same<TypeParam, GraphView>::value) {
EXPECT_EQ(d_node->NumControlledFanouts(), 2);
} else {
EXPECT_EQ(d_node->NumControlledFanouts(), 1);
}
}
TYPED_TEST(TypedNodeViewTest, HasFanin) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
EXPECT_TRUE(a_node->HasFanin({&graph_view, b_node->node_index(), 2}));
EXPECT_FALSE(a_node->HasFanin({&graph_view, c_node->node_index(), 4}));
EXPECT_TRUE(a_node->HasFanin(
{&graph_view, c_node->node_index(), Graph::kControlSlot}));
EXPECT_FALSE(a_node->HasFanin(
{&graph_view, b_node->node_index(), Graph::kControlSlot}));
EXPECT_FALSE(a_node->HasFanin({&graph_view, a_node->node_index(), 0}));
EXPECT_FALSE(a_node->HasFanin(
{&graph_view, b_node->node_index(), internal::kMissingSlot}));
}
TYPED_TEST(TypedNodeViewTest, HasFanout) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
EXPECT_TRUE(b_node->HasFanout({&graph_view, a_node->node_index(), 2}));
EXPECT_FALSE(b_node->HasFanout({&graph_view, a_node->node_index(), 1}));
EXPECT_TRUE(d_node->HasFanout(
{&graph_view, c_node->node_index(), Graph::kControlSlot}));
EXPECT_FALSE(d_node->HasFanout(
{&graph_view, a_node->node_index(), Graph::kControlSlot}));
EXPECT_FALSE(d_node->HasFanout({&graph_view, d_node->node_index(), 0}));
EXPECT_FALSE(a_node->HasFanout({&graph_view, b_node->node_index(), 0}));
EXPECT_FALSE(a_node->HasFanout({&graph_view, 4, 0}));
EXPECT_FALSE(d_node->HasFanout(
{&graph_view, b_node->node_index(), internal::kMissingSlot}));
}
GraphDef SimpleAttrTestGraph() {
return GDef({NDef("a", kNoOp, {}), NDef("b", kNoOp, {}, {{"attr", 1}}),
NDef("c", kNoOp, {}, {{"attr_1", "a"}, {"attr_2", 2.0f}})},
{});
}
TYPED_TEST(TypedNodeViewTest, GetAttr) {
GraphDef graph = SimpleAttrTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
EXPECT_EQ(c_node->GetAttr("attr_1")->s(), "a");
}
TYPED_TEST(TypedNodeViewTest, GetAttrs) {
GraphDef graph = SimpleAttrTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
const auto& actual_attrs = c_node->GetAttrs();
EXPECT_EQ(actual_attrs.size(), 2);
const auto* attr_1 = actual_attrs.Find("attr_1");
EXPECT_NE(attr_1, nullptr);
EXPECT_EQ(attr_1->s(), "a");
const auto* attr_2 = actual_attrs.Find("attr_2");
EXPECT_NE(attr_2, nullptr);
EXPECT_EQ(attr_2->f(), 2.0f);
}
TYPED_TEST(TypedNodeViewTest, NumAttrs) {
GraphDef graph = SimpleAttrTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
EXPECT_EQ(a_node->NumAttrs(), 0);
EXPECT_EQ(b_node->NumAttrs(), 1);
EXPECT_EQ(c_node->NumAttrs(), 2);
}
TYPED_TEST(TypedNodeViewTest, HasAttr) {
GraphDef graph = SimpleAttrTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
EXPECT_TRUE(c_node->HasAttr("attr_1"));
EXPECT_FALSE(c_node->HasAttr("attr"));
}
class CompareGraphTest : public GrapplerTest {
public:
void CompareGraphViewWithGraph(MutableGraphView* graph_view,
const GraphDef& expected_graph) {
Status s;
GraphView expected_graph_view(&expected_graph, &s);
TF_ASSERT_OK(s);
EXPECT_EQ(graph_view->NumNodes(), expected_graph_view.NumNodes());
for (const NodeView& expected_node_view : expected_graph_view.GetNodes()) {
const string& node_name = expected_node_view.GetName();
MutableNodeView* node_view = graph_view->GetNode(node_name);
ASSERT_NE(node_view, nullptr);
EXPECT_EQ(node_view->GetName(), expected_node_view.GetName());
EXPECT_EQ(node_view->GetOp(), expected_node_view.GetOp());
EXPECT_EQ(node_view->GetDevice(), expected_node_view.GetDevice());
const int actual_num_fanins = node_view->node()->input_size();
EXPECT_EQ(actual_num_fanins, expected_node_view.node()->input_size());
const int expected_num_regular_fanins =
expected_node_view.NumRegularFanins();
bool same_num_regular_fanins =
node_view->NumRegularFanins() == expected_num_regular_fanins;
EXPECT_TRUE(same_num_regular_fanins);
for (int i = 0; i < expected_num_regular_fanins; ++i) {
const auto& expected_fanin = expected_node_view.GetRegularFanin(i);
auto* actual_fanin_node =
graph_view->GetNode(expected_fanin.node_view()->GetName());
ASSERT_NE(actual_fanin_node, nullptr);
EXPECT_TRUE(
node_view->HasFanin({actual_fanin_node, expected_fanin.index()}));
if (i < node_view->NumRegularFanins()) {
auto& actual_fanin = node_view->GetRegularFanin(i);
EXPECT_EQ(actual_fanin, MutableFanoutView(actual_fanin_node,
expected_fanin.index()));
EXPECT_EQ(actual_fanin.node_index(),
actual_fanin.node_view()->node_index());
}
}
if (same_num_regular_fanins) {
for (int i = 0; i < expected_num_regular_fanins; ++i) {
const auto& fanin = node_view->GetRegularFanin(i);
EXPECT_EQ(ParseTensorName(node_view->node()->input(i)),
TensorId(fanin.node_view()->GetName(), fanin.index()));
}
}
const int expected_num_controlling_fanins =
expected_node_view.NumControllingFanins();
bool same_num_controlling_fanins =
node_view->NumControllingFanins() == expected_num_controlling_fanins;
EXPECT_TRUE(same_num_controlling_fanins);
for (int i = 0; i < expected_num_controlling_fanins; ++i) {
auto& expected_fanin = expected_node_view.GetControllingFanins()[i];
auto* actual_fanin_node =
graph_view->GetNode(expected_fanin.node_view()->GetName());
ASSERT_NE(actual_fanin_node, nullptr);
MutableFanoutView actual_fanin(actual_fanin_node,
expected_fanin.index());
EXPECT_TRUE(node_view->HasFanin(actual_fanin));
int found = 0;
for (const auto& actual_fanin : node_view->GetControllingFanins()) {
if (actual_fanin.index() == expected_fanin.index() &&
actual_fanin.node_view()->GetName() ==
expected_fanin.node_view()->GetName()) {
EXPECT_EQ(actual_fanin.node_index(),
actual_fanin.node_view()->node_index());
++found;
}
}
EXPECT_EQ(found, 1);
}
if (same_num_controlling_fanins && same_num_regular_fanins) {
for (int i = 0; i < expected_num_controlling_fanins; ++i) {
const auto& fanin = node_view->GetControllingFanins()[i];
EXPECT_EQ(ParseTensorName(node_view->node()->input(
i + expected_num_regular_fanins)),
TensorId(fanin.node_view()->GetName(), fanin.index()));
}
}
EXPECT_EQ(node_view->NumRegularFanouts(),
expected_node_view.NumRegularFanouts());
const int num_output_ports =
expected_node_view.GetRegularFanouts().size();
ASSERT_EQ(node_view->GetRegularFanouts().size(), num_output_ports);
for (int i = 0; i < num_output_ports; ++i) {
      auto& expected_fanouts_at_port_i =
          expected_node_view.GetRegularFanouts()[i];
const int num_fanouts_at_port = expected_fanouts_at_port_i.size();
auto& actual_fanouts_at_port_i = node_view->GetRegularFanouts()[i];
EXPECT_EQ(actual_fanouts_at_port_i.size(), num_fanouts_at_port);
for (int j = 0; j < num_fanouts_at_port; ++j) {
auto& expected_fanout = expected_fanouts_at_port_i[j];
auto* actual_fanout_node =
graph_view->GetNode(expected_fanout.node_view()->GetName());
ASSERT_NE(actual_fanout_node, nullptr);
MutableFaninView actual_fanout(actual_fanout_node,
expected_fanout.index());
EXPECT_TRUE(node_view->HasFanout(actual_fanout));
int found = 0;
for (const auto& fanout : actual_fanouts_at_port_i) {
if (fanout.index() == expected_fanout.index() &&
fanout.node_view()->GetName() ==
expected_fanout.node_view()->GetName()) {
EXPECT_EQ(fanout.node_index(), fanout.node_view()->node_index());
++found;
}
}
EXPECT_EQ(found, 1);
}
}
const int num_controlled_fanouts =
expected_node_view.NumControlledFanouts();
EXPECT_EQ(node_view->NumControlledFanouts(), num_controlled_fanouts);
for (int i = 0; i < num_controlled_fanouts; ++i) {
const auto& expected_fanout =
expected_node_view.GetControlledFanouts()[i];
auto* actual_fanout_node =
graph_view->GetNode(expected_fanout.node_view()->GetName());
ASSERT_NE(actual_fanout_node, nullptr);
MutableFaninView actual_fanout(actual_fanout_node,
expected_fanout.index());
EXPECT_TRUE(node_view->HasFanout(actual_fanout));
int found = 0;
for (const auto& fanout : node_view->GetControlledFanouts()) {
if (fanout.index() == expected_fanout.index() &&
fanout.node_view()->GetName() ==
expected_fanout.node_view()->GetName()) {
EXPECT_EQ(fanout.node_index(), fanout.node_view()->node_index());
++found;
}
}
EXPECT_EQ(found, 1);
}
EXPECT_EQ(node_view->NumAttrs(), expected_node_view.NumAttrs());
for (const auto& expected_attr : expected_node_view.GetAttrs()) {
auto* attr = node_view->GetAttr(expected_attr.first);
EXPECT_TRUE(AreAttrValuesEqual(*attr, expected_attr.second));
} |
1,348 | cpp | tensorflow/tensorflow | graph_topology_view | tensorflow/core/grappler/graph_topology_view.cc | tensorflow/core/grappler/graph_topology_view_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_GRAPH_TOPOLOGY_VIEW_H_
#define TENSORFLOW_CORE_GRAPPLER_GRAPH_TOPOLOGY_VIEW_H_
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/grappler/graph_view.h"
namespace tensorflow {
namespace grappler {
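// An immutable view over a GraphDef that assigns a dense integer index to
// every node and exposes sorted, de-duplicated fanin/fanout index lists.
// The view only borrows the GraphDef, so the graph must outlive it.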
class GraphTopologyView {
public:
GraphTopologyView() = default;
explicit GraphTopologyView(bool skip_invalid_edges)
: skip_invalid_edges_(skip_invalid_edges) {}
Status InitializeFromGraph(const GraphDef& graph,
absl::Span<const GraphView::Edge> ephemeral_edges,
bool ignore_control_edges);
Status InitializeFromGraph(const GraphDef& graph,
absl::Span<const GraphView::Edge> ephemeral_edges);
Status InitializeFromGraph(const GraphDef& graph, bool ignore_control_edges);
Status InitializeFromGraph(const GraphDef& graph);
bool is_initialized() const { return graph_ != nullptr; }
int num_nodes() const { return num_nodes_; }
const GraphDef* graph() const { return graph_; }
bool HasNode(absl::string_view node_name) const;
const NodeDef* GetNode(absl::string_view node_name) const;
const NodeDef* GetNode(int node_idx) const;
const absl::optional<int> GetNodeIndex(absl::string_view node_name) const;
const absl::optional<int> GetNodeIndex(const NodeDef& node) const;
const absl::InlinedVector<int, 4>& GetFanin(int node_idx) const;
const absl::InlinedVector<int, 2>& GetFanout(int node_idx) const;
private:
bool skip_invalid_edges_ = false;
const GraphDef* graph_ = nullptr;
int num_nodes_ = 0;
std::vector<absl::string_view> index_to_node_name_;
absl::flat_hash_map<absl::string_view, int> node_name_to_index_;
std::vector<absl::InlinedVector<int, 4>> fanins_;
std::vector<absl::InlinedVector<int, 2>> fanouts_;
absl::InlinedVector<int, 4> empty_fanin_;
absl::InlinedVector<int, 2> empty_fanout_;
};
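// Example usage (a sketch; "my_node" is an illustrative node name):
//
//   GraphTopologyView view;
//   TF_RETURN_IF_ERROR(view.InitializeFromGraph(graph));
//   const absl::optional<int> idx = view.GetNodeIndex("my_node");
//   if (idx.has_value()) {
//     for (int fanout_idx : view.GetFanout(*idx)) {
//       const NodeDef* fanout = view.GetNode(fanout_idx);
//       // ... inspect `fanout` ...
//     }
//   }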
}
}
#endif
#include "tensorflow/core/grappler/graph_topology_view.h"
#include <algorithm>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
template <typename T>
inline void SortAndRemoveDuplicates(T* v) {
std::sort(v->begin(), v->end());
v->erase(std::unique(v->begin(), v->end()), v->end());
}
}
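// Builds the topology in three steps: (1) map node names to dense indices,
// (2) add the caller-provided ephemeral edges, and (3) parse every node's
// declared inputs. Invalid edges either fail the call or, when
// skip_invalid_edges_ is set, are logged and skipped. The adjacency lists
// are sorted and de-duplicated at the end.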
Status GraphTopologyView::InitializeFromGraph(
const GraphDef& graph,
const absl::Span<const GraphView::Edge> ephemeral_edges,
bool ignore_control_edges) {
if (graph_ != nullptr) {
return errors::InvalidArgument("GraphTopologyView is already initialized.");
}
graph_ = &graph;
num_nodes_ = graph.node_size();
index_to_node_name_.resize(num_nodes_);
node_name_to_index_.rehash(num_nodes_);
fanins_.resize(num_nodes_);
fanouts_.resize(num_nodes_);
for (int node_idx = 0; node_idx < num_nodes_; ++node_idx) {
const NodeDef& node = graph.node(node_idx);
node_name_to_index_.emplace(node.name(), node_idx);
    index_to_node_name_[node_idx] = node.name();
}
for (const GraphView::Edge& edge : ephemeral_edges) {
const auto src = node_name_to_index_.find(edge.src.node->name());
const bool valid_src = src != node_name_to_index_.end();
if (!valid_src) {
const string error_message =
absl::StrCat("Non-existent src node: ", edge.src.node->name());
if (skip_invalid_edges_) {
VLOG(0) << "Skip error: " << error_message;
} else {
return errors::InvalidArgument(error_message);
}
}
const auto dst = node_name_to_index_.find(edge.dst.node->name());
const bool valid_dst = dst != node_name_to_index_.end();
if (!valid_dst) {
const string error_message =
absl::StrCat("Non-existent dst node: ", edge.dst.node->name());
if (skip_invalid_edges_) {
VLOG(0) << "Skip error: " << error_message;
} else {
return errors::InvalidArgument(error_message);
}
}
if (valid_dst && valid_src) {
const int src_idx = src->second;
const int dst_idx = dst->second;
if (ignore_control_edges && (src_idx < 0 || dst_idx < 0)) {
continue;
}
fanins_[dst_idx].push_back(src_idx);
fanouts_[src_idx].push_back(dst_idx);
}
}
for (int node_idx = 0; node_idx < num_nodes_; ++node_idx) {
const NodeDef& node = graph.node(node_idx);
fanins_[node_idx].reserve(node.input_size());
for (const string& input : node.input()) {
TensorId tensor = ParseTensorName(input);
if (ignore_control_edges && IsTensorIdControl(tensor)) {
continue;
}
const auto it = node_name_to_index_.find(tensor.node());
const bool valid_input = it != node_name_to_index_.end();
if (!valid_input) {
const string error_message = absl::StrCat("Non-existent input ", input,
" in node ", node.name());
if (skip_invalid_edges_) {
VLOG(3) << "Skip error: " << error_message;
} else {
return errors::InvalidArgument(error_message);
}
}
if (valid_input) {
const int input_idx = it->second;
fanins_[node_idx].push_back(input_idx);
fanouts_[input_idx].push_back(node_idx);
}
}
SortAndRemoveDuplicates(&fanins_[node_idx]);
}
for (int node_idx = 0; node_idx < num_nodes_; ++node_idx) {
SortAndRemoveDuplicates(&fanouts_[node_idx]);
}
return absl::OkStatus();
}
Status GraphTopologyView::InitializeFromGraph(
const GraphDef& graph,
const absl::Span<const GraphView::Edge> ephemeral_edges) {
return InitializeFromGraph(graph, ephemeral_edges,
false);
}
Status GraphTopologyView::InitializeFromGraph(const GraphDef& graph,
bool ignore_control_edges) {
return InitializeFromGraph(graph, absl::Span<GraphView::Edge>(),
ignore_control_edges);
}
Status GraphTopologyView::InitializeFromGraph(const GraphDef& graph) {
return InitializeFromGraph(graph, absl::Span<GraphView::Edge>(),
false);
}
bool GraphTopologyView::HasNode(const absl::string_view node_name) const {
DCHECK(is_initialized()) << "GraphTopologyView is not initialized";
const auto it = node_name_to_index_.find(node_name);
return it != node_name_to_index_.end();
}
const NodeDef* GraphTopologyView::GetNode(
const absl::string_view node_name) const {
DCHECK(is_initialized()) << "GraphTopologyView is not initialized";
const auto it = node_name_to_index_.find(node_name);
return it == node_name_to_index_.end() ? nullptr : &graph_->node(it->second);
}
const NodeDef* GraphTopologyView::GetNode(int node_idx) const {
DCHECK(is_initialized()) << "GraphTopologyView is not initialized";
DCHECK(node_idx >= 0 && node_idx < num_nodes_) << "node_idx is out of range";
return &graph_->node(node_idx);
}
const absl::optional<int> GraphTopologyView::GetNodeIndex(
const absl::string_view node_name) const {
DCHECK(is_initialized()) << "GraphTopologyView is not initialized";
const auto it = node_name_to_index_.find(node_name);
DCHECK(it != node_name_to_index_.end()) << "Node doesn't exist in a graph";
return it == node_name_to_index_.end() ? absl::nullopt
: absl::make_optional(it->second);
}
const absl::optional<int> GraphTopologyView::GetNodeIndex(
const NodeDef& node) const {
return GetNodeIndex(node.name());
}
const absl::InlinedVector<int, 4>& GraphTopologyView::GetFanin(
int node_idx) const {
DCHECK(is_initialized()) << "GraphTopologyView is not initialized";
const bool is_valid_node_idx = node_idx >= 0 && node_idx < num_nodes_;
DCHECK(is_valid_node_idx) << "node_idx is out of range";
return is_valid_node_idx ? fanins_[node_idx] : empty_fanin_;
}
const absl::InlinedVector<int, 2>& GraphTopologyView::GetFanout(
int node_idx) const {
DCHECK(is_initialized()) << "GraphTopologyView is not initialized";
const bool is_valid_node_idx = node_idx >= 0 && node_idx < num_nodes_;
DCHECK(is_valid_node_idx) << "node_idx is out of range";
return is_valid_node_idx ? fanouts_[node_idx] : empty_fanout_;
}
}
} | #include "tensorflow/core/grappler/graph_topology_view.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
class GraphTopologyViewTest : public ::testing::Test {
protected:
using NodeConfig = std::pair<string, std::vector<string>>;
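  // Builds a GraphDef from (node name, inputs) pairs; a "^name" input
  // denotes a control dependency.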
static GraphDef CreateGraph(const std::vector<NodeConfig>& nodes) {
GraphDef graph;
for (const NodeConfig& node : nodes) {
const auto& node_name = node.first;
const auto& node_inputs = node.second;
NodeDef node_def;
node_def.set_name(node_name);
for (const string& input : node_inputs) {
node_def.add_input(input);
}
*graph.add_node() = std::move(node_def);
}
return graph;
}
};
TEST_F(GraphTopologyViewTest, SimpleGraph) {
const GraphDef graph = CreateGraph({
{"a", {}},
{"b", {}},
{"c", {"a", "b"}},
{"d", {"a", "c"}},
});
GraphTopologyView graph_view;
TF_CHECK_OK(graph_view.InitializeFromGraph(graph));
EXPECT_TRUE(graph_view.is_initialized());
const NodeDef* a_by_name = graph_view.GetNode("a");
const NodeDef* a_by_idx = graph_view.GetNode(0);
ASSERT_TRUE(a_by_name);
ASSERT_TRUE(a_by_idx);
EXPECT_EQ(a_by_name, a_by_idx);
const NodeDef* b_by_name = graph_view.GetNode("b");
const NodeDef* b_by_idx = graph_view.GetNode(1);
ASSERT_TRUE(b_by_name);
ASSERT_TRUE(b_by_idx);
EXPECT_EQ(b_by_name, b_by_idx);
const absl::optional<int> b_idx = graph_view.GetNodeIndex(*b_by_name);
ASSERT_TRUE(b_idx.has_value());
EXPECT_EQ(b_idx.value(), 1);
const absl::optional<int> c_idx = graph_view.GetNodeIndex("c");
ASSERT_TRUE(c_idx.has_value());
EXPECT_EQ(c_idx.value(), 2);
using Fanin = absl::InlinedVector<int, 4>;
EXPECT_EQ(graph_view.GetFanin(0), Fanin());
EXPECT_EQ(graph_view.GetFanin(1), Fanin());
EXPECT_EQ(graph_view.GetFanin(2), Fanin({0, 1}));
EXPECT_EQ(graph_view.GetFanin(3), Fanin({0, 2}));
using Fanout = absl::InlinedVector<int, 2>;
EXPECT_EQ(graph_view.GetFanout(0), Fanout({2, 3}));
EXPECT_EQ(graph_view.GetFanout(1), Fanout({2}));
EXPECT_EQ(graph_view.GetFanout(2), Fanout({3}));
EXPECT_EQ(graph_view.GetFanout(3), Fanout());
}
TEST_F(GraphTopologyViewTest, GraphWithALoop) {
const GraphDef graph = CreateGraph({
{"a", {}},
{"b", {}},
{"c", {"a", "b", "d"}},
{"d", {"a", "c"}},
});
GraphTopologyView graph_view;
TF_CHECK_OK(graph_view.InitializeFromGraph(graph));
EXPECT_TRUE(graph_view.is_initialized());
using Fanin = absl::InlinedVector<int, 4>;
EXPECT_EQ(graph_view.GetFanin(2), Fanin({0, 1, 3}));
EXPECT_EQ(graph_view.GetFanin(3), Fanin({0, 2}));
using Fanout = absl::InlinedVector<int, 2>;
EXPECT_EQ(graph_view.GetFanout(2), Fanout({3}));
EXPECT_EQ(graph_view.GetFanout(3), Fanout({2}));
}
TEST_F(GraphTopologyViewTest, GraphWithControls) {
const GraphDef graph = CreateGraph({
{"a", {}},
{"b", {}},
{"c", {"a", "b", "^d"}},
{"d", {"a", "c"}},
});
{
GraphTopologyView graph_view;
TF_CHECK_OK(graph_view.InitializeFromGraph(graph));
EXPECT_TRUE(graph_view.is_initialized());
using Fanin = absl::InlinedVector<int, 4>;
EXPECT_EQ(graph_view.GetFanin(2), Fanin({0, 1, 3}));
EXPECT_EQ(graph_view.GetFanin(3), Fanin({0, 2}));
using Fanout = absl::InlinedVector<int, 2>;
EXPECT_EQ(graph_view.GetFanout(2), Fanout({3}));
EXPECT_EQ(graph_view.GetFanout(3), Fanout({2}));
}
{
GraphTopologyView graph_view;
TF_CHECK_OK(
graph_view.InitializeFromGraph(graph, true));
EXPECT_TRUE(graph_view.is_initialized());
using Fanin = absl::InlinedVector<int, 4>;
EXPECT_EQ(graph_view.GetFanin(2), Fanin({0, 1}));
EXPECT_EQ(graph_view.GetFanin(3), Fanin({0, 2}));
using Fanout = absl::InlinedVector<int, 2>;
EXPECT_EQ(graph_view.GetFanout(2), Fanout({3}));
EXPECT_EQ(graph_view.GetFanout(3), Fanout({}));
}
}
}
} |
1,349 | cpp | tensorflow/tensorflow | grappler_item_builder | tensorflow/core/grappler/grappler_item_builder.cc | tensorflow/core/grappler/grappler_item_builder_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_GRAPPLER_ITEM_BUILDER_H_
#define TENSORFLOW_CORE_GRAPPLER_GRAPPLER_ITEM_BUILDER_H_
#include <memory>
#include <set>
#include <string>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
namespace tensorflow {
class MetaGraphDef;
namespace grappler {
struct ItemConfig {
ItemConfig() {}
bool ignore_user_placement = true;
bool ignore_colocation = true;
int placeholder_unknown_output_shape_dim = -1;
bool erase_noinline_attributes = false;
string assets_directory_override;
bool prune_graph = false;
std::set<string> feed_nodes;
std::set<string> fetch_nodes;
bool apply_optimizations = false;
bool inline_functions = false;
};
Status RuntimeGraphOptimizer(const GraphDef& graph_def_arg,
GraphDef* output_graph_def, const ItemConfig& cfg);
std::unique_ptr<GrapplerItem> GrapplerItemFromMetaGraphDef(
const string& id, const MetaGraphDef& meta_graph, const ItemConfig& cfg);
std::unique_ptr<GrapplerItem> GrapplerItemFromMetaGraphDefFile(
const string& id, const string& meta_graph_file, const ItemConfig& cfg);
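// Example usage (a sketch; the item id and file path are illustrative):
//
//   ItemConfig cfg;
//   cfg.placeholder_unknown_output_shape_dim = 1;
//   std::unique_ptr<GrapplerItem> item =
//       GrapplerItemFromMetaGraphDefFile("my_item", "/tmp/model.meta", cfg);
//   if (item != nullptr) {
//     // item->graph, item->feed and item->fetch are populated.
//   }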
}
}
#endif
#include "tensorflow/core/grappler/grappler_item_builder.h"
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_optimizer.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variable.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/grappler/inputs/utils.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/model_pruner.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/protobuf_internal.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saver.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace grappler {
namespace {
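// Fills `tensor` with deterministic synthetic data: floats cycle through
// {0.0, 0.1, ..., 0.6}, int64s cycle through {0, ..., 6}, and every other
// POD type is zero-filled. DT_STRING/DT_RESOURCE/DT_VARIANT elements are
// left default-constructed.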
void InitializeTensor(DataType type, Tensor* tensor) {
const int period = 7;
if (type == DT_FLOAT) {
auto flat = tensor->flat<float>();
for (int i = 0; i < flat.size(); i++) {
flat(i) = static_cast<float>(i % period) / 10.0f;
}
} else if (type == DT_INT64) {
auto flat = tensor->flat<int64_t>();
for (int i = 0; i < flat.size(); i++) {
flat(i) = i % period;
}
} else if (type != DT_STRING && type != DT_RESOURCE && type != DT_VARIANT) {
memset(const_cast<char*>(tensor->tensor_data().data()), 0,
tensor->tensor_data().size());
}
}
Status PruneGraph(GrapplerItem* item) {
ModelPruner pruner;
GraphDef pruned_graph;
Cluster* cluster = nullptr;
TF_RETURN_IF_ERROR(pruner.Optimize(cluster, *item, &pruned_graph));
item->graph = std::move(pruned_graph);
return absl::OkStatus();
}
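// Rewrites a possibly-partial shape into a fully defined one: a -1 dimension
// becomes cfg.placeholder_unknown_output_shape_dim when that is set (>= 0);
// otherwise each dimension is clamped to at least 1 in the concrete shape
// while the proto keeps the original size.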
Status ReplaceUnknownShapeDim(const ItemConfig& cfg,
const TensorShapeProto& shape_pb_in,
TensorShapeProto* shape_pb_out,
TensorShape* shape_out) {
std::vector<int32> dims;
for (const auto& dim_proto : shape_pb_in.dim()) {
if (cfg.placeholder_unknown_output_shape_dim >= 0 &&
dim_proto.size() == -1) {
dims.push_back(cfg.placeholder_unknown_output_shape_dim);
shape_pb_out->add_dim()->set_size(
cfg.placeholder_unknown_output_shape_dim);
} else {
dims.push_back(std::max<int32>(1, dim_proto.size()));
shape_pb_out->add_dim()->set_size(dim_proto.size());
}
}
return TensorShapeUtils::MakeShape(dims.data(), dims.size(), shape_out);
}
Status UpdatePlaceholderShape(
const ItemConfig& cfg,
const std::unordered_set<string>& signature_feed_nodes,
GrapplerItem* new_item, NodeDef* node) {
if (node->attr().count("dtype") == 0) {
return absl::InternalError(absl::StrCat("Unknown type for placeholder ",
node->name(),
", skipping this input"));
}
DataType type = node->attr().at("dtype").type();
if (node->attr().count("shape") == 0) {
return absl::InternalError(absl::StrCat("Unknown shape for placeholder ",
node->name(),
", skipping this input"));
}
TensorShape shape;
TensorShapeProto shape_proto;
Status make_shape_status = ReplaceUnknownShapeDim(
cfg, node->attr().at("shape").shape(), &shape_proto, &shape);
if (!make_shape_status.ok()) {
return absl::InternalError(
absl::StrCat("Invalid shape for placeholder ", node->name(), ": ",
make_shape_status.ToString(), ", skipping this input"));
}
if ((cfg.placeholder_unknown_output_shape_dim >= 0) && (shape.dims() == 0) &&
(node->attr().count("_output_shapes") == 1)) {
const auto& output_shapes =
node->attr().at("_output_shapes").list().shape(0);
if (output_shapes.dim_size() != 0) {
shape.Clear();
shape_proto.clear_dim();
for (const auto& dim : output_shapes.dim()) {
auto size = dim.size();
if (size == -1) size = cfg.placeholder_unknown_output_shape_dim;
TF_RETURN_IF_ERROR(shape.AddDimWithStatus(size));
shape_proto.add_dim()->set_size(size);
}
}
}
Tensor fake_input(type, shape);
InitializeTensor(type, &fake_input);
if (cfg.feed_nodes.empty()) {
if (signature_feed_nodes.count(node->name()) == 0) {
new_item->feed.emplace_back(node->name(), fake_input);
}
} else if (cfg.feed_nodes.count(node->name()) > 0) {
auto it = find_if(new_item->feed.begin(), new_item->feed.end(),
[&node](std::pair<string, Tensor>& f) {
return f.first == node->name();
});
DCHECK(it != new_item->feed.end());
it->second = fake_input;
}
if (!shape_proto.dim().empty())
*(node->mutable_attr()->at("shape").mutable_shape()) = shape_proto;
return absl::OkStatus();
}
}
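// Applies the TF runtime's pre-execution rewrites (optimizer level L1 and
// optional function inlining) to the graph on a local CPU device, so the
// GrapplerItem sees the graph roughly as the runtime would. This is a no-op
// copy unless the config requests optimizations, inlining, or erasing
// "_noinline" attributes.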
Status RuntimeGraphOptimizer(const GraphDef& graph_def_arg,
GraphDef* output_graph_def,
const ItemConfig& cfg) {
if (!cfg.apply_optimizations && !cfg.inline_functions &&
!cfg.erase_noinline_attributes) {
if (output_graph_def != &graph_def_arg) {
*output_graph_def = graph_def_arg;
}
return absl::OkStatus();
}
SessionOptions options;
GraphDef graph_def(graph_def_arg);
if (cfg.erase_noinline_attributes) {
for (auto& func : *graph_def.mutable_library()->mutable_function()) {
func.mutable_attr()->erase("_noinline");
}
}
std::vector<std::unique_ptr<Device>> devices;
DeviceFactory* cpu_factory = DeviceFactory::GetFactory("CPU");
TF_RETURN_IF_ERROR(cpu_factory->CreateDevices(
options, "/job:localhost/replica:0/task:0", &devices));
Device* cpu_device = devices[0].get();
auto dvc_mgr = std::make_unique<StaticDeviceMgr>(std::move(devices));
FunctionLibraryDefinition function_library(OpRegistry::Global(),
graph_def.library());
Env* env = Env::Default();
OptimizerOptions* optimizer_opts =
options.config.mutable_graph_options()->mutable_optimizer_options();
if (cfg.apply_optimizations) {
optimizer_opts->set_opt_level(::tensorflow::OptimizerOptions::L1);
} else {
optimizer_opts->set_opt_level(::tensorflow::OptimizerOptions::L0);
}
optimizer_opts->set_do_function_inlining(cfg.inline_functions);
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(
new ProcessFunctionLibraryRuntime(dvc_mgr.get(), env, &options.config,
graph_def.versions().producer(),
&function_library, *optimizer_opts));
FunctionLibraryRuntime* flr = pflr->GetFLR(cpu_device->name());
GraphConstructorOptions graph_ctor_opts;
graph_ctor_opts.allow_internal_ops = true;
graph_ctor_opts.expect_device_spec = false;
std::unique_ptr<Graph> graphptr(new Graph(function_library));
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(
graph_ctor_opts, std::move(graph_def), graphptr.get()));
::tensorflow::GraphOptimizer optimizer(*optimizer_opts);
optimizer.Optimize(flr, env, cpu_device, &graphptr,
tensorflow::GraphOptimizer::Options());
graphptr->ToGraphDef(output_graph_def);
return AddDefaultAttrsToGraphDef(output_graph_def, *graphptr->op_registry(),
0, true);
}
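// Builds a GrapplerItem from a MetaGraphDef. Feeds come from cfg.feed_nodes,
// SignatureDef inputs (including the three tensors of a COO sparse input),
// and Placeholder nodes; fetches come from cfg.fetch_nodes, the "train_op"
// collection, and SignatureDef outputs. Synthetic input tensors are created
// for every feed. Returns nullptr on any unrecoverable inconsistency.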
std::unique_ptr<GrapplerItem> GrapplerItemFromMetaGraphDef(
const string& id, const MetaGraphDef& meta_graph, const ItemConfig& cfg) {
if (id.empty()) {
LOG(ERROR) << "id must be non-empty.";
return nullptr;
}
std::unique_ptr<GrapplerItem> new_item(new GrapplerItem());
new_item->id = id;
new_item->graph = meta_graph.graph_def();
for (const auto& feed_node : cfg.feed_nodes) {
const string feed_name = NodeName(feed_node);
new_item->feed.emplace_back(feed_name, Tensor());
}
for (const auto& fetch_node : cfg.fetch_nodes) {
new_item->fetch.emplace_back(NodeName(fetch_node));
}
if (new_item->fetch.empty() &&
meta_graph.collection_def().count("train_op") > 0) {
const CollectionDef& nodes = meta_graph.collection_def().at("train_op");
if (nodes.has_node_list()) {
for (const auto& node : nodes.node_list().value()) {
new_item->fetch.push_back(NodeName(node));
}
}
}
std::unordered_set<string> signature_feed_nodes;
std::unordered_set<string> signature_fetch_nodes;
for (const auto& name_and_signature : meta_graph.signature_def()) {
for (const auto& name_and_input : name_and_signature.second.inputs()) {
const TensorInfo& input = name_and_input.second;
if (input.has_coo_sparse()) {
int64_t dim = std::max(1, cfg.placeholder_unknown_output_shape_dim);
TensorShape shape_1d({dim});
TensorShape shape_2d({dim, dim});
if (gtl::InsertIfNotPresent(
&signature_feed_nodes,
NodeName(input.coo_sparse().values_tensor_name()))) {
Tensor value_tensor(input.dtype(), shape_1d);
InitializeTensor(input.dtype(), &value_tensor);
new_item->feed.emplace_back(
NodeName(input.coo_sparse().values_tensor_name()), value_tensor);
}
if (gtl::InsertIfNotPresent(
&signature_feed_nodes,
NodeName(input.coo_sparse().indices_tensor_name()))) {
Tensor indices_tensor(DT_INT64, shape_2d);
          // The indices tensor is DT_INT64 regardless of the value dtype,
          // so it must be initialized as such.
          InitializeTensor(DT_INT64, &indices_tensor);
new_item->feed.emplace_back(
NodeName(input.coo_sparse().indices_tensor_name()),
indices_tensor);
}
if (gtl::InsertIfNotPresent(
&signature_feed_nodes,
NodeName(input.coo_sparse().dense_shape_tensor_name()))) {
Tensor dense_shape_tensor(DT_INT64, shape_1d);
          InitializeTensor(DT_INT64, &dense_shape_tensor);
new_item->feed.emplace_back(
NodeName(input.coo_sparse().dense_shape_tensor_name()),
dense_shape_tensor);
}
} else {
if (gtl::InsertIfNotPresent(&signature_feed_nodes,
NodeName(input.name()))) {
TensorShape shape;
TensorShapeProto shape_proto;
Status s = ReplaceUnknownShapeDim(cfg, input.tensor_shape(),
&shape_proto, &shape);
if (!s.ok()) {
LOG(ERROR) << "Invalid shape for signature input " << input.name()
<< ": " << s << ", skipping this input";
return nullptr;
}
Tensor fake_input(input.dtype(), shape);
InitializeTensor(input.dtype(), &fake_input);
new_item->feed.emplace_back(NodeName(input.name()), fake_input);
}
}
}
for (const auto& name_and_output : name_and_signature.second.outputs()) {
const TensorInfo& output = name_and_output.second;
if (output.has_coo_sparse()) {
if (gtl::InsertIfNotPresent(
&signature_fetch_nodes,
NodeName(output.coo_sparse().values_tensor_name()))) {
new_item->fetch.push_back(
NodeName(output.coo_sparse().values_tensor_name()));
}
if (gtl::InsertIfNotPresent(
&signature_fetch_nodes,
NodeName(output.coo_sparse().indices_tensor_name()))) {
new_item->fetch.push_back(
NodeName(output.coo_sparse().indices_tensor_name()));
}
if (gtl::InsertIfNotPresent(
&signature_fetch_nodes,
NodeName(output.coo_sparse().dense_shape_tensor_name()))) {
new_item->fetch.push_back(
NodeName(output.coo_sparse().dense_shape_tensor_name()));
}
} else {
if (gtl::InsertIfNotPresent(&signature_fetch_nodes,
NodeName(output.name()))) {
new_item->fetch.push_back(NodeName(output.name()));
}
}
}
}
for (const auto& feed : new_item->feed) {
if (feed.first.empty()) {
LOG(ERROR) << "Invalid feed node name skipping this input";
return nullptr;
} else {
VLOG(1) << "Will use feed node " << feed.first;
}
}
for (const auto& fetch : new_item->fetch) {
if (fetch.empty()) {
LOG(ERROR) << "Invalid fetch node name skipping this input";
return nullptr;
} else {
VLOG(1) << "Will use fetch node " << fetch;
}
}
if (new_item->fetch.empty()) {
LOG(ERROR) << "Failed to detect the fetch node(s), skipping this input";
return nullptr;
}
for (const string& var_collection :
{"variables", "local_variables", "model_variables",
"trainable_variables"}) {
if (meta_graph.collection_def().count(var_collection) == 0) {
continue;
}
const CollectionDef& vars = meta_graph.collection_def().at(var_collection);
for (const auto& raw_var : vars.bytes_list().value()) {
VariableDef var;
var.ParseFromString(raw_var);
if (!var.initializer_name().empty()) {
new_item->init_ops.push_back(NodeName(var.initializer_name()));
}
}
}
if (meta_graph.collection_def().count("table_initializer") > 0) {
const CollectionDef& inits =
meta_graph.collection_def().at("table_initializer");
if (inits.has_node_list()) {
for (const auto& node : inits.node_list().value()) {
new_item->init_ops.push_back(NodeName(node));
        new_item->expected_init_time += 30 * 60;  // assume up to 30 minutes (in seconds) per table initializer
}
}
}
std::unordered_map<string, string> asset_node_to_value;
if (!cfg.assets_directory_override.empty()) {
if (meta_graph.collection_def().count("saved_model_assets") > 0) {
const CollectionDef& collection =
meta_graph.collection_def().at("saved_model_assets");
const auto& any_assets = collection.any_list().value();
if (!any_assets.empty()) {
      if (std::is_base_of<protobuf::Message, AssetFileDef>::value) {
for (const auto& any_asset : any_assets) {
AssetFileDef asset_file_def;
if (!ParseAny(any_asset, &asset_file_def, "tensorflow.AssetFileDef")
.ok()) {
LOG(ERROR) << "Failed to parse AssetFile.";
continue;
}
string asset_filepath = io::JoinPath(cfg.assets_directory_override,
asset_file_def.filename());
if (!FilesExist({asset_filepath}, nullptr)) {
LOG(ERROR) << "Can't access one or more of the asset files "
<< asset_filepath << ", skipping this input";
return nullptr;
}
asset_node_to_value[NodeName(asset_file_def.tensor_info().name())] =
asset_filepath;
}
} else {
LOG(ERROR) << "Can't parse AssetFileDef when using lite protos.";
return nullptr;
}
}
}
} else if (meta_graph.collection_def().count("asset_filepaths") > 0) {
const CollectionDef& file_paths =
meta_graph.collection_def().at("asset_filepaths");
std::vector<string> paths;
for (const auto& raw_path : file_paths.bytes_list().value()) {
paths.push_back(raw_path);
}
if (!FilesExist(paths, nullptr)) {
LOG(ERROR) << "Can't access one or more of the asset files, skipping "
"this input";
return nullptr;
}
}
if (meta_graph.collection_def().count("queue_runners") > 0) {
const CollectionDef& vars = meta_graph.collection_def().at("queue_runners");
for (const auto& raw : vars.bytes_list().value()) {
QueueRunnerDef queue_runner;
if (!queue_runner.ParseFromString(raw)) {
LOG(ERROR) << "Could not parse queue_runners, skipping this input";
return nullptr;
}
if (queue_runner.cancel_op_name().empty()) {
LOG(ERROR) << "Queue without a cancel op, skipping this input";
return nullptr;
}
new_item->queue_runners.push_back(queue_runner);
}
}
for (const auto& col : meta_graph.collection_def()) {
const CollectionDef& collection = col.second;
for (const string& node : collection.node_list().value()) {
new_item->keep_ops.push_back(NodeName(node));
}
}
for (auto& node : *new_item->graph.mutable_node()) {
if (IsPlaceholder(node) && node.op() != "PlaceholderWithDefault") {
Status s = UpdatePlaceholderShape(cfg, signature_feed_nodes,
new_item.get(), &node);
if (!s.ok()) return nullptr;
} else if (IsConstant(node)) {
auto it = asset_node_to_value.find(node.name());
if (it != asset_node_to_value.end()) {
auto iter = node.mutable_attr()->find("value");
if (iter == node.attr().end()) {
LOG(ERROR) << "Value attribute expected in const op for asset files";
return nullptr;
}
if (!iter->second.has_tensor() ||
iter->second.tensor().string_val_size() != 1) {
LOG(INFO) << "Unexpected AttrValue proto: "
<< iter->second.DebugString();
return nullptr;
}
LOG(INFO) << "Using asset file " << it->second << " for node "
<< node.name();
*(iter->second.mutable_tensor()->mutable_string_val(0)) = it->second;
}
}
node.mutable_attr()->erase("_output_shapes");
if (cfg.ignore_user_placement) {
node.clear_device();
}
if (cfg.ignore_colocation) {
auto attr = node.mutable_attr();
auto it = attr->find("_class");
if (it != attr->end()) {
attr->erase(it);
}
}
}
if (meta_graph.collection_def().count("savers") > 0) {
const CollectionDef& savers = meta_graph.collection_def().at("savers");
for (const auto& raw : savers.bytes_list().value()) {
SaverDef saver;
if (!saver.ParseFromString(raw)) {
continue;
}
if (saver.filename_tensor_name().empty()) {
continue;
}
new_item->save_op = saver.save_tensor_name();
new_item->restore_op = saver.restore_op_name();
new_item->save_restore_loc_tensor = saver.filename_tensor_name();
break;
}
} else {
const SaverDef& saver = meta_graph.saver_def();
new_item->save_op = saver.save_tensor_name();
new_item->restore_op = saver.restore_op_name();
new_item->save_restore_loc_tensor = saver.filename_tensor_name();
}
Status attr_status = AddDefaultAttrsToGraphDef(
&new_item->graph,
FunctionLibraryDefinition(OpRegistry::Global(),
new_item->graph.library()),
0, true);
if (!attr_status.ok()) {
LOG(ERROR) << "Failed to instantiate default attribute values: "
<< attr_status.message();
return nullptr;
}
VLOG(1) << "Number of nodes in graph before RuntimeGraphOptimizer: "
<< new_item->graph.node_size();
Status optimize_status =
RuntimeGraphOptimizer(new_item->graph, &new_item->graph, cfg);
if (!optimize_status.ok()) {
LOG(ERROR) << "Graph preprocessing failed: " << optimize_status;
return nullptr;
}
VLOG(1) << "Number of nodes in graph after RuntimeGraphOptimizer: "
<< new_item->graph.node_size();
if (cfg.prune_graph) {
VLOG(1) << "Pruning graph...";
auto status = PruneGraph(new_item.get());
if (!status.ok()) {
LOG(ERROR) << "Pruning failed: " << status.message();
return nullptr;
}
VLOG(1) << "Number of nodes in graph after pruning: "
<< new_item->graph.node_size();
}
std::unordered_set<string> nodes;
for (const auto& node : new_item->graph.node()) {
nodes.insert(node.name());
}
for (const auto& feed : new_item->feed) {
if (nodes.find(feed.first) == nodes.end()) {
LOG(ERROR) << "Feed node " << feed.first << " doesn't exist in graph";
return nullptr;
}
}
for (const auto& fetch : new_item->fetch) {
if (nodes.find(fetch) == nodes.end()) {
LOG(ERROR) << "Fetch node " << fetch << " doesn't exist in graph";
return nullptr;
}
}
for (const auto& init : new_item->init_ops) {
if (nodes.find(init) == nodes.end()) {
LOG(ERROR) << "Init node " << init << " doesn't exist in graph";
return nullptr;
}
}
return new_item;
}
std::unique_ptr<GrapplerItem> GrapplerItemFromMetaGraphDefFile(
const string& id, const string& meta_graph_file, const ItemConfig& cfg) {
MetaGraphDef meta_graph;
if (!ReadMetaGraphDefFromFile(meta_graph_file, &meta_graph).ok()) {
LOG(ERROR) << "Failed to read " << meta_graph_file;
return nullptr;
}
return GrapplerItemFromMetaGraphDef(id, meta_graph, cfg);
}
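// A minimal usage sketch (hedged; the id and model path below are
// hypothetical):
//
//   ItemConfig cfg;
//   cfg.prune_graph = true;
//   std::unique_ptr<GrapplerItem> item =
//       GrapplerItemFromMetaGraphDefFile("item_0", "/tmp/model.meta", cfg);
//   if (item == nullptr) {
//     // The file was unreadable or failed one of the validations above.
//   }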
}
} | #include "tensorflow/core/grappler/grappler_item_builder.h"
#include "google/protobuf/any.pb.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/gradients/grad_testutil.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
class GrapplerItemBuilderTest : public ::testing::Test {};
TEST_F(GrapplerItemBuilderTest, AssetFilepathOverrideTest) {
MetaGraphDef meta_graph;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output var =
ops::Variable(s.WithOpName("var"), TensorShape(), DataType::DT_FLOAT);
Output filename_node =
ops::Const(s.WithOpName("filename"), string("model"), TensorShape());
Output tensor_name =
ops::Const(s.WithOpName("tensorname"), string("var"), TensorShape());
Output restore = ops::Restore(s.WithOpName("restore"), filename_node,
tensor_name, DataType::DT_FLOAT);
Output assign = ops::Assign(s.WithOpName("assign"), var, restore);
TF_CHECK_OK(s.ToGraphDef(meta_graph.mutable_graph_def()));
string temp_dir = testing::TmpDir();
Env *env = Env::Default();
string filename =
io::JoinPath(temp_dir, "grappler_item_builder_test_filename");
env->DeleteFile(filename).IgnoreError();
std::unique_ptr<WritableFile> file_to_write;
TF_CHECK_OK(env->NewWritableFile(filename, &file_to_write));
TF_CHECK_OK(file_to_write->Close());
TF_CHECK_OK(env->FileExists(filename));
LOG(INFO) << filename;
AssetFileDef asset_file_def;
*asset_file_def.mutable_tensor_info()->mutable_name() = "filename";
*asset_file_def.mutable_filename() = "grappler_item_builder_test_filename";
(*meta_graph.mutable_collection_def())["saved_model_assets"]
.mutable_any_list()
->add_value()
->PackFrom(asset_file_def);
*((*meta_graph.mutable_collection_def())["train_op"]
.mutable_node_list()
->add_value()) = "assign";
ItemConfig cfg;
cfg.assets_directory_override = temp_dir;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item != nullptr);
for (const NodeDef &node : item->graph.node()) {
if (node.name() == "filename") {
const auto iter = node.attr().find("value");
ASSERT_TRUE(iter != node.attr().end());
ASSERT_TRUE(iter->second.has_tensor());
ASSERT_EQ(1, iter->second.tensor().string_val_size());
string tensor_string_val = iter->second.tensor().string_val(0);
EXPECT_EQ(tensor_string_val, filename);
}
}
}
TEST_F(GrapplerItemBuilderTest, AssetFilepathOverrideTest_FileNotAccessible) {
MetaGraphDef meta_graph;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output var =
ops::Variable(s.WithOpName("var"), TensorShape(), DataType::DT_FLOAT);
Output filename_node1 =
ops::Const(s.WithOpName("filename1"), string("model1"), TensorShape());
Output filename_node2 =
ops::Const(s.WithOpName("filename2"), string("model2"), TensorShape());
Output tensor_name =
ops::Const(s.WithOpName("tensorname"), string("var"), TensorShape());
Output restore1 = ops::Restore(s.WithOpName("restore1"), filename_node1,
tensor_name, DataType::DT_FLOAT);
  Output restore2 = ops::Restore(s.WithOpName("restore2"), filename_node2,
                                 tensor_name, DataType::DT_FLOAT);
Output assign1 = ops::Assign(s.WithOpName("assign1"), var, restore1);
Output assign2 = ops::Assign(s.WithOpName("assign2"), var, restore2);
TF_CHECK_OK(s.ToGraphDef(meta_graph.mutable_graph_def()));
string temp_dir = testing::TmpDir();
Env *env = Env::Default();
string filename1 =
io::JoinPath(temp_dir, "grappler_item_builder_test_filename1");
env->DeleteFile(filename1).IgnoreError();
std::unique_ptr<WritableFile> file_to_write;
TF_CHECK_OK(env->NewWritableFile(filename1, &file_to_write));
TF_CHECK_OK(file_to_write->Close());
TF_CHECK_OK(env->FileExists(filename1));
AssetFileDef asset_file_def1;
*asset_file_def1.mutable_tensor_info()->mutable_name() = "filename1";
*asset_file_def1.mutable_filename() = "grappler_item_builder_test_filename1";
  string filename2 =
      io::JoinPath(temp_dir, "grappler_item_builder_test_filename2");
env->DeleteFile(filename2).IgnoreError();
EXPECT_FALSE(env->FileExists(filename2).ok());
AssetFileDef asset_file_def2;
*asset_file_def2.mutable_tensor_info()->mutable_name() = "filename2";
*asset_file_def2.mutable_filename() = "grappler_item_builder_test_filename2";
(*meta_graph.mutable_collection_def())["saved_model_assets"]
.mutable_any_list()
->add_value()
->PackFrom(asset_file_def1);
(*meta_graph.mutable_collection_def())["saved_model_assets"]
.mutable_any_list()
->add_value()
->PackFrom(asset_file_def2);
*((*meta_graph.mutable_collection_def())["train_op"]
.mutable_node_list()
->add_value()) = "assign1";
*((*meta_graph.mutable_collection_def())["train_op"]
.mutable_node_list()
->add_value()) = "assign2";
ItemConfig cfg;
cfg.assets_directory_override = temp_dir;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item == nullptr);
}
TEST_F(GrapplerItemBuilderTest, GraphWithFunctions) {
MetaGraphDef meta_graph;
constexpr char device[] = "/cpu:0";
*meta_graph.mutable_graph_def() = test::function::GDef(
{test::function::NDef("x", "Const", {}, {{"dtype", DT_FLOAT}}, device),
test::function::NDef("y", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}},
device)},
{
test::function::XTimesTwo(),
});
CollectionDef train_op;
train_op.mutable_node_list()->add_value("y");
(*meta_graph.mutable_collection_def())["train_op"] = train_op;
ItemConfig cfg;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item != nullptr);
}
TEST_F(GrapplerItemBuilderTest, GraphWithCustomOps) {
MetaGraphDef meta_graph;
constexpr char device[] = "/cpu:0";
*meta_graph.mutable_graph_def() = test::function::GDef(
{test::function::NDef("x", "Const", {}, {{"dtype", DT_FLOAT}}, device),
test::function::NDef("y", "CustomOp", {"x"}, {{"T", DT_FLOAT}}, device)},
{});
CollectionDef train_op;
train_op.mutable_node_list()->add_value("y");
(*meta_graph.mutable_collection_def())["train_op"] = train_op;
ItemConfig cfg;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item != nullptr);
}
TEST_F(GrapplerItemBuilderTest, FromGraphWithSignatureDef) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x = ops::Const(s.WithOpName("x"), 0);
auto y = ops::Const(s.WithOpName("y"), 1);
auto z = ops::Add(s.WithOpName("z"), x, y);
MetaGraphDef meta_graph;
TF_CHECK_OK(s.ToGraphDef(meta_graph.mutable_graph_def()));
TensorInfo input, output;
input.set_name("x");
input.set_dtype(DT_FLOAT);
output.set_name("z");
SignatureDef serving_signature;
(*serving_signature.mutable_inputs())["input"] = input;
(*serving_signature.mutable_outputs())["output"] = output;
(*meta_graph.mutable_signature_def())["serving"] = serving_signature;
  TensorInfo input2, output2;
  input2.set_name("x");
  input2.set_dtype(DT_FLOAT);
  output2.set_name("z");
  SignatureDef serving_signature2;
  // Both signatures reference the same tensors; the builder dedups feeds and
  // fetches across signatures, so the expectations below still hold.
  (*serving_signature2.mutable_inputs())["input2"] = input2;
  (*serving_signature2.mutable_outputs())["output2"] = output2;
  (*meta_graph.mutable_signature_def())["serving2"] = serving_signature2;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, ItemConfig());
ASSERT_TRUE(item != nullptr);
EXPECT_EQ(item->feed.size(), 1);
EXPECT_EQ(item->fetch.size(), 1);
EXPECT_EQ(item->feed[0].first, "x");
EXPECT_EQ(item->fetch[0], "z");
}
TEST_F(GrapplerItemBuilderTest, FromGraphWithIncompleteSignatureDef) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x = ops::Const(s.WithOpName("x"), 0);
auto y = ops::Const(s.WithOpName("y"), 1);
MetaGraphDef meta_graph;
TF_CHECK_OK(s.ToGraphDef(meta_graph.mutable_graph_def()));
CollectionDef train_op;
train_op.mutable_node_list()->add_value("y");
(*meta_graph.mutable_collection_def())["train_op"] = train_op;
TensorInfo input, output;
input.set_name("x");
input.set_dtype(DT_FLOAT);
output.mutable_coo_sparse()->set_values_tensor_name("z");
SignatureDef serving_signature;
(*serving_signature.mutable_inputs())["input"] = input;
(*serving_signature.mutable_outputs())["output"] = output;
(*meta_graph.mutable_signature_def())["serving"] = serving_signature;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, ItemConfig());
ASSERT_TRUE(item == nullptr);
}
TEST_F(GrapplerItemBuilderTest, FromGraphWithUnknownDimInSignatureInput) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto shape_1d = PartialTensorShape({-1});
auto x = ops::Placeholder(s.WithOpName("x"), DT_FLOAT,
ops::Placeholder::Shape(shape_1d));
auto y = ops::Const(s.WithOpName("y"), static_cast<float>(1.0));
auto z = ops::Add(s.WithOpName("z"), x, y);
MetaGraphDef meta_graph;
TF_CHECK_OK(s.ToGraphDef(meta_graph.mutable_graph_def()));
TensorInfo input, output;
input.set_name("x");
input.set_dtype(DT_FLOAT);
shape_1d.AsProto(input.mutable_tensor_shape());
output.set_name("z");
SignatureDef serving_signature;
(*serving_signature.mutable_inputs())["input"] = input;
(*serving_signature.mutable_outputs())["output"] = output;
(*meta_graph.mutable_signature_def())["serving"] = serving_signature;
ItemConfig cfg;
cfg.placeholder_unknown_output_shape_dim = 64;
std::unique_ptr<GrapplerItem> item1 =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item1 != nullptr);
ASSERT_EQ(item1->feed.size(), 1);
EXPECT_EQ(item1->feed[0].second.NumElements(), 64);
std::unique_ptr<GrapplerItem> item2 =
GrapplerItemFromMetaGraphDef("0", meta_graph, ItemConfig());
ASSERT_TRUE(item2 != nullptr);
ASSERT_EQ(item2->feed.size(), 1);
EXPECT_EQ(item2->feed[0].second.NumElements(), 1);
}
TEST_F(GrapplerItemBuilderTest, ExplicitFeedAndFetch) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x = ops::Const(s.WithOpName("x"), 0);
auto y = ops::Const(s.WithOpName("y"), 1);
auto z = ops::Add(s.WithOpName("z"), x, y);
MetaGraphDef meta_graph;
TF_CHECK_OK(s.ToGraphDef(meta_graph.mutable_graph_def()));
ItemConfig config;
config.feed_nodes.insert("x");
config.fetch_nodes.insert("z");
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, config);
ASSERT_TRUE(item != nullptr);
EXPECT_EQ(item->feed.size(), 1);
EXPECT_EQ(item->fetch.size(), 1);
EXPECT_EQ(item->feed[0].first, "x");
EXPECT_EQ(item->fetch[0], "z");
}
TEST_F(GrapplerItemBuilderTest, UnknownRankPlaceholderTest) {
MetaGraphDef meta_graph;
const char* text_proto = R"EOF(
graph_def {
node {
name: "x"
op: "Placeholder"
attr { key: "dtype" value { type: DT_FLOAT } }
attr { key: "shape" value { shape { unknown_rank: true } } }
}
versions {
producer: 51
}
}
collection_def {
key: "train_op"
value {
node_list {
value: "x:0"
}
}
}
)EOF";
CHECK(protobuf::TextFormat::ParseFromString(text_proto, &meta_graph));
ItemConfig cfg;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item != nullptr);
const NodeDef& node = item->graph.node(0);
const auto iter = node.attr().find("shape");
ASSERT_TRUE(iter != node.attr().end());
ASSERT_TRUE(iter->second.has_shape());
const auto& shape = iter->second.shape();
EXPECT_TRUE(shape.unknown_rank());
}
TEST_F(GrapplerItemBuilderTest, ConfigPlaceholderTest) {
MetaGraphDef meta_graph;
const char* text_proto = R"EOF(
graph_def {
node {
name: "x"
op: "Placeholder"
attr { key: "dtype" value { type: DT_FLOAT } }
attr { key: "shape" value {
shape {
dim {
size: -1
}
dim {
size: -1
}
}
} }
}
versions {
producer: 51
}
}
collection_def {
key: "train_op"
value {
node_list {
value: "x:0"
}
}
}
)EOF";
CHECK(protobuf::TextFormat::ParseFromString(text_proto, &meta_graph));
ItemConfig cfg;
cfg.placeholder_unknown_output_shape_dim = 64;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item != nullptr);
const NodeDef& node = item->graph.node(0);
const auto iter = node.attr().find("shape");
ASSERT_TRUE(iter != node.attr().end());
ASSERT_TRUE(iter->second.has_shape());
const auto& shape = iter->second.shape();
EXPECT_EQ(shape.dim_size(), 2);
EXPECT_EQ(shape.dim(0).size(), 64);
EXPECT_EQ(shape.dim(1).size(), 64);
}
TEST_F(GrapplerItemBuilderTest, OutputShapePlaceholderTest) {
MetaGraphDef meta_graph;
const char* text_proto = R"EOF(
graph_def {
node {
name: "x"
op: "Placeholder"
attr { key: "dtype" value { type: DT_FLOAT } }
attr { key: "shape" value { shape { unknown_rank: true } } }
attr { key: "_output_shapes" value { list {
shape {
dim {
size: -1
}
dim {
size: 32
}
}
} } }
}
versions {
producer: 51
}
}
collection_def {
key: "train_op"
value {
node_list {
value: "x:0"
}
}
}
)EOF";
CHECK(protobuf::TextFormat::ParseFromString(text_proto, &meta_graph));
ItemConfig cfg;
cfg.placeholder_unknown_output_shape_dim = 64;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item != nullptr);
const NodeDef& node = item->graph.node(0);
const auto iter = node.attr().find("shape");
ASSERT_TRUE(iter != node.attr().end());
ASSERT_TRUE(iter->second.has_shape());
const auto& shape = iter->second.shape();
EXPECT_EQ(shape.dim_size(), 2);
EXPECT_EQ(shape.dim(0).size(), 64);
EXPECT_EQ(shape.dim(1).size(), 32);
}
}
}
} |
1,350 | cpp | tensorflow/tensorflow | grappler_item | tensorflow/core/grappler/grappler_item.cc | tensorflow/core/grappler/grappler_item_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_GRAPPLER_ITEM_H_
#define TENSORFLOW_CORE_GRAPPLER_GRAPPLER_ITEM_H_
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/variable.pb.h"
#include "tensorflow/core/protobuf/queue_runner.pb.h"
#include "tsl/platform/cpu_info.h"
namespace tensorflow {
namespace grappler {
struct GrapplerItem {
GrapplerItem() = default;
GrapplerItem(const GrapplerItem& other) = default;
GrapplerItem(GrapplerItem&& other) = default;
GrapplerItem& operator=(const GrapplerItem& other) = default;
GrapplerItem& operator=(GrapplerItem&& other) = default;
virtual ~GrapplerItem() = default;
GrapplerItem WithGraph(GraphDef&& graph) const;
string id;
GraphDef graph;
std::vector<std::pair<string, Tensor>> feed;
std::vector<string> fetch;
std::vector<string> init_ops;
int64_t expected_init_time = 0;
string save_op;
string restore_op;
string save_restore_loc_tensor;
std::vector<QueueRunnerDef> queue_runners;
std::vector<string> keep_ops;
std::vector<const NodeDef*> MainOpsFanin() const;
std::vector<const NodeDef*> EnqueueOpsFanin() const;
std::vector<const NodeDef*> InitOpsFanin() const;
std::vector<const NodeDef*> MainVariables() const;
std::unordered_set<string> NodesToPreserve() const;
struct OptimizationOptions {
bool allow_non_differentiable_rewrites = true;
bool allow_pruning_stateful_and_dataset_ops = true;
bool optimize_function_library = true;
bool is_eager_mode = false;
int intra_op_parallelism_threads = tsl::port::MaxParallelism();
};
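  // Sketch: eager callers typically start from CreateOptOptionsForEager()
  // (declared after this struct) instead of hand-tuning these flags, e.g.
  //
  //   GrapplerItem item;
  //   item.optimization_options() = CreateOptOptionsForEager();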
const std::unordered_set<string>& devices() const;
Status AddDevice(const string& device);
Status AddDevices(const GrapplerItem& other);
Status InferDevicesFromGraph();
void ClearDevices();
const OptimizationOptions& optimization_options() const;
OptimizationOptions& optimization_options();
private:
std::unordered_set<string> devices_;
OptimizationOptions optimization_options_;
};
GrapplerItem::OptimizationOptions CreateOptOptionsForEager();
}
}
#endif
#include "tensorflow/core/grappler/grappler_item.h"
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/transitive_fanin.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace grappler {
GrapplerItem::OptimizationOptions CreateOptOptionsForEager() {
GrapplerItem::OptimizationOptions optimization_options;
optimization_options.allow_pruning_stateful_and_dataset_ops = true;
optimization_options.is_eager_mode = true;
optimization_options.optimize_function_library = false;
return optimization_options;
}
GrapplerItem GrapplerItem::WithGraph(GraphDef&& graph_def) const {
GrapplerItem item;
item.id = id;
item.feed = feed;
item.fetch = fetch;
item.init_ops = init_ops;
item.keep_ops = keep_ops;
item.expected_init_time = expected_init_time;
item.save_op = save_op;
item.restore_op = restore_op;
item.save_restore_loc_tensor = save_restore_loc_tensor;
item.queue_runners = queue_runners;
item.devices_ = devices_;
item.optimization_options_ = optimization_options_;
item.graph.Swap(&graph_def);
return item;
}
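// Sketch of intended use (the rewrite helper is hypothetical): WithGraph moves
// a new GraphDef into a copy of the item while keeping feeds, fetches, init
// ops, devices, and optimization options intact.
//
//   GraphDef rewritten = SomeRewrite(item.graph);  // hypothetical helper
//   GrapplerItem new_item = item.WithGraph(std::move(rewritten));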
std::vector<const NodeDef*> GrapplerItem::MainOpsFanin() const {
std::vector<const NodeDef*> fanin_nodes;
TF_CHECK_OK(ComputeTransitiveFanin(graph, fetch, &fanin_nodes));
return fanin_nodes;
}
std::vector<const NodeDef*> GrapplerItem::EnqueueOpsFanin() const {
std::vector<string> enqueue_ops;
for (const auto& queue_runner : queue_runners) {
for (const string& enqueue_op : queue_runner.enqueue_op_name()) {
enqueue_ops.push_back(enqueue_op);
}
}
std::vector<const NodeDef*> fanin_nodes;
  TF_CHECK_OK(ComputeTransitiveFanin(graph, enqueue_ops, &fanin_nodes));  // fanin of the enqueue ops, not the fetch nodes
return fanin_nodes;
}
std::vector<const NodeDef*> GrapplerItem::InitOpsFanin() const {
std::vector<const NodeDef*> fanin_nodes;
TF_CHECK_OK(ComputeTransitiveFanin(graph, init_ops, &fanin_nodes));
return fanin_nodes;
}
std::vector<const NodeDef*> GrapplerItem::MainVariables() const {
std::vector<const NodeDef*> fanin;
TF_CHECK_OK(ComputeTransitiveFanin(graph, init_ops, &fanin));
std::vector<const NodeDef*> vars;
for (const NodeDef* node : fanin) {
if (IsVariable(*node)) {
vars.push_back(node);
}
}
return vars;
}
std::unordered_set<string> GrapplerItem::NodesToPreserve() const {
std::unordered_set<string> result;
for (const string& f : fetch) {
VLOG(1) << "Add fetch " << f;
result.insert(NodeName(f));
}
for (const auto& f : feed) {
VLOG(1) << "Add feed " << f.first;
result.insert(NodeName(f.first));
}
for (const auto& node : init_ops) {
result.insert(NodeName(node));
}
for (const auto& node : keep_ops) {
result.insert(NodeName(node));
}
if (!save_op.empty()) {
result.insert(NodeName(save_op));
}
if (!restore_op.empty()) {
result.insert(NodeName(restore_op));
}
if (!save_restore_loc_tensor.empty()) {
result.insert(NodeName(save_restore_loc_tensor));
}
for (const auto& queue_runner : queue_runners) {
for (const string& enqueue_op : queue_runner.enqueue_op_name()) {
result.insert(NodeName(enqueue_op));
}
if (!queue_runner.close_op_name().empty()) {
result.insert(NodeName(queue_runner.close_op_name()));
}
if (!queue_runner.cancel_op_name().empty()) {
result.insert(NodeName(queue_runner.cancel_op_name()));
}
}
absl::optional<FunctionLibraryDefinition> fn_library;
if (!optimization_options_.allow_pruning_stateful_and_dataset_ops) {
fn_library.emplace(OpRegistry::Global(), graph.library());
}
for (const NodeDef& node : graph.node()) {
const auto attrs = AttrSlice(&node.attr());
if (!optimization_options_.allow_pruning_stateful_and_dataset_ops &&
(IsStateful(node, &*fn_library) || IsDataset(node))) {
result.insert(node.name());
}
bool do_not_remove;
if (TryGetNodeAttr(attrs, "_grappler_do_not_remove", &do_not_remove) &&
do_not_remove) {
result.insert(node.name());
}
}
return result;
}
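// Note: optimizers are expected to consult NodesToPreserve() before pruning;
// feeds, fetches, init/save/restore ops, queue ops, and nodes tagged with
// "_grappler_do_not_remove" must all survive rewrites.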
const std::unordered_set<string>& GrapplerItem::devices() const {
return devices_;
}
Status GrapplerItem::AddDevice(const string& device) {
DeviceNameUtils::ParsedName name;
if (!DeviceNameUtils::ParseFullName(device, &name)) {
return errors::InvalidArgument("Invalid device name: device=", device);
} else if (!name.has_job || !name.has_replica || !name.has_task ||
!name.has_type || !name.has_id) {
return errors::InvalidArgument("Not a fully defined device name: device=",
device);
}
devices_.insert(DeviceNameUtils::ParsedNameToString(name));
return absl::OkStatus();
}
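// Example (sketch, with hypothetical device names): only fully defined device
// names are accepted, since the checks above require job, replica, task, type,
// and id.
//
//   GrapplerItem item;
//   TF_CHECK_OK(item.AddDevice("/job:worker/replica:0/task:0/device:CPU:0"));
//   Status s = item.AddDevice("/device:GPU:0");  // rejected: not fully defined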
Status GrapplerItem::AddDevices(const GrapplerItem& other) {
std::vector<absl::string_view> invalid_devices;
for (const string& device : other.devices()) {
Status added = AddDevice(device);
if (!added.ok()) invalid_devices.emplace_back(device);
}
return invalid_devices.empty()
? absl::OkStatus()
: errors::InvalidArgument("Skipped invalid devices: [",
absl::StrJoin(invalid_devices, ", "),
"]");
}
Status GrapplerItem::InferDevicesFromGraph() {
absl::flat_hash_set<absl::string_view> invalid_devices;
for (const NodeDef& node : graph.node()) {
Status added = AddDevice(node.device());
if (!added.ok()) invalid_devices.insert(node.device());
}
VLOG(2) << "Inferred device set: [" << absl::StrJoin(devices_, ", ") << "]";
return invalid_devices.empty()
? absl::OkStatus()
: errors::InvalidArgument("Skipped invalid devices: [",
absl::StrJoin(invalid_devices, ", "),
"]");
}
void GrapplerItem::ClearDevices() { devices_.clear(); }
const GrapplerItem::OptimizationOptions& GrapplerItem::optimization_options()
const {
return optimization_options_;
}
GrapplerItem::OptimizationOptions& GrapplerItem::optimization_options() {
return optimization_options_;
}
}
} | #include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class GrapplerItemTest : public ::testing::Test {};
TEST_F(GrapplerItemTest, Basic) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {{"CPU:0"}});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
EXPECT_TRUE(item.InitOpsFanin().empty());
std::vector<string> graph_nodes;
for (const auto& node : item.graph.node()) {
graph_nodes.push_back(node.name());
}
std::vector<string> main_ops;
for (const auto& node : item.MainOpsFanin()) {
main_ops.push_back(node->name());
}
std::sort(graph_nodes.begin(), graph_nodes.end());
std::sort(main_ops.begin(), main_ops.end());
EXPECT_EQ(main_ops, graph_nodes);
}
TEST_F(GrapplerItemTest, InferDevices) {
using test::function::NDef;
const string cpu0 = "/job:work/replica:1/task:1/device:CPU:0";
const string cpu1 = "/job:work/replica:1/task:1/device:CPU:1";
const string cpu2 = "/device:CPU:2";
GrapplerItem item;
item.graph = test::function::GDef(
{
NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu0),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu1),
NDef("c", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu2),
},
{} );
ASSERT_FALSE(item.InferDevicesFromGraph().ok());
EXPECT_EQ(item.devices().size(), 2);
EXPECT_NE(item.devices().find(cpu0), item.devices().end());
EXPECT_NE(item.devices().find(cpu1), item.devices().end());
item.ClearDevices();
EXPECT_EQ(item.devices().size(), 0);
}
}
}
} |
1,351 | cpp | tensorflow/tensorflow | mutable_graph_view | tensorflow/core/grappler/mutable_graph_view.cc | tensorflow/core/grappler/mutable_graph_view_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_MUTABLE_GRAPH_VIEW_H_
#define TENSORFLOW_CORE_GRAPPLER_MUTABLE_GRAPH_VIEW_H_
#include <set>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/grappler/graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace grappler {
const char kMutableGraphViewCtrl[] = "ConstantFoldingCtrl";
class MutableGraphView : public internal::GraphViewInternal<GraphDef, NodeDef> {
public:
explicit MutableGraphView(GraphDef* graph) : GraphViewInternal(graph) {
for (NodeDef& node : *graph->mutable_node()) AddUniqueNodeOrDie(&node);
for (NodeDef& node : *graph->mutable_node()) AddAndDedupFanouts(&node);
}
using GraphViewInternal::GetFanout;
const absl::flat_hash_set<InputPort>& GetFanout(
const GraphView::OutputPort& port) const;
using GraphViewInternal::GetFanin;
absl::flat_hash_set<OutputPort> GetFanin(
const GraphView::InputPort& port) const;
using GraphViewInternal::GetRegularFanin;
const OutputPort GetRegularFanin(const GraphView::InputPort& port) const;
NodeDef* AddNode(NodeDef&& node);
Status AddSubgraph(GraphDef&& subgraph);
Status UpdateNode(absl::string_view node_name, absl::string_view op,
absl::string_view device,
absl::Span<const std::pair<string, AttrValue>> attrs);
Status UpdateNodeName(absl::string_view from_node_name,
absl::string_view to_node_name, bool update_fanouts);
Status SwapNodeNames(absl::string_view from_node_name,
absl::string_view to_node_name, bool update_fanouts);
Status UpdateFanouts(absl::string_view from_node_name,
absl::string_view to_node_name);
Status AddRegularFanin(absl::string_view node_name, const TensorId& fanin);
Status AddRegularFaninByPort(absl::string_view node_name, int port,
const TensorId& fanin);
Status AddControllingFanin(absl::string_view node_name,
const TensorId& fanin);
Status RemoveRegularFanin(absl::string_view node_name, const TensorId& fanin);
Status RemoveRegularFaninByPort(absl::string_view node_name, int port);
Status RemoveControllingFanin(absl::string_view node_name,
absl::string_view fanin_node_name);
Status RemoveAllFanins(absl::string_view node_name,
bool keep_controlling_fanins);
Status UpdateFanin(absl::string_view node_name, const TensorId& from_fanin,
const TensorId& to_fanin);
Status UpdateRegularFaninByPort(absl::string_view node_name, int port,
const TensorId& fanin);
Status SwapRegularFaninsByPorts(absl::string_view node_name, int from_port,
int to_port);
Status UpdateAllRegularFaninsToControlling(absl::string_view node_name);
Status DeleteNodes(const absl::flat_hash_set<string>& nodes_to_delete);
private:
void AddAndDedupFanouts(NodeDef* node);
void UpdateMaxRegularOutputPortForRemovedFanin(
const OutputPort& fanin,
const absl::flat_hash_set<InputPort>& fanin_fanouts);
void UpdateMaxRegularOutputPortForAddedFanin(const OutputPort& fanin);
Status UpdateFanoutsInternal(NodeDef* from_node, NodeDef* to_node);
bool AddFaninInternal(NodeDef* node, const OutputPort& fanin);
NodeDef* GetControllingFaninToAdd(absl::string_view node_name,
const OutputPort& fanin, string* error_msg);
NodeDef* GetOrCreateIdentityConsumingSwitch(const OutputPort& fanin);
bool RemoveRegularFaninInternal(NodeDef* node, const OutputPort& fanin);
bool RemoveControllingFaninInternal(NodeDef* node, NodeDef* fanin_node);
Status CheckNodesCanBeDeleted(
const absl::flat_hash_set<string>& nodes_to_delete);
void RemoveFaninsInternal(NodeDef* deleted_node,
bool keep_controlling_fanins);
void RemoveFanoutsInternal(NodeDef* deleted_node);
};
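// Usage sketch (hedged; node names are hypothetical): the view keeps the
// fanin/fanout bookkeeping in sync with in-place GraphDef edits, so a rewrite
// is a sequence of checked mutations.
//
//   GraphDef graph_def;  // assumed to be populated elsewhere
//   MutableGraphView view(&graph_def);
//   TF_CHECK_OK(view.AddRegularFanin("consumer", {"producer", 0}));
//   TF_CHECK_OK(view.RemoveControllingFanin("consumer", "stale_ctrl"));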
}
}
#endif
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include <algorithm>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace grappler {
namespace {
bool IsTensorIdPortValid(const TensorId& tensor_id) {
return tensor_id.index() >= Graph::kControlSlot;
}
bool IsTensorIdRegular(const TensorId& tensor_id) {
return tensor_id.index() > Graph::kControlSlot;
}
bool IsTensorIdControlling(const TensorId& tensor_id) {
return tensor_id.index() == Graph::kControlSlot;
}
bool IsOutputPortControlling(const MutableGraphView::OutputPort& port) {
return port.port_id == Graph::kControlSlot;
}
bool IsIdentityConsumingSwitch(const MutableGraphView& graph,
const NodeDef& node) {
if ((IsIdentity(node) || IsIdentityNSingleInput(node)) &&
node.input_size() > 0) {
TensorId tensor_id = ParseTensorName(node.input(0));
if (IsTensorIdControlling(tensor_id)) {
return false;
}
NodeDef* input_node = graph.GetNode(tensor_id.node());
if (input_node == nullptr) {
return false;
}
return IsSwitch(*input_node);
}
return false;
}
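// Note: an Identity that directly consumes a Switch output carries the
// branch's liveness, so it seemingly cannot be replaced by a plain control
// dependency on the Switch; the dedup helpers below refuse that case.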
bool CanDedupControlWithRegularInput(const MutableGraphView& graph,
const NodeDef& control_node) {
return !IsIdentityConsumingSwitch(graph, control_node);
}
bool CanDedupControlWithRegularInput(const MutableGraphView& graph,
absl::string_view control_node_name) {
NodeDef* control_node = graph.GetNode(control_node_name);
if (control_node == nullptr) {
return false;
}
return CanDedupControlWithRegularInput(graph, *control_node);
}
bool HasRegularFaninNode(const MutableGraphView& graph, const NodeDef& node,
absl::string_view fanin_node_name) {
const int num_regular_fanins =
graph.NumFanins(node, false);
for (int i = 0; i < num_regular_fanins; ++i) {
if (ParseTensorName(node.input(i)).node() == fanin_node_name) {
return true;
}
}
return false;
}
using FanoutsMap =
absl::flat_hash_map<MutableGraphView::OutputPort,
absl::flat_hash_set<MutableGraphView::InputPort>>;
void SwapControlledFanoutInputs(const MutableGraphView& graph,
const FanoutsMap::iterator& control_fanouts,
absl::string_view to_node_name) {
absl::string_view from_node_name(control_fanouts->first.node->name());
string control = TensorIdToString({to_node_name, Graph::kControlSlot});
for (const auto& control_fanout : control_fanouts->second) {
const int start = graph.NumFanins(*control_fanout.node,
false);
for (int i = start; i < control_fanout.node->input_size(); ++i) {
TensorId tensor_id = ParseTensorName(control_fanout.node->input(i));
if (tensor_id.node() == from_node_name) {
control_fanout.node->set_input(i, control);
break;
}
}
}
}
void SwapRegularFanoutInputs(FanoutsMap* fanouts, NodeDef* from_node,
absl::string_view to_node_name, int max_port) {
MutableGraphView::OutputPort port;
port.node = from_node;
for (int i = 0; i <= max_port; ++i) {
port.port_id = i;
auto it = fanouts->find(port);
if (it == fanouts->end()) {
continue;
}
string input = TensorIdToString({to_node_name, i});
for (const auto& fanout : it->second) {
fanout.node->set_input(fanout.port_id, input);
}
}
}
using MaxOutputPortsMap = absl::flat_hash_map<const NodeDef*, int>;
void SwapFanoutInputs(const MutableGraphView& graph, FanoutsMap* fanouts,
MaxOutputPortsMap* max_output_ports, NodeDef* from_node,
NodeDef* to_node) {
auto from_control_fanouts = fanouts->find({from_node, Graph::kControlSlot});
if (from_control_fanouts != fanouts->end()) {
SwapControlledFanoutInputs(graph, from_control_fanouts, to_node->name());
}
auto to_control_fanouts = fanouts->find({to_node, Graph::kControlSlot});
if (to_control_fanouts != fanouts->end()) {
SwapControlledFanoutInputs(graph, to_control_fanouts, from_node->name());
}
auto from_max_port = max_output_ports->find(from_node);
if (from_max_port != max_output_ports->end()) {
SwapRegularFanoutInputs(fanouts, from_node, to_node->name(),
from_max_port->second);
}
auto to_max_port = max_output_ports->find(to_node);
if (to_max_port != max_output_ports->end()) {
SwapRegularFanoutInputs(fanouts, to_node, from_node->name(),
to_max_port->second);
}
}
void SwapFanoutsMapValues(FanoutsMap* fanouts,
const MutableGraphView::OutputPort& from_port,
const FanoutsMap::iterator& from_fanouts,
const MutableGraphView::OutputPort& to_port,
const FanoutsMap::iterator& to_fanouts) {
const bool from_exists = from_fanouts != fanouts->end();
const bool to_exists = to_fanouts != fanouts->end();
if (from_exists && to_exists) {
std::swap(from_fanouts->second, to_fanouts->second);
} else if (from_exists) {
auto node = fanouts->extract(from_fanouts);
fanouts->emplace(to_port, std::move(node.mapped()));
} else if (to_exists) {
auto node = fanouts->extract(to_port);
fanouts->emplace(from_port, std::move(node.mapped()));
}
}
void SwapRegularFanoutsAndMaxPortValues(FanoutsMap* fanouts,
MaxOutputPortsMap* max_output_ports,
NodeDef* from_node, NodeDef* to_node) {
auto from_max_port = max_output_ports->find(from_node);
auto to_max_port = max_output_ports->find(to_node);
bool from_exists = from_max_port != max_output_ports->end();
bool to_exists = to_max_port != max_output_ports->end();
auto forward_fanouts = [fanouts](NodeDef* from, NodeDef* to, int start,
int end) {
for (int i = start; i <= end; ++i) {
MutableGraphView::OutputPort from_port(from, i);
auto node = fanouts->extract(from_port);
if (!node.empty()) {
MutableGraphView::OutputPort to_port(to, i);
fanouts->emplace(to_port, std::move(node.mapped()));
}
}
};
if (from_exists && to_exists) {
const int from = from_max_port->second;
const int to = to_max_port->second;
const int shared = std::min(from, to);
for (int i = 0; i <= shared; ++i) {
MutableGraphView::OutputPort from_port(from_node, i);
auto from_fanouts = fanouts->find(from_port);
MutableGraphView::OutputPort to_port(to_node, i);
auto to_fanouts = fanouts->find(to_port);
SwapFanoutsMapValues(fanouts, from_port, from_fanouts, to_port,
to_fanouts);
}
if (to > from) {
forward_fanouts(to_node, from_node, shared + 1, to);
} else if (from > to) {
forward_fanouts(from_node, to_node, shared + 1, from);
}
std::swap(from_max_port->second, to_max_port->second);
} else if (from_exists) {
forward_fanouts(from_node, to_node, 0, from_max_port->second);
max_output_ports->emplace(to_node, from_max_port->second);
max_output_ports->erase(from_node);
} else if (to_exists) {
forward_fanouts(to_node, from_node, 0, to_max_port->second);
max_output_ports->emplace(from_node, to_max_port->second);
max_output_ports->erase(to_node);
}
}
bool HasFanoutValue(const FanoutsMap& fanouts, const FanoutsMap::iterator& it) {
return it != fanouts.end() && !it->second.empty();
}
Status MutationError(absl::string_view function_name, absl::string_view params,
absl::string_view msg) {
return errors::InvalidArgument(absl::Substitute(
"MutableGraphView::$0($1) error: $2.", function_name, params, msg));
}
using ErrorHandler = std::function<Status(absl::string_view)>;
ErrorHandler UpdateFanoutsError(absl::string_view from_node_name,
absl::string_view to_node_name) {
return [from_node_name, to_node_name](absl::string_view msg) {
string params = absl::Substitute("from_node_name='$0', to_node_name='$1'",
from_node_name, to_node_name);
return MutationError("UpdateFanouts", params, msg);
};
}
Status CheckFaninIsRegular(const TensorId& fanin, ErrorHandler handler) {
if (!IsTensorIdRegular(fanin)) {
return handler(absl::Substitute("fanin '$0' must be a regular tensor id",
fanin.ToString()));
}
return absl::OkStatus();
}
Status CheckFaninIsValid(const TensorId& fanin, ErrorHandler handler) {
if (!IsTensorIdPortValid(fanin)) {
return handler(absl::Substitute("fanin '$0' must be a valid tensor id",
fanin.ToString()));
}
return absl::OkStatus();
}
Status CheckAddingFaninToSelf(absl::string_view node_name,
const TensorId& fanin, ErrorHandler handler) {
if (node_name == fanin.node()) {
return handler(
absl::Substitute("can't add fanin '$0' to self", fanin.ToString()));
}
return absl::OkStatus();
}
Status CheckRemovingFaninFromSelf(absl::string_view node_name,
const TensorId& fanin, ErrorHandler handler) {
if (node_name == fanin.node()) {
return handler(absl::Substitute("can't remove fanin '$0' from self",
fanin.ToString()));
}
return absl::OkStatus();
}
string NodeMissingErrorMsg(absl::string_view node_name) {
return absl::Substitute("node '$0' was not found", node_name);
}
Status CheckNodeExists(absl::string_view node_name, NodeDef* node,
ErrorHandler handler) {
if (node == nullptr) {
return handler(NodeMissingErrorMsg(node_name));
}
return absl::OkStatus();
}
Status CheckPortRange(int port, int min, int max, ErrorHandler handler) {
if (port < min || port > max) {
if (max < min) {
return handler("no available ports as node has no regular fanins");
}
return handler(
absl::Substitute("port must be in range [$0, $1]", min, max));
}
return absl::OkStatus();
}
string SwapNodeNamesSwitchControlErrorMsg(absl::string_view node_name) {
return absl::Substitute(
"can't swap node name '$0' as it will become a Switch control dependency",
node_name);
}
string GeneratedNameForIdentityConsumingSwitch(
const MutableGraphView::OutputPort& fanin) {
return AddPrefixToNodeName(
absl::StrCat(fanin.node->name(), "_", fanin.port_id),
kMutableGraphViewCtrl);
}
string PrintInTextFormat(const protobuf::MessageLite& message) {
return message.ShortDebugString();
}
string PrintInTextFormat(const protobuf::Message& message) {
string message_text;
::tensorflow::protobuf::TextFormat::Printer printer;
printer.SetSingleLineMode(true);
printer.PrintToString(message, &message_text);
if (!message_text.empty() && message_text[message_text.size() - 1] == ' ') {
message_text.resize(message_text.size() - 1);
}
return message_text;
}
}
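// Registers a node's fanins in the fanout map, dropping duplicate control
// inputs when the same node already appears as a fanin and dedup is safe, and
// records the max regular input/output ports seen.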
void MutableGraphView::AddAndDedupFanouts(NodeDef* node) {
absl::flat_hash_set<absl::string_view> fanins;
absl::flat_hash_set<absl::string_view> controlling_fanins;
int max_input_port = -1;
int pos = 0;
const int last_idx = node->input_size() - 1;
int last_pos = last_idx;
while (pos <= last_pos) {
TensorId tensor_id = ParseTensorName(node->input(pos));
absl::string_view input_node_name = tensor_id.node();
bool is_control_input = IsTensorIdControlling(tensor_id);
bool can_dedup_control_with_regular_input =
CanDedupControlWithRegularInput(*this, input_node_name);
bool can_dedup_control =
is_control_input && (can_dedup_control_with_regular_input ||
controlling_fanins.contains(input_node_name));
if (!gtl::InsertIfNotPresent(&fanins, input_node_name) &&
can_dedup_control) {
node->mutable_input()->SwapElements(pos, last_pos);
--last_pos;
} else {
OutputPort output(nodes()[input_node_name], tensor_id.index());
if (is_control_input) {
fanouts()[output].emplace(node, Graph::kControlSlot);
} else {
max_input_port = pos;
max_regular_output_port()[output.node] =
std::max(max_regular_output_port()[output.node], output.port_id);
fanouts()[output].emplace(node, pos);
}
++pos;
}
if (is_control_input) {
controlling_fanins.insert(input_node_name);
}
}
if (last_pos < last_idx) {
node->mutable_input()->DeleteSubrange(last_pos + 1, last_idx - last_pos);
}
if (max_input_port > -1) {
max_regular_input_port()[node] = max_input_port;
}
}
void MutableGraphView::UpdateMaxRegularOutputPortForRemovedFanin(
const OutputPort& fanin,
const absl::flat_hash_set<InputPort>& fanin_fanouts) {
int max_port = max_regular_output_port()[fanin.node];
if (!fanin_fanouts.empty() || max_port != fanin.port_id) {
return;
}
bool updated_max_port = false;
for (int i = fanin.port_id - 1; i >= 0; --i) {
OutputPort fanin_port(fanin.node, i);
if (!fanouts()[fanin_port].empty()) {
max_regular_output_port()[fanin.node] = i;
updated_max_port = true;
break;
}
}
if (!updated_max_port) {
max_regular_output_port().erase(fanin.node);
}
}
void MutableGraphView::UpdateMaxRegularOutputPortForAddedFanin(
const OutputPort& fanin) {
if (max_regular_output_port()[fanin.node] < fanin.port_id) {
max_regular_output_port()[fanin.node] = fanin.port_id;
}
}
const absl::flat_hash_set<MutableGraphView::InputPort>&
MutableGraphView::GetFanout(const GraphView::OutputPort& port) const {
return GetFanout(MutableGraphView::OutputPort(const_cast<NodeDef*>(port.node),
port.port_id));
}
absl::flat_hash_set<MutableGraphView::OutputPort> MutableGraphView::GetFanin(
const GraphView::InputPort& port) const {
return GetFanin(MutableGraphView::InputPort(const_cast<NodeDef*>(port.node),
port.port_id));
}
const MutableGraphView::OutputPort MutableGraphView::GetRegularFanin(
const GraphView::InputPort& port) const {
return GetRegularFanin(MutableGraphView::InputPort(
const_cast<NodeDef*>(port.node), port.port_id));
}
NodeDef* MutableGraphView::AddNode(NodeDef&& node) {
auto* node_in_graph = graph()->add_node();
*node_in_graph = std::move(node);
  AddUniqueNodeOrDie(node_in_graph);
  AddAndDedupFanouts(node_in_graph);
  return node_in_graph;
} | #include "tensorflow/core/grappler/mutable_graph_view.h"
#include "absl/strings/substitute.h"
#include "absl/types/span.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using ::tensorflow::test::function::NDef;
using FDH = FunctionDefHelper;
void CompareNodeFanins(const MutableGraphView& graph, NodeDef* node,
absl::Span<const string> fanins) {
ASSERT_EQ(node->input_size(), fanins.size());
for (int i = 0; i < node->input_size(); ++i) {
TensorId tensor_id = ParseTensorName(fanins[i]);
EXPECT_EQ(ParseTensorName(node->input(i)), tensor_id);
int port;
if (tensor_id.index() == Graph::kControlSlot) {
port = Graph::kControlSlot;
} else {
port = i;
}
MutableGraphView::InputPort input_port(node, port);
MutableGraphView::OutputPort output_port =
graph.GetOutputPort(tensor_id.node(), tensor_id.index());
EXPECT_TRUE(graph.GetFanin(input_port).contains(output_port));
EXPECT_TRUE(graph.GetFanout(output_port).contains(input_port));
}
}
void CompareNodeFanouts(const MutableGraphView& graph, NodeDef* node,
absl::Span<const string> fanouts) {
auto node_fanouts =
graph.GetFanouts(*node, true);
EXPECT_EQ(node_fanouts.size(), fanouts.size());
for (const string& fanout : fanouts) {
TensorId tensor_id = ParseTensorName(fanout);
MutableGraphView::InputPort input_port(graph.GetNode(tensor_id.node()),
tensor_id.index());
EXPECT_TRUE(node_fanouts.contains(input_port));
}
}
void CheckNode(const MutableGraphView& graph, absl::string_view node_name,
absl::string_view op, absl::string_view device,
absl::Span<const std::pair<string, FDH::AttrValueWrapper>> attrs,
absl::Span<const string> fanins,
absl::Span<const string> fanouts) {
NodeDef* node = graph.GetNode(node_name);
ASSERT_NE(node, nullptr);
EXPECT_EQ(node->op(), op);
EXPECT_EQ(node->device(), device);
EXPECT_EQ(node->attr_size(), attrs.size());
for (const auto& attr : attrs) {
auto it = node->attr().find(attr.first);
ASSERT_NE(it, node->attr().end());
EXPECT_TRUE(AreAttrValuesEqual(it->second, attr.second.proto));
}
CompareNodeFanins(graph, node, fanins);
CompareNodeFanouts(graph, node, fanouts);
}
void CheckGraph(const MutableGraphView& mutable_graph) {
GraphView immutable_graph(mutable_graph.graph());
EXPECT_EQ(mutable_graph.graph()->node_size(),
immutable_graph.graph()->node_size());
EXPECT_EQ(mutable_graph.graph(), immutable_graph.graph());
auto check_edges =
[](const absl::flat_hash_set<MutableGraphView::Edge>& mutable_edges,
const absl::flat_hash_set<GraphView::Edge>& immutable_edges) {
EXPECT_EQ(mutable_edges.size(), immutable_edges.size());
for (const auto& fanin_edge : mutable_edges) {
GraphView::Edge immutable_edge(
{fanin_edge.src.node, fanin_edge.src.port_id},
{fanin_edge.dst.node, fanin_edge.dst.port_id});
EXPECT_TRUE(immutable_edges.contains(immutable_edge));
}
};
for (auto& node : *mutable_graph.graph()->mutable_node()) {
EXPECT_EQ(&node, immutable_graph.GetNode(node.name()));
auto mutable_fanins =
mutable_graph.GetFanins(node, true);
auto immutable_fanins =
immutable_graph.GetFanins(node, true);
EXPECT_EQ(mutable_fanins.size(), immutable_fanins.size());
for (const auto& fanin : mutable_fanins) {
GraphView::OutputPort immutable_fanin(fanin.node, fanin.port_id);
EXPECT_TRUE(immutable_fanins.contains(immutable_fanin));
}
auto mutable_fanouts =
mutable_graph.GetFanouts(node, true);
auto immutable_fanouts =
immutable_graph.GetFanouts(node, true);
EXPECT_EQ(mutable_fanouts.size(), immutable_fanouts.size());
for (const auto& fanout : mutable_fanouts) {
GraphView::InputPort immutable_fanout(fanout.node, fanout.port_id);
EXPECT_TRUE(immutable_fanouts.contains(immutable_fanout));
}
auto mutable_fanin_edges =
mutable_graph.GetFaninEdges(node, true);
auto immutable_fanin_edges =
immutable_graph.GetFaninEdges(node, true);
check_edges(mutable_fanin_edges, immutable_fanin_edges);
auto mutable_fanout_edges =
mutable_graph.GetFanoutEdges(node, true);
auto immutable_fanout_edges =
immutable_graph.GetFanoutEdges(node, true);
check_edges(mutable_fanout_edges, immutable_fanout_edges);
}
}
TEST(MutableGraphViewTest, AddSubgraph) {
GraphDef graph_def = test::function::GDef(
{
NDef("foo", "NotImportant", {}, {}),
NDef("bar", "NotImportant", {}, {}),
NDef("baz", "NotImportant", {"foo", "bar"}),
},
{});
MutableGraphView graph(&graph_def);
GraphDef subgraph = test::function::GDef(
{
NDef("s/n0", "NotImportant", {}, {}),
NDef("s/n1", "NotImportant", {"bar", "s/n0"}, {}),
},
{});
TF_EXPECT_OK(graph.AddSubgraph(std::move(subgraph)));
CheckNode(graph, "bar", "NotImportant", "", {}, {}, {"baz:1", "s/n1"});
CheckNode(graph, "s/n1", "NotImportant", "", {}, {"bar", "s/n0"}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, AddSubgraphAndAddFunction) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
FunctionDef x_times_two = test::function::XTimesTwo();
GraphDef subgraph = test::function::GDef({}, {x_times_two});
TF_EXPECT_OK(graph.AddSubgraph(std::move(subgraph)));
EXPECT_EQ(graph_def.library().function_size(), 1);
}
TEST(MutableGraphViewTest, AddSubgraphAndSkipSameFunction) {
FunctionDef x_times_two = test::function::XTimesTwo();
GraphDef graph_def = test::function::GDef({}, {x_times_two});
MutableGraphView graph(&graph_def);
GraphDef subgraph = test::function::GDef({}, {x_times_two});
TF_EXPECT_OK(graph.AddSubgraph(std::move(subgraph)));
EXPECT_EQ(graph_def.library().function_size(), 1);
}
TEST(MutableGraphViewTest, AddSubgraphAndFailIfFunctionDifferent) {
FunctionDef x_times_four = test::function::XTimesFour();
x_times_four.mutable_signature()->set_name("XTimesTwo");
GraphDef graph_def = test::function::GDef({}, {x_times_four});
MutableGraphView graph(&graph_def);
FunctionDef x_times_two = test::function::XTimesTwo();
GraphDef subgraph = test::function::GDef({}, {x_times_two});
Status status = graph.AddSubgraph(std::move(subgraph));
EXPECT_FALSE(status.ok());
EXPECT_EQ(status.message(),
"MutableGraphView::AddSubgraph(function_size=1) error: Found "
"different function definition with the same name: XTimesTwo.");
}
TEST(MutableGraphViewTest, UpdateNodeNoDedupControlDependency) {
constexpr char kDevice[] = "/device:foo:0";
GraphDef graph_def = test::function::GDef(
{NDef("bar_1", "Switch", {}, {}), NDef("bar_2", "Identity", {"bar_1:1"}),
NDef("other", "NotImportant", {}, {}),
NDef("foo_1", "NotImportant", {"bar_2", "other", "bar_2:1", "^bar_2"}),
NDef("foo_2", "NotImportant", {"other:1", "bar_2:2", "^bar_2"})},
{});
MutableGraphView graph(&graph_def);
AttrValue list_value;
list_value.mutable_list()->add_type(DT_FLOAT);
TF_EXPECT_OK(
graph.UpdateNode("bar_2", "IdentityN", kDevice, {{"T", list_value}}));
CheckNode(graph, "bar_1", "Switch", "", {}, {}, {"bar_2"});
CheckNode(graph, "bar_2", "IdentityN", kDevice, {{"T", list_value}},
{"bar_1:1"}, {"foo_1", "foo_1:2", "^foo_1", "foo_2:1", "^foo_2"});
CheckNode(graph, "other", "NotImportant", "", {}, {}, {"foo_1:1", "foo_2"});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"bar_2", "other", "bar_2:1", "^bar_2"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {},
{"other:1", "bar_2:2", "^bar_2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, UpdateNodeDedupControlDependency) {
constexpr char kDevice[] = "/device:foo:0";
GraphDef graph_def = test::function::GDef(
{NDef("bar_1", "Switch", {}, {}), NDef("bar_2", "Identity", {"bar_1:1"}),
NDef("other", "NotImportant", {}, {}),
NDef("foo_1", "NotImportant", {"bar_2", "other", "bar_2:1", "^bar_2"}),
NDef("foo_2", "NotImportant", {"other:1", "bar_2:2", "^bar_2"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.UpdateNode("bar_2", "NotImportant", kDevice, {}));
CheckNode(graph, "bar_1", "Switch", "", {}, {}, {"bar_2"});
CheckNode(graph, "bar_2", "NotImportant", kDevice, {}, {"bar_1:1"},
{"foo_1", "foo_1:2", "foo_2:1"});
CheckNode(graph, "other", "NotImportant", "", {}, {}, {"foo_1:1", "foo_2"});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"bar_2", "other", "bar_2:1"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {}, {"other:1", "bar_2:2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, UpdateNodeSwitchNoControlDependency) {
constexpr char kDevice[] = "/device:foo:0";
GraphDef graph_def =
test::function::GDef({NDef("foo", "NotImportant", {}, {}),
NDef("bar", "NotImportant", {"foo:1"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.UpdateNode("foo", "Switch", kDevice, {}));
CheckNode(graph, "foo", "Switch", kDevice, {}, {}, {"bar"});
CheckNode(graph, "bar", "NotImportant", "", {}, {"foo:1"}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, UpdateNodeSwitchControlDependency) {
constexpr char kDevice[] = "/device:foo:0";
GraphDef graph_def =
test::function::GDef({NDef("foo", "NotImportant", {}, {}),
NDef("bar", "NotImportant", {"^foo"})},
{});
MutableGraphView graph(&graph_def);
AttrValue attr;
attr.set_type(DT_FLOAT);
Status s = graph.UpdateNode("foo", "Switch", kDevice, {{"T", attr}});
EXPECT_FALSE(s.ok());
string expected_msg =
"MutableGraphView::UpdateNodeOp(node_name='foo', op='Switch', "
"device='/device:foo:0', attrs={('T', type: DT_FLOAT)}) error: can't "
"change node op to Switch when node drives a control dependency "
"(alternatively, we could add the identity node needed, but it seems "
"like an unlikely event and probably a mistake).";
EXPECT_EQ(s.message(), expected_msg);
CheckNode(graph, "foo", "NotImportant", "", {}, {}, {"^bar"});
CheckNode(graph, "bar", "NotImportant", "", {}, {"^foo"}, {});
CheckGraph(graph);
}
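// Returns a map from node name to that node's inputs for every node in
// `graph` except `node_to_exclude`, so tests can verify that unrelated nodes
// were left untouched.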
absl::flat_hash_map<string, std::vector<string>> GetNodeInputsFromGraph(
const GraphDef& graph, absl::string_view node_to_exclude) {
absl::flat_hash_map<string, std::vector<string>> node_inputs;
for (const auto& node : graph.node()) {
if (node.name() == node_to_exclude) {
continue;
}
node_inputs[node.name()] =
std::vector<string>(node.input().begin(), node.input().end());
}
return node_inputs;
}
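// Asserts that every node other than `node_to_exclude` still has exactly the
// fanins recorded in `unmodified_node_inputs`.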
void CheckUnmodifiedNodeFanins(
const GraphDef& graph, absl::string_view node_to_exclude,
const absl::flat_hash_map<string, std::vector<string>>&
unmodified_node_inputs) {
for (const auto& node : graph.node()) {
if (node.name() == node_to_exclude) {
continue;
}
auto it = unmodified_node_inputs.find(node.name());
ASSERT_NE(it, unmodified_node_inputs.end());
ASSERT_EQ(it->second.size(), node.input_size());
for (int i = 0; i < node.input_size(); ++i) {
EXPECT_EQ(node.input(i), it->second[i]);
}
}
}
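// Runs MutableGraphView::UpdateNodeName on a small three-node graph
// (a, b <- a, c) and checks the returned status, the renamed node's fanins,
// and that all other nodes keep their original fanins.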
void TestUpdateNodeName(absl::string_view from_node_name, bool node_exists,
absl::string_view to_node_name, bool update_fanouts,
bool success, const string& error_msg,
absl::Span<const string> expected_fanins) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "NotImportant", {"a"}),
NDef("c", "NotImportant", {}, {})},
{});
MutableGraphView graph(&graph_def);
NodeDef* node = graph.GetNode(from_node_name);
if (node_exists) {
EXPECT_NE(node, nullptr);
} else {
EXPECT_EQ(node, nullptr);
}
absl::flat_hash_map<string, std::vector<string>> unmodified_node_inputs =
GetNodeInputsFromGraph(graph_def, from_node_name);
Status s = graph.UpdateNodeName(from_node_name, to_node_name, update_fanouts);
EXPECT_EQ(s.ok(), success);
string updated_node_name;
if (success) {
updated_node_name = string(to_node_name);
} else {
updated_node_name = string(from_node_name);
EXPECT_EQ(s.message(), error_msg);
}
if (node_exists) {
EXPECT_EQ(node->name(), updated_node_name);
CompareNodeFanins(graph, node, expected_fanins);
}
CheckUnmodifiedNodeFanins(graph_def, updated_node_name,
unmodified_node_inputs);
CheckGraph(graph);
}
TEST(MutableGraphViewTest, UpdateNodeName) {
string error_msg;
TestUpdateNodeName("b", true, "d", false,
true, error_msg, {"a"});
TestUpdateNodeName("b", true, "b", false,
true, error_msg, {"a"});
TestUpdateNodeName("a", true, "a", false,
true, error_msg, {});
error_msg =
"MutableGraphView::UpdateNodeName(from_node_name='c', to_node_name='b', "
"update_fanouts=false) error: can't update node name because new node "
"name is in use.";
TestUpdateNodeName("c", true, "b", false,
false, error_msg, {});
error_msg =
"MutableGraphView::UpdateNodeName(from_node_name='a', to_node_name='b', "
"update_fanouts=true) error: can't update node name because new node "
"name is in use.";
TestUpdateNodeName("a", true, "b", true,
false, error_msg, {});
error_msg =
"MutableGraphView::UpdateNodeName(from_node_name='a', to_node_name='d', "
"update_fanouts=false) error: can't update node name because node has "
"fanouts.";
TestUpdateNodeName("a", true, "d", false,
false, error_msg, {});
error_msg =
"MutableGraphView::UpdateNodeName(from_node_name='d', to_node_name='e', "
"update_fanouts=false) error: node 'd' was not found.";
TestUpdateNodeName("d", false, "e", false,
false, error_msg, {});
error_msg =
"MutableGraphView::UpdateNodeName(from_node_name='d', to_node_name='e', "
"update_fanouts=true) error: node 'd' was not found.";
TestUpdateNodeName("d", false, "e", true,
false, error_msg, {});
}
TEST(MutableGraphViewTest, UpdateNodeNameWithFanouts) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "NotImportant", {"a:2"}),
NDef("c", "NotImportant", {"b", "^a"}),
NDef("d", "NotImportant", {"^b", "^a"}),
NDef("e", "NotImportant", {"b:2", "c:4", "b:1", "^a"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.UpdateNodeName("b", "f", true));
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"f", "^c", "^d", "^e"});
CheckNode(graph, "f", "NotImportant", "", {}, {"a:2"},
{"c", "^d", "e", "e:2"});
CheckNode(graph, "c", "NotImportant", "", {}, {"f", "^a"}, {"e:1"});
CheckNode(graph, "d", "NotImportant", "", {}, {"^f", "^a"}, {});
CheckNode(graph, "e", "NotImportant", "", {}, {"f:2", "c:4", "f:1", "^a"},
{});
CheckGraph(graph);
}
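// Builds a graph with two Switch -> Identity chains feeding foo_1 and foo_2
// (each with a regular and a control fanin); shared by the SwapNodeNames
// tests below.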
GraphDef SimpleSwapNodeNamesMutationGraph() {
return test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("switch_1", "Switch", {"a"}),
NDef("identity_1", "Identity", {"switch_1:1"}),
NDef("b", "NotImportant", {}, {}), NDef("switch_2", "Switch", {"b"}),
NDef("identity_2", "Identity", {"switch_2:0"}),
NDef("foo_1", "NotImportant", {"identity_1", "^identity_1"}),
NDef("foo_2", "NotImportant", {"identity_2", "^identity_2"})},
{});
}
void TestSwapNodeNames(bool update_fanouts) {
GraphDef graph_def = SimpleSwapNodeNamesMutationGraph();
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.SwapNodeNames("foo_1", "foo_2", update_fanouts));
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"switch_1"});
CheckNode(graph, "switch_1", "Switch", "", {}, {"a"}, {"identity_1"});
CheckNode(graph, "identity_1", "Identity", "", {}, {"switch_1:1"},
{"foo_2", "^foo_2"});
CheckNode(graph, "b", "NotImportant", "", {}, {}, {"switch_2"});
CheckNode(graph, "switch_2", "Switch", "", {}, {"b"}, {"identity_2"});
CheckNode(graph, "identity_2", "Identity", "", {}, {"switch_2:0"},
{"foo_1", "^foo_1"});
CheckNode(graph, "foo_2", "NotImportant", "", {},
{"identity_1", "^identity_1"}, {});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"identity_2", "^identity_2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphView, SwapNodeNames) {
TestSwapNodeNames(false);
TestSwapNodeNames(true);
}
void TestSwapNodeNamesWithSameNames(bool update_fanouts) {
GraphDef graph_def = SimpleSwapNodeNamesMutationGraph();
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.SwapNodeNames("identity_1", "identity_1", update_fanouts));
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"switch_1"});
CheckNode(graph, "switch_1", "Switch", "", {}, {"a"}, {"identity_1"});
CheckNode(graph, "identity_1", "Identity", "", {}, {"switch_1:1"},
{"foo_1", "^foo_1"});
CheckNode(graph, "b", "NotImportant", "", {}, {}, {"switch_2"});
CheckNode(graph, "switch_2", "Switch", "", {}, {"b"}, {"identity_2"});
CheckNode(graph, "identity_2", "Identity", "", {}, {"switch_2:0"},
{"foo_2", "^foo_2"});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"identity_1", "^identity_1"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {},
{"identity_2", "^identity_2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphView, SwapNodeNamesSameName) {
TestSwapNodeNamesWithSameNames(false);
TestSwapNodeNamesWithSameNames(true);
}
TEST(MutableGraphView, SwapNodeNamesBetweenSwitches) {
GraphDef graph_def = SimpleSwapNodeNamesMutationGraph();
MutableGraphView graph(&graph_def);
  TF_EXPECT_OK(
      graph.SwapNodeNames("switch_1", "switch_2", /*update_fanouts=*/false));
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"switch_2"});
CheckNode(graph, "switch_2", "Switch", "", {}, {"a"}, {"identity_2"});
CheckNode(graph, "identity_1", "Identity", "", {}, {"switch_1:1"},
{"foo_1", "^foo_1"});
CheckNode(graph, "b", "NotImportant", "", {}, {}, {"switch_1"});
CheckNode(graph, "switch_1", "Switch", "", {}, {"b"}, {"identity_1"});
CheckNode(graph, "identity_2", "Identity", "", {}, {"switch_2:0"},
{"foo_2", "^foo_2"});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"identity_1", "^identity_1"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {},
{"identity_2", "^identity_2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphView, SwapNodeNamesBetweenSwitchesAndUpdateFanouts) {
GraphDef graph_def = SimpleSwapNodeNamesMutationGraph();
MutableGraphView graph(&graph_def);
  TF_EXPECT_OK(
      graph.SwapNodeNames("switch_1", "switch_2", /*update_fanouts=*/true));
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"switch_2"});
CheckNode(graph, "switch_2", "Switch", "", {}, {"a"}, {"identity_1"});
CheckNode(graph, "identity_1", "Identity", "", {}, {"switch_2:1"},
{"foo_1", "^foo_1"});
CheckNode(graph, "b", "NotImportant", "", {}, {}, {"switch_1"});
CheckNode(graph, "switch_1", "Switch", "", {}, {"b"}, {"identity_2"});
CheckNode(graph, "identity_2", "Identity", "", {}, {"switch_1:0"},
{"foo_2", "^foo_2"});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"identity_1", "^identity_1"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {},
{"identity_2", "^identity_2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphView, SwapNodeNamesSwitchAndNonSwitch) {
GraphDef graph_def = SimpleSwapNodeNamesMutationGraph();
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.SwapNodeNames("a", "switch_1", false));
CheckNode(graph, "switch_1", "NotImportant", "", {}, {}, {"a", "identity_1"});
CheckNode(graph, "a", "Switch", "", {}, {"switch_1"}, {});
CheckNode(graph, "identity_1", "Identity", "", {}, {"switch_1:1"}, {"foo_1"});
CheckNode(graph, "b", "NotImportant", "", {}, {}, {"switch_2"});
CheckNode(graph, "switch_2", "Switch", "", {}, {"b"}, {"identity_2"});
CheckNode(graph, "identity_2", "Identity", "", {}, {"switch_2:0"},
{"foo_2", "^foo_2"});
CheckNode(graph, "foo_1", "NotImportant", "", {}, {"identity_1"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {},
{"identity_2", "^identity_2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphView, SwapNodeNamesSwitchAndNonSwitchAndUpdateFanouts) {
GraphDef graph_def = SimpleSwapNodeNamesMutationGraph();
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.SwapNodeNames("a", "switch_1", true));
CheckNode(graph, "switch_1", "NotImportant", "", {}, {}, {"a"});
CheckNode(graph, "a", "Switch", "", {}, {"switch_1"}, {"identity_1"});
CheckNode(graph, "identity_1", "Identity", "", {}, {"a:1"},
{"foo_1", "^foo_1"});
CheckNode(graph, "b", "NotImportant", "", {}, {}, {"switch_2"});
CheckNode(graph, "switch_2", "Switch", "", {}, {"b"}, {"identity_2"});
CheckNode(graph, "identity_2", "Identity", "", {}, {"switch_2:0"},
{"foo_2", "^foo_2"});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"identity_1", "^identity_1"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {},
{"identity_2", "^identity_2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphView, SwapNodeNamesNonSwitchAndSwitch) {
GraphDef graph_def = SimpleSwapNodeNamesMutationGraph();
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.SwapNodeNames("switch_2", "b", false));
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"switch_1"});
CheckNode(graph, "switch_1", "Switch", "", {}, {"a"}, {"identity_1"});
CheckNode(graph, "identity_1", "Identity", "", {}, {"switch_1:1"},
{"foo_1", "^foo_1"});
CheckNode(graph, "switch_2", "NotImportant", "", {}, {}, {"b", "identity_2"});
CheckNode(graph, "b", "Switch", "", {}, {"switch_2"}, {});
CheckNode(graph, "identity_2", "Identity", "", {}, {"switch_2:0"}, {"foo_2"});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"identity_1", "^identity_1"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {}, {"identity_2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphView, SwapNodeNamesNonSwitchAndSwitchAndUpdateFanouts) {
GraphDef graph_def = SimpleSwapNodeNamesMutationGraph();
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.SwapNodeNames("switch_2", "b", true));
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"switch_1"});
CheckNode(graph, "switch_1", "Switch", "", {}, {"a"}, {"identity_1"});
CheckNode(graph, "identity_1", "Identity", "", {}, {"switch_1:1"},
{"foo_1", "^foo_1"});
CheckNode(graph, "switch_2", "NotImportant", "", {}, {}, {"b"});
CheckNode(graph, "b", "Switch", "", {}, {"switch_2"}, {"identity_2"});
CheckNode(graph, "identity_2", "Identity", "", {}, {"b:0"},
{"foo_2", "^foo_2"});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"identity_1", "^identity_1"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {},
{"identity_2", "^identity_2"}, {});
CheckGraph(graph);
}
void TestSwapNodeNamesSimpleSelfLoop(bool update_fanouts) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {"b:7"}), NDef("b", "NotImportant", {"a:10"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.SwapNodeNames("a", "b", update_fanouts));
CheckNode(graph, "a", "NotImportant", "", {}, {"b:10"}, {"b:0"});
CheckNode(graph, "b", "NotImportant", "", {}, {"a:7"}, {"a:0"});
CheckGraph(graph);
}
TEST(MutableGraphView, SwapNodeNamesSelfLoops) {
TestSwapNodeNamesSimpleSelfLoop(false);
TestSwapNodeNamesSimpleSelfLoop(true);
}
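// Expects SwapNodeNames to fail with `error_msg` and verifies that the graph
// is left unchanged.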
void TestSwapNodeNamesError(absl::string_view from_node_name,
absl::string_view to_node_name, bool update_fanouts,
const string& error_msg) {
GraphDef graph_def = SimpleSwapNodeNamesMutationGraph();
MutableGraphView graph(&graph_def);
Status s = graph.SwapNodeNames(from_node_name, to_node_name, update_fanouts);
  EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(), error_msg);
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"switch_1"});
CheckNode(graph, "switch_1", "Switch", "", {}, {"a"}, {"identity_1"});
CheckNode(graph, "identity_1", "Identity", "", {}, {"switch_1:1"},
{"foo_1", "^foo_1"});
CheckNode(graph, "b", "NotImportant", "", {}, {}, {"switch_2"});
CheckNode(graph, "switch_2", "Switch", "", {}, {"b"}, {"identity_2"});
CheckNode(graph, "identity_2", "Identity", "", {}, {"switch_2:0"},
{"foo_2", "^foo_2"});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"identity_1", "^identity_1"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {},
{"identity_2", "^identity_2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphView, SwapNodeNamesError) {
string error_msg;
error_msg =
"MutableGraphView::SwapNodeNames(from_node_name='foo_3', "
"to_node_name='foo_2', update_fanouts=false) error: node 'foo_3' was not "
"found.";
TestSwapNodeNamesError("foo_3", "foo_2", false, error_msg);
error_msg =
"MutableGraphView::SwapNodeNames(from_node_name='foo_3', "
"to_node_name='foo_2', update_fanouts=true) error: node 'foo_3' was not "
"found.";
TestSwapNodeNamesError("foo_3", "foo_2", true, error_msg);
error_msg =
"MutableGraphView::SwapNodeNames(from_node_name='foo_1', "
"to_node_name='foo_4', update_fanouts=false) error: node 'foo_4' was not "
"found.";
TestSwapNodeNamesError("foo_1", "foo_4", false, error_msg);
error_msg =
"MutableGraphView::SwapNodeNames(from_node_name='foo_1', "
"to_node_name='foo_4', update_fanouts=true) error: node 'foo_4' was not "
"found.";
TestSwapNodeNamesError("foo_1", "foo_4", true, error_msg);
error_msg =
"MutableGraphView::SwapNodeNames(from_node_name='foo_5', "
"to_node_name='foo_6', update_fanouts=false) error: node 'foo_5' was not "
"found.";
TestSwapNodeNamesError("foo_5", "foo_6", false, error_msg);
error_msg =
"MutableGraphView::SwapNodeNames(from_node_name='foo_5', "
"to_node_name='foo_6', update_fanouts=true) error: node 'foo_5' was not "
"found.";
TestSwapNodeNamesError("foo_5", "foo_6", true, error_msg);
error_msg =
"MutableGraphView::SwapNodeNames(from_node_name='switch_2', "
"to_node_name='identity_1', update_fanouts=false) error: can't swap node "
"name 'switch_2' as it will become a Switch control dependency.";
TestSwapNodeNamesError("switch_2", "identity_1", false,
error_msg);
error_msg =
"MutableGraphView::SwapNodeNames(from_node_name='identity_2', "
"to_node_name='switch_1', update_fanouts=false) error: can't swap node "
"name 'switch_1' as it will become a Switch control dependency.";
TestSwapNodeNamesError("identity_2", "switch_1", false,
error_msg);
}
TEST(MutableGraphViewTest, AddAndUpdateFanouts) {
GraphDef graph_def = test::function::GDef(
{NDef("bar", "NotImportant", {}, {}),
NDef("other", "NotImportant", {}, {}),
NDef("foo_1", "NotImportant", {"bar", "other", "bar:1", "^bar"}),
NDef("foo_2", "NotImportant", {"other:1", "bar:2", "^bar"}),
NDef("foo_3", "NotImportant", {"other:2", "^bar"})},
{});
MutableGraphView graph(&graph_def);
NodeDef* new_bar = graph.AddNode(NDef("new_bar", "NotImportant", {}, {}));
TF_EXPECT_OK(graph.UpdateFanouts("bar", new_bar->name()));
CheckNode(graph, "bar", "NotImportant", "", {}, {}, {});
CheckNode(graph, "other", "NotImportant", "", {}, {},
{"foo_1:1", "foo_2", "foo_3"});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"new_bar", "other", "new_bar:1"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {}, {"other:1", "new_bar:2"},
{});
CheckNode(graph, "foo_3", "NotImportant", "", {}, {"other:2", "^new_bar"},
{});
CheckNode(graph, "new_bar", "NotImportant", "", {}, {},
{"foo_1:0", "foo_1:2", "foo_2:1", "^foo_3"});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, AddAndUpdateFanoutsKeepControls) {
GraphDef graph_def = test::function::GDef(
{NDef("bar_1", "Switch", {}, {}), NDef("bar_2", "Identity", {"bar_1:1"}),
NDef("other", "NotImportant", {}, {}),
NDef("foo_1", "NotImportant", {"bar_2", "other", "bar_2:1", "^bar_2"}),
NDef("foo_2", "NotImportant", {"other:1", "bar_2:2", "^bar_2"})},
{});
MutableGraphView graph(&graph_def);
NodeDef* new_bar = graph.AddNode(NDef("new_bar", "Identity", {"bar_1:2"}));
TF_EXPECT_OK(graph.UpdateFanouts("bar_2", new_bar->name()));
CheckNode(graph, "bar_1", "Switch", "", {}, {}, {"bar_2", "new_bar"});
CheckNode(graph, "bar_2", "Identity", "", {}, {"bar_1:1"}, {});
CheckNode(graph, "other", "NotImportant", "", {}, {}, {"foo_1:1", "foo_2"});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"new_bar", "other", "new_bar:1", "^new_bar"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {},
{"other:1", "new_bar:2", "^new_bar"}, {});
CheckNode(graph, "new_ |
1,352 | cpp | tensorflow/tensorflow | tpu | tensorflow/core/grappler/utils/tpu.cc | tensorflow/core/grappler/utils/tpu_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_UTILS_TPU_H_
#define TENSORFLOW_CORE_GRAPPLER_UTILS_TPU_H_
#include "tensorflow/core/framework/graph.pb.h"
namespace tensorflow {
namespace grappler {
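// Returns true iff `def`, or any function in its library, contains a
// TPUCompile or TPUPartitionedCall node, i.e. the graph targets the legacy
// TPU bridge.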
bool IsLegacyTPUBridgeGraphDef(const GraphDef& def);
}
}
#endif
#include "tensorflow/core/grappler/utils/tpu.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
namespace tensorflow {
namespace grappler {
bool IsLegacyTPUBridgeGraphDef(const GraphDef& def) {
for (const auto& node : def.node()) {
if (node.op() == "TPUCompile" || node.op() == "TPUPartitionedCall") {
return true;
}
}
if (!def.has_library()) return false;
for (const auto& function_def : def.library().function()) {
for (const auto& node : function_def.node_def()) {
if (node.op() == "TPUCompile" || node.op() == "TPUPartitionedCall") {
return true;
}
}
}
return false;
}
}
} | #include "tensorflow/core/grappler/utils/tpu.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
class TpuTest : public ::testing::Test {};
TEST_F(TpuTest, NotTpuGraph) {
{
GraphDef tpu_graph;
tpu_graph.add_node()->set_op("Add");
FunctionDefLibrary* library = tpu_graph.mutable_library();
FunctionDef* function_def = library->add_function();
function_def->add_node_def()->set_op("Mul");
EXPECT_FALSE(IsLegacyTPUBridgeGraphDef(tpu_graph));
}
}
TEST_F(TpuTest, TpuMainGraph) {
{
GraphDef tpu_graph;
tpu_graph.add_node()->set_op("TPUPartitionedCall");
EXPECT_TRUE(IsLegacyTPUBridgeGraphDef(tpu_graph));
}
}
TEST_F(TpuTest, TpuLibraryGraph) {
{
GraphDef tpu_graph;
tpu_graph.add_node()->set_op("BatchFunction");
FunctionDefLibrary* library = tpu_graph.mutable_library();
FunctionDef* function_def = library->add_function();
function_def->add_node_def()->set_op("TPUPartitionedCall");
EXPECT_TRUE(IsLegacyTPUBridgeGraphDef(tpu_graph));
}
}
}
} |
1,353 | cpp | tensorflow/tensorflow | functions | tensorflow/core/grappler/utils/functions.cc | tensorflow/core/grappler/utils/functions_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_UTILS_FUNCTIONS_H_
#define TENSORFLOW_CORE_GRAPPLER_UTILS_FUNCTIONS_H_
#include <memory>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/lib/gtl/flatset.h"
namespace tensorflow {
namespace grappler {
struct InputArgInstantiation {
InputArgInstantiation(string node_name, DataType data_type)
: node_name(std::move(node_name)), data_type(data_type) {}
string node_name;
DataType data_type;
};
struct OutputArgInstantiation {
OutputArgInstantiation(string node_name, DataType data_type)
: node_name(std::move(node_name)), data_type(data_type) {}
string node_name;
DataType data_type;
};
struct ControlOutput {
string output_name;
string node_name;
bool operator<(const ControlOutput& a) const {
return output_name < a.output_name;
}
};
class GrapplerFunctionItem : public GrapplerItem {
public:
GrapplerFunctionItem() = default;
const string& description() const;
const std::vector<InputArgInstantiation>& inputs() const;
const InputArgInstantiation& input(int i) const;
const std::size_t input_size() const;
const std::vector<OutputArgInstantiation>& outputs() const;
const OutputArgInstantiation& output(int i) const;
const std::size_t output_size() const;
const std::vector<ControlOutput>& control_outputs() const;
const std::size_t control_output_size() const;
const AttrSlice& func_attr() const;
const std::vector<const FunctionDef::ArgAttrs*>& arg_attr() const;
const GraphDef& function_body() const;
GraphDef& mutable_function_body();
bool is_stateful() const;
GrapplerFunctionItem& SwapFunctionBody(GraphDef&& other);
private:
friend Status MakeGrapplerFunctionItem(const FunctionDef&, const AttrSlice&,
const FunctionLibraryDefinition&, int,
GrapplerFunctionItem*);
friend Status ReplaceInputWithConst(const NodeDef&, int,
GrapplerFunctionItem*);
friend Status RemoveFunctionOutputs(const absl::flat_hash_set<int>&,
GrapplerFunctionItem*,
std::vector<std::pair<int, int>>*);
GrapplerFunctionItem(string func_name, string description,
AttrSlice func_attr,
std::vector<const FunctionDef::ArgAttrs*> arg_attr,
std::vector<InputArgInstantiation> input_args,
std::vector<OutputArgInstantiation> output_args,
std::vector<ControlOutput> control_outputs,
int graph_def_version, bool is_stateful,
GraphDef&& function_body);
string description_;
AttrSlice func_attr_;
std::vector<const FunctionDef::ArgAttrs*> arg_attr_;
std::vector<InputArgInstantiation> input_args_;
std::vector<OutputArgInstantiation> output_args_;
std::vector<ControlOutput> control_outputs_;
bool is_stateful_ = false;
};
bool HasParametrizedType(const FunctionDef& func);
bool HasParametrizedBody(const FunctionDef& func);
bool IsParametrized(const FunctionDef& func);
Status InstantiationTypeParameters(
const FunctionDef& func, const AttrSlice& func_instantiation_attr,
absl::flat_hash_map<string, DataType>* type_parameters);
Status InstantiationBodyParameters(
const FunctionDef& func, const AttrSlice& func_instantiation_attr,
absl::flat_hash_map<string, AttrValue>* body_parameters);
Status ReplaceInputWithConst(const NodeDef& input_const, int input_index,
GrapplerFunctionItem* item);
Status RemoveFunctionOutputs(const absl::flat_hash_set<int>& remove_outputs,
GrapplerFunctionItem* item,
std::vector<std::pair<int, int>>* output_mapping);
Status MakeGrapplerFunctionItem(const FunctionDef& func,
const AttrSlice& func_instantiation_attr,
const FunctionLibraryDefinition& flib,
int graph_def_version,
GrapplerFunctionItem* item);
Status MakeGrapplerFunctionItem(const FunctionDef& func,
const FunctionLibraryDefinition& flib,
int graph_def_version,
GrapplerFunctionItem* item);
Status MakeFunctionDef(const GrapplerFunctionItem& item,
const FunctionLibraryDefinition& flib,
FunctionDef* func);
}
}
#endif
#include "tensorflow/core/grappler/utils/functions.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/strings/scanner.h"
namespace tensorflow {
namespace grappler {
GrapplerFunctionItem::GrapplerFunctionItem(
string func_name, string description, AttrSlice func_attr,
std::vector<const FunctionDef::ArgAttrs*> arg_attr,
std::vector<InputArgInstantiation> input_args,
std::vector<OutputArgInstantiation> output_args,
std::vector<ControlOutput> control_outputs, const int graph_def_version,
const bool is_stateful, GraphDef&& function_body)
: description_(std::move(description)),
func_attr_(func_attr),
arg_attr_(std::move(arg_attr)),
input_args_(std::move(input_args)),
output_args_(std::move(output_args)),
control_outputs_(std::move(control_outputs)),
is_stateful_(is_stateful) {
id = std::move(func_name);
graph = std::move(function_body);
graph.mutable_versions()->set_producer(graph_def_version);
for (const InputArgInstantiation& input_arg : input_args_) {
feed.push_back({input_arg.node_name, Tensor()});
}
for (const OutputArgInstantiation& output_arg : output_args_) {
fetch.push_back(output_arg.node_name);
}
for (const ControlOutput& control_output : control_outputs_) {
keep_ops.push_back(control_output.node_name);
}
optimization_options().allow_pruning_stateful_and_dataset_ops = false;
}
const string& GrapplerFunctionItem::description() const { return description_; }
const std::vector<InputArgInstantiation>& GrapplerFunctionItem::inputs() const {
return input_args_;
}
const InputArgInstantiation& GrapplerFunctionItem::input(int i) const {
return input_args_[i];
}
const std::size_t GrapplerFunctionItem::input_size() const {
return input_args_.size();
}
const std::vector<OutputArgInstantiation>& GrapplerFunctionItem::outputs()
const {
return output_args_;
}
const OutputArgInstantiation& GrapplerFunctionItem::output(int i) const {
return output_args_[i];
}
const std::size_t GrapplerFunctionItem::output_size() const {
return output_args_.size();
}
const std::vector<ControlOutput>& GrapplerFunctionItem::control_outputs()
const {
return control_outputs_;
}
const std::size_t GrapplerFunctionItem::control_output_size() const {
return control_outputs_.size();
}
const AttrSlice& GrapplerFunctionItem::func_attr() const { return func_attr_; }
const std::vector<const FunctionDef::ArgAttrs*>&
GrapplerFunctionItem::arg_attr() const {
return arg_attr_;
}
const GraphDef& GrapplerFunctionItem::function_body() const { return graph; }
GraphDef& GrapplerFunctionItem::mutable_function_body() { return graph; }
bool GrapplerFunctionItem::is_stateful() const { return is_stateful_; }
GrapplerFunctionItem& GrapplerFunctionItem::SwapFunctionBody(GraphDef&& other) {
graph = std::move(other);
return *this;
}
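// True iff the type of any input or output argument in the signature is
// parametrized by an attribute (type_attr, number_attr or type_list_attr).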
bool HasParametrizedType(const FunctionDef& func) {
const auto is_type_parametrized = [](const OpDef::ArgDef& arg) {
return !arg.type_attr().empty() || !arg.number_attr().empty() ||
!arg.type_list_attr().empty();
};
const auto& input = func.signature().input_arg();
const auto& output = func.signature().output_arg();
return std::any_of(input.begin(), input.end(), is_type_parametrized) ||
std::any_of(output.begin(), output.end(), is_type_parametrized);
}
bool HasParametrizedBody(const FunctionDef& func) {
const auto is_parametrized = [&](const NodeDef& node) {
for (const auto& attr : node.attr()) {
if (!attr.second.placeholder().empty()) return true;
}
return false;
};
return std::any_of(func.node_def().begin(), func.node_def().end(),
is_parametrized);
}
bool IsParametrized(const FunctionDef& func) {
return HasParametrizedType(func) || HasParametrizedBody(func);
}
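// Resolves the data types of all type-parametrized signature arguments from
// the instantiation attributes, e.g. "T" -> DT_FLOAT; each entry of a type
// list attribute "C" is keyed as "C:0", "C:1", ...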
Status InstantiationTypeParameters(
const FunctionDef& func, const AttrSlice& func_instantiation_attr,
absl::flat_hash_map<string, DataType>* type_parameters) {
if (!type_parameters->empty()) {
return absl::InvalidArgumentError(
"Type parameters output map must be empty");
}
const auto resolve_type_attr = [&](const OpDef::ArgDef& arg) -> Status {
if (!arg.type_attr().empty()) {
DataType dtype;
TF_RETURN_IF_ERROR(
GetNodeAttr(func_instantiation_attr, arg.type_attr(), &dtype));
type_parameters->emplace(arg.type_attr(), dtype);
} else if (!arg.type_list_attr().empty()) {
std::vector<DataType> dtypes;
TF_RETURN_IF_ERROR(
GetNodeAttr(func_instantiation_attr, arg.type_list_attr(), &dtypes));
int index = 0;
for (const DataType& dtype : dtypes) {
type_parameters->emplace(absl::StrCat(arg.type_list_attr(), ":", index),
dtype);
++index;
}
}
return absl::OkStatus();
};
for (const auto& input : func.signature().input_arg())
TF_RETURN_IF_ERROR(resolve_type_attr(input));
for (const auto& output : func.signature().output_arg())
TF_RETURN_IF_ERROR(resolve_type_attr(output));
return absl::OkStatus();
}
Status InstantiationBodyParameters(
const FunctionDef& func, const AttrSlice& func_instantiation_attr,
absl::flat_hash_map<string, AttrValue>* body_parameters) {
if (!body_parameters->empty()) {
return absl::InvalidArgumentError(
"Body parameters output map must be empty");
}
for (const NodeDef& func_body_node : func.node_def()) {
for (auto& attr : func_body_node.attr()) {
const string& placeholder = attr.second.placeholder();
if (placeholder.empty() || body_parameters->contains(placeholder)) {
continue;
}
const AttrValue* placeholder_value =
func_instantiation_attr.Find(placeholder);
if (placeholder_value) {
body_parameters->insert({placeholder, *placeholder_value});
} else {
return absl::InvalidArgumentError(
absl::StrCat("Can't resolve placeholder: ", placeholder));
}
}
}
return absl::OkStatus();
}
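// Instantiates `func` with `func_instantiation_attr` via
// FunctionDefToBodyHelper and packages the resulting graph, inputs, outputs
// and (sorted) control outputs into a GrapplerFunctionItem. Only "type"
// attributes are allowed in the function signature.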
Status MakeGrapplerFunctionItem(const FunctionDef& func,
const AttrSlice& func_instantiation_attr,
const FunctionLibraryDefinition& flib,
const int graph_def_version,
GrapplerFunctionItem* item) {
const OpDef& signature = func.signature();
if (signature.name().empty()) {
return absl::InvalidArgumentError("Function name must be specified");
}
for (const OpDef::AttrDef& attr : signature.attr()) {
if (attr.type() != "type") {
return absl::InvalidArgumentError(
"Function signature must have only type attributes");
}
}
std::unique_ptr<FunctionBody> fbody;
TF_RETURN_IF_ERROR(
FunctionDefToBodyHelper(func, func_instantiation_attr, &flib, &fbody));
GraphDef function_body;
fbody->graph->ToGraphDef(&function_body);
*function_body.mutable_library() = flib.ReachableDefinitions(func).ToProto();
VLOG(3) << absl::Substitute(
"Deleted $0 unreachable functions from the Grappler function item "
"instantiation of $1 (library size = $2)",
flib.num_functions() - function_body.library().function_size(),
signature.name(), function_body.library().function_size());
const int num_instantiated_inputs = fbody->arg_types.size();
const int num_instantiated_outputs = fbody->ret_types.size();
std::vector<InputArgInstantiation> inputs;
inputs.reserve(num_instantiated_inputs);
for (int in_id = 0; in_id < num_instantiated_inputs; ++in_id) {
const Node* node = fbody->arg_nodes[in_id];
const DataType& dtype = fbody->arg_types[in_id];
inputs.emplace_back(node->name(), dtype);
}
std::vector<OutputArgInstantiation> outputs;
outputs.reserve(num_instantiated_outputs);
for (int out_id = 0; out_id < num_instantiated_outputs; ++out_id) {
const Node* node = fbody->ret_nodes[out_id];
const DataType& dtype = fbody->ret_types[out_id];
outputs.emplace_back(node->name(), dtype);
}
std::vector<ControlOutput> control_outputs;
control_outputs.reserve(func.control_ret_size());
for (const auto& control_ret : func.control_ret()) {
control_outputs.push_back({control_ret.first, control_ret.second});
}
std::sort(control_outputs.begin(), control_outputs.end());
std::vector<const FunctionDef::ArgAttrs*> arg_attr(inputs.size(), nullptr);
for (const auto& attr : func.arg_attr()) {
if (attr.first >= inputs.size()) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid attribute index, got ", attr.first,
" but expected less than ", inputs.size()));
}
arg_attr.at(attr.first) = &attr.second;
}
*item = GrapplerFunctionItem(
signature.name(),
signature.description(),
AttrSlice(&func.attr()), std::move(arg_attr),
std::move(inputs), std::move(outputs), std::move(control_outputs),
graph_def_version, signature.is_stateful(), std::move(function_body));
return absl::OkStatus();
}
Status MakeGrapplerFunctionItem(const FunctionDef& func,
const FunctionLibraryDefinition& flib,
const int graph_def_version,
GrapplerFunctionItem* item) {
return MakeGrapplerFunctionItem(func, AttrSlice(), flib, graph_def_version,
item);
}
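// Turns the _Arg node at `input_index` into a copy of `input_const` (keeping
// its name) and decrements the "index" attribute of all later _Arg nodes.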
Status ReplaceInputWithConst(const NodeDef& input_const, int input_index,
GrapplerFunctionItem* item) {
if (!IsConstant(input_const)) {
return absl::InvalidArgumentError(absl::StrCat(
"Input node is not a constant: ", SummarizeNodeDef(input_const)));
}
const int item_input_size = item->input_size();
if (input_index < 0 || input_index >= item_input_size) {
return absl::InvalidArgumentError(absl::StrCat(
"Function input index is out of bound: index=", input_index,
" input_size=", item->input_size()));
}
const InputArgInstantiation& input_arg = item->input(input_index);
for (NodeDef& node : *item->graph.mutable_node()) {
if (node.name() == input_arg.node_name) {
node = input_const;
node.set_name(input_arg.node_name);
node.clear_input();
node.clear_device();
}
if (IsArg(node)) {
auto attrs = AttrSlice(node);
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "index", &index));
if (index >= input_index) {
(*node.mutable_attr())["index"].set_i(index - 1);
}
}
}
item->input_args_.erase(item->input_args_.begin() + input_index);
item->arg_attr_.erase(item->arg_attr_.begin() + input_index);
return absl::OkStatus();
}
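// Erases the outputs listed in `remove_outputs`, re-indexes the surviving
// _Retval nodes, and records each (old index, new index) pair in
// `output_mapping`.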
Status RemoveFunctionOutputs(const absl::flat_hash_set<int>& remove_outputs,
GrapplerFunctionItem* item,
std::vector<std::pair<int, int>>* output_mapping) {
DCHECK(output_mapping->empty());
for (int remove_output : remove_outputs) {
const int item_output_size = item->output_size();
if (remove_output < 0 || remove_output >= item_output_size) {
return absl::InvalidArgumentError(absl::StrCat(
"Function output index is out of bound: index=", remove_output,
" output_size=", item->output_size()));
}
}
absl::flat_hash_set<const OutputArgInstantiation*> remove_output_args;
const auto is_remove_output_arg = [&](const OutputArgInstantiation& output) {
return remove_output_args.find(&output) != remove_output_args.end();
};
for (int i = 0, end = item->output_size(); i < end; ++i) {
const OutputArgInstantiation& output = item->output(i);
if (remove_outputs.contains(i)) {
VLOG(3) << "Remove functions output: name=" << output.node_name
<< "(index = " << i << ")";
remove_output_args.insert(&output);
} else if (!remove_output_args.empty()) {
output_mapping->push_back({i, i - remove_output_args.size()});
}
}
for (NodeDef& node : *item->graph.mutable_node()) {
if (IsRetval(node)) {
auto attrs = AttrSlice(node);
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "index", &index));
for (const auto& mapping : *output_mapping) {
const int from = mapping.first;
const int to = mapping.second;
if (index == from) {
(*node.mutable_attr())["index"].set_i(to);
}
}
}
}
auto& o = item->output_args_;
o.erase(std::remove_if(o.begin(), o.end(), is_remove_output_arg), o.end());
return absl::OkStatus();
}
namespace {
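// Maps GraphDef tensor names ("node:i") back to FunctionDef input notation
// ("node:output_name:k", or just the argument name for function inputs) when
// converting a GrapplerFunctionItem into a FunctionDef.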
class MakeFunctionDefHelper {
public:
MakeFunctionDefHelper() = default;
Status Initialize(const GrapplerFunctionItem& item,
const FunctionLibraryDefinition& flib);
Status AsFunctionDefInput(const string& graph_def_input,
string* func_def_input) const;
Status AsFunctionDefNode(NodeDef* function_body_node) const;
bool IsInputNode(const NodeDef& node) const {
return input_nodes_.contains(node.name());
}
bool IsOutputNode(const NodeDef& node) const {
return output_nodes_.contains(node.name());
}
private:
absl::flat_hash_set<absl::string_view> input_nodes_;
absl::flat_hash_set<absl::string_view> output_nodes_;
absl::flat_hash_map<string, tensorflow::NameRangeMap> function_body_outputs_;
};
Status MakeFunctionDefHelper::Initialize(
const GrapplerFunctionItem& item, const FunctionLibraryDefinition& flib) {
for (const InputArgInstantiation& input_arg : item.inputs()) {
input_nodes_.insert(input_arg.node_name);
}
for (const OutputArgInstantiation& output_arg : item.outputs()) {
output_nodes_.insert(output_arg.node_name);
}
for (const NodeDef& node : item.function_body().node()) {
const OpRegistrationData* registration;
    TF_RETURN_IF_ERROR(flib.LookUp(node.op(), &registration));
tensorflow::NameRangeMap outputs_range_map;
TF_RETURN_IF_ERROR(tensorflow::NameRangesForNode(
node, registration->op_def, nullptr, &outputs_range_map));
function_body_outputs_.emplace(node.name(), std::move(outputs_range_map));
}
return absl::OkStatus();
}
Status MakeFunctionDefHelper::AsFunctionDefInput(const string& graph_def_input,
string* func_def_input) const {
if (IsControlInput(graph_def_input)) {
*func_def_input = graph_def_input;
return absl::OkStatus();
}
const SafeTensorId tensor = ParseTensorName(graph_def_input);
DCHECK_GE(tensor.index(), 0);
const auto is_input = input_nodes_.find(tensor.node());
if (is_input != input_nodes_.end()) {
DCHECK_EQ(tensor.index(), 0);
*func_def_input = tensor.node();
return absl::OkStatus();
}
const auto is_body_output = function_body_outputs_.find(tensor.node());
if (is_body_output != function_body_outputs_.end()) {
const tensorflow::NameRangeMap& outputs_range_map = is_body_output->second;
for (const auto& el : outputs_range_map) {
const auto& output_name = el.first;
const auto& output_range = el.second;
if (tensor.index() >= output_range.first &&
tensor.index() < output_range.second) {
*func_def_input = absl::StrCat(tensor.node(), ":", output_name, ":",
tensor.index() - output_range.first);
return absl::OkStatus();
}
}
}
return absl::InvalidArgumentError(
absl::StrCat("Unknown graph def input: ", graph_def_input));
}
Status MakeFunctionDefHelper::AsFunctionDefNode(
NodeDef* function_body_node) const {
string func_def_input;
for (int i = 0; i < function_body_node->input_size(); ++i) {
TF_RETURN_IF_ERROR(
AsFunctionDefInput(function_body_node->input(i), &func_def_input));
function_body_node->set_input(i, func_def_input);
}
return absl::OkStatus();
}
}
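// Converts a (possibly specialized) GrapplerFunctionItem back into a
// FunctionDef: rebuilds the signature from the instantiated inputs/outputs,
// wires each output to the tensor feeding its _Retval node, and copies the
// remaining body nodes with their inputs rewritten to function notation.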
Status MakeFunctionDef(const GrapplerFunctionItem& item,
const FunctionLibraryDefinition& flib,
FunctionDef* func) {
func->mutable_signature()->set_name(item.id);
func->mutable_signature()->set_description(item.description());
func->mutable_signature()->set_is_stateful(item.is_stateful());
MakeFunctionDefHelper helper;
TF_RETURN_IF_ERROR(helper.Initialize(item, flib));
absl::flat_hash_map<absl::string_view, string> output_tensors;
for (const NodeDef& func_body_node : item.function_body().node()) {
if (!helper.IsOutputNode(func_body_node)) continue;
if (func_body_node.input_size() != 1) {
return absl::InternalError(
absl::StrCat("_Retval node must have single input: ",
SummarizeNodeDef(func_body_node)));
}
output_tensors.emplace(func_body_node.name(), func_body_node.input(0));
}
for (const InputArgInstantiation& input_arg : item.inputs()) {
OpDef::ArgDef arg_def;
arg_def.set_name(input_arg.node_name);
arg_def.set_type(input_arg.data_type);
arg_def.set_is_ref(IsRefType(input_arg.data_type));
*func->mutable_signature()->add_input_arg() = arg_def;
}
for (const OutputArgInstantiation& output_arg : item.outputs()) {
const string output_name =
absl::StrReplaceAll(output_arg.node_name, {{"_RetVal", ""}});
OpDef::ArgDef arg_def;
arg_def.set_name(output_name);
arg_def.set_type(output_arg.data_type);
arg_def.set_is_ref(IsRefType(output_arg.data_type));
*func->mutable_signature()->add_output_arg() = arg_def;
auto it = output_tensors.find(output_arg.node_name);
if (it == output_tensors.end()) {
return absl::InternalError(
absl::StrCat("Can't find an output tensor for the output node: ",
output_arg.node_name));
}
TF_RETURN_IF_ERROR(helper.AsFunctionDefInput(
it->second, &(*func->mutable_ret())[output_name]));
}
for (const ControlOutput& control_out : item.control_outputs()) {
func->mutable_control_ret()->insert(
{control_out.output_name, control_out.node_name});
*func->mutable_signature()->add_control_output() = control_out.output_name;
}
for (const auto& attr : item.func_attr()) {
const auto& attr_name = attr.first;
const auto& attr_value = attr.second;
(*func->mutable_attr())[attr_name] = attr_value;
}
for (int i = 0, end = item.arg_attr().size(); i < end; ++i) {
const auto* attr = item.arg_attr().at(i);
if (attr != nullptr) {
(*func->mutable_arg_attr())[i] = *attr;
}
}
for (const NodeDef& func_node : item.function_body().node()) {
if (IsArg(func_node) || IsRetval(func_node) ||
helper.IsInputNode(func_node) || helper.IsOutputNode(func_node))
continue;
NodeDef* func_def_node = func->add_node_def();
*func_def_node = func_node;
TF_RETURN_IF_ERROR(helper.AsFunctionDefNode(func_def_node));
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/utils/functions.h"
#include "absl/container/flat_hash_map.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kDevice[] = "/device:CPU:0";
class FunctionsTest : public ::testing::Test {};
TEST_F(FunctionsTest, IsParametrized) {
FunctionDef parametrized_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"output"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "output:z:0"}});
FunctionDef non_parametrized_func = FunctionDefHelper::Create(
"MyMul", {"x:float", "y:float"}, {"z:float"}, {},
{{{"output"}, "Mul", {"x", "y"}, {{"T", DT_FLOAT}}}},
{{"z", "output:z:0"}});
EXPECT_TRUE(HasParametrizedType(parametrized_func));
EXPECT_TRUE(HasParametrizedBody(parametrized_func));
EXPECT_TRUE(IsParametrized(parametrized_func));
EXPECT_FALSE(HasParametrizedType(non_parametrized_func));
EXPECT_FALSE(HasParametrizedBody(non_parametrized_func));
EXPECT_FALSE(IsParametrized(non_parametrized_func));
}
TEST_F(FunctionsTest, InstantiationParameters) {
FunctionDef func = FunctionDefHelper::Create(
"ParametrizedFunc",
{"input1:A", "input2:B", "input3:float", "input4: C"},
{"output1: A", "output2:D"},
{
"A: {float, double}",
"B: {float, int32}",
"C: list(type)",
"D: {float, double}",
},
{{{"output"}, "FakeOp", {"input1", "input2"}, {{"key", "$key"}}}},
{{"x", "cx:output:0"}, {"y", "cy:output:0"}});
protobuf::Map<string, AttrValue> func_instantiation_attr;
func_instantiation_attr["key"].set_s("key-value");
func_instantiation_attr["A"].set_type(DT_FLOAT);
func_instantiation_attr["B"].set_type(DT_INT32);
func_instantiation_attr["C"].mutable_list()->add_type(DT_FLOAT);
func_instantiation_attr["C"].mutable_list()->add_type(DT_INT32);
func_instantiation_attr["D"].set_type(DT_DOUBLE);
absl::flat_hash_map<string, DataType> type_parameters;
TF_EXPECT_OK(InstantiationTypeParameters(
func, AttrSlice(&func_instantiation_attr), &type_parameters));
ASSERT_EQ(5, type_parameters.size());
EXPECT_EQ(DT_FLOAT, type_parameters["A"]);
EXPECT_EQ(DT_INT32, type_parameters["B"]);
EXPECT_EQ(DT_FLOAT, type_parameters["C:0"]);
EXPECT_EQ(DT_INT32, type_parameters["C:1"]);
EXPECT_EQ(DT_DOUBLE, type_parameters["D"]);
absl::flat_hash_map<string, AttrValue> body_parameters;
TF_EXPECT_OK(InstantiationBodyParameters(
func, AttrSlice(&func_instantiation_attr), &body_parameters));
ASSERT_EQ(1, body_parameters.size());
EXPECT_EQ("key-value", body_parameters["key"].s());
}
TEST_F(FunctionsTest, FromSimpleFunctionDef) {
const Tensor kTwo = test::AsScalar<int64_t>(2);
FunctionDef func = FunctionDefHelper::Define(
"XTimesTwo",
{"x: T"},
{"y: T"},
{"T: {float, double, int32, int64}"},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", "$T"}}},
});
protobuf::Map<string, AttrValue> func_instantiation_attr;
func_instantiation_attr["T"].set_type(DT_FLOAT);
FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
GrapplerFunctionItem item;
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
EXPECT_EQ("XTimesTwo", item.id);
EXPECT_EQ(5, item.function_body().node_size());
EXPECT_EQ(1, item.input_size());
EXPECT_EQ("x", item.input(0).node_name);
EXPECT_EQ(1, item.output_size());
EXPECT_EQ("y_RetVal", item.output(0).node_name);
int count = 0;
for (const NodeDef &node : item.function_body().node()) {
if (node.name() == "x" && ++count) {
EXPECT_EQ("_Arg", node.op());
EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
EXPECT_EQ(0, node.attr().at("index").i());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "two" && ++count) {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "scale" && ++count) {
EXPECT_EQ("Cast", node.op());
EXPECT_EQ(DT_FLOAT, node.attr().at("DstT").type());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("two", node.input(0));
} else if (node.name() == "y" && ++count) {
EXPECT_EQ("Mul", node.op());
EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("scale", node.input(1));
} else if (node.name() == "y_RetVal" && ++count) {
EXPECT_EQ("_Retval", node.op());
ASSERT_EQ(1, node.input_size());
EXPECT_EQ("y", node.input(0));
EXPECT_EQ(0, node.attr().at("index").i());
}
}
EXPECT_EQ(5, count);
}
TEST_F(FunctionsTest, FromFunctionDefWithMultiOutputNodes) {
std::vector<FunctionDefHelper::Node> nodes = {
{{"sx"}, "Shape", {"x"}},
{{"sy"}, "Shape", {"y"}},
{{"gx"}, "Identity", {"dz"}},
{{"gy"}, "Neg", {"dz"}},
{{"rx", "ry"}, "BroadcastGradientArgs", {"sx", "sy"}},
{{"sum_gx"}, "Sum", {"gx", "rx"}},
{{"dx"}, "Reshape", {"sum_gx", "sx"}},
{{"sum_gy"}, "Sum", {"gy", "ry"}},
{{"dy"}, "Reshape", {"sum_gy", "sy"}},
};
for (auto &n : nodes) {
if (n.attr.empty() && n.op != "BroadcastGradientArgs") {
n.attr = {{"T", "$T"}};
}
}
FunctionDef func = FunctionDefHelper::Define(
"SubGrad",
{"x: T", "y: T", "dz: T"},
{"dx: T", "dy: T"},
{{"T: {half, float, double}"}},
nodes);
protobuf::Map<string, AttrValue> func_instantiation_attr;
func_instantiation_attr["T"].set_type(DT_FLOAT);
FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
GrapplerFunctionItem item;
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
EXPECT_EQ("SubGrad", item.id);
EXPECT_EQ(14, item.function_body().node_size());
ASSERT_EQ(3, item.input_size());
EXPECT_EQ("x", item.input(0).node_name);
EXPECT_EQ("y", item.input(1).node_name);
EXPECT_EQ("dz", item.input(2).node_name);
ASSERT_EQ(2, item.output_size());
EXPECT_EQ("dx_RetVal", item.output(0).node_name);
EXPECT_EQ("dy_RetVal", item.output(1).node_name);
int count = 0;
for (const NodeDef &node : item.function_body().node()) {
if (node.name() == "x" || node.name() == "y" || node.name() == "dz") {
count++;
EXPECT_EQ("_Arg", node.op());
EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
int expected_index = node.name() == "x" ? 0 : node.name() == "y" ? 1 : 2;
EXPECT_EQ(expected_index, node.attr().at("index").i());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "rx" && ++count) {
EXPECT_EQ("BroadcastGradientArgs", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("sx", node.input(0));
EXPECT_EQ("sy", node.input(1));
} else if (node.name() == "sum_gx" && ++count) {
EXPECT_EQ("Sum", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("gx", node.input(0));
EXPECT_EQ("rx", node.input(1));
} else if (node.name() == "sum_gy" && ++count) {
EXPECT_EQ("Sum", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("gy", node.input(0));
EXPECT_EQ("rx:1", node.input(1));
} else if (node.name() == "dx_RetVal" && ++count) {
EXPECT_EQ("_Retval", node.op());
EXPECT_EQ(0, node.attr().at("index").i());
ASSERT_EQ(1, node.input_size());
EXPECT_EQ("dx", node.input(0));
} else if (node.name() == "dy_RetVal" && ++count) {
EXPECT_EQ("_Retval", node.op());
EXPECT_EQ(1, node.attr().at("index").i());
ASSERT_EQ(1, node.input_size());
EXPECT_EQ("dy", node.input(0));
}
}
EXPECT_EQ(8, count);
}
TEST_F(FunctionsTest, FromFunctionDefWithNestedFuncs) {
FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
TF_ASSERT_OK(flib.AddFunctionDef(FunctionDefHelper::Define(
"Swap",
{"i0: T", "i1: T"},
{"o0: T", "o1: T"},
{"T: {float, double}"},
{{{"o0"}, "Identity", {"i1"}, {{"T", "$T"}}},
{{"o1"}, "Identity", {"i0"}, {{"T", "$T"}}}})));
FunctionDef func = FunctionDefHelper::Create(
"ManySwapsFirst",
{"x: float", "y: float"},
{"o: float"},
{},
{{{"a0"}, "Swap", {"x", "y"}, {{"T", DT_FLOAT}}, {"x2"}},
{{"a1"}, "Swap", {"a0:o0:0", "a0:o1:0"}, {{"T", DT_FLOAT}}},
{{"x2"}, "Mul", {"x", "x"}, {{"T", DT_FLOAT}}},
{{"y2"}, "Mul", {"y", "y"}, {{"T", DT_FLOAT}}, {"a1"}},
{{"o"}, "Add", {"x2:z:0", "y2:z:0"}, {{"T", DT_FLOAT}}}},
{{"o", "o:z:0"}});
protobuf::Map<string, AttrValue> func_instantiation_attr;
func_instantiation_attr["T"].set_type(DT_FLOAT);
GrapplerFunctionItem item;
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
int count = 0;
for (const NodeDef &node : item.function_body().node()) {
if (node.name() == "x" || node.name() == "y") {
count++;
EXPECT_EQ("_Arg", node.op());
EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
int expected_index = node.name() == "x" ? 0 : 1;
EXPECT_EQ(expected_index, node.attr().at("index").i());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "a0" && ++count) {
EXPECT_EQ("Swap", node.op());
EXPECT_EQ(3, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("y", node.input(1));
EXPECT_EQ("^x2", node.input(2));
} else if (node.name() == "a1" && ++count) {
EXPECT_EQ("Swap", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("a0", node.input(0));
EXPECT_EQ("a0:1", node.input(1));
} else if (node.name() == "x2" && ++count) {
EXPECT_EQ("Mul", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("x", node.input(1));
} else if (node.name() == "y2" && ++count) {
EXPECT_EQ("Mul", node.op());
EXPECT_EQ(3, node.input_size());
EXPECT_EQ("y", node.input(0));
EXPECT_EQ("y", node.input(1));
EXPECT_EQ("^a1", node.input(2));
} else if (node.name() == "o" && ++count) {
EXPECT_EQ("Add", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x2", node.input(0));
EXPECT_EQ("y2", node.input(1));
}
}
EXPECT_EQ(7, count);
}
TEST_F(FunctionsTest, FromFunctionDefWithOutputMappings) {
FunctionDef func = FunctionDefHelper::Create(
"Exp_func",
{"in: float"},
{"out: float"},
{},
{{{"Linear_func"}, "Identity", {"in"}, {{"T", DT_FLOAT}}},
{{"Exp"}, "Exp", {"Linear_func:output:0"}, {{"T", DT_FLOAT}}}},
{{"out", "Exp:y:0"}});
protobuf::Map<string, AttrValue> func_instantiation_attr;
FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
GrapplerFunctionItem item;
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
EXPECT_EQ(1, item.output_size());
EXPECT_EQ("out_RetVal", item.output(0).node_name);
int count = 0;
for (const NodeDef &node : item.function_body().node()) {
if (node.name() == "in" && ++count) {
EXPECT_EQ("_Arg", node.op());
EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
EXPECT_EQ(0, node.attr().at("index").i());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "Linear_func" && ++count) {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("in", node.input(0));
} else if (node.name() == "Exp" && ++count) {
EXPECT_EQ("Exp", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("Linear_func", node.input(0));
} else if (node.name() == "out_RetVal" && ++count) {
EXPECT_EQ("_Retval", node.op());
EXPECT_EQ(0, node.attr().at("index").i());
ASSERT_EQ(1, node.input_size());
EXPECT_EQ("Exp", node.input(0));
}
}
EXPECT_EQ(4, count);
}
TEST_F(FunctionsTest, FromFunctionDefWithoutInput) {
const Tensor kTwo = test::AsScalar<int64_t>(2);
FunctionDef func = FunctionDefHelper::Define(
"GenerateTwo",
{},
{"o: T"},
{"T: {float, double}"},
{{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"o"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}}});
protobuf::Map<string, AttrValue> func_instantiation_attr;
func_instantiation_attr["T"].set_type(DT_FLOAT);
FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
GrapplerFunctionItem item;
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
EXPECT_EQ(0, item.input_size());
EXPECT_EQ(1, item.output_size());
EXPECT_EQ("o_RetVal", item.output(0).node_name);
EXPECT_EQ(3, item.function_body().node_size());
const NodeDef &two = item.function_body().node(0);
EXPECT_EQ("two", two.name());
EXPECT_EQ(0, two.input_size());
const NodeDef &cast = item.function_body().node(1);
EXPECT_EQ("o", cast.name());
EXPECT_EQ(1, cast.input_size());
EXPECT_EQ("two", cast.input(0));
const NodeDef &retval = item.function_body().node(2);
EXPECT_EQ("o_RetVal", retval.name());
EXPECT_EQ(1, retval.input_size());
EXPECT_EQ("o", retval.input(0));
}
TEST_F(FunctionsTest, FromFunctionDefWithSideEffectfulOps) {
const Tensor kOne = test::AsScalar<float>(1.0);
FunctionDef func = FunctionDefHelper::Define(
"SideEffects",
{"x: Ref(float)"},
{},
{},
{{{"one"}, "Const", {}, {{"value", kOne}, {"dtype", DT_FLOAT}}},
{{"update"}, "AssignAdd", {"x", "one"}, {{"T", DT_FLOAT}}}});
protobuf::Map<string, AttrValue> func_instantiation_attr;
FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
GrapplerFunctionItem item;
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
EXPECT_EQ("SideEffects", item.id);
EXPECT_EQ(3, item.function_body().node_size());
EXPECT_EQ(1, item.input_size());
EXPECT_EQ(0, item.output_size());
const auto &opts = item.optimization_options();
EXPECT_FALSE(opts.allow_pruning_stateful_and_dataset_ops);
}
TEST_F(FunctionsTest, FromFunctionDefWithControlOutputs) {
const Tensor kOne = test::AsScalar<float>(1.0);
FunctionDef func = FunctionDefHelper::Create(
"WithControlOutputs", {"x: Ref(float)"}, {}, {},
{
{{"one"}, "Const", {}, {{"value", kOne}, {"dtype", DT_FLOAT}}},
{{"update"}, "AssignAdd", {"x", "one:output:0"}, {{"T", DT_FLOAT}}},
},
{}, {{"side_effects", "update"}});
protobuf::Map<string, AttrValue> func_instantiation_attr;
FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
GrapplerFunctionItem item;
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
EXPECT_EQ("WithControlOutputs", item.id);
EXPECT_EQ(3, item.function_body().node_size());
EXPECT_EQ(1, item.input_size());
EXPECT_EQ(0, item.output_size());
ASSERT_EQ(1, item.keep_ops.size());
EXPECT_EQ("update", item.keep_ops[0]);
ASSERT_EQ(1, item.control_output_size());
const ControlOutput &ctrl = item.control_outputs()[0];
EXPECT_EQ("side_effects", ctrl.output_name);
EXPECT_EQ("update", ctrl.node_name);
}
TEST_F(FunctionsTest, MakeFunctionDef) {
const Tensor kTwo = test::AsScalar<int64_t>(2);
FunctionDef func = FunctionDefHelper::Define(
"XTimesTwo",
{"x: T"},
{"y: T"},
{"T: {float, double, int32, int64}"},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", "$T"}}},
});
const uint32 arg_index = 0;
const std::pair<string, string> arg_attr_key_and_value = {"_arg_attr", "abc"};
FunctionDef::ArgAttrs arg_attr;
(*arg_attr.mutable_attr())[arg_attr_key_and_value.first].set_s(
arg_attr_key_and_value.second);
(*func.mutable_arg_attr())[arg_index] = arg_attr;
protobuf::Map<string, AttrValue> func_instantiation_attr;
func_instantiation_attr["T"].set_type(DT_FLOAT);
FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
GrapplerFunctionItem item;
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
FunctionDef specialized;
TF_EXPECT_OK(MakeFunctionDef(item, flib, &specialized));
EXPECT_EQ("x", specialized.signature().input_arg(0).name());
EXPECT_EQ(DT_FLOAT, specialized.signature().input_arg(0).type());
EXPECT_EQ("y", specialized.signature().output_arg(0).name());
EXPECT_EQ(DT_FLOAT, specialized.signature().output_arg(0).type());
EXPECT_EQ(specialized.arg_attr().size(), 1);
EXPECT_EQ(specialized.arg_attr().at(arg_index).attr().size(), 1);
EXPECT_EQ(specialized.arg_attr()
.at(arg_index)
.attr()
.at(arg_attr_key_and_value.first)
.s(),
arg_attr_key_and_value.second);
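  // The `name-match && ++count` pattern below bumps the counter only when the
  // node name matches, so the final EXPECT_EQ verifies that every expected
  // node was actually visited.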
int count = 0;
for (const NodeDef &node : specialized.node_def()) {
if (node.name() == "scale" && ++count) {
EXPECT_EQ(DT_FLOAT, node.attr().at("DstT").type());
} else if (node.name() == "y" && ++count) {
EXPECT_EQ("Mul", node.op());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("scale:y:0", node.input(1));
EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
}
}
EXPECT_EQ(2, count);
}
TEST_F(FunctionsTest, ReplaceInputWithConst) {
FunctionDef func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"output"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "output:z:0"}});
protobuf::Map<string, AttrValue> func_instantiation_attr;
func_instantiation_attr["T"].set_type(DT_FLOAT);
FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
GrapplerFunctionItem item;
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
EXPECT_EQ(2, item.input_size());
EXPECT_EQ(1, item.output_size());
ASSERT_EQ(4, item.function_body().node_size());
const NodeDef &input_x = item.function_body().node(0);
const NodeDef &input_y = item.function_body().node(1);
EXPECT_EQ("_Arg", input_x.op());
EXPECT_EQ("_Arg", input_y.op());
NodeDef const_input_x;
const_input_x.set_op("Const");
AddNodeAttr("Tag", "const_input_x", &const_input_x);
NodeDef const_input_y;
const_input_y.set_op("Const");
AddNodeAttr("Tag", "const_input_y", &const_input_y);
TF_EXPECT_OK(ReplaceInputWithConst(const_input_x, 0, &item));
EXPECT_EQ(1, item.input_size());
EXPECT_EQ("Const", input_x.op());
EXPECT_EQ("const_input_x", input_x.attr().at("Tag").s());
TF_EXPECT_OK(ReplaceInputWithConst(const_input_y, 0, &item));
EXPECT_EQ(0, item.input_size());
EXPECT_EQ("Const", input_y.op());
EXPECT_EQ("const_input_y", input_y.attr().at("Tag").s());
FunctionDef specialized;
TF_EXPECT_OK(MakeFunctionDef(item, flib, &specialized));
EXPECT_EQ(0, specialized.signature().input_arg_size());
EXPECT_EQ(1, specialized.signature().output_arg_size());
EXPECT_EQ(3, specialized.node_def_size());
int count = 0;
for (const NodeDef &node : specialized.node_def()) {
if (node.name() == "x" && ++count) {
EXPECT_EQ("Const", node.op());
EXPECT_EQ("const_input_x", node.attr().at("Tag").s());
} else if (node.name() == "y" && ++count) {
EXPECT_EQ("Const", node.op());
EXPECT_EQ("const_input_y", node.attr().at("Tag").s());
} else if (node.name() == "output" && ++count) {
EXPECT_EQ("Mul", node.op());
EXPECT_EQ("x:output:0", node.input(0));
EXPECT_EQ("y:output:0", node.input(1));
}
}
EXPECT_EQ(3, count);
}
TEST_F(FunctionsTest, SwapFunctionBodyAndMakeFunctionDef) {
using ::tensorflow::test::function::NDef;
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"output"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "output:z:0"}});
FunctionDef func = FunctionDefHelper::Create(
"MySquare", {"x:T"}, {"z:T"}, {"T: {float, double}"},
{{{"output"}, "MyMul", {"x", "x"}, {{"T", "$T"}}}},
{{"z", "output:z:0"}});
GraphDef id_func_body = test::function::GDef(
{
NDef("read_x", "Identity", {"x"}, {{"T", "float"}}),
NDef("z_RetVal", "_Retval", {"read_x"}, {{"T", "float"}})});
protobuf::Map<string, AttrValue> func_instantiation_attr;
func_instantiation_attr["T"].set_type(DT_FLOAT);
FunctionDefLibrary lib_def;
*lib_def.add_function() = func;
*lib_def.add_function() = mul_func;
FunctionLibraryDefinition flib(OpRegistry::Global(), lib_def);
GrapplerFunctionItem item;
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
item.SwapFunctionBody(std::move(id_func_body));
FunctionDef specialized;
TF_EXPECT_OK(MakeFunctionDef(item, flib, &specialized));
int count = 0;
for (const NodeDef &node : specialized.node_def()) {
if (node.name() == "read_x" && ++count) {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ("x", node.input(0));
}
}
EXPECT_EQ(1, count);
EXPECT_EQ("read_x:output:0", (*specialized.mutable_ret())["z"]);
}
TEST_F(FunctionsTest, FunctionDefGrapplerFunctionItemRoundTrip) {
FunctionDef func = FunctionDefHelper::Create(
"DoNothing", {"i: int32"}, {"o: int32"},
{},
{
{{"id"}, "Identity", {"i"}, {{"T", DT_INT32}}},
},
{{"o", "id:output:0"}},
{{"must_execute", "id"}});
constexpr char description[] = "This is a helpful description.";
func.mutable_signature()->set_description(description);
FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
GrapplerFunctionItem item;
protobuf::Map<string, AttrValue> func_instantiation_attr;
func_instantiation_attr["T"].set_type(DT_INT32);
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
FunctionDef func2;
TF_EXPECT_OK(MakeFunctionDef(item, flib, &func2));
EXPECT_TRUE(FunctionDefsEqual(func, func2));
}
}
}
} |
1,354 | cpp | tensorflow/tensorflow | frame | tensorflow/core/grappler/utils/frame.cc | tensorflow/core/grappler/utils/frame_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_UTILS_FRAME_H_
#define TENSORFLOW_CORE_GRAPPLER_UTILS_FRAME_H_
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace grappler {
class FrameView {
public:
FrameView() : is_inferred_(false), num_frames_(0) {}
Status InferFromGraphView(const utils::GraphView& graph_view);
Status InferFromGraphView(const utils::MutableGraphView& graph_view);
Status InferFromGraph(const GraphDef& graph);
const std::vector<int>& Frames(const NodeDef& node) const;
bool IsInFrame(const NodeDef& node) const;
int num_frames() const { return num_frames_; }
bool is_inferred() const { return is_inferred_; }
private:
template <typename GraphViewT>
inline Status InferFromGraphViewT(const GraphViewT& graph_view);
bool is_inferred_;
int num_frames_;
absl::flat_hash_map<const NodeDef*, std::vector<int>> node_to_frames_;
const std::vector<int> node_has_no_frames_;
};
}
}
#endif
#include "tensorflow/core/grappler/utils/frame.h"
#include <deque>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace grappler {
namespace {}
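// Infers loop-frame membership with a breadth-first walk over the graph:
// fanin-free nodes start with an empty frame stack, an Enter fanout pushes
// the frame id derived from its "frame_name" attribute, an Exit node pops
// one frame on the way out, and already-visited fanouts are re-checked for a
// consistent frame stack.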
template <typename GraphViewT>
inline Status FrameView::InferFromGraphViewT(const GraphViewT& graph_view) {
if (is_inferred_) {
return errors::Internal("FrameView was already inferred from the graph");
}
is_inferred_ = true;
std::deque<int> ready_node_indices;
for (const auto& node : graph_view.GetNodes()) {
if (node.NumRegularFanins() + node.NumControllingFanins() == 0) {
ready_node_indices.push_back(node.node_index());
node_to_frames_[node.node()] = node_has_no_frames_;
}
}
const auto* graph = graph_view.graph();
absl::flat_hash_map<string, int> frame_name_to_id;
auto process_fanout = [this, graph](
absl::flat_hash_map<string, int>* frame_name_to_id,
std::deque<int>* ready_node_indices,
const NodeDef* ready_node, int fanout_node_index) {
const NodeDef* fanout_node = &graph->node(fanout_node_index);
if (!node_to_frames_.contains(fanout_node)) {
std::vector<int> frame_ids = node_to_frames_[ready_node];
if (IsExit(*ready_node)) {
frame_ids.pop_back();
}
if (IsEnter(*fanout_node)) {
const AttrValue* frame_name_attr =
AttrSlice(*fanout_node).Find("frame_name");
if (!frame_name_attr) {
return errors::InvalidArgument(
"Missing frame name for the Enter node: ",
SummarizeNodeDef(*fanout_node));
}
const string& frame_name = frame_name_attr->s();
int frame_id;
if (frame_name_to_id->contains(frame_name)) {
frame_id = (*frame_name_to_id)[frame_name];
} else {
frame_id = static_cast<int>(frame_name_to_id->size());
(*frame_name_to_id)[frame_name] = frame_id;
}
frame_ids.push_back(frame_id);
}
ready_node_indices->push_back(fanout_node_index);
node_to_frames_[fanout_node] = std::move(frame_ids);
} else {
std::vector<int> frame_ids_fanout = node_to_frames_[fanout_node];
std::vector<int> frame_ids_node = node_to_frames_[ready_node];
if (IsEnter(*fanout_node)) {
frame_ids_fanout.pop_back();
}
if (IsExit(*ready_node)) {
frame_ids_node.pop_back();
}
if (frame_ids_node != frame_ids_fanout) {
return errors::InvalidArgument(
"Invalid graph: Frame ids for node ", ready_node->name(),
" does not match frame ids for it's fanout ", fanout_node->name());
}
}
return absl::OkStatus();
};
while (!ready_node_indices.empty()) {
const int ready_node_index = ready_node_indices.front();
ready_node_indices.pop_front();
const auto* ready_node_view = graph_view.GetNode(ready_node_index);
const NodeDef* ready_node_def = ready_node_view->node();
for (const auto& regular_fanouts_port_i :
ready_node_view->GetRegularFanouts()) {
for (const auto& regular_fanout : regular_fanouts_port_i) {
TF_RETURN_IF_ERROR(process_fanout(&frame_name_to_id,
&ready_node_indices, ready_node_def,
regular_fanout.node_index()));
}
}
for (const auto& controlled_fanout :
ready_node_view->GetControlledFanouts()) {
TF_RETURN_IF_ERROR(process_fanout(&frame_name_to_id, &ready_node_indices,
ready_node_def,
controlled_fanout.node_index()));
}
}
num_frames_ = static_cast<int>(frame_name_to_id.size());
return absl::OkStatus();
}
Status FrameView::InferFromGraphView(const utils::GraphView& graph_view) {
return InferFromGraphViewT(graph_view);
}
Status FrameView::InferFromGraphView(
const utils::MutableGraphView& graph_view) {
return InferFromGraphViewT(graph_view);
}
Status FrameView::InferFromGraph(const GraphDef& graph) {
Status status;
utils::GraphView graph_view(&graph, &status);
TF_RETURN_IF_ERROR(status);
return InferFromGraphViewT(graph_view);
}
const std::vector<int>& FrameView::Frames(const NodeDef& node) const {
DCHECK(is_inferred_) << "FrameView is not initialized";
auto frames = node_to_frames_.find(&node);
if (frames == node_to_frames_.end()) {
LOG(WARNING) << "Node '" << node.name()
<< "' doesn't belong to the graph used for initialization";
return node_has_no_frames_;
} else {
return frames->second;
}
}
bool FrameView::IsInFrame(const NodeDef& node) const {
return !Frames(node).empty();
}
}
} | #include "tensorflow/core/grappler/utils/frame.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using GraphTypes =
::testing::Types<GraphDef, utils::GraphView, utils::MutableGraphView>;
template <typename T>
class FrameViewTest : public ::testing::Test {
protected:
NodeDef CreateNode(const string& name, const std::vector<string>& inputs) {
return CreateNode(name, "", "", inputs);
}
NodeDef CreateNode(const string& name, const string& op,
const std::vector<string>& inputs) {
return CreateNode(name, op, "", inputs);
}
NodeDef CreateNode(const string& name, const string& op, const string& frame,
const std::vector<string>& inputs) {
NodeDef node;
node.set_name(name);
if (!op.empty()) {
node.set_op(op);
}
if (!frame.empty()) {
AttrValue frame_name;
frame_name.set_s(frame);
node.mutable_attr()->insert({"frame_name", frame_name});
}
for (const string& input : inputs) {
node.add_input(input);
}
return node;
}
};
TYPED_TEST_SUITE(FrameViewTest, GraphTypes);
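// Runs frame inference through whichever interface TypeParam selects: a
// GraphView/MutableGraphView constructed here, or (via the full
// specialization below) the GraphDef overload directly.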
template <typename T>
void InferFromGraph(FrameView* frame_view, GraphDef* graph, bool valid) {
Status status;
T graph_view(graph, &status);
TF_ASSERT_OK(status);
status = frame_view->InferFromGraphView(graph_view);
if (valid) {
TF_ASSERT_OK(status);
} else {
ASSERT_FALSE(status.ok());
}
}
template <>
void InferFromGraph<GraphDef>(FrameView* frame_view, GraphDef* graph,
bool valid) {
Status status = frame_view->InferFromGraph(*graph);
if (valid) {
TF_ASSERT_OK(status);
} else {
ASSERT_FALSE(status.ok());
}
}
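// Two nested while-loop contexts: per the `expected` map, nodes between the
// outer Enter/Exit carry frame {0}, nodes inside the inner loop carry
// {0, 1}, and nodes outside both loops carry no frames at all.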
TYPED_TEST(FrameViewTest, NestedLoop) {
GraphDef graph;
*graph.add_node() = this->CreateNode("0", {});
*graph.add_node() = this->CreateNode("1", "Enter", "while/context1", {"0"});
*graph.add_node() = this->CreateNode("2", {"1"});
*graph.add_node() = this->CreateNode("3", "Merge", {"2", "14"});
*graph.add_node() = this->CreateNode("4", {"3"});
*graph.add_node() = this->CreateNode("5", "Switch", {"4"});
*graph.add_node() = this->CreateNode("6", {"5"});
*graph.add_node() = this->CreateNode("7", "Enter", "while/context2", {"6"});
*graph.add_node() = this->CreateNode("8", {"7"});
*graph.add_node() = this->CreateNode("9", "Merge", {"8", "12"});
*graph.add_node() = this->CreateNode("10", {"9"});
*graph.add_node() = this->CreateNode("11", "Switch", {"10"});
*graph.add_node() = this->CreateNode("12", "NextIteration", {"11"});
*graph.add_node() = this->CreateNode("13", "Exit", {"11"});
*graph.add_node() = this->CreateNode("14", "NextIteration", {"13"});
*graph.add_node() = this->CreateNode("15", {"5"});
*graph.add_node() = this->CreateNode("16", "Exit", {"15"});
*graph.add_node() = this->CreateNode("17", {"16"});
FrameView frame_view;
InferFromGraph<TypeParam>(&frame_view, &graph, true);
std::unordered_map<string, std::vector<int>> expected = {
{"0", {}}, {"1", {0}}, {"2", {0}}, {"3", {0}},
{"4", {0}}, {"5", {0}}, {"6", {0}}, {"7", {0, 1}},
{"8", {0, 1}}, {"9", {0, 1}}, {"10", {0, 1}}, {"11", {0, 1}},
{"12", {0, 1}}, {"13", {0, 1}}, {"14", {0}}, {"15", {0}},
{"16", {0}}, {"17", {}}};
EXPECT_EQ(frame_view.num_frames(), 2);
for (const NodeDef& node : graph.node()) {
std::vector<int> expected_frames = expected[node.name()];
std::vector<int> node_frames = frame_view.Frames(node);
EXPECT_EQ(expected_frames, node_frames);
}
}
TYPED_TEST(FrameViewTest, MultipleInputsToEnter) {
GraphDef graph;
*graph.add_node() = this->CreateNode("0", {});
*graph.add_node() = this->CreateNode("1", {});
*graph.add_node() =
this->CreateNode("2", "Enter", "while/context", {"0", "1"});
*graph.add_node() = this->CreateNode("3", "Exit", {"2"});
FrameView frame_view;
InferFromGraph<TypeParam>(&frame_view, &graph, true);
std::unordered_map<string, std::vector<int>> expected = {
{"0", {}}, {"1", {}}, {"2", {0}}, {"3", {0}}};
EXPECT_EQ(frame_view.num_frames(), 1);
for (const NodeDef& node : graph.node()) {
std::vector<int> expected_frames = expected[node.name()];
std::vector<int> node_frames = frame_view.Frames(node);
EXPECT_EQ(expected_frames, node_frames);
}
}
TYPED_TEST(FrameViewTest, ExitOutput) {
GraphDef graph;
*graph.add_node() = this->CreateNode("0", {});
*graph.add_node() = this->CreateNode("1", "Enter", "while/context", {"0"});
*graph.add_node() = this->CreateNode("2", "Exit", {"1"});
*graph.add_node() = this->CreateNode("3", {});
*graph.add_node() = this->CreateNode("4", {"2", "3"});
FrameView frame_view;
InferFromGraph<TypeParam>(&frame_view, &graph, true);
std::unordered_map<string, std::vector<int>> expected = {
{"0", {}}, {"1", {0}}, {"2", {0}}, {"3", {}}, {"4", {}}};
EXPECT_EQ(frame_view.num_frames(), 1);
for (const NodeDef& node : graph.node()) {
std::vector<int> expected_frames = expected[node.name()];
std::vector<int> node_frames = frame_view.Frames(node);
EXPECT_EQ(expected_frames, node_frames);
}
}
TYPED_TEST(FrameViewTest, MultipleEnterNodes) {
GraphDef graph;
*graph.add_node() = this->CreateNode("0", {});
*graph.add_node() = this->CreateNode("1", "Enter", "while/context", {"0"});
*graph.add_node() = this->CreateNode("2", {"1"});
*graph.add_node() = this->CreateNode("5", {});
*graph.add_node() = this->CreateNode("4", "Enter", "while/context", {"5"});
*graph.add_node() = this->CreateNode("3", {"4", "2"});
*graph.add_node() = this->CreateNode("6", "Merge", {"3", "8"});
*graph.add_node() = this->CreateNode("7", "Switch", {"6"});
*graph.add_node() = this->CreateNode("8", "NextIteration", {"7"});
*graph.add_node() = this->CreateNode("9", "Exit", {"7"});
FrameView frame_view;
InferFromGraph<TypeParam>(&frame_view, &graph, true);
std::unordered_map<string, std::vector<int>> expected = {
{"0", {}}, {"1", {0}}, {"2", {0}}, {"3", {0}}, {"4", {0}},
{"5", {}}, {"6", {0}}, {"7", {0}}, {"8", {0}}, {"9", {0}}};
EXPECT_EQ(frame_view.num_frames(), 1);
for (const NodeDef& node : graph.node()) {
std::vector<int> expected_frames = expected[node.name()];
std::vector<int> node_frames = frame_view.Frames(node);
EXPECT_EQ(expected_frames, node_frames);
}
}
TYPED_TEST(FrameViewTest, ConflictingFrames) {
GraphDef graph;
*graph.add_node() = this->CreateNode("0", {});
*graph.add_node() = this->CreateNode("1", "Enter", "while/context1", {"0"});
*graph.add_node() = this->CreateNode("2", "Enter", "while/context2", {"1"});
*graph.add_node() = this->CreateNode("3", {"1", "2"});
FrameView frame_view;
InferFromGraph<TypeParam>(&frame_view, &graph, false);
}
}
}
} |
1,355 | cpp | tensorflow/tensorflow | colocation | tensorflow/core/grappler/utils/colocation.cc | tensorflow/core/grappler/utils/colocation_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_UTILS_COLOCATION_H_
#define TENSORFLOW_CORE_GRAPPLER_UTILS_COLOCATION_H_
#include <unordered_map>
#include "tensorflow/core/framework/graph.pb.h"
namespace tensorflow {
namespace grappler {
void ReassignColocation(GraphDef* graph);
}
}
#endif
#include "tensorflow/core/grappler/utils/colocation.h"
#include <cstring>
#include <list>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
namespace {
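// Union-find "find" with path compression: follows parent pointers in `map`
// until it reaches a self-mapped root, then repoints the intermediate nodes
// (all but the last, which already points at the root) directly at the root.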
string GetColocationGroupRoot(std::unordered_map<string, string>* map,
const string& node_name) {
if (map->find(node_name) == map->end()) {
map->insert({node_name, node_name});
return node_name;
}
std::list<string> nodes_to_root;
string cur = node_name;
while ((*map)[cur] != cur) {
nodes_to_root.push_back(cur);
cur = (*map)[cur];
}
if (!nodes_to_root.empty()) {
nodes_to_root.pop_back();
for (const string& node : nodes_to_root) {
(*map)[node] = cur;
}
}
return cur;
}
void MergeColocationGroup(std::unordered_map<string, string>* map,
const string& left, const string& right) {
if (map->find(left) == map->end() || map->find(right) == map->end()) {
return;
}
if (left != right) {
map->at(right) = left;
}
}
}
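// Rewrites "_class" colocation attributes so that every member of a
// colocation group names a single representative root. The first pass unions
// groups from the "loc:@<node>" strings; the second pass rewrites non-root
// members to "loc:@<root>" and erases the attribute from the root itself
// (in the accompanying unit tests, the chain through A..E collapses so A-D
// all read "loc:@E" and E loses its "_class" attribute).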
void ReassignColocation(GraphDef* graph) {
constexpr char kClassAttr[] = "_class";
constexpr char kColocPrefix[] = "loc:@";
std::unordered_map<string, string> coloc_groups;
NodeMap node_map(graph);
for (const auto& node : graph->node()) {
auto iter = node.attr().find(kClassAttr);
if (iter != node.attr().end() && iter->second.has_list()) {
for (const auto& str : iter->second.list().s()) {
size_t pos = str.find(kColocPrefix);
if (pos == 0) {
string colocate_node = str.substr(pos + strlen(kColocPrefix));
MergeColocationGroup(
&coloc_groups, GetColocationGroupRoot(&coloc_groups, node.name()),
GetColocationGroupRoot(&coloc_groups, colocate_node));
}
}
}
}
for (const auto& pair : coloc_groups) {
if (pair.first != pair.second) {
NodeDef* node = node_map.GetNode(pair.first);
if (node) {
AttrValue new_value;
new_value.mutable_list()->add_s(
kColocPrefix + GetColocationGroupRoot(&coloc_groups, pair.first));
node->mutable_attr()->erase(kClassAttr);
node->mutable_attr()->insert({kClassAttr, new_value});
}
} else {
NodeDef* node = node_map.GetNode(pair.first);
if (node) {
node->mutable_attr()->erase(kClassAttr);
}
}
}
}
}
} | #include "tensorflow/core/grappler/utils/colocation.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
class ColocationTest : public ::testing::Test {};
bool VerifyNodeHasColocation(const NodeDef& ndef, const string& coloc) {
if (ndef.attr().empty()) {
return false;
}
if (ndef.attr().find("_class") == ndef.attr().end()) {
return false;
}
return ndef.attr().at("_class").list().s(0) == coloc;
}
TEST(ColocationTest, ReassignColocation_SingleNode) {
NodeDef ndef;
const Status status =
NodeDefBuilder("A", "Const").Attr("_class", {"loc:@B"}).Finalize(&ndef);
TF_EXPECT_OK(status);
GraphDef gdef = test::function::GDef({ndef});
EXPECT_EQ(1, gdef.node_size());
EXPECT_EQ(1, gdef.node(0).attr_size());
ReassignColocation(&gdef);
EXPECT_EQ(1, gdef.node_size());
EXPECT_EQ(0, gdef.node(0).attr_size());
}
TEST(ColocationTest, ReassignColocation_MultiNode_SingleGroup) {
NodeDef ndef_a, ndef_b, ndef_c, ndef_d, ndef_e;
Status status =
NodeDefBuilder("A", "Const").Attr("_class", {"loc:@X"}).Finalize(&ndef_a);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("B", "Const").Attr("_class", {"loc:@X"}).Finalize(&ndef_b);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("C", "Const").Attr("_class", {"loc:@X"}).Finalize(&ndef_c);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("D", "Const").Attr("_class", {"loc:@C"}).Finalize(&ndef_d);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("E", "Const").Attr("_class", {"loc:@D"}).Finalize(&ndef_e);
TF_EXPECT_OK(status);
GraphDef gdef =
test::function::GDef({ndef_a, ndef_b, ndef_c, ndef_d, ndef_e});
EXPECT_EQ(5, gdef.node_size());
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(0), "loc:@X"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(1), "loc:@X"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(2), "loc:@X"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(3), "loc:@C"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(4), "loc:@D"));
ReassignColocation(&gdef);
EXPECT_EQ(5, gdef.node_size());
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(0), "loc:@E"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(1), "loc:@E"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(2), "loc:@E"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(3), "loc:@E"));
EXPECT_EQ(0, gdef.node(4).attr_size());
}
TEST(ColocationTest, ReassignColocation_MultiNode_MultiGroup) {
NodeDef ndef_a, ndef_b, ndef_c, ndef_d, ndef_e, ndef_u, ndef_v;
Status status =
NodeDefBuilder("A", "Const").Attr("_class", {"loc:@X"}).Finalize(&ndef_a);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("B", "Const").Attr("_class", {"loc:@X"}).Finalize(&ndef_b);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("C", "Const").Attr("_class", {"loc:@X"}).Finalize(&ndef_c);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("D", "Const").Attr("_class", {"loc:@C"}).Finalize(&ndef_d);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("E", "Const").Attr("_class", {"loc:@D"}).Finalize(&ndef_e);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("U", "Const").Attr("_class", {"loc:@W"}).Finalize(&ndef_u);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("V", "Const").Attr("_class", {"loc:@W"}).Finalize(&ndef_v);
TF_EXPECT_OK(status);
GraphDef gdef = test::function::GDef(
{ndef_a, ndef_b, ndef_c, ndef_d, ndef_e, ndef_u, ndef_v});
EXPECT_EQ(7, gdef.node_size());
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(0), "loc:@X"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(1), "loc:@X"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(2), "loc:@X"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(3), "loc:@C"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(4), "loc:@D"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(5), "loc:@W"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(6), "loc:@W"));
ReassignColocation(&gdef);
EXPECT_EQ(7, gdef.node_size());
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(0), "loc:@E"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(1), "loc:@E"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(2), "loc:@E"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(3), "loc:@E"));
EXPECT_EQ(0, gdef.node(4).attr_size());
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(5), "loc:@V"));
EXPECT_EQ(0, gdef.node(6).attr_size());
}
}
} |
1,356 | cpp | tensorflow/tensorflow | canonicalizer | tensorflow/core/grappler/utils/canonicalizer.cc | tensorflow/core/grappler/utils/canonicalizer_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_UTILS_CANONICALIZER_H_
#define TENSORFLOW_CORE_GRAPPLER_UTILS_CANONICALIZER_H_
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace grappler {
void CanonicalizeNode(NodeDef* node);
void CanonicalizeGraph(GraphDef* graph);
void CompressConstants(GraphDef* graph);
}
}
#endif
#include "tensorflow/core/grappler/utils/canonicalizer.h"
#include <algorithm>
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
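// Canonicalizes a node's inputs: the regular (data) inputs of commutative
// ops are sorted so equivalent nodes compare equal, and the trailing block
// of control inputs ("^name") is always sorted and deduplicated.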
void CanonicalizeNode(NodeDef* node) {
if (node->input_size() < 2) return;
int index = 0;
for (; index < node->input_size(); ++index) {
if (IsControlInput(node->input(index))) {
break;
}
}
auto* input = node->mutable_input();
if (IsCommutative(*node) && index > 0) {
std::sort(input->begin(), input->begin() + index);
}
if (index < node->input_size()) {
std::sort(input->begin() + index, input->end());
input->erase(std::unique(input->begin() + index, input->end()),
input->end());
}
}
void CanonicalizeGraph(GraphDef* graph) {
for (int i = 0; i < graph->node_size(); ++i) {
CanonicalizeNode(graph->mutable_node(i));
}
}
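// Asks the tensor-proto compressor to pick a more compact in-place encoding
// for Const/HostConst values (e.g. tensors with repeated contents) without
// changing what the tensor represents.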
void CompressConstants(GraphDef* graph) {
for (int i = 0; i < graph->node_size(); ++i) {
NodeDef* node = graph->mutable_node(i);
if ((IsConstant(*node) || IsHostConstant(*node)) &&
HasNodeAttr(*node, "value")) {
AttrValue& attr_val = (*node->mutable_attr())["value"];
if (attr_val.has_tensor()) {
tensor::CompressTensorProtoInPlace(attr_val.mutable_tensor());
}
}
}
}
}
} | #include "tensorflow/core/grappler/utils/canonicalizer.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
NodeDef MakeNode(const string& op) {
NodeDef node;
node.set_name("node");
node.set_op(op);
*node.add_input() = "b";
*node.add_input() = "a";
*node.add_input() = "^z";
*node.add_input() = "^y";
*node.add_input() = "^x";
*node.add_input() = "^z";
return node;
}
void Verify(const NodeDef& node) {
EXPECT_EQ(node.name(), "node");
ASSERT_EQ(node.input_size(), 5);
if (node.op() == "Div") {
EXPECT_EQ(node.input(0), "b");
EXPECT_EQ(node.input(1), "a");
} else {
EXPECT_EQ(node.input(0), "a");
EXPECT_EQ(node.input(1), "b");
}
EXPECT_EQ(node.input(2), "^x");
EXPECT_EQ(node.input(3), "^y");
EXPECT_EQ(node.input(4), "^z");
}
TEST(CanonicalizeNode, NonCommutative) {
NodeDef node = MakeNode("Div");
CanonicalizeNode(&node);
Verify(node);
}
TEST(CanonicalizeNode, Commutative) {
NodeDef node = MakeNode("Mul");
CanonicalizeNode(&node);
Verify(node);
}
TEST(CanonicalizeGraph, Simple) {
GraphDef graph;
*graph.add_node() = MakeNode("Div");
*graph.add_node() = MakeNode("Mul");
CanonicalizeGraph(&graph);
for (auto node : graph.node()) {
Verify(node);
}
}
}
}
} |
1,357 | cpp | tensorflow/tensorflow | transitive_fanin | tensorflow/core/grappler/utils/transitive_fanin.cc | tensorflow/core/grappler/utils/transitive_fanin_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_UTILS_TRANSITIVE_FANIN_H_
#define TENSORFLOW_CORE_GRAPPLER_UTILS_TRANSITIVE_FANIN_H_
#include <unordered_map>
#include <vector>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace grappler {
Status ComputeTransitiveFanin(
const GraphDef& graph, const std::vector<string>& terminal_nodes,
std::unordered_map<string, const NodeDef*>* name_to_fanin_node,
std::vector<const NodeDef*>* fanin_nodes);
Status ComputeTransitiveFanin(const GraphDef& graph,
const std::vector<string>& terminal_nodes,
std::vector<const NodeDef*>* fanin_nodes);
Status SetTransitiveFaninGraph(const GraphDef& input_graph,
GraphDef* output_graph,
const std::vector<string>& terminal_nodes);
}
}
#endif
#include "tensorflow/core/grappler/utils/transitive_fanin.h"
#include <queue>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
namespace grappler {
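// Collects every node reachable from `terminal_nodes` by walking fanins.
// _Recv nodes additionally pull in the _Send node with the matching
// "tensor_name" attribute, so partitioned graphs keep their cross-device
// producers.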
Status ComputeTransitiveFanin(
const GraphDef& graph, const std::vector<string>& terminal_nodes,
std::unordered_map<string, const NodeDef*>* name_to_fanin_node,
std::vector<const NodeDef*>* fanin_nodes) {
std::unordered_map<string, const NodeDef*> name_to_node;
std::unordered_map<string, const NodeDef*> name_to_send;
for (const auto& node : graph.node()) {
name_to_node[node.name()] = &node;
if (node.op() == "_Send") {
const auto& attr = node.attr();
name_to_send[attr.at("tensor_name").s()] = &node;
}
}
std::vector<const NodeDef*> queue;
for (const string& root : terminal_nodes) {
const NodeDef* node = name_to_node[NodeName(root)];
if (!node) {
return errors::InvalidArgument("Graph does not contain terminal node ",
root, ".");
}
queue.push_back(node);
}
std::unordered_set<const NodeDef*> visited;
while (!queue.empty()) {
const NodeDef* node = queue.back();
queue.pop_back();
if (!visited.insert(node).second) {
continue;
}
fanin_nodes->push_back(node);
if (name_to_fanin_node) {
name_to_fanin_node->insert(
std::pair<string, const NodeDef*>(node->name(), node));
}
for (const string& input : node->input()) {
const NodeDef* in = name_to_node[NodeName(input)];
if (!in) {
return errors::InvalidArgument("Graph does not contain input ",
NodeName(input), " of node ",
node->name(), ".");
}
queue.push_back(in);
}
if (node->op() == "_Recv") {
const auto& attr = node->attr();
const NodeDef* send = name_to_send[attr.at("tensor_name").s()];
if (send) {
queue.push_back(send);
}
}
}
return absl::OkStatus();
}
Status ComputeTransitiveFanin(const GraphDef& graph,
const std::vector<string>& terminal_nodes,
std::vector<const NodeDef*>* fanin_nodes) {
return ComputeTransitiveFanin(graph, terminal_nodes, nullptr, fanin_nodes);
}
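// Builds `output_graph` from the transitive fanin of `terminal_nodes`. The
// fanin list is emitted terminals-first, so it is copied in reverse to keep
// producers roughly ahead of their consumers.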
Status SetTransitiveFaninGraph(const GraphDef& input_graph,
GraphDef* output_graph,
const std::vector<string>& terminal_nodes) {
std::vector<const NodeDef*> keep;
TF_RETURN_IF_ERROR(
ComputeTransitiveFanin(input_graph, terminal_nodes, &keep));
output_graph->mutable_node()->Reserve(keep.size());
for (int i = keep.size() - 1; i >= 0; --i) {
*output_graph->add_node() = *keep[i];
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/utils/transitive_fanin.h"
#include <vector>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class TransitiveFaninTest : public ::testing::Test {
protected:
struct NodeConfig {
NodeConfig(string name, std::vector<string> inputs)
: name(std::move(name)), inputs(std::move(inputs)) {}
NodeConfig(string name, string op, std::vector<string> inputs)
: name(std::move(name)), op(std::move(op)), inputs(std::move(inputs)) {}
string name;
string op;
std::vector<string> inputs;
};
static GraphDef CreateGraph(const std::vector<NodeConfig>& nodes) {
GraphDef graph;
for (const NodeConfig& node : nodes) {
NodeDef node_def;
node_def.set_name(node.name);
node_def.set_op(node.op);
for (const string& input : node.inputs) {
node_def.add_input(input);
}
*graph.add_node() = std::move(node_def);
}
return graph;
}
};
TEST_F(TransitiveFaninTest, NoPruning) {
GraphDef graph = CreateGraph({
{"1", {"2"}},
{"2", {"3"}},
{"3", {"4"}},
{"4", {}}
});
GraphDef output_graph;
const std::vector<string> terminal_nodes = {"1"};
TF_EXPECT_OK(SetTransitiveFaninGraph(graph, &output_graph, terminal_nodes));
NodeMap node_map(&output_graph);
ASSERT_TRUE(node_map.NodeExists("1"));
ASSERT_TRUE(node_map.NodeExists("2"));
ASSERT_TRUE(node_map.NodeExists("3"));
ASSERT_TRUE(node_map.NodeExists("4"));
}
TEST_F(TransitiveFaninTest, PruneNodesUnreachableFromSingleTerminalNode) {
GraphDef graph = CreateGraph({
{"1", {"2"}},
{"2", {"3"}},
{"3", {"4"}},
{"4", {}},
{"5", {"1"}}
});
GraphDef output_graph;
const std::vector<string> terminal_nodes = {"1"};
TF_EXPECT_OK(SetTransitiveFaninGraph(graph, &output_graph, terminal_nodes));
NodeMap node_map(&output_graph);
ASSERT_TRUE(node_map.NodeExists("1"));
ASSERT_TRUE(node_map.NodeExists("2"));
ASSERT_TRUE(node_map.NodeExists("3"));
ASSERT_TRUE(node_map.NodeExists("4"));
ASSERT_FALSE(node_map.NodeExists("5"));
}
TEST_F(TransitiveFaninTest, PruneNodesUnreachableFromMultipleTerminalNodes) {
GraphDef graph = CreateGraph({
{"1", {"2"}},
{"2", {"3"}},
{"3", {"4"}},
{"4", {}},
{"5", {"2"}},
{"6", {"1"}}
});
GraphDef output_graph;
const std::vector<string> terminal_nodes = {"1", "5"};
TF_EXPECT_OK(SetTransitiveFaninGraph(graph, &output_graph, terminal_nodes));
NodeMap node_map(&output_graph);
ASSERT_TRUE(node_map.NodeExists("1"));
ASSERT_TRUE(node_map.NodeExists("2"));
ASSERT_TRUE(node_map.NodeExists("3"));
ASSERT_TRUE(node_map.NodeExists("4"));
ASSERT_TRUE(node_map.NodeExists("5"));
ASSERT_FALSE(node_map.NodeExists("6"));
}
TEST_F(TransitiveFaninTest, InvalidGraphOrTerminalNodes) {
GraphDef graph = CreateGraph({
{"1", {"2"}},
{"2", {"3"}},
{"3", {"4"}},
{"4", {}},
{"5", {"6"}},
{"7", {"8"}}
});
GraphDef output_graph;
const std::vector<string> terminal_nodes = {"1", "5"};
auto s = SetTransitiveFaninGraph(graph, &output_graph, terminal_nodes);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(), "Graph does not contain input 6 of node 5.");
const std::vector<string> invalid_terminal_nodes = {"0", "1", "5"};
s = SetTransitiveFaninGraph(graph, &output_graph, invalid_terminal_nodes);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(), "Graph does not contain terminal node 0.");
}
}
}
} |
1,358 | cpp | tensorflow/tensorflow | symbolic_shapes | tensorflow/core/grappler/utils/symbolic_shapes.cc | tensorflow/core/grappler/utils/symbolic_shapes_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_UTILS_SYMBOLIC_SHAPES_H_
#define TENSORFLOW_CORE_GRAPPLER_UTILS_SYMBOLIC_SHAPES_H_
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/grappler/costs/op_performance_data.pb.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace grappler {
bool IsKnown(const TensorShapeProto::Dim& dim);
bool IsKnownSymbolically(const TensorShapeProto::Dim& dim);
bool IsUnknown(const TensorShapeProto::Dim& dim);
bool ShapeIsSymbolicallyDefined(const TensorShapeProto& shape);
bool ShapeIsSymbolicallyDefined(const OpInfo::TensorProperties& properties);
int Rank(const TensorShapeProto& shape);
int64_t NumCoefficients(const TensorShapeProto& shape);
bool ShapesSymbolicallyEqual(const TensorShapeProto& left,
const TensorShapeProto& right);
bool ShapesSymbolicallyEqual(const OpInfo::TensorProperties& left,
const OpInfo::TensorProperties& right);
bool ShapesBroadcastable(const TensorShapeProto& left,
const TensorShapeProto& right);
bool ShapesBroadcastable(const OpInfo::TensorProperties& left,
const OpInfo::TensorProperties& right);
bool ShapeAfterBroadcast(const TensorShapeProto& left,
const TensorShapeProto& right,
TensorShapeProto* output_shape);
bool CompareSymbolicallyShapedTensorSizes(const TensorShapeProto& left,
const TensorShapeProto& right);
bool CompareSymbolicallyShapedTensorSizes(
const OpInfo::TensorProperties& left,
const OpInfo::TensorProperties& right);
int64_t ComputeSizeRatio(const TensorShapeProto& numerator,
const TensorShapeProto& denominator);
}
}
#endif
#include "tensorflow/core/grappler/utils/symbolic_shapes.h"
#include <set>
#include <unordered_map>
#include "tensorflow/core/util/bcast.h"
namespace tensorflow {
namespace grappler {
namespace {
BCast::Vec ShapeDims(const TensorShapeProto& shape) {
BCast::Vec dims;
dims.reserve(shape.dim_size());
for (int i = 0; i < shape.dim_size(); ++i)
dims.push_back(shape.dim(i).size());
return dims;
}
}
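// Dimension-size convention used throughout: size >= 0 is a known dimension,
// size == -1 is fully unknown, and size <= -2 is unknown but symbolically
// identified (two dims with the same negative id denote the same value).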
bool IsKnown(const TensorShapeProto::Dim& dim) { return dim.size() >= 0; }
bool IsKnownSymbolically(const TensorShapeProto::Dim& dim) {
return dim.size() <= -2;
}
bool IsUnknown(const TensorShapeProto::Dim& dim) { return dim.size() == -1; }
bool ShapeIsSymbolicallyDefined(const TensorShapeProto& shape) {
return !shape.unknown_rank() &&
std::all_of(
shape.dim().begin(), shape.dim().end(),
[](const TensorShapeProto::Dim& dim) { return !IsUnknown(dim); });
}
bool ShapeIsSymbolicallyDefined(const OpInfo::TensorProperties& properties) {
return ShapeIsSymbolicallyDefined(properties.shape());
}
int Rank(const TensorShapeProto& shape) {
if (shape.unknown_rank()) {
return -1;
}
return shape.dim_size();
}
int64_t NumCoefficients(const TensorShapeProto& shape) {
if (shape.unknown_rank()) {
return -1;
}
int64_t num_coefficients = 1;
for (const auto& dim : shape.dim()) {
if (dim.size() < 0) {
return -1;
}
num_coefficients *= dim.size();
}
return num_coefficients;
}
bool ShapesSymbolicallyEqual(const TensorShapeProto& left,
const TensorShapeProto& right) {
if (left.unknown_rank() || right.unknown_rank() ||
left.dim_size() != right.dim_size()) {
return false;
}
for (int i = 0; i < left.dim_size(); ++i) {
const auto& ldim = left.dim(i);
const auto& rdim = right.dim(i);
if (IsUnknown(ldim) || IsUnknown(rdim) || ldim.size() != rdim.size()) {
return false;
}
}
return true;
}
bool ShapesSymbolicallyEqual(const OpInfo::TensorProperties& left,
const OpInfo::TensorProperties& right) {
return ShapesSymbolicallyEqual(left.shape(), right.shape());
}
bool ShapesBroadcastable(const TensorShapeProto& left,
const TensorShapeProto& right) {
if (!ShapeIsSymbolicallyDefined(left) || !ShapeIsSymbolicallyDefined(right)) {
return false;
}
BCast bcast(ShapeDims(left), ShapeDims(right),
false);
return bcast.IsValid();
}
bool ShapesBroadcastable(const OpInfo::TensorProperties& left,
const OpInfo::TensorProperties& right) {
return ShapesBroadcastable(left.shape(), right.shape());
}
bool ShapeAfterBroadcast(const TensorShapeProto& left,
const TensorShapeProto& right,
TensorShapeProto* output_shape) {
if (!ShapeIsSymbolicallyDefined(left) || !ShapeIsSymbolicallyDefined(right)) {
return false;
}
BCast bcast(ShapeDims(left), ShapeDims(right),
false);
if (!bcast.IsValid()) {
return false;
}
output_shape->set_unknown_rank(false);
output_shape->clear_dim();
for (const auto& dim : bcast.output_shape()) {
output_shape->add_dim()->set_size(dim);
}
return true;
}
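// Returns true only when `left` is provably strictly smaller than `right`:
// known dims multiply into a defined size, symbolic dims with the same id
// cancel pairwise, and any unmatched unknown dim on the left side makes the
// comparison inconclusive (false).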
bool CompareSymbolicallyShapedTensorSizes(const TensorShapeProto& left,
const TensorShapeProto& right) {
if (left.unknown_rank() || right.unknown_rank()) {
return false;
}
int64_t left_defined_size = 1;
int64_t right_defined_size = 1;
std::unordered_map<int64_t, int64_t> left_unknown_dims;
std::unordered_map<int64_t, int64_t> right_unknown_dims;
int64_t unknown_dim_id = 1;
auto process_dimensions =
      [&unknown_dim_id](const TensorShapeProto& shape, int64_t* defined_size,
                        std::unordered_map<int64_t, int64_t>* unknown_dims) {
for (int i = 0; i < shape.dim_size(); ++i) {
const auto& dim = shape.dim(i);
int64_t dim_size = dim.size();
if (dim_size > 0) {
*defined_size *= dim_size;
} else if (IsUnknown(dim)) {
++(*unknown_dims)[unknown_dim_id++];
} else if (IsKnownSymbolically(dim)) {
++(*unknown_dims)[dim_size];
}
}
};
process_dimensions(left, &left_defined_size, &left_unknown_dims);
process_dimensions(right, &right_defined_size, &right_unknown_dims);
std::set<int64_t> unknown_dims;
for (const auto& el : left_unknown_dims) unknown_dims.insert(el.first);
for (const auto& el : right_unknown_dims) unknown_dims.insert(el.first);
for (int64_t unknown_dim : unknown_dims) {
int64_t co_occurrence = std::min(left_unknown_dims[unknown_dim],
right_unknown_dims[unknown_dim]);
left_unknown_dims[unknown_dim] -= co_occurrence;
right_unknown_dims[unknown_dim] -= co_occurrence;
}
int64_t left_unbalanced_unknown_dims = 0;
int64_t right_unbalanced_unknown_dims = 0;
for (const auto& el : left_unknown_dims)
left_unbalanced_unknown_dims += el.second;
for (const auto& el : right_unknown_dims)
right_unbalanced_unknown_dims += el.second;
if (left_unbalanced_unknown_dims == 0 && right_unbalanced_unknown_dims == 0) {
return left_defined_size < right_defined_size;
}
if (left_defined_size <= right_defined_size &&
left_unbalanced_unknown_dims == 0 && right_unbalanced_unknown_dims > 0) {
return true;
}
return false;
}
bool CompareSymbolicallyShapedTensorSizes(
const OpInfo::TensorProperties& left,
const OpInfo::TensorProperties& right) {
return CompareSymbolicallyShapedTensorSizes(left.shape(), right.shape());
}
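// Computes the ratio of the two tensor sizes via integer division. Every
// symbolic dim in the denominator must be matched (multiset-style) against
// one in the numerator, no symbolic dims may be left over, and -1 is
// returned whenever the ratio cannot be established.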
int64_t ComputeSizeRatio(const TensorShapeProto& numerator,
const TensorShapeProto& denominator) {
if (numerator.unknown_rank() || denominator.unknown_rank()) {
return -1;
}
std::multiset<int> symbolic_dims;
int64_t num = 1;
for (const auto& dim : numerator.dim()) {
if (dim.size() == -1) {
return -1;
} else if (dim.size() < -1) {
symbolic_dims.insert(dim.size());
} else {
num *= dim.size();
}
}
int64_t denom = 1;
for (const auto& dim : denominator.dim()) {
if (dim.size() == -1) {
return -1;
} else if (dim.size() < -1) {
auto it = symbolic_dims.find(dim.size());
if (it == symbolic_dims.end()) {
return -1;
}
symbolic_dims.erase(it);
} else {
denom *= dim.size();
}
}
if (denom == 0) {
return -1;
}
if (!symbolic_dims.empty()) {
return -1;
}
return num / denom;
}
}
} | #include "tensorflow/core/grappler/utils/symbolic_shapes.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class SymbolicShapesTest : public ::testing::Test {
protected:
TensorShapeProto MakeUnknown() {
TensorShapeProto shape;
shape.set_unknown_rank(true);
return shape;
}
TensorShapeProto MakeShape(std::vector<int> dims) {
TensorShapeProto shape;
for (int dim_size : dims) {
TensorShapeProto::Dim dim;
dim.set_size(dim_size);
*shape.add_dim() = dim;
}
return shape;
}
};
bool operator<(const TensorShapeProto& lhs, const TensorShapeProto& rhs) {
return CompareSymbolicallyShapedTensorSizes(lhs, rhs);
}
TEST_F(SymbolicShapesTest, ShapeIsSymbolicallyDefined) {
EXPECT_FALSE(ShapeIsSymbolicallyDefined(MakeUnknown()));
EXPECT_FALSE(ShapeIsSymbolicallyDefined(MakeShape({-1, 2})));
EXPECT_TRUE(ShapeIsSymbolicallyDefined(MakeShape({1, 2})));
EXPECT_TRUE(ShapeIsSymbolicallyDefined(MakeShape({-2, 2})));
}
TEST_F(SymbolicShapesTest, ShapesSymbolicallyEqual) {
EXPECT_FALSE(ShapesSymbolicallyEqual(MakeUnknown(), MakeUnknown()));
EXPECT_FALSE(ShapesSymbolicallyEqual(MakeShape({-1, 2}), MakeShape({-1, 2})));
EXPECT_FALSE(ShapesSymbolicallyEqual(MakeShape({-2, 2}), MakeShape({-3, 2})));
EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({1, 2}), MakeShape({1, 2})));
EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, 2}), MakeShape({-2, 2})));
}
TEST_F(SymbolicShapesTest, ShapesBroadcastable) {
EXPECT_FALSE(ShapesBroadcastable(MakeUnknown(), MakeUnknown()));
EXPECT_FALSE(ShapesBroadcastable(MakeShape({-2}), MakeShape({1, -3})));
EXPECT_FALSE(ShapesBroadcastable(MakeShape({-1, 2}), MakeShape({-1, 2})));
EXPECT_FALSE(ShapesBroadcastable(MakeShape({-2, 2}), MakeShape({-3, 2})));
EXPECT_FALSE(ShapesBroadcastable(MakeShape({-2, 4}), MakeShape({-2, 8})));
EXPECT_TRUE(ShapesBroadcastable(MakeShape({1, 2}), MakeShape({1, 2})));
EXPECT_TRUE(ShapesBroadcastable(MakeShape({-2, 2}), MakeShape({-2, 2})));
EXPECT_TRUE(ShapesBroadcastable(MakeShape({-2, 32}), MakeShape({-2, 1})));
EXPECT_TRUE(ShapesBroadcastable(MakeShape({-2, 1}), MakeShape({1, -2})));
EXPECT_TRUE(ShapesBroadcastable(MakeShape({-2, 1}), MakeShape({1, -3})));
EXPECT_TRUE(ShapesBroadcastable(MakeShape({-3}), MakeShape({-2, -3})));
TensorShapeProto output_shape;
EXPECT_TRUE(
ShapeAfterBroadcast(MakeShape({1, 2}), MakeShape({1, 2}), &output_shape));
EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({1, 2}), output_shape));
EXPECT_TRUE(ShapeAfterBroadcast(MakeShape({-2, 2}), MakeShape({-2, 2}),
&output_shape));
EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, 2}), output_shape));
EXPECT_TRUE(ShapeAfterBroadcast(MakeShape({-2, 32}), MakeShape({-2, 1}),
&output_shape));
EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, 32}), output_shape));
EXPECT_TRUE(ShapeAfterBroadcast(MakeShape({-2, 1}), MakeShape({1, -2}),
&output_shape));
EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, -2}), output_shape));
EXPECT_TRUE(ShapeAfterBroadcast(MakeShape({-2, 1}), MakeShape({1, -3}),
&output_shape));
EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, -3}), output_shape));
EXPECT_TRUE(
ShapeAfterBroadcast(MakeShape({-3}), MakeShape({-2, -3}), &output_shape));
EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, -3}), output_shape));
}
TEST_F(SymbolicShapesTest, CompareSymbolicallyShapedTensorSizes) {
EXPECT_TRUE(MakeShape({1, 1, 32}) < MakeShape({32, 32}));
EXPECT_TRUE(MakeShape({1, 32, 32}) < MakeShape({2048}));
EXPECT_TRUE(MakeShape({1, -2, 32}) < MakeShape({-2, 32, 32}));
EXPECT_TRUE(MakeShape({1, 32, 32}) < MakeShape({-2, 32, 32}));
EXPECT_TRUE(MakeShape({1, 32, 32}) < MakeShape({-1, 32, 32}));
EXPECT_TRUE(MakeShape({1, -2, 32}) < MakeShape({-2, -2, 32}));
EXPECT_FALSE(MakeShape({1, -2, 32}) < MakeShape({-3, 32, 32}));
EXPECT_FALSE(MakeShape({1, -1, 32}) < MakeShape({1, -1, 32}));
EXPECT_FALSE(MakeShape({1, -1, 32}) < MakeShape({-1, -1, 32}));
EXPECT_FALSE(MakeShape({-1, -1, 32}) < MakeShape({1, -1, 32}));
}
TEST_F(SymbolicShapesTest, RankAndNumCoeff) {
EXPECT_EQ(2, Rank(MakeShape({32, 32})));
EXPECT_EQ(32 * 32, NumCoefficients(MakeShape({32, 32})));
EXPECT_EQ(2, Rank(MakeShape({-2, 32})));
EXPECT_EQ(-1, NumCoefficients(MakeShape({-2, 32})));
TensorShapeProto shape;
shape.set_unknown_rank(true);
EXPECT_EQ(-1, Rank(shape));
EXPECT_EQ(-1, NumCoefficients(shape));
}
TEST_F(SymbolicShapesTest, SizeRatio) {
EXPECT_EQ(16, ComputeSizeRatio(MakeShape({32, 32}), MakeShape({32, 2})));
EXPECT_EQ(16, ComputeSizeRatio(MakeShape({-2, 32}), MakeShape({-2, 2})));
EXPECT_EQ(16,
ComputeSizeRatio(MakeShape({-2, -2, 32}), MakeShape({-2, 2, -2})));
EXPECT_EQ(-1,
ComputeSizeRatio(MakeShape({-2, -2, 32}), MakeShape({-2, 2, 2})));
EXPECT_EQ(-1,
ComputeSizeRatio(MakeShape({-2, 2, 32}), MakeShape({-2, 2, -2})));
EXPECT_EQ(-1, ComputeSizeRatio(MakeShape({-2, -2}), MakeShape({-2, 2})));
EXPECT_EQ(-1, ComputeSizeRatio(MakeShape({-2, 32}), MakeShape({-2, -2})));
EXPECT_EQ(1, ComputeSizeRatio(MakeShape({-2, -3}), MakeShape({-3, -2})));
EXPECT_EQ(-1, ComputeSizeRatio(MakeShape({-1, 32}), MakeShape({-2, 2})));
EXPECT_EQ(-1, ComputeSizeRatio(MakeShape({-1, 32}), MakeShape({-2, 0})));
}
}
}
} |
1,359 | cpp | tensorflow/tensorflow | traversal | tensorflow/core/grappler/utils/traversal.cc | tensorflow/core/grappler/utils/traversal_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_UTILS_TRAVERSAL_H_
#define TENSORFLOW_CORE_GRAPPLER_UTILS_TRAVERSAL_H_
#include <functional>
#include "tensorflow/core/grappler/graph_topology_view.h"
namespace tensorflow {
namespace grappler {
enum class TraversalDirection { kFollowInputs, kFollowOutputs };
struct DfsCallbacks {
DfsCallbacks() = default;
DfsCallbacks(std::function<void(const NodeDef*)> pre,
std::function<void(const NodeDef*)> post,
std::function<void(const NodeDef*, const NodeDef*)> back_edge)
: pre_order(std::move(pre)),
post_order(std::move(post)),
on_back_edge(std::move(back_edge)) {}
static DfsCallbacks PreOrder(std::function<void(const NodeDef*)> pre) {
return DfsCallbacks(std::move(pre), nullptr, nullptr);
}
static DfsCallbacks PostOrder(std::function<void(const NodeDef*)> post) {
return DfsCallbacks(nullptr, std::move(post), nullptr);
}
std::function<void(const NodeDef*)> pre_order;
std::function<void(const NodeDef*)> post_order;
std::function<void(const NodeDef*, const NodeDef*)> on_back_edge;
};
struct DfsPredicates {
DfsPredicates() = default;
DfsPredicates(std::function<bool(const NodeDef*)> enter,
std::function<bool(const NodeDef*)> advance)
: enter(std::move(enter)), advance(std::move(advance)) {}
static DfsPredicates Enter(std::function<bool(const NodeDef*)> enter) {
return DfsPredicates(std::move(enter), nullptr);
}
static DfsPredicates Advance(std::function<bool(const NodeDef*)> advance) {
return DfsPredicates(nullptr, std::move(advance));
}
std::function<bool(const NodeDef*)> enter;
std::function<bool(const NodeDef*)> advance;
};
void DfsTraversal(const GraphTopologyView& graph_view,
absl::Span<const NodeDef* const> from,
TraversalDirection direction, const DfsPredicates& predicates,
const DfsCallbacks& callbacks);
void DfsTraversal(const GraphTopologyView& graph_view,
absl::Span<const NodeDef* const> from,
TraversalDirection direction, const DfsCallbacks& callbacks);
}
}
#endif
#include "tensorflow/core/grappler/utils/traversal.h"
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/graph_topology_view.h"
namespace tensorflow {
namespace grappler {
namespace {
struct DfsStackElem {
DfsStackElem(int node, bool children_visited, int src)
: node(node), children_visited(children_visited), src(src) {}
explicit DfsStackElem(int node) : DfsStackElem(node, false, -1) {}
int node;
bool children_visited;
int src;
};
enum class NodeState { kNotVisited, kVisiting, kDone };
}
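// Iterative DFS over the topology view. Each node is pushed twice: once to
// run the pre-order callback and expand its fanins/fanouts, and again with
// children_visited=true to run the post-order callback. Reaching a node that
// is still in the kVisiting state signals a back edge, i.e. a cycle.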
void DfsTraversal(const GraphTopologyView& graph_view,
const absl::Span<const NodeDef* const> from,
const TraversalDirection direction,
const DfsPredicates& predicates,
const DfsCallbacks& callbacks) {
std::vector<DfsStackElem> stack;
stack.reserve(from.size());
for (const NodeDef* node : from) {
const absl::optional<int> node_idx = graph_view.GetNodeIndex(*node);
DCHECK(node_idx.has_value()) << "Illegal start node: " << node->name();
if (node_idx.has_value()) {
stack.emplace_back(node_idx.value());
}
}
absl::flat_hash_map<int, NodeState> node_state;
while (!stack.empty()) {
DfsStackElem w = stack.back();
stack.pop_back();
NodeState& state = node_state[w.node];
if (state == NodeState::kDone) continue;
if (predicates.enter && !predicates.enter(graph_view.GetNode(w.node))) {
state = NodeState::kDone;
continue;
}
if (w.children_visited) {
state = NodeState::kDone;
if (callbacks.post_order) {
callbacks.post_order(graph_view.GetNode(w.node));
}
continue;
}
if (state == NodeState::kVisiting) {
if (callbacks.on_back_edge) {
callbacks.on_back_edge(graph_view.GetNode(w.src),
graph_view.GetNode(w.node));
}
continue;
}
state = NodeState::kVisiting;
if (callbacks.pre_order) {
callbacks.pre_order(graph_view.GetNode(w.node));
}
stack.emplace_back(w.node, true, w.src);
if (predicates.advance && !predicates.advance(graph_view.GetNode(w.node))) {
continue;
}
if (direction == TraversalDirection::kFollowInputs) {
for (const int fanin : graph_view.GetFanin(w.node)) {
stack.emplace_back(fanin, false, w.node);
}
} else {
for (const int fanout : graph_view.GetFanout(w.node)) {
stack.emplace_back(fanout, false, w.node);
}
}
}
}
void DfsTraversal(const GraphTopologyView& graph_view,
const absl::Span<const NodeDef* const> from,
TraversalDirection direction, const DfsCallbacks& callbacks) {
DfsTraversal(graph_view, from, direction, {}, callbacks);
}
}
} | #include "tensorflow/core/grappler/utils/traversal.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using ::tensorflow::test::function::NDef;
DfsCallbacks MkCallbacks(std::vector<string>* pre_order,
std::vector<string>* post_order,
std::vector<string>* back_edges) {
return {[pre_order](const NodeDef* n) { pre_order->push_back(n->name()); },
[post_order](const NodeDef* n) { post_order->push_back(n->name()); },
[back_edges](const NodeDef* src, const NodeDef* dst) {
back_edges->push_back(absl::StrCat(src->name(), "->", dst->name()));
}};
}
TEST(TraversalTest, OutputsDfsNoLoop) {
const string op = "OpIsNotImportantInThisTest";
GraphDef graph = ::tensorflow::test::function::GDef(
{NDef("2", op, {"5"}, {}),
NDef("0", op, {"5", "4"}, {}),
NDef("1", op, {"4", "3"}, {}),
NDef("3", op, {"2"}, {}),
NDef("5", op, {}, {}),
NDef("4", op, {}, {})},
{});
std::vector<const NodeDef*> start_nodes = {&graph.node(4), &graph.node(5)};
std::vector<string> pre_order;
std::vector<string> post_order;
std::vector<string> back_edges;
GraphTopologyView graph_view;
TF_CHECK_OK(graph_view.InitializeFromGraph(graph));
DfsTraversal(graph_view, start_nodes, TraversalDirection::kFollowOutputs,
MkCallbacks(&pre_order, &post_order, &back_edges));
const std::vector<string> expected_pre = {"4", "1", "0", "5", "2", "3"};
const std::vector<string> expected_post = {"1", "0", "4", "3", "2", "5"};
EXPECT_EQ(pre_order, expected_pre);
EXPECT_EQ(post_order, expected_post);
EXPECT_TRUE(back_edges.empty());
}
TEST(TraversalTest, InputsDfsNoLoop) {
const string op = "OpIsNotImportantInThisTest";
GraphDef graph = ::tensorflow::test::function::GDef(
{NDef("2", op, {"5"}, {}),
NDef("0", op, {"5", "4"}, {}),
NDef("1", op, {"4", "3"}, {}),
NDef("3", op, {"2"}, {}),
NDef("5", op, {}, {}),
NDef("4", op, {}, {})},
{});
std::vector<const NodeDef*> start_nodes = {&graph.node(1), &graph.node(2)};
std::vector<string> pre_order;
std::vector<string> post_order;
std::vector<string> back_edges;
GraphTopologyView graph_view;
TF_CHECK_OK(graph_view.InitializeFromGraph(graph));
DfsTraversal(graph_view, start_nodes, TraversalDirection::kFollowInputs,
MkCallbacks(&pre_order, &post_order, &back_edges));
const std::vector<string> expected_pre = {"1", "4", "3", "2", "5", "0"};
const std::vector<string> expected_post = {"4", "5", "2", "3", "1", "0"};
EXPECT_EQ(pre_order, expected_pre);
EXPECT_EQ(post_order, expected_post);
EXPECT_TRUE(back_edges.empty());
}
TEST(TraversalTest, InputsDfsWithLoop) {
GraphDef graph = ::tensorflow::test::function::GDef(
{NDef("2", "Merge", {"1", "5"}, {}),
NDef("3", "Switch", {"2"}, {}),
NDef("4", "Identity", {"3"}, {}),
NDef("5", "NextIteration", {"4"}, {}),
NDef("1", "Enter", {}, {}),
NDef("6", "Exit", {"3"}, {})},
{});
std::vector<const NodeDef*> start_nodes = {&graph.node(5)};
std::vector<string> pre_order;
std::vector<string> post_order;
std::vector<string> back_edges;
GraphTopologyView graph_view;
TF_CHECK_OK(graph_view.InitializeFromGraph(graph));
DfsTraversal(graph_view, start_nodes, TraversalDirection::kFollowInputs,
MkCallbacks(&pre_order, &post_order, &back_edges));
const std::vector<string> expected_pre = {"6", "3", "2", "1", "5", "4"};
const std::vector<string> expected_post = {"1", "4", "5", "2", "3", "6"};
const std::vector<string> expected_edges = {"4->3"};
EXPECT_EQ(pre_order, expected_pre);
EXPECT_EQ(post_order, expected_post);
EXPECT_EQ(back_edges, expected_edges);
}
TEST(TraversalTest, OutputDfsWithLoop) {
GraphDef graph = ::tensorflow::test::function::GDef(
{NDef("2", "Merge", {"1", "5"}, {}),
NDef("3", "Switch", {"2"}, {}),
NDef("4", "Identity", {"3"}, {}),
NDef("5", "NextIteration", {"4"}, {}),
NDef("1", "Enter", {}, {}),
NDef("6", "Exit", {"3"}, {})},
{});
std::vector<const NodeDef*> start_nodes = {&graph.node(0)};
std::vector<string> pre_order;
std::vector<string> post_order;
std::vector<string> back_edges;
GraphTopologyView graph_view;
TF_CHECK_OK(graph_view.InitializeFromGraph(graph));
DfsTraversal(graph_view, start_nodes, TraversalDirection::kFollowOutputs,
MkCallbacks(&pre_order, &post_order, &back_edges));
const std::vector<string> expected_pre = {"2", "3", "6", "4", "5"};
const std::vector<string> expected_post = {"6", "5", "4", "3", "2"};
const std::vector<string> expected_edges = {"5->2"};
EXPECT_EQ(pre_order, expected_pre);
EXPECT_EQ(post_order, expected_post);
EXPECT_EQ(back_edges, expected_edges);
}
TEST(TraversalTest, DfsWithEnterPredicate) {
const string op = "OpIsNotImportantInThisTest";
GraphDef graph = ::tensorflow::test::function::GDef(
{NDef("1", op, {}, {}),
NDef("2", op, {"1"}, {}),
NDef("3", op, {"2"}, {}),
NDef("4", op, {"1"}, {}),
NDef("5", op, {"4"}, {}),
NDef("6", op, {"3", "5"}, {})},
{});
const auto enter = [](const NodeDef* node) {
return node->name() != "2" && node->name() != "3";
};
std::vector<const NodeDef*> start_nodes = {&graph.node(0)};
std::vector<string> pre_order;
std::vector<string> post_order;
std::vector<string> back_edges;
GraphTopologyView graph_view;
TF_CHECK_OK(graph_view.InitializeFromGraph(graph));
DfsTraversal(graph_view, start_nodes, TraversalDirection::kFollowOutputs,
DfsPredicates::Enter(enter),
MkCallbacks(&pre_order, &post_order, &back_edges));
const std::vector<string> expected_pre = {"1", "4", "5", "6"};
const std::vector<string> expected_post = {"6", "5", "4", "1"};
EXPECT_EQ(pre_order, expected_pre);
EXPECT_EQ(post_order, expected_post);
EXPECT_TRUE(back_edges.empty());
}
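// An Advance predicate is evaluated after a node is visited: node "2" is
// still recorded in the pre/post orders, but its fanouts are not expanded,
// so "3" is never reached.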
TEST(TraversalTest, DfsWithAdvancePredicate) {
const string op = "OpIsNotImportantInThisTest";
GraphDef graph = ::tensorflow::test::function::GDef(
{NDef("1", op, {}, {}),
NDef("2", op, {"1"}, {}),
NDef("3", op, {"2"}, {}),
NDef("4", op, {"1"}, {}),
NDef("5", op, {"4"}, {}),
NDef("6", op, {"3", "5"}, {})},
      {});
const auto advance = [](const NodeDef* node) {
return node->name() != "2" && node->name() != "3";
};
std::vector<const NodeDef*> start_nodes = {&graph.node(0)};
std::vector<string> pre_order;
std::vector<string> post_order;
std::vector<string> back_edges;
GraphTopologyView graph_view;
TF_CHECK_OK(graph_view.InitializeFromGraph(graph));
DfsTraversal(graph_view, start_nodes, TraversalDirection::kFollowOutputs,
DfsPredicates::Advance(advance),
MkCallbacks(&pre_order, &post_order, &back_edges));
const std::vector<string> expected_pre = {"1", "4", "5", "6", "2"};
const std::vector<string> expected_post = {"6", "5", "4", "2", "1"};
EXPECT_EQ(pre_order, expected_pre);
EXPECT_EQ(post_order, expected_post);
EXPECT_TRUE(back_edges.empty());
}
}
}
} |
1,360 | cpp | tensorflow/tensorflow | pattern_utils | tensorflow/core/grappler/utils/pattern_utils.cc | tensorflow/core/grappler/utils/pattern_utils_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_UTILS_PATTERN_UTILS_H_
#define TENSORFLOW_CORE_GRAPPLER_UTILS_PATTERN_UTILS_H_
#include "tensorflow/core/grappler/utils/graph_view.h"
namespace tensorflow {
namespace grappler {
namespace utils {
enum class MatchingDirection { kFollowInputs, kFollowOutputs };
enum class NodeStatus { kRemain, kRemove, kReplace };
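// One node of the pattern graph to match. `op` is a single op type, a
// '|'-separated list of alternatives, or "*" (matches any op). `label`
// names the node so the same graph node can be referenced from several
// places in the pattern; `node_status` records what the rewrite intends to
// do with the matched node.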
struct OpTypePattern {
string op;
string label;
NodeStatus node_status;
std::vector<OpTypePattern> children;
string DebugString() const {
string result = "{(op: " + op + ", " + "label: " + label + "), {";
for (const OpTypePattern& child : children) {
result += child.DebugString() + ",";
}
result += "}}";
return result;
}
};
struct NodeViewMatch {
MutableNodeView* node_view = nullptr;
std::vector<NodeViewMatch> children;
string DebugString() const {
string result = "{";
if (node_view == nullptr) {
result += "Non-Matched-Node}";
return result;
} else {
result += node_view->node()->DebugString();
result += ", {";
for (const NodeViewMatch& child : children) {
result += child.DebugString() + ",";
}
result += "}}";
return result;
}
}
void Clear() {
for (NodeViewMatch& child : children) {
child.Clear();
}
children.clear();
if (node_view != nullptr) {
node_view = nullptr;
}
}
};
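// Matches an OpTypePattern rooted at a given node view, walking the graph
// in DIRECTION. Only the kFollowInputs specialization is implemented below.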
template <MatchingDirection DIRECTION = MatchingDirection::kFollowInputs>
class SubGraphMatcher {
public:
  explicit SubGraphMatcher(MutableGraphView* graph_view)
      : graph_view_(graph_view) {}
bool GetMatchedNodes(const OpTypePattern& pattern,
const std::unordered_set<string>& nodes_to_preserve,
MutableNodeView* node_view,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices);
private:
MutableGraphView* graph_view_;
std::map<string, int> node_label_to_index_;
std::set<int> matched_node_indices_;
std::set<int> remove_node_indices_;
std::unique_ptr<NodeViewMatch> match_ = nullptr;
bool DoesOpTypePatternMatch(const OpTypePattern& pattern,
MutableNodeView* node_view, NodeViewMatch* match);
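  // A matched node may only be removed if it is not in the preserve set and
  // all of its regular fanouts lie inside the matched subgraph; otherwise
  // removal would leave a dangling consumer.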
bool IsSafeNodesToRemove(
const std::unordered_set<string>& nodes_to_preserve) {
for (const auto& node_idx : remove_node_indices_) {
auto node_view = graph_view_->GetNode(node_idx);
string node_name = node_view->GetName();
if (nodes_to_preserve.count(node_name) > 0) return false;
auto fanouts_by_ports = node_view->GetRegularFanouts();
for (const auto& fanouts : fanouts_by_ports) {
for (const auto& fanout : fanouts) {
if (!matched_node_indices_.count(fanout.node_index())) {
return false;
}
}
}
}
return true;
}
};
template <>
bool SubGraphMatcher<MatchingDirection::kFollowInputs>::DoesOpTypePatternMatch(
const OpTypePattern& pattern, MutableNodeView* node_view,
NodeViewMatch* match);
template <>
bool SubGraphMatcher<MatchingDirection::kFollowInputs>::GetMatchedNodes(
const OpTypePattern& pattern,
const std::unordered_set<string>& nodes_to_preserve,
MutableNodeView* node_view, std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices);
}
}
}
#endif
#include "tensorflow/core/grappler/utils/pattern_utils.h"
#include <algorithm>
#include <memory>
#include "absl/container/flat_hash_set.h"
namespace tensorflow {
namespace grappler {
namespace utils {
bool IsCommutativeOp(const string& op) {
std::vector<string> op_list = str_util::Split(op, '|');
static const auto* commutative_ops = new absl::flat_hash_set<string>(
{"Add", "AddV2", "Mul", "Maximum", "SquaredDifference"});
for (const string& op_ : op_list) {
if (commutative_ops->contains(op_)) return true;
}
return false;
}
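// Returns true if `op1` ("*", a single op type, or a '|'-separated list of
// alternatives) matches `op2`.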
bool IsSame(const string& op1, const string& op2) {
if (op1 == "*") return true;
std::vector<string> op1_list = str_util::Split(op1, '|');
for (const string& op_1 : op1_list) {
if (op_1 == op2) return true;
}
return false;
}
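// Recursively checks the pattern against the graph. A node slated for
// removal must have no controlled fanouts, and any matched node whose
// status is not kRemain must have no controlling fanins, since the rewrite
// could not preserve those control dependencies. For a commutative op with
// two children, the pattern children may be swapped to obtain a match.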
template <>
bool SubGraphMatcher<MatchingDirection::kFollowInputs>::DoesOpTypePatternMatch(
const OpTypePattern& pattern, MutableNodeView* node_view,
NodeViewMatch* match) {
if ((node_view->NumControllingFanins() > 0 &&
pattern.node_status != NodeStatus::kRemain) ||
(node_view->NumControlledFanouts() > 0 &&
pattern.node_status == NodeStatus::kRemove))
return false;
bool op_type_matched = false;
if (pattern.op == "*") {
op_type_matched = true;
} else {
std::vector<string> op_list = str_util::Split(pattern.op, '|');
for (const string& op : op_list) {
if (node_view->node()->op() == op) {
op_type_matched = true;
break;
}
}
}
if (op_type_matched) {
if (node_label_to_index_.find(pattern.label) ==
node_label_to_index_.end()) {
node_label_to_index_[pattern.label] = node_view->node_index();
matched_node_indices_.insert(node_view->node_index());
if (pattern.node_status == NodeStatus::kRemove) {
remove_node_indices_.insert(node_view->node_index());
}
} else if (node_label_to_index_[pattern.label] != node_view->node_index()) {
return false;
} else {
DCHECK(node_label_to_index_[pattern.label] == node_view->node_index());
}
} else {
return false;
}
match->node_view = node_view;
if (!pattern.children.empty()) {
auto graph_children = node_view->GetRegularFanins();
int num_children = graph_children.size();
    if (num_children != static_cast<int>(pattern.children.size())) {
return false;
} else {
std::vector<int> pattern_child_indices(num_children);
std::iota(pattern_child_indices.begin(), pattern_child_indices.end(), 0);
string op_name = pattern.op;
if (IsCommutativeOp(op_name) && num_children == 2) {
MutableNodeView* graph_child0_node_view =
graph_view_->GetNode(graph_children[0].node_index());
MutableNodeView* graph_child1_node_view =
graph_view_->GetNode(graph_children[1].node_index());
if ((!IsSame(pattern.children[0].op, graph_child0_node_view->GetOp()) &&
IsSame(pattern.children[1].op, graph_child0_node_view->GetOp())) ||
(!IsSame(pattern.children[1].op, graph_child1_node_view->GetOp()) &&
IsSame(pattern.children[0].op, graph_child1_node_view->GetOp())))
std::swap(pattern_child_indices[0], pattern_child_indices[1]);
}
for (int i = 0; i < num_children; ++i) {
auto child_node_index = graph_children[i].node_index();
MutableNodeView* child_node_view =
graph_view_->GetNode(child_node_index);
const OpTypePattern& child_pattern =
pattern.children[pattern_child_indices[i]];
match->children.push_back(NodeViewMatch());
NodeViewMatch* child_match = &(match->children.back());
if (!DoesOpTypePatternMatch(child_pattern, child_node_view,
child_match)) {
return false;
}
}
}
}
return true;
}
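// Entry point. Returns true and fills `matched_nodes_map` (pattern label ->
// node index) and `remove_node_indices` only if the pattern matches and
// every node slated for removal is safe to remove. All intermediate state
// is cleared before returning, so a matcher instance can be reused.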
template <>
bool SubGraphMatcher<MatchingDirection::kFollowInputs>::GetMatchedNodes(
const OpTypePattern& pattern,
const std::unordered_set<string>& nodes_to_preserve,
MutableNodeView* node_view, std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices) {
bool found_match = false;
match_ = std::make_unique<NodeViewMatch>();
if (DoesOpTypePatternMatch(pattern, node_view, match_.get())) {
if (IsSafeNodesToRemove(nodes_to_preserve)) {
found_match = true;
*matched_nodes_map = this->node_label_to_index_;
*remove_node_indices = this->remove_node_indices_;
}
} else {
found_match = false;
}
match_->Clear();
match_.reset(nullptr);
matched_node_indices_.clear();
node_label_to_index_.clear();
remove_node_indices_.clear();
return found_match;
}
}
}
} | #include "tensorflow/core/grappler/utils/pattern_utils.h"
#include "tensorflow/cc/ops/nn_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace grappler {
namespace utils {
namespace {
using ::tensorflow::ops::Placeholder;
void GetMatMulBiasAddGeluGraph(GraphDef* graph,
bool add_external_dependent = false) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 32});
auto weight_shape = ops::Placeholder::Shape({32, 64});
auto bias_shape = ops::Placeholder::Shape({64});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto weight = Placeholder(s.WithOpName("weight"), DT_FLOAT, weight_shape);
auto bias = Placeholder(s.WithOpName("bias"), DT_FLOAT, bias_shape);
auto matmul = ops::MatMul(s.WithOpName("matmul"), input, weight);
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), matmul, bias);
if (add_external_dependent) {
auto external_dependent =
ops::Identity(s.WithOpName("external_dependent"), bias_add);
}
auto one_over_square_root_two =
ops::Const(s.WithOpName("one_over_square_root_two"), {0.707f}, {});
auto bias_add_times_const = ops::Mul(s.WithOpName("bias_add_times_const"),
bias_add, one_over_square_root_two);
auto erf = ops::Erf(s.WithOpName("erf"), bias_add_times_const);
auto one = ops::Const(s.WithOpName("one"), {1.0f}, {});
auto erf_plus_one = ops::AddV2(s.WithOpName("erf_plus_one"), erf, one);
auto one_half = ops::Const(s.WithOpName("one_half"), {0.5f}, {});
auto one_half_times_erf_plus_one = ops::Mul(
s.WithOpName("one_half_times_erf_plus_one"), one_half, erf_plus_one);
auto gelu =
ops::Mul(s.WithOpName("gelu"), one_half_times_erf_plus_one, bias_add);
auto fetch = ops::Identity(s.WithOpName("fetch"), gelu);
TF_ASSERT_OK(s.ToGraphDef(graph));
}
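// Pattern for the erf-based GELU built above:
// gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))), where x = BiasAdd(MatMul).
// The "my_bias_add" label appears twice and must resolve to the same graph
// node.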
OpTypePattern GetMatMulBiasAddGeluPattern() {
OpTypePattern pattern_syntax{"Mul", "my_gelu", NodeStatus::kReplace,
{
{"Mul", "my_one_half_times_erf_plus_one", NodeStatus::kRemove,
{
{"Const", "my_one_half", NodeStatus::kRemain},
{"AddV2", "my_erf_plus_one", NodeStatus::kRemove,
{
{"Erf", "my_erf", NodeStatus::kRemove,
{
{"Mul", "my_bias_add_times_const", NodeStatus::kRemove,
{
{"BiasAdd", "my_bias_add", NodeStatus::kRemove},
{"Const", "my_one_over_square_root_two", NodeStatus::kRemain}
}
}
}
},
{"Const", "my_one", NodeStatus::kRemain}
}
}
}
},
{"BiasAdd", "my_bias_add", NodeStatus::kRemove,
{
{"MatMul", "my_matmul", NodeStatus::kRemove},
{"*", "my_bias", NodeStatus::kRemain}
}
}
}
};
return pattern_syntax;
}
class PatternMatcherTest : public ::testing::Test {
protected:
struct NodeConfig {
NodeConfig(string name, string op, std::vector<string> inputs)
: name(std::move(name)), op(std::move(op)), inputs(std::move(inputs)) {}
string name;
string op;
std::vector<string> inputs;
};
static GraphDef CreateGraph(const std::vector<NodeConfig>& nodes) {
GraphDef graph;
for (const NodeConfig& node : nodes) {
NodeDef node_def;
node_def.set_name(node.name);
node_def.set_op(node.op);
for (const string& input : node.inputs) {
node_def.add_input(input);
}
*graph.add_node() = std::move(node_def);
}
return graph;
}
};
TEST_F(PatternMatcherTest, Tree) {
::tensorflow::Status status;
GraphDef graph = CreateGraph({{"e", "E", {"c", "d"}},
{"c", "C", {"b"}},
{"d", "D", {}},
{"b", "B", {"a"}},
{"a", "A", {}}});
OpTypePattern pattern{"E", "my_e", NodeStatus::kReplace,
{
{"C", "my_c", NodeStatus::kRemove},
{"D", "my_d", NodeStatus::kRemove}
}
};
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
auto root_node_view = graph_view.GetNode("e");
SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool found_match = graph_matcher.GetMatchedNodes(
pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
EXPECT_TRUE(found_match);
EXPECT_FALSE(matched_nodes_map.empty());
EXPECT_FALSE(remove_node_indices.empty());
bool all_indices_matched = true;
  for (auto it = matched_nodes_map.begin(); it != matched_nodes_map.end();
       it++) {
auto label = str_util::StripPrefix(it->first, "my_");
int matched_node_idx = it->second;
int expected_node_idx = graph_view.GetNode(label)->node_index();
if (matched_node_idx != expected_node_idx) {
all_indices_matched = false;
break;
}
}
EXPECT_TRUE(all_indices_matched);
}
TEST_F(PatternMatcherTest, DAG) {
::tensorflow::Status status;
GraphDef graph = CreateGraph({{"e", "E", {"c", "d"}},
{"c", "C", {"b"}},
{"d", "D", {"b"}},
{"b", "B", {"a"}},
{"a", "A", {}}});
OpTypePattern pattern{"E", "my_e", NodeStatus::kReplace,
{
{"C", "my_c", NodeStatus::kRemove,
{
{"B", "my_b", NodeStatus::kRemove}
}
},
{"D", "my_d", NodeStatus::kRemove,
{
{"B", "my_b", NodeStatus::kRemove}
}
}
}
};
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
auto root_node_view = graph_view.GetNode("e");
SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
std::unordered_set<string> nodes_to_preserve = {"foo"};
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool found_match =
graph_matcher.GetMatchedNodes(pattern, nodes_to_preserve, root_node_view,
&matched_nodes_map, &remove_node_indices);
EXPECT_TRUE(found_match);
EXPECT_FALSE(matched_nodes_map.empty());
EXPECT_FALSE(remove_node_indices.empty());
bool all_indices_matched = true;
  for (auto it = matched_nodes_map.begin(); it != matched_nodes_map.end();
       it++) {
auto label = str_util::StripPrefix(it->first, "my_");
int matched_node_idx = it->second;
int expected_node_idx = graph_view.GetNode(label)->node_index();
if (matched_node_idx != expected_node_idx) {
all_indices_matched = false;
break;
}
}
EXPECT_TRUE(all_indices_matched);
nodes_to_preserve.insert({"c", "d"});
matched_nodes_map.clear();
remove_node_indices.clear();
found_match =
graph_matcher.GetMatchedNodes(pattern, nodes_to_preserve, root_node_view,
&matched_nodes_map, &remove_node_indices);
EXPECT_FALSE(found_match);
EXPECT_TRUE(matched_nodes_map.empty());
EXPECT_TRUE(remove_node_indices.empty());
}
TEST_F(PatternMatcherTest, DAGExternalDependent) {
::tensorflow::Status status;
GraphDef graph = CreateGraph({{"f", "F", {"d"}},
{"e", "E", {"c", "d"}},
{"c", "C", {"b"}},
{"d", "D", {"b"}},
{"b", "B", {"a"}},
{"a", "A", {}}});
OpTypePattern pattern{"E", "my_e", NodeStatus::kReplace,
{
{"C", "my_c", NodeStatus::kRemove,
{
{"B", "my_b", NodeStatus::kRemove}
}
},
{"D", "my_d", NodeStatus::kRemove,
{
{"B", "my_b", NodeStatus::kRemove}
}
}
}
};
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
auto root_node_view = graph_view.GetNode("e");
SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool found_match = graph_matcher.GetMatchedNodes(
pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
EXPECT_FALSE(found_match);
EXPECT_TRUE(matched_nodes_map.empty());
EXPECT_TRUE(remove_node_indices.empty());
}
TEST_F(PatternMatcherTest, MatMulBiasAddGelu) {
::tensorflow::Status status;
GraphDef graph;
GetMatMulBiasAddGeluGraph(&graph);
OpTypePattern pattern = GetMatMulBiasAddGeluPattern();
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
auto root_node_view = graph_view.GetNode("gelu");
SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool found_match = graph_matcher.GetMatchedNodes(
pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
EXPECT_TRUE(found_match);
EXPECT_FALSE(matched_nodes_map.empty());
EXPECT_FALSE(remove_node_indices.empty());
bool all_indices_matched = true;
  for (auto it = matched_nodes_map.begin(); it != matched_nodes_map.end();
       it++) {
auto label = str_util::StripPrefix(it->first, "my_");
int matched_node_idx = it->second;
int expected_node_idx = graph_view.GetNode(label)->node_index();
if (matched_node_idx != expected_node_idx) {
all_indices_matched = false;
break;
}
}
EXPECT_TRUE(all_indices_matched);
}
TEST_F(PatternMatcherTest, MatMulBiasAddGeluExternalDependent) {
::tensorflow::Status status;
GraphDef graph;
GetMatMulBiasAddGeluGraph(&graph, true);
OpTypePattern pattern = GetMatMulBiasAddGeluPattern();
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
auto root_node_view = graph_view.GetNode("gelu");
SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool found_match = graph_matcher.GetMatchedNodes(
pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
EXPECT_FALSE(found_match);
EXPECT_TRUE(matched_nodes_map.empty());
EXPECT_TRUE(remove_node_indices.empty());
}
TEST_F(PatternMatcherTest, MatMulBiasAddGeluMutation) {
::tensorflow::Status status;
GraphDef graph;
GetMatMulBiasAddGeluGraph(&graph);
OpTypePattern pattern = GetMatMulBiasAddGeluPattern();
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
auto root_node_view = graph_view.GetNode("gelu");
SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool found_match = graph_matcher.GetMatchedNodes(
pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
EXPECT_TRUE(found_match);
EXPECT_FALSE(matched_nodes_map.empty());
EXPECT_FALSE(remove_node_indices.empty());
int num_nodes_before = graph_view.NumNodes();
std::vector<string> remove_node_names;
for (auto const& node_idx : remove_node_indices) {
remove_node_names.push_back(graph_view.GetNode(node_idx)->GetName());
}
Mutation* mutation = graph_view.GetMutationBuilder();
NodeDef fused_node;
fused_node.set_name("gelu");
fused_node.set_op("_FusedMatMul");
fused_node.add_input(graph_view.GetNode("matmul")->node()->input(0));
fused_node.add_input(graph_view.GetNode("matmul")->node()->input(1));
fused_node.add_input(graph_view.GetNode("bias_add")->node()->input(1));
mutation->AddNode(std::move(fused_node), &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(mutation->Apply());
for (auto const& node_idx : remove_node_indices) {
mutation->RemoveNode(graph_view.GetNode(node_idx));
}
TF_EXPECT_OK(mutation->Apply());
int num_nodes_after = graph_view.NumNodes();
EXPECT_EQ(num_nodes_before - remove_node_indices.size(), num_nodes_after);
bool remove_nodes_deleted = true;
for (auto const& node_name : remove_node_names) {
if (graph_view.GetNode(node_name) != nullptr) {
remove_nodes_deleted = false;
break;
}
}
EXPECT_TRUE(remove_nodes_deleted);
  bool replace_node_exist = graph_view.HasNode("gelu");
EXPECT_TRUE(replace_node_exist);
}
TEST_F(PatternMatcherTest, CommutativeInputs) {
::tensorflow::Status status;
std::vector<string> commutative_ops = {"Mul", "Add", "AddV2"};
for (string op : commutative_ops) {
for (bool should_swap : {false, true}) {
std::vector<string> commutative_operands =
(should_swap ? std::vector<string>{"d", "c"}
: std::vector<string>{"c", "d"});
GraphDef graph = CreateGraph({{"e", op, commutative_operands},
{"c", "C", {"b"}},
{"d", "D", {"b"}},
{"b", "B", {"a"}},
{"a", "A", {}}});
OpTypePattern pattern{op, "my_e", NodeStatus::kReplace,
{
{"C", "my_c", NodeStatus::kRemove,
{
{"B", "my_b", NodeStatus::kRemove}
}
},
{"D", "my_d", NodeStatus::kRemove,
{
{"B", "my_b", NodeStatus::kRemove}
}
}
}
};
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
auto root_node_view = graph_view.GetNode("e");
SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&graph_view);
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool found_match = graph_matcher.GetMatchedNodes(
pattern, {}, root_node_view, &matched_nodes_map,
&remove_node_indices);
EXPECT_TRUE(found_match);
EXPECT_FALSE(matched_nodes_map.empty());
EXPECT_FALSE(remove_node_indices.empty());
bool all_indices_matched = true;
      for (auto it = matched_nodes_map.begin();
           it != matched_nodes_map.end(); it++) {
auto label = str_util::StripPrefix(it->first, "my_");
int matched_node_idx = it->second;
int expected_node_idx = graph_view.GetNode(label)->node_index();
if (matched_node_idx != expected_node_idx) {
all_indices_matched = false;
break;
}
}
EXPECT_TRUE(all_indices_matched);
}
}
}
}
}
}
} |
1,361 | cpp | tensorflow/tensorflow | scc | tensorflow/core/grappler/utils/scc.cc | tensorflow/core/grappler/utils/scc_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_UTILS_SCC_H_
#define TENSORFLOW_CORE_GRAPPLER_UTILS_SCC_H_
#include <unordered_map>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/grappler/inputs/utils.h"
#include "tensorflow/core/lib/io/path.h"
namespace tensorflow {
namespace grappler {
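// Computes the strongly connected components of `graph`, filling
// `components` with a component id for every node. All trivial
// (single-node, acyclic) components share the id -1 and together count as
// one component in `num_components`.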
void StronglyConnectedComponents(
const GraphDef& graph, std::unordered_map<const NodeDef*, int>* components,
int* num_components);
int IdentifyLoops(const GraphDef& graph,
std::unordered_map<const NodeDef*, std::vector<int>>* loops);
}
}
#endif
#include "tensorflow/core/grappler/utils/scc.h"
#include <stack>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
struct SCCNodeData {
SCCNodeData()
: node(nullptr),
index(-1),
lowlink(-1),
onstack(false),
caller(nullptr),
caller_loop_location(-1) {}
void ResetStack(int new_index, SCCNodeData* new_caller) {
index = new_index;
lowlink = new_index;
onstack = true;
caller = new_caller;
caller_loop_location = 0;
}
const NodeDef* node;
int index;
int lowlink;
bool onstack;
std::vector<SCCNodeData*> children;
SCCNodeData* caller;
int caller_loop_location;
};
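// One pass of Tarjan's strongly-connected-components algorithm, written
// iteratively: instead of recursing, each SCCNodeData remembers its caller
// and its position in the child list, so deep graphs cannot overflow the
// call stack.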
void StrongConnect(SCCNodeData* v, std::stack<SCCNodeData*>* stack, int* index,
std::unordered_map<const NodeDef*, int>* components,
int* scc_index) {
  v->ResetStack(*index, nullptr);
++*index;
stack->push(v);
v->caller = nullptr;
v->caller_loop_location = 0;
SCCNodeData* last = v;
while (true) {
if (last->caller_loop_location < last->children.size()) {
SCCNodeData* w = last->children[last->caller_loop_location];
++(last->caller_loop_location);
if (w->index == -1) {
        w->ResetStack(*index, last);
++*index;
stack->push(w);
last = w;
} else if (w->onstack == true) {
last->lowlink = std::min(last->lowlink, w->index);
}
} else {
if (last->lowlink == last->index) {
SCCNodeData* top;
while (true) {
top = stack->top();
stack->pop();
top->onstack = false;
(*components)[top->node] = *scc_index;
if (top == last) {
break;
}
}
++*scc_index;
}
SCCNodeData* next_last = last->caller;
if (next_last == nullptr) {
break;
} else {
next_last->lowlink = std::min(next_last->lowlink, last->lowlink);
last = next_last;
}
}
}
}
void StronglyConnectedComponents(
const GraphDef& graph, std::unordered_map<const NodeDef*, int>* components,
int* num_components) {
std::stack<SCCNodeData*> stack;
std::unordered_map<string, SCCNodeData*> name_to_data;
std::vector<SCCNodeData> node_data_container;
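  // Reserve the final size up front: raw pointers into this vector are
  // stored in the maps below, and a reallocation would invalidate them.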
node_data_container.reserve(graph.node_size());
std::unordered_map<const NodeDef*, SCCNodeData*> node_to_data;
for (const NodeDef& node : graph.node()) {
SCCNodeData node_data;
node_data.node = &node;
node_data_container.push_back(node_data);
    name_to_data[node.name()] = &node_data_container.back();
    node_to_data[&node] = &node_data_container.back();
}
for (const NodeDef& node : graph.node()) {
for (const string& input : node.input()) {
auto it = name_to_data.find(NodeName(input));
if (it != name_to_data.end()) {
it->second->children.push_back(node_to_data[&node]);
}
}
}
components->clear();
*num_components = 0;
int index = 0;
for (auto& v : node_data_container) {
if (v.index == -1) {
StrongConnect(&v, &stack, &index, components, num_components);
}
}
std::vector<int> counts_per_component(*num_components, 0);
for (auto& component : *components) {
    DCHECK_GE(component.second, 0);
    DCHECK_LT(component.second, *num_components);
counts_per_component[component.second]++;
}
bool has_single_element_component = false;
for (auto& component : *components) {
if (counts_per_component[component.second] == 1) {
component.second = -1;
(*num_components)--;
has_single_element_component = true;
}
}
if (has_single_element_component) {
(*num_components) += 1;
}
}
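// Assigns loop ids to every node that belongs to a cycle. An SCC containing
// a single NextIteration node is one loop. An SCC with several
// NextIteration nodes holds nested loops; they are separated by keeping one
// NextIteration back edge at a time and re-running the SCC computation on
// the subgraph.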
int IdentifyLoops(const GraphDef& graph,
std::unordered_map<const NodeDef*, std::vector<int>>* loops) {
int num_components = 0;
std::unordered_map<const NodeDef*, int> components;
StronglyConnectedComponents(graph, &components, &num_components);
if (num_components <= 1) {
if (!components.empty() && components.begin()->second == -1) {
return 0;
}
}
std::unordered_map<int, std::vector<const NodeDef*>> component_ids;
  for (const auto& it : components) {
int id = it.second;
if (id < 0) {
continue;
}
component_ids[id].push_back(it.first);
}
int loop_id = 0;
for (const auto& component : component_ids) {
const std::vector<const NodeDef*>& component_nodes = component.second;
std::vector<std::pair<NodeDef*, string>> next_iter_nodes;
GraphDef subgraph;
std::unordered_map<const NodeDef*, const NodeDef*> subgraph_mapping;
for (const auto& component_node : component_nodes) {
NodeDef* node = subgraph.add_node();
*node = *component_node;
subgraph_mapping[node] = component_node;
if (IsNextIteration(*node)) {
CHECK_EQ(1, node->input_size());
next_iter_nodes.emplace_back(node, node->input(0));
}
}
if (next_iter_nodes.size() == 1) {
for (const auto& component_node : component_nodes) {
(*loops)[component_node].push_back(loop_id);
}
++loop_id;
} else {
for (int i = 0; i < next_iter_nodes.size(); ++i) {
for (int j = 0; j < next_iter_nodes.size(); ++j) {
next_iter_nodes[j].first->clear_input();
if (i == j) {
*next_iter_nodes[j].first->add_input() = next_iter_nodes[j].second;
}
}
int num_components = 0;
std::unordered_map<const NodeDef*, int> components;
StronglyConnectedComponents(subgraph, &components, &num_components);
CHECK_GE(num_components, 1);
      for (const auto& it : components) {
int id = it.second;
if (id < 0) {
continue;
}
(*loops)[subgraph_mapping[it.first]].push_back(loop_id);
}
++loop_id;
}
}
}
return loop_id;
}
}
} | #include "tensorflow/core/grappler/utils/scc.h"
#include <memory>
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class SCCTest : public ::testing::Test {
public:
void SetUp() override {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties unknown_device;
devices["MY_DEVICE"] = unknown_device;
cluster_ = std::make_unique<VirtualCluster>(devices);
TF_CHECK_OK(cluster_->Provision());
}
void TearDown() override { cluster_.reset(); }
protected:
static NodeDef CreateNode(const string& name,
absl::Span<const string> inputs) {
NodeDef node;
node.set_name(name);
for (const string& input : inputs) {
node.add_input(input);
}
return node;
}
std::unique_ptr<VirtualCluster> cluster_;
};
TEST_F(SCCTest, NoLoops) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster_->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
std::unordered_map<const NodeDef*, int> components;
int num_components;
StronglyConnectedComponents(item.graph, &components, &num_components);
EXPECT_EQ(num_components, 1);
for (const auto& node : item.graph.node()) {
EXPECT_EQ(-1, components[&node]);
}
}
TEST_F(SCCTest, DisjointCycleAndPath) {
GraphDef graph;
*graph.add_node() = CreateNode("a", {"d"});
*graph.add_node() = CreateNode("b", {"a"});
*graph.add_node() = CreateNode("c", {"b"});
*graph.add_node() = CreateNode("d", {"c"});
*graph.add_node() = CreateNode("e", {});
*graph.add_node() = CreateNode("f", {"e"});
*graph.add_node() = CreateNode("g", {"f"});
*graph.add_node() = CreateNode("h", {"g"});
std::vector<const NodeDef*> nodes;
std::unordered_map<string, const NodeDef*> name_to_node;
for (const auto& n : graph.node()) {
nodes.push_back(&n);
name_to_node[n.name()] = &n;
}
int num_components;
std::unordered_map<const NodeDef*, int> components;
StronglyConnectedComponents(graph, &components, &num_components);
EXPECT_EQ(num_components, 2);
for (const auto& pair : {std::make_pair("a", "b"), std::make_pair("a", "c"),
std::make_pair("a", "d")}) {
EXPECT_EQ(components[name_to_node[pair.first]],
components[name_to_node[pair.second]]);
}
for (const auto& node : {"e", "f", "g", "h"})
EXPECT_EQ(-1, components[name_to_node[node]]);
}
}
TEST_F(SCCTest, WikipediaExample) {
GraphDef graph;
*graph.add_node() = CreateNode("a", {"c"});
*graph.add_node() = CreateNode("b", {"a", "d"});
*graph.add_node() = CreateNode("c", {"b", "d", "f"});
*graph.add_node() = CreateNode("d", {"e"});
*graph.add_node() = CreateNode("e", {"d"});
*graph.add_node() = CreateNode("f", {"e", "g"});
*graph.add_node() = CreateNode("g", {"f", "h"});
*graph.add_node() = CreateNode("h", {"h"});
std::vector<const NodeDef*> nodes;
std::unordered_map<string, const NodeDef*> name_to_node;
for (const auto& n : graph.node()) {
nodes.push_back(&n);
name_to_node[n.name()] = &n;
}
int num_components;
std::unordered_map<const NodeDef*, int> components;
StronglyConnectedComponents(graph, &components, &num_components);
EXPECT_EQ(num_components, 4);
for (const auto& pair :
{std::make_pair("a", "b"), std::make_pair("a", "c"),
std::make_pair("d", "e"), std::make_pair("f", "g")}) {
EXPECT_EQ(components[name_to_node[pair.first]],
components[name_to_node[pair.second]]);
}
for (const auto& pair :
{std::make_pair("a", "d"), std::make_pair("a", "f"),
std::make_pair("a", "h"), std::make_pair("d", "f"),
std::make_pair("d", "h"), std::make_pair("f", "h")}) {
EXPECT_NE(components[name_to_node[pair.first]],
components[name_to_node[pair.second]]);
}
}
TEST_F(SCCTest, TensorFlowLoop) {
const string gdef_ascii = R"EOF(
node {
name: "Const"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 0
}
}
}
}
node {
name: "while/Enter"
op: "Enter"
input: "Const"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
}
node {
name: "while/Merge"
op: "Merge"
input: "while/Enter"
input: "while/NextIteration"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Less/y"
op: "Const"
input: "^while/Merge"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 10
}
}
}
}
node {
name: "while/Less"
op: "Less"
input: "while/Merge"
input: "while/Less/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/LoopCond"
op: "LoopCond"
input: "while/Less"
}
node {
name: "while/Switch"
op: "Switch"
input: "while/Merge"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge"
}
}
}
}
node {
name: "while/Identity"
op: "Identity"
input: "while/Switch:1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Add/y"
op: "Const"
input: "^while/Identity"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 1
}
}
}
}
node {
name: "while/Add"
op: "Add"
input: "while/Identity"
input: "while/Add/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/NextIteration"
op: "NextIteration"
input: "while/Add"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Exit"
op: "Exit"
input: "while/Switch"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
versions {
producer: 11
}
)EOF";
GrapplerItem item;
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii, &item.graph));
std::unordered_map<const NodeDef*, int> components;
int num_components;
StronglyConnectedComponents(item.graph, &components, &num_components);
EXPECT_EQ(num_components, 2);
for (const auto& node : item.graph.node()) {
if (node.name() == "Const" || node.name() == "while/Enter" ||
node.name() == "while/Exit") {
EXPECT_EQ(-1, components[&node]);
} else {
EXPECT_LE(0, components[&node]);
}
}
}
TEST_F(SCCTest, NestedLoops) {
GrapplerItem item;
string filename = io::JoinPath(
testing::TensorFlowSrcRoot(),
"core/grappler/costs/graph_properties_testdata/nested_loop.pbtxt");
TF_CHECK_OK(ReadGraphDefFromFile(filename, &item.graph));
for (const auto& node : item.graph.node()) {
std::cout << node.DebugString() << std::endl;
}
std::unordered_map<const NodeDef*, std::vector<int>> loops;
int num_loops = IdentifyLoops(item.graph, &loops);
EXPECT_EQ(4, num_loops);
for (const auto& node_info : loops) {
std::cout << node_info.first->name() << " [";
for (int i : node_info.second) {
std::cout << " " << i;
}
std::cout << "]" << std::endl;
}
}
}
} |
1,362 | cpp | tensorflow/tensorflow | virtual_placer | tensorflow/core/grappler/costs/virtual_placer.cc | tensorflow/core/grappler/costs/virtual_placer_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_COSTS_VIRTUAL_PLACER_H_
#define TENSORFLOW_CORE_GRAPPLER_COSTS_VIRTUAL_PLACER_H_
#include <unordered_map>
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/device_properties.pb.h"
namespace tensorflow {
class NodeDef;
namespace grappler {
class Cluster;
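// Maps the (possibly partial or inconsistently cased) device strings found
// on NodeDefs to the canonical device names known to the cluster, falling
// back to a default device when the string is absent or unparseable.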
class VirtualPlacer {
public:
explicit VirtualPlacer(
const std::unordered_map<string, DeviceProperties>& devices);
const DeviceProperties& get_device(const NodeDef& node) const;
string get_canonical_device_name(const NodeDef& node) const;
private:
string to_lfqn_or_empty(const string& device_name) const;
std::unordered_map<string, DeviceProperties> devices_;
std::unordered_map<string, string> lfqn_map_;
string default_device_name_;
string default_job_name_lowercase_;
};
}
}
#endif
#include "tensorflow/core/grappler/costs/virtual_placer.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace grappler {
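// The default device is chosen once at construction: the lowest-id GPU if
// any, otherwise the lowest-id CPU, otherwise the first cluster device (or
// a synthetic "UNKNOWN" device for an empty cluster). If the cluster has
// exactly one job name, it also becomes the default job used to complete
// partial device strings.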
VirtualPlacer::VirtualPlacer(
const std::unordered_map<string, DeviceProperties>& devices)
: devices_(devices),
default_job_name_lowercase_("localhost") {
lfqn_map_.reserve(devices_.size());
for (const auto& kv : devices_) {
const auto lfqn = to_lfqn_or_empty(kv.first);
if (lfqn.empty()) {
LOG(ERROR) << "VirtualPlacer couldn't parse device name from cluster: "
<< kv.first;
} else {
lfqn_map_[lfqn] = kv.first;
}
}
if (devices_.empty()) {
default_device_name_ = "UNKNOWN";
DeviceProperties& prop = devices_["UNKNOWN"];
prop.set_type("UNKNOWN");
} else if (devices_.size() == 1) {
default_device_name_ = devices_.begin()->first;
} else {
std::map<int, string> cpu_devices;
std::map<int, string> gpu_devices;
for (const auto& kv : lfqn_map_) {
const auto& lfqn = kv.first;
const auto& cluster_device_name = kv.second;
DeviceNameUtils::ParsedName parsed_name;
bool parsed = DeviceNameUtils::ParseFullName(lfqn, &parsed_name);
if (parsed) {
const auto type = absl::AsciiStrToLower(parsed_name.type);
if (type == "gpu") {
gpu_devices[parsed_name.id] = cluster_device_name;
} else if (type == "cpu") {
cpu_devices[parsed_name.id] = cluster_device_name;
}
}
}
if (!gpu_devices.empty()) {
default_device_name_ = gpu_devices.begin()->second;
} else if (!cpu_devices.empty()) {
default_device_name_ = cpu_devices.begin()->second;
} else {
default_device_name_ = devices_.begin()->first;
}
}
VLOG(3) << "default device name: " << default_device_name_;
std::unordered_set<string> job_names_from_cluster;
for (const auto& device : lfqn_map_) {
const auto& lfqn = device.first;
DeviceNameUtils::ParsedName parsed_name;
bool parsed = DeviceNameUtils::ParseFullName(lfqn, &parsed_name);
if (parsed && !parsed_name.job.empty()) {
job_names_from_cluster.insert(parsed_name.job);
if (job_names_from_cluster.size() > 1) {
break;
}
}
}
if (job_names_from_cluster.size() == 1) {
auto it = job_names_from_cluster.begin();
default_job_name_lowercase_ = *it;
}
VLOG(3) << "default job name: " << default_job_name_lowercase_;
}
const DeviceProperties& VirtualPlacer::get_device(const NodeDef& node) const {
string device = get_canonical_device_name(node);
VLOG(3) << "node.name=" << node.name() << " node.device=" << node.device()
<< " is placed on: " << device;
auto it = devices_.find(device);
DCHECK(it != devices_.end());
return it->second;
}
string VirtualPlacer::get_canonical_device_name(const NodeDef& node) const {
if (node.device().empty()) {
return default_device_name_;
}
const auto lfqn = to_lfqn_or_empty(node.device());
if (lfqn.empty()) {
return default_device_name_;
}
const auto it = lfqn_map_.find(lfqn);
if (it != lfqn_map_.end()) {
return it->second;
}
return default_device_name_;
}
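// Canonicalizes `device_name` to a lowercased fully qualified name of the
// form "/job:<job>/replica:<r>/task:<t>/device:<type>:<id>", accepting full
// names, local names, and the bare strings "cpu"/"gpu". Returns an empty
// string if the name cannot be parsed.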
string VirtualPlacer::to_lfqn_or_empty(const string& device_name) const {
DeviceNameUtils::ParsedName parsed_name;
const auto lowercase_name = absl::AsciiStrToLower(device_name);
bool parsed = DeviceNameUtils::ParseFullName(lowercase_name, &parsed_name);
if (!parsed) {
parsed = DeviceNameUtils::ParseLocalName(lowercase_name, &parsed_name);
parsed_name.job = "localhost";
}
if (!parsed) {
if (lowercase_name == "gpu" || lowercase_name == "cpu") {
parsed_name.job = "localhost";
parsed_name.type = lowercase_name;
parsed = true;
}
}
if (!parsed) {
return {};
}
if (parsed_name.job.empty()) {
parsed_name.job = default_job_name_lowercase_;
}
parsed_name.type = absl::AsciiStrToLower(parsed_name.type);
string lfqn = strings::StrCat(
"/job:", parsed_name.job, "/replica:", parsed_name.replica,
"/task:", parsed_name.task, "/device:", parsed_name.type, ":",
parsed_name.id);
return lfqn;
}
}
} | #include "tensorflow/core/grappler/costs/virtual_placer.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/device_properties.pb.h"
namespace tensorflow {
namespace grappler {
TEST(VirtualPlacerTest, LocalDevices) {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
devices["/job:localhost/replica:0/task:0/cpu:0"] = cpu_device;
DeviceProperties gpu_device;
gpu_device.set_type("GPU");
devices["/job:localhost/replica:0/task:0/device:GPU:0"] = gpu_device;
VirtualCluster cluster(devices);
VirtualPlacer placer(devices);
NodeDef node;
node.set_op("Conv2D");
EXPECT_EQ("GPU", placer.get_device(node).type());
EXPECT_EQ("/job:localhost/replica:0/task:0/device:GPU:0",
placer.get_canonical_device_name(node));
node.set_device("CPU");
EXPECT_EQ("CPU", placer.get_device(node).type());
EXPECT_EQ("/job:localhost/replica:0/task:0/cpu:0",
placer.get_canonical_device_name(node));
node.set_device("GPU:0");
EXPECT_EQ("GPU", placer.get_device(node).type());
EXPECT_EQ("/job:localhost/replica:0/task:0/device:GPU:0",
placer.get_canonical_device_name(node));
}
TEST(VirtualPlacerTest, ShortNames) {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
devices["/CPU:0"] = cpu_device;
DeviceProperties gpu_device;
gpu_device.set_type("GPU");
devices["/GPU:0"] = gpu_device;
VirtualCluster cluster(devices);
VirtualPlacer placer(devices);
NodeDef node;
node.set_op("Conv2D");
EXPECT_EQ("GPU", placer.get_device(node).type());
EXPECT_EQ("/GPU:0", placer.get_canonical_device_name(node));
node.set_device("CPU");
EXPECT_EQ("CPU", placer.get_device(node).type());
EXPECT_EQ("/CPU:0", placer.get_canonical_device_name(node));
node.set_device("GPU:0");
EXPECT_EQ("GPU", placer.get_device(node).type());
EXPECT_EQ("/GPU:0", placer.get_canonical_device_name(node));
}
TEST(VirtualPlacerTest, PlacementOnNonDefaultDevice) {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
devices["/job:localhost/replica:0/task:0/cpu:0"] = cpu_device;
DeviceProperties tpu_device;
tpu_device.set_type("TPU");
devices["/job:localhost/replica:0/task:0/device:TPU:0"] = tpu_device;
VirtualCluster cluster(devices);
VirtualPlacer placer(devices);
NodeDef node;
node.set_op("Conv2D");
EXPECT_EQ("CPU", placer.get_device(node).type());
EXPECT_EQ("/job:localhost/replica:0/task:0/cpu:0",
placer.get_canonical_device_name(node));
node.set_device("/device:TPU:0");
EXPECT_EQ("TPU", placer.get_device(node).type());
EXPECT_EQ("/job:localhost/replica:0/task:0/device:TPU:0",
placer.get_canonical_device_name(node));
}
TEST(VirtualPlacerTest, EmptyJobName) {
for (const string& job_name : {"localhost", "worker", "worker_train"}) {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
devices[strings::StrCat("/job:", job_name, "/replica:0/task:0/cpu:0")] =
cpu_device;
DeviceProperties gpu_device;
gpu_device.set_type("GPU");
devices[strings::StrCat("/job:", job_name,
"/replica:0/task:0/device:GPU:0")] = gpu_device;
VirtualCluster cluster(devices);
VirtualPlacer placer(devices);
NodeDef node;
node.set_op("Conv2D");
node.set_device("/device:CPU:0");
EXPECT_EQ(strings::StrCat("/job:", job_name, "/replica:0/task:0/cpu:0"),
placer.get_canonical_device_name(node));
node.set_device("/device:GPU:0");
EXPECT_EQ(
strings::StrCat("/job:", job_name, "/replica:0/task:0/device:GPU:0"),
placer.get_canonical_device_name(node));
}
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
devices["/job:localhost/replica:0/task:0/cpu:0"] = cpu_device;
devices["/job:ps/replica:0/task:0/cpu:0"] = cpu_device;
devices["/job:worker/replica:0/task:0/cpu:0"] = cpu_device;
VirtualCluster cluster(devices);
VirtualPlacer placer(devices);
NodeDef node;
node.set_op("Conv2D");
node.set_device("/device:CPU:0");
EXPECT_EQ("/job:localhost/replica:0/task:0/cpu:0",
placer.get_canonical_device_name(node));
}
string GetDefaultDeviceName(
const std::unordered_map<string, DeviceProperties>& devices) {
VirtualCluster cluster(devices);
VirtualPlacer placer(devices);
NodeDef node;
node.set_op("Conv2D");
return placer.get_canonical_device_name(node);
}
TEST(VirtualPlacerTest, DefaultDevice) {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
devices["/job:worker/replica:0/task:0/cpu:0"] = cpu_device;
EXPECT_EQ("/job:worker/replica:0/task:0/cpu:0",
GetDefaultDeviceName(devices));
DeviceProperties gpu_device;
gpu_device.set_type("GPU");
for (int i = 0; i < 8; i++) {
devices[strings::StrCat("/job:worker/replica:0/task:0/gpu:", i)] =
gpu_device;
EXPECT_EQ("/job:worker/replica:0/task:0/gpu:0",
GetDefaultDeviceName(devices));
}
}
TEST(VirtualPlacerTest, MultiReplica) {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
DeviceProperties gpu_device;
gpu_device.set_type("GPU");
for (int i = 0; i < 8; i++) {
devices[strings::StrCat("/job:worker/replica:", i, "/task:0/cpu:0")] =
cpu_device;
for (int j = 0; j < 8; j++) {
devices[strings::StrCat("/job:worker/replica:", i, "/task:0/gpu:", j)] =
gpu_device;
}
}
std::unique_ptr<VirtualCluster> cluster(new VirtualCluster(devices));
std::unique_ptr<VirtualPlacer> placer(new VirtualPlacer(devices));
auto get_device_name = [&placer](const string& device) -> string {
NodeDef node;
node.set_op("Conv2D");
node.set_device(device);
return placer->get_canonical_device_name(node);
};
EXPECT_EQ("/job:worker/replica:0/task:0/cpu:0",
get_device_name("/replica:0/cpu:0"));
EXPECT_EQ("/job:worker/replica:2/task:0/cpu:0",
get_device_name("/replica:2/cpu:0"));
EXPECT_EQ("/job:worker/replica:7/task:0/cpu:0",
get_device_name("/replica:7/cpu:0"));
EXPECT_EQ("/job:worker/replica:3/task:0/gpu:0",
get_device_name("/replica:3/gpu:0"));
EXPECT_EQ("/job:worker/replica:5/task:0/gpu:3",
get_device_name("/replica:5/gpu:3"));
EXPECT_EQ("/job:worker/replica:4/task:0/gpu:7",
get_device_name("/replica:4/gpu:7"));
for (int i = 0; i < 4; i++) {
devices[strings::StrCat("/job:ps/replica:", i, "/task:0/cpu:0")] =
cpu_device;
}
cluster.reset(new VirtualCluster(devices));
placer.reset(new VirtualPlacer(cluster->GetDevices()));
EXPECT_EQ("/job:worker/replica:0/task:0/cpu:0",
get_device_name("/job:worker/replica:0/cpu:0"));
EXPECT_EQ("/job:worker/replica:7/task:0/gpu:3",
get_device_name("/job:worker/replica:7/gpu:3"));
EXPECT_EQ("/job:ps/replica:0/task:0/cpu:0",
get_device_name("/job:ps/replica:0/cpu:0"));
EXPECT_EQ("/job:ps/replica:1/task:0/cpu:0",
get_device_name("/job:ps/replica:1/cpu:0"));
EXPECT_EQ("/job:ps/replica:2/task:0/cpu:0",
get_device_name("/job:ps/replica:2/cpu:0"));
EXPECT_EQ("/job:ps/replica:3/task:0/cpu:0",
get_device_name("/job:ps/replica:3/cpu:0"));
}
TEST(VirtualPlacerTest, FallBackUnknown) {
std::unordered_map<string, DeviceProperties> devices;
VirtualCluster cluster(devices);
VirtualPlacer placer(devices);
NodeDef node;
node.set_op("Conv2D");
EXPECT_EQ("UNKNOWN", placer.get_device(node).type());
EXPECT_EQ("UNKNOWN", placer.get_canonical_device_name(node));
}
TEST(VirtualPlacerTest, FallBackCPU) {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
devices["/job:my_job/replica:0/task:0/cpu:0"] = cpu_device;
VirtualCluster cluster(devices);
VirtualPlacer placer(devices);
NodeDef node;
node.set_op("Conv2D");
EXPECT_EQ("CPU", placer.get_device(node).type());
EXPECT_EQ("/job:my_job/replica:0/task:0/cpu:0",
placer.get_canonical_device_name(node));
}
TEST(VirtualPlacerTest, RemoteDevices) {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
devices["/job:my_job/replica:0/task:0/cpu:0"] = cpu_device;
DeviceProperties gpu_device;
gpu_device.set_type("GPU");
devices["/job:my_job/replica:0/task:0/device:GPU:0"] = gpu_device;
VirtualCluster cluster(devices);
VirtualPlacer placer(devices);
NodeDef node;
node.set_op("Conv2D");
EXPECT_EQ("GPU", placer.get_device(node).type());
EXPECT_EQ("/job:my_job/replica:0/task:0/device:GPU:0",
placer.get_canonical_device_name(node));
node.set_device("/job:my_job/replica:0/task:0/cpu:0");
EXPECT_EQ("CPU", placer.get_device(node).type());
EXPECT_EQ("/job:my_job/replica:0/task:0/cpu:0",
placer.get_canonical_device_name(node));
node.set_device("/job:my_job/replica:0/task:0/device:GPU:0");
EXPECT_EQ("GPU", placer.get_device(node).type());
EXPECT_EQ("/job:my_job/replica:0/task:0/device:GPU:0",
placer.get_canonical_device_name(node));
node.set_device("CPU");
EXPECT_EQ("GPU", placer.get_device(node).type());
EXPECT_EQ("/job:my_job/replica:0/task:0/device:GPU:0",
placer.get_canonical_device_name(node));
node.set_device("GPU:0");
EXPECT_EQ("GPU", placer.get_device(node).type());
EXPECT_EQ("/job:my_job/replica:0/task:0/device:GPU:0",
placer.get_canonical_device_name(node));
node.set_device("/job:my_job/replica:0/task:0");
EXPECT_EQ("GPU", placer.get_device(node).type());
EXPECT_EQ("/job:my_job/replica:0/task:0/device:GPU:0",
placer.get_canonical_device_name(node));
}
}
} |
1,363 | cpp | tensorflow/tensorflow | graph_properties | tensorflow/core/grappler/costs/graph_properties.cc | tensorflow/core/grappler/costs/graph_properties_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_COSTS_GRAPH_PROPERTIES_H_
#define TENSORFLOW_CORE_GRAPPLER_COSTS_GRAPH_PROPERTIES_H_
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/costs/op_performance_data.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
namespace tensorflow {
namespace grappler {
ABSL_CONST_INIT const char kOutputSlots[] = "_output_slot_vector";
ABSL_CONST_INIT const char kExecutionCount[] = "_execution_count";
ABSL_CONST_INIT const char kOutputSizes[] = "_output_sizes_vector";
ABSL_CONST_INIT const char kOutputSame[] = "_same_output_for_iterations";
ABSL_CONST_INIT const char kOutputTypes[] = "_output_dtype_vector";
ABSL_CONST_INIT const char kOutputShapes[] = "_output_shape_vector";
class SymbolicShapeRefiner;
class TopoQueue;
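// Infers and stores the input/output tensor properties (dtype and shape) of
// every node in a GrapplerItem, either statically via symbolic shape
// inference or dynamically by running the graph on a cluster.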
class GraphProperties {
public:
explicit GraphProperties(const GrapplerItem& item) : item_(item) {}
Status InferStatically(bool assume_valid_feeds,
bool aggressive_shape_inference,
bool include_input_tensor_values,
bool include_output_tensor_values);
Status InferStatically(bool assume_valid_feeds,
bool aggressive_shape_inference,
bool include_tensor_values) {
    return InferStatically(
        assume_valid_feeds, aggressive_shape_inference,
        /*include_input_tensor_values=*/include_tensor_values,
        /*include_output_tensor_values=*/include_tensor_values);
}
Status InferStatically(bool assume_valid_feeds) {
    return InferStatically(assume_valid_feeds,
                           /*aggressive_shape_inference=*/false,
                           /*include_tensor_values=*/true);
}
Status InferDynamically(Cluster* cluster);
Status InferFromCostGraph(const CostGraphDef& cost_graph);
Status AnnotateOutputShapes(GraphDef* output_graph_def) const;
bool HasInputProperties(const string& node_name) const;
bool HasOutputProperties(const string& node_name) const;
const std::vector<OpInfo::TensorProperties>& GetInputProperties(
const string& node_name) const;
const std::vector<OpInfo::TensorProperties>& GetOutputProperties(
const string& node_name) const;
void ClearInputProperties(const string& node_name);
void ClearOutputProperties(const string& node_name);
bool has_properties() const {
return !input_properties_.empty() || !output_properties_.empty();
}
bool CheckShapeIncompatible(const string& node_name) const {
return incompatible_shape_nodes_.find(node_name) !=
incompatible_shape_nodes_.end();
}
void Clear() {
input_properties_.clear();
output_properties_.clear();
}
private:
static Status RelaxEnqueueShapesAndMergeTypes(
SymbolicShapeRefiner* shape_refiner, const NodeDef* qnode,
const std::vector<shape_inference::ShapeAndType>& shapes_and_types,
std::vector<shape_inference::ShapeAndType>* queue_shapes_and_types);
static Status UpdateEnqueue(
const NodeDef* enqueue_node,
const absl::flat_hash_map<const NodeDef*, const NodeDef*>&
resource_handles,
SymbolicShapeRefiner* shape_refiner, bool* new_shapes);
static Status UpdateQueue(const NodeDef* queue_node,
SymbolicShapeRefiner* shape_refiner,
bool* new_shapes);
Status UpdateMerge(SymbolicShapeRefiner* shape_refiner, const NodeDef* node,
bool* new_shapes) const;
static Status UpdateEnter(SymbolicShapeRefiner* shape_refiner,
const NodeDef* node, bool* new_shapes);
Status UpdateShapes(SymbolicShapeRefiner* shape_refiner,
const absl::flat_hash_map<const NodeDef*, const NodeDef*>&
resource_handles,
const NodeDef* n, bool* new_shapes) const;
Status PropagateShapes(
SymbolicShapeRefiner* shape_refiner, TopoQueue* new_shapes,
const absl::flat_hash_map<const NodeDef*, const NodeDef*>&
resource_handles,
int num_loops) const;
const GrapplerItem& item_;
absl::flat_hash_map<string, std::vector<OpInfo::TensorProperties>>
input_properties_;
absl::flat_hash_map<string, std::vector<OpInfo::TensorProperties>>
output_properties_;
const std::vector<OpInfo::TensorProperties> missing_properties_;
std::unordered_set<string> incompatible_shape_nodes_;
};
bool IsShapeFullyDefinedIntegerVectorOrScalar(
shape_inference::InferenceContext* ic,
const shape_inference::ShapeHandle& shape,
const shape_inference::ShapeHandle& tensor_as_shape, const DataType& dtype);
}
}
#endif
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "absl/hash/hash.h"
#include "absl/types/optional.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/grappler/costs/utils.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/evaluation_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/functions.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
namespace grappler {
namespace {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeAndType;
using shape_inference::ShapeHandle;
using TensorVector = gtl::InlinedVector<TensorValue, 4>;
const int64_t kUnknownDimFromConst = INT64_MAX;
const int kThresholdToSkipConstTensorInstantiation = 128;
template <typename Handle>
struct HashHandle {
std::size_t operator()(const Handle& h) const {
return absl::HashOf(h.Handle());
}
};
template <typename Handle>
struct CompareHandle {
bool operator()(const Handle& h1, const Handle& h2) const {
return h1.SameHandle(h2);
}
};
template <typename Handle>
struct HandleToObject {};
template <>
struct HandleToObject<ShapeHandle> {
typedef ShapeHandle Object;
static ShapeHandle Unknown() { return ShapeHandle(); }
};
template <>
struct HandleToObject<DimensionHandle> {
typedef int64_t Object;
static int64_t Unknown() { return -1; }
};
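// Processor defines how handles are converted to values and merged when two
// equivalence classes of the DisjointSet below are unified. Unknown
// dimensions receive fresh negative ids starting at -2, keeping distinct
// unknowns distinguishable from the generic -1.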
template <typename Handle>
struct Processor {};
template <>
struct Processor<ShapeHandle> {
void ExtractValue(ShapeHandle h, ShapeHandle* result) { *result = h; }
Status Merge(ShapeHandle h1, ShapeHandle h2, ShapeHandle* result) {
if (InferenceContext::RankKnown(*result)) {
return OkStatus();
}
if (InferenceContext::RankKnown(h1)) {
*result = h1;
} else {
*result = h2;
}
return OkStatus();
}
};
template <>
struct Processor<DimensionHandle> {
void ExtractValue(DimensionHandle d, int64_t* result) {
if (!InferenceContext::ValueKnown(d)) {
*result = -counter;
counter++;
} else {
int64_t val = InferenceContext::Value(d);
if (val >= 0) {
*result = val;
} else {
*result = -counter;
counter++;
}
}
}
Status Merge(DimensionHandle d1, DimensionHandle d2, int64_t* result) {
const int64_t dim1 = InferenceContext::Value(d1);
const int64_t dim2 = InferenceContext::Value(d2);
if (dim1 >= 0 && dim2 >= 0) {
CHECK_EQ(dim1, dim2);
return RefineDim(dim1, result);
} else if (dim1 >= 0 && dim2 < 0) {
return RefineDim(dim1, result);
} else if (dim1 < 0 && dim2 >= 0) {
return RefineDim(dim2, result);
} else if (dim1 < -1) {
return RefineDim(dim1, result);
} else if (dim2 < -1) {
return RefineDim(dim2, result);
} else {
CHECK_EQ(dim1, dim2);
CHECK_EQ(-1, dim1);
return RefineDim(-1, result);
}
}
private:
Status RefineDim(int64_t dim, int64_t* result) {
if (*result >= 0) {
if (!(*result == dim || dim < 0)) {
return errors::InvalidArgument("Inconsistent dimensions detected");
}
} else if (dim >= 0) {
*result = dim;
} else if (dim < *result) {
*result = dim;
}
return OkStatus();
}
int64_t counter = 2;
};
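// Union-find over shape or dimension handles, with union by rank and path
// compression, used to merge symbolic shapes and dimensions that are known
// to be equal.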
template <typename Handle>
class DisjointSet {
public:
DisjointSet() {}
~DisjointSet() {
for (auto rep : nodes_) {
delete rep.second;
}
}
Status Merge(Handle x, Handle y);
const typename HandleToObject<Handle>::Object GetMergedValue(Handle value);
private:
struct Rep {
Rep* parent;
int rank;
typename HandleToObject<Handle>::Object value;
};
Rep* Find(Handle value);
private:
Processor<Handle> processor_;
absl::flat_hash_map<Handle, Rep*, HashHandle<Handle>, CompareHandle<Handle>>
nodes_;
};
template <typename Handle>
const typename HandleToObject<Handle>::Object
DisjointSet<Handle>::GetMergedValue(Handle value) {
Rep* rep = Find(value);
if (!rep) {
return HandleToObject<Handle>::Unknown();
}
return rep->value;
}
template <typename Handle>
Status DisjointSet<Handle>::Merge(Handle x, Handle y) {
Rep* x_root = Find(x);
Rep* y_root = Find(y);
if (x_root == y_root) {
return absl::OkStatus();
}
if (x_root->rank < y_root->rank) {
TF_RETURN_IF_ERROR(processor_.Merge(y, x, &y_root->value));
x_root->parent = y_root;
} else if (x_root->rank > y_root->rank) {
TF_RETURN_IF_ERROR(processor_.Merge(x, y, &x_root->value));
y_root->parent = x_root;
} else {
TF_RETURN_IF_ERROR(processor_.Merge(x, y, &x_root->value));
y_root->parent = x_root;
x_root->rank = x_root->rank + 1;
}
return absl::OkStatus();
}
template <typename Handle>
typename DisjointSet<Handle>::Rep* DisjointSet<Handle>::Find(Handle value) {
auto it = nodes_.find(value);
if (it == nodes_.end()) {
Rep* node = new Rep;
node->parent = node;
node->rank = 0;
processor_.ExtractValue(value, &node->value);
nodes_[value] = node;
return node;
}
Rep* node = it->second;
Rep* root = node->parent;
while (root != root->parent) {
root = root->parent;
}
while (node->parent != root) {
Rep* next = node->parent;
node->parent = root;
node = next;
}
return root;
}
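// Queue ops are matched by substring so every Enqueue/Dequeue variant is
// caught, while the *Many forms (which add a batch dimension) are
// deliberately excluded.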
bool IsEnqueue(const NodeDef& n) {
return (n.op().find("Enqueue") != string::npos &&
n.op().find("EnqueueMany") == string::npos);
}
bool IsDequeue(const NodeDef& n) {
return (n.op().find("Dequeue") != string::npos &&
n.op().find("DequeueMany") == string::npos);
}
bool HasAnyUnknownDimensions(const TensorShapeProto& proto) {
if (proto.unknown_rank()) {
return true;
}
for (const auto& dim : proto.dim()) {
if (dim.size() < 0) {
return true;
}
}
return false;
}
void VerboseLogUnknownDimensionSources(
const GraphDef& graph,
const absl::flat_hash_map<string, std::vector<OpInfo::TensorProperties>>&
input_properties_map,
const absl::flat_hash_map<string, std::vector<OpInfo::TensorProperties>>&
output_properties_map) {
if (!VLOG_IS_ON(2)) {
return;
}
VLOG(2) << "Nodes with known inputs, but with unknown output dimensions:";
std::map<string, int> op_to_count;
for (const NodeDef& node : graph.node()) {
const auto& input_properties = input_properties_map.at(node.name());
const auto& output_properties = output_properties_map.at(node.name());
bool has_unknown_inputs = false;
for (const auto& input_prop : input_properties) {
if (HasAnyUnknownDimensions(input_prop.shape())) {
has_unknown_inputs = true;
break;
}
}
if (has_unknown_inputs) {
continue;
}
for (const auto& output_prop : output_properties) {
if (HasAnyUnknownDimensions(output_prop.shape())) {
string inputs = "input_shapes=[";
for (const auto& input_prop : input_properties) {
inputs += PartialTensorShape::DebugString(input_prop.shape());
}
inputs += "]";
string outputs = "output_shapes=[";
for (const auto& output_prop : output_properties) {
outputs += PartialTensorShape::DebugString(output_prop.shape());
}
outputs += "]";
VLOG(2) << "Node: " << node.name() << ", Op: " << node.op() << ", "
<< inputs << ", " << outputs;
op_to_count[node.op()]++;
break;
}
}
}
VLOG(2) << "Op types with known inputs, but with unknown output dimensions "
<< "(format: <op_type> (<count>)):";
for (const auto& p : op_to_count) {
VLOG(2) << p.first << " (" << p.second << ")";
}
}
std::vector<ShapeHandle> ReplaceUnknownDimFromConstWithUnknownDim(
InferenceContext* ic, const std::vector<ShapeHandle>& shapes) {
std::vector<ShapeHandle> converted_shapes(shapes.size());
for (int i = 0, shapes_size = shapes.size(); i < shapes_size; i++) {
const auto& shape = shapes[i];
if (!ic->RankKnown(shape)) {
converted_shapes[i] = shape;
continue;
}
bool just_copy = true;
std::vector<DimensionHandle> dims;
for (int32_t i = 0; i < ic->Rank(shape); ++i) {
DimensionHandle dim = ic->Dim(shape, i);
if (ic->ValueKnown(dim) && ic->Value(dim) == kUnknownDimFromConst) {
just_copy = false;
dims.push_back(ic->UnknownDim());
} else {
dims.push_back(dim);
}
}
if (just_copy) {
converted_shapes[i] = shape;
continue;
}
converted_shapes[i] = ic->MakeShape(dims);
}
return converted_shapes;
}
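// Packs the dimension values of tensor_as_shape into a TensorProto of the
// given integer dtype; a rank-1 `shape` produces a vector proto, anything
// else a scalar-shaped one.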
TensorProto MakeTensorProtoFromShape(InferenceContext* ic,
const ShapeHandle& shape,
const ShapeHandle& tensor_as_shape,
const DataType& dtype) {
TensorProto tensor_proto;
tensor_proto.set_dtype(dtype);
auto* shape_proto = tensor_proto.mutable_tensor_shape();
if (ic->Rank(shape) == 1) {
shape_proto->add_dim()->set_size(ic->Rank(tensor_as_shape));
}
for (int i = 0; i < ic->Rank(tensor_as_shape); i++) {
int64_t value = ic->Value(ic->Dim(tensor_as_shape, i));
if (dtype == DT_INT32) {
tensor_proto.add_int_val(value);
} else {
tensor_proto.add_int64_val(value);
}
}
return tensor_proto;
}
NodeDef MakeConstNodeDefFromTensorProto(InferenceContext* ic,
const TensorProto& tensor_proto,
const DataType& dtype) {
NodeDef const_node;
const_node.set_name("const_from_shape");
const_node.set_op("Const");
auto* attr = const_node.mutable_attr();
(*attr)["dtype"].set_type(dtype);
auto* tensor = (*attr)["value"].mutable_tensor();
*tensor = tensor_proto;
return const_node;
}
NodeDef MakeConstNodeDefFromShape(InferenceContext* ic,
const ShapeHandle& shape,
const ShapeHandle& tensor_as_shape,
const DataType& dtype) {
return MakeConstNodeDefFromTensorProto(
ic, MakeTensorProtoFromShape(ic, shape, tensor_as_shape, dtype), dtype);
}
bool IsNumericType(const DataType dtype) {
static const gtl::FlatSet<DataType>* const kRealNumberTypes =
CHECK_NOTNULL((new gtl::FlatSet<DataType>{
DT_BFLOAT16,
DT_HALF,
DT_FLOAT,
DT_DOUBLE,
DT_INT8,
DT_INT16,
DT_INT32,
DT_INT64,
DT_UINT8,
DT_UINT16,
DT_UINT32,
DT_UINT64,
DT_QINT8,
DT_QUINT8,
DT_QINT16,
DT_QUINT16,
DT_QINT32,
DT_BOOL,
}));
return kRealNumberTypes->find(dtype) != kRealNumberTypes->end();
}
uint64 NumElementsFromTensorProto(const TensorProto& tensor_proto) {
if (!tensor_proto.has_tensor_shape()) {
return -1;
}
const auto& tensor_shape_proto = tensor_proto.tensor_shape();
if (tensor_shape_proto.unknown_rank()) {
return -1;
}
int64_t num_elements = 1;
for (const auto& dim : tensor_shape_proto.dim()) {
num_elements *= dim.size();
}
return num_elements;
}
}
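// True only for fully defined int32/int64 scalars or vectors whose
// dimension values are real, i.e. free of the kUnknownDimFromConst
// placeholder.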
bool IsShapeFullyDefinedIntegerVectorOrScalar(
InferenceContext* ic, const ShapeHandle& shape,
const ShapeHandle& tensor_as_shape, const DataType& dtype) {
if (!ic->FullyDefined(shape) || ic->Rank(shape) > 1 ||
!ic->FullyDefined(tensor_as_shape) ||
(dtype != DT_INT32 && dtype != DT_INT64)) {
return false;
}
for (int32_t i = 0; i < ic->Rank(tensor_as_shape); ++i) {
DimensionHandle dim = ic->Dim(tensor_as_shape, i);
if (ic->Value(dim) == kUnknownDimFromConst) {
LOG(WARNING) << "IsShapeFullyDefinedIntegerVectorOrScalar(): "
<< "tensor_as_shape input includes kUnknownDimFromConst -- "
<< ic->DebugString(tensor_as_shape);
return false;
}
}
return true;
}
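// Priority queue keyed by precomputed topological index, so nodes are
// always popped before their downstream consumers even when re-enqueued
// during shape propagation.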
class TopoQueue {
public:
explicit TopoQueue(const std::vector<const NodeDef*>& topo_order)
: topo_order_(TopoOrder(topo_order)) {}
void push(const NodeDef* n) { queue_.emplace(n, topo_order_.at(n)); }
const NodeDef* pop() {
CHECK(!empty());
auto it = queue_.begin();
const NodeDef* n = it->first;
queue_.erase(it);
return n;
}
bool empty() const { return queue_.empty(); }
std::size_t size() const { return queue_.size(); }
private:
using NodeAndId = std::pair<const NodeDef*, int>;
struct OrderByIdAscending {
bool operator()(const NodeAndId& lhs, const NodeAndId& rhs) const {
return lhs.second < rhs.second;
}
};
const absl::flat_hash_map<const NodeDef*, int> TopoOrder(
const std::vector<const NodeDef*>& topo_order) const {
absl::flat_hash_map<const NodeDef*, int> map;
map.reserve(topo_order.size());
for (int i = 0, topo_order_size = topo_order.size(); i < topo_order_size;
++i) {
map.emplace(topo_order[i], i);
}
return map;
}
const absl::flat_hash_map<const NodeDef*, int> topo_order_;
std::set<NodeAndId, OrderByIdAscending> queue_;
};
bool IsAllowListedOpTypeForEvaluateNode(const string& op_type) {
static const gtl::FlatSet<string>* const kOpTypeAllowlist =
CHECK_NOTNULL((new gtl::FlatSet<string>{
"Floor",
"Round",
"Sqrt",
"Square",
"Sign",
"Add",
"AddV2",
"Div",
"FloorDiv",
"FloorMod",
"Greater",
"GreaterEqual",
"Less",
"LessEqual",
"LogicalAnd",
"LogicalNot",
"LogicalOr",
"Maximum",
"Minimum",
"Mod",
"Mul",
"NotEqual",
"QuantizedAdd",
"QuantizedMul",
"SquareDifference",
"Sub",
"TruncateDiv",
"TruncateMod",
"RealDiv",
"AddN",
"StridedSlice",
"OnesLike",
"ZerosLike",
"Concat",
"ConcatV2",
"Split",
"Range",
"Fill",
"Cast",
"Prod",
"Unpack",
"GatherV2",
"Pack",
"ExpandDims",
}));
return kOpTypeAllowlist->find(op_type) != kOpTypeAllowlist->end();
}
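// Symbolic dimensions are encoded internally as values < -1; rewrite them
// to the conventional -1 before shapes are exposed to callers.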
static void NormalizeShapeForOutput(TensorShapeProto* shape) {
for (int i = 0; i < shape->dim_size(); i++) {
if (shape->dim(i).size() < -1) {
VLOG(2) << "Normalizing dimension: " << i << " from "
<< shape->dim(i).size() << " to -1";
shape->mutable_dim(i)->set_size(-1);
}
}
}
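// Per-graph shape refiner: runs each op's shape function while propagating
// symbolic dimensions, known constant tensors, and tensors interpreted as
// shapes, using the graph's own function library for function-call nodes.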
class SymbolicShapeRefiner {
public:
explicit SymbolicShapeRefiner(
const GraphView& graph,
const absl::flat_hash_map<string, absl::flat_hash_set<int>>& fed_ports,
const bool aggressive_shape_inference)
: graph_(graph),
function_library_(OpRegistry::Global(), graph.graph()->library()),
fed_ports_(fed_ports),
aggressive_shape_inference_(aggressive_shape_inference) {
graph_def_version_ = graph.graph()->versions().producer();
node_to_context_.reserve(graph.graph()->node_size());
}
const GraphView& graph() const { return graph_; }
struct NodeContext {
const OpRegistrationData* op_data;
DataTypeVector input_types;
DataTypeVector output_types;
std::unique_ptr<InferenceContext> inference_context;
std::vector<const TensorProto*> input_tensor_protos;
std::vector<const TensorProto*> output_tensor_protos;
std::vector<ShapeHandle> input_tensors_as_shapes_to_propagate;
std::vector<ShapeHandle> output_tensors_as_shapes;
bool shape_incompatible = false;
std::string StringifyShapeHandle(ShapeHandle s) {
auto* ic = inference_context.get();
if (ic->RankKnown(s)) {
std::vector<std::string> vals;
for (int i = 0; i < ic->Rank(s); i++) {
DimensionHandle d = ic->Dim(s, i);
if (ic->ValueKnown(d) && ic->Value(d) == kUnknownDimFromConst) {
vals.push_back("?(Const)");
} else {
vals.push_back(ic->DebugString(d)); | #include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/grappler/clusters/single_machine.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/inputs/utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#ifdef INTEL_MKL
#include "tensorflow/core/graph/mkl_graph_util.h"
#endif
namespace tensorflow {
namespace grappler {
namespace {
using shape_inference::InferenceContext;
using shape_inference::ShapeAndType;
using shape_inference::ShapeHandle;
const char kTestDataPath[] = "core/grappler/costs/graph_properties_testdata";
REGISTER_OP("TestOpWithNoInferenceFn")
.Input("x: float")
.Output("y: float")
.Doc(R"doc(
Test op with no Inference Function registered.
x: input
y: output
)doc");
class GraphPropertiesTest : public ::testing::Test {
public:
void SetUp() override {
cluster_.reset(new SingleMachine(5 * 60, 3, 0));
TF_ASSERT_OK(cluster_->Provision());
auto f = FunctionDefHelper::Create(
"MyFillFunc",
{"shape: int32", "value: float"},
{"out: float"},
{},
{
{{"a"},
"Fill",
{"shape", "value"},
{{"T", DataType::DT_FLOAT}, {"index_type", DataType::DT_INT32}}},
},
{{"out", "a:output:0"}});
function_lib_.add_function()->Swap(&f);
}
void TearDown() override {
TF_ASSERT_OK(cluster_->Shutdown());
cluster_.reset();
}
protected:
string PropToString(const OpInfo::TensorProperties& p) {
string s = strings::StrCat(DataTypeString(p.dtype()), ": ");
if (p.shape().unknown_rank()) {
strings::StrAppend(&s, "?");
} else {
strings::StrAppend(&s, "[");
for (int i = 0; i < p.shape().dim_size(); ++i) {
strings::StrAppend(&s, i == 0 ? "" : ",",
std::max<int64_t>(p.shape().dim(i).size(), -1));
}
strings::StrAppend(&s, "]");
}
return s;
}
void ExpectTensorValues(const std::vector<int64_t>& expected,
const TensorProto& tensor_proto_to_compare) {
Tensor tensor;
ASSERT_TRUE(tensor.FromProto(tensor_proto_to_compare));
EXPECT_EQ(expected.size(), tensor.NumElements());
ASSERT_TRUE(tensor.dtype() == DT_INT32 || tensor.dtype() == DT_INT64);
if (tensor.dtype() == DT_INT32) {
for (int i = 0; i < tensor.NumElements(); i++) {
EXPECT_EQ(expected[i], tensor.flat<int32>()(i));
}
} else {
for (int i = 0; i < tensor.NumElements(); i++) {
EXPECT_EQ(expected[i], tensor.flat<int64_t>()(i));
}
}
}
void ExpectFloatTensorValues(const std::vector<float>& expected,
const TensorProto& tensor_proto_to_compare) {
Tensor tensor;
ASSERT_TRUE(tensor.FromProto(tensor_proto_to_compare));
EXPECT_EQ(expected.size(), tensor.NumElements());
ASSERT_EQ(tensor.dtype(), DT_FLOAT);
for (int i = 0; i < tensor.NumElements(); i++) {
EXPECT_EQ(expected[i], tensor.flat<float>()(i));
}
}
std::unique_ptr<SingleMachine> cluster_;
FunctionDefLibrary function_lib_;
};
TEST_F(GraphPropertiesTest, StaticProperties) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster_->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
GraphProperties properties(item);
Status s = properties.InferStatically(true);
TF_ASSERT_OK(s);
for (const auto& node : item.graph.node()) {
if (node.op() == "RandomStandardNormal") {
EXPECT_EQ(1, properties.GetInputProperties(node.name()).size());
const auto props = properties.GetOutputProperties(node.name());
EXPECT_EQ(1, props.size());
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_FALSE(prop.shape().unknown_rank());
EXPECT_EQ(2, prop.shape().dim_size());
EXPECT_EQ(10, prop.shape().dim(0).size());
EXPECT_EQ(1, prop.shape().dim(1).size());
} else if (node.op() == "AddN") {
const auto in_props = properties.GetInputProperties(node.name());
EXPECT_EQ(1, in_props.size());
const OpInfo::TensorProperties& in_prop = in_props[0];
EXPECT_EQ(DT_FLOAT, in_prop.dtype());
EXPECT_FALSE(in_prop.shape().unknown_rank());
EXPECT_EQ(2, in_prop.shape().dim_size());
EXPECT_EQ(10, in_prop.shape().dim(0).size());
EXPECT_EQ(1, in_prop.shape().dim(1).size());
const auto out_props = properties.GetOutputProperties(node.name());
EXPECT_EQ(1, out_props.size());
EXPECT_EQ(in_prop.dtype(), out_props[0].dtype());
EXPECT_EQ(in_prop.shape().DebugString(),
out_props[0].shape().DebugString());
}
}
}
TEST_F(GraphPropertiesTest, ClearProperties) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster_->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
GraphProperties properties(item);
Status s = properties.InferStatically(true);
TF_ASSERT_OK(s);
for (const auto& node : item.graph.node()) {
if (node.op() == "RandomStandardNormal") {
EXPECT_EQ(1, properties.GetInputProperties(node.name()).size());
const auto props = properties.GetOutputProperties(node.name());
properties.ClearOutputProperties(node.name());
const auto cleared_props = properties.GetOutputProperties(node.name());
EXPECT_TRUE(cleared_props.empty());
} else if (node.op() == "AddN") {
const auto in_props = properties.GetInputProperties(node.name());
EXPECT_EQ(1, in_props.size());
properties.ClearInputProperties(node.name());
const auto cleared_props = properties.GetInputProperties(node.name());
EXPECT_TRUE(cleared_props.empty());
}
}
}
TEST_F(GraphPropertiesTest, Clear) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster_->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
GraphProperties properties(item);
Status s = properties.InferStatically(true);
TF_ASSERT_OK(s);
EXPECT_TRUE(properties.has_properties());
properties.Clear();
EXPECT_FALSE(properties.has_properties());
}
TEST_F(GraphPropertiesTest, DynamicProperties) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster_->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
GraphProperties properties(item);
TF_ASSERT_OK(cluster_->Initialize(item));
Status s = properties.InferDynamically(cluster_.get());
TF_ASSERT_OK(s);
for (const auto& node : item.graph.node()) {
if (node.op() == "RandomStandardNormal") {
EXPECT_EQ(0, properties.GetInputProperties(node.name()).size());
} else if (node.op() == "AddN") {
if (node.name() == "AddN") {
const auto props = properties.GetInputProperties(node.name());
EXPECT_EQ(1, props.size());
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_INVALID, prop.dtype());
EXPECT_TRUE(prop.shape().unknown_rank());
} else {
const auto props = properties.GetInputProperties(node.name());
EXPECT_EQ(1, props.size());
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_FALSE(prop.shape().unknown_rank());
EXPECT_EQ(2, prop.shape().dim_size());
EXPECT_EQ(10, prop.shape().dim(0).size());
EXPECT_EQ(1, prop.shape().dim(1).size());
const auto out_props = properties.GetOutputProperties(node.name());
EXPECT_EQ(1, out_props.size());
string prop_str;
::tensorflow::protobuf::TextFormat::PrintToString(prop, &prop_str);
string out_prop_str;
::tensorflow::protobuf::TextFormat::PrintToString(out_props[0],
&out_prop_str);
EXPECT_EQ(prop_str, out_prop_str);
}
}
}
}
REGISTER_OP("DetectInputValueInShapeInferenceOp")
.Input("a: T")
.Output("o: T")
.Attr("T: {numbertype, bool}")
.SetShapeFn([](shape_inference::InferenceContext* c) {
if (c->input_tensor(0)) {
c->set_output(0, c->Matrix(10, 10));
return absl::OkStatus();
}
return shape_inference::UnknownShape(c);
});
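// Exercises the const-tensor instantiation threshold: the detect op above
// only reports a [10,10] shape when shape inference actually materialized
// its input tensor, so `expected` encodes whether the Const stayed within
// the 128-element limit.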
class ConstTensorSkipTestCase {
public:
ConstTensorSkipTestCase(const DataType data_type,
const std::vector<int64_t> shape, const double value,
const bool expected)
: data_type_(data_type),
shape_(shape),
value_(value),
expected_(expected) {}
void RunTestAndValidate() const {
LOG(INFO) << "Run Const tensor skip test: "
<< "data_type: " << data_type_ << ", shape: {"
<< absl::StrJoin(shape_, ",") << "}, value: " << value_
<< ", expected: " << expected_;
GrapplerItem item;
const absl::Span<const int64_t> shape_array_slice(shape_);
Tensor const_tensor_value(data_type_, TensorShape(shape_array_slice));
switch (data_type_) {
case DT_INT32:
test::FillIota<int32>(&const_tensor_value, static_cast<int32>(value_));
break;
case DT_INT64:
test::FillIota<int64_t>(&const_tensor_value,
static_cast<int64_t>(value_));
break;
case DT_FLOAT:
test::FillIota<float>(&const_tensor_value, static_cast<float>(value_));
break;
case DT_DOUBLE:
test::FillIota<double>(&const_tensor_value,
static_cast<double>(value_));
break;
case DT_BFLOAT16:
test::FillIota<Eigen::bfloat16>(&const_tensor_value,
static_cast<Eigen::bfloat16>(value_));
break;
default:
CHECK(false) << "Unsupported data type (" << data_type_
<< ") in this test.";
break;
}
TF_ASSERT_OK(NodeDefBuilder("const", "Const")
.Attr("dtype", data_type_)
.Attr("value", const_tensor_value)
.Finalize(item.graph.add_node()));
TF_ASSERT_OK(NodeDefBuilder("const_identity", "Identity")
.Attr("dtype", data_type_)
.Input("const", 0, data_type_)
.Finalize(item.graph.add_node()));
TF_ASSERT_OK(NodeDefBuilder("detect", "DetectInputValueInShapeInferenceOp")
.Attr("T", data_type_)
.Input("const_identity", 0, data_type_)
.Finalize(item.graph.add_node()));
item.fetch.push_back("const");
item.fetch.push_back("const_identity");
item.fetch.push_back("detect");
GraphProperties graph_properties(item);
TF_ASSERT_OK(graph_properties.InferStatically(false));
const auto& const_output = graph_properties.GetOutputProperties("const");
EXPECT_EQ(1, const_output.size());
const OpInfo::TensorProperties& const_output0 = const_output[0];
const auto& const_identity_input =
graph_properties.GetInputProperties("const_identity");
EXPECT_EQ(1, const_identity_input.size());
const OpInfo::TensorProperties& const_identity_input0 =
const_identity_input[0];
const auto& const_identity_output =
graph_properties.GetOutputProperties("const_identity");
EXPECT_EQ(1, const_identity_output.size());
const OpInfo::TensorProperties& const_identity_output0 =
const_identity_output[0];
EXPECT_TRUE(const_output0.has_value());
EXPECT_TRUE(const_identity_input0.has_value());
EXPECT_TRUE(const_identity_output0.has_value());
const auto& detect_input = graph_properties.GetInputProperties("detect");
EXPECT_EQ(1, detect_input.size());
const OpInfo::TensorProperties& detect_input0 = detect_input[0];
const auto& detect_output = graph_properties.GetOutputProperties("detect");
EXPECT_EQ(1, detect_output.size());
const OpInfo::TensorProperties& detect_output0 = detect_output[0];
EXPECT_TRUE(const_output0.has_value());
EXPECT_TRUE(const_identity_input0.has_value());
EXPECT_TRUE(const_identity_output0.has_value());
EXPECT_TRUE(detect_input0.has_value());
if (expected_) {
EXPECT_EQ(detect_output0.shape().dim_size(), 2);
EXPECT_EQ(detect_output0.shape().dim(0).size(), 10);
EXPECT_EQ(detect_output0.shape().dim(1).size(), 10);
} else {
EXPECT_TRUE(detect_output0.shape().unknown_rank());
}
}
private:
DataType data_type_;
std::vector<int64_t> shape_;
double value_;
bool expected_;
};
TEST_F(GraphPropertiesTest, SkipInstantiatingConstTensor) {
std::vector<ConstTensorSkipTestCase> test_cases = {
{DT_INT32, {16, 8}, 1, true},
{DT_INT32, {1, 129}, 2, false},
{DT_INT64, {8, 8}, 3, true},
{DT_INT64, {128, 2}, 0, false},
{DT_FLOAT, {16, 8}, 1.0, true},
{DT_FLOAT, {16, 8}, 1.3, true},
{DT_FLOAT, {1, 129}, 0.7, false},
{DT_DOUBLE, {16, 8}, 1.0, true},
{DT_DOUBLE, {16, 8}, 1.3, true},
{DT_DOUBLE, {1, 129}, 0.7, false},
{DT_BFLOAT16, {16, 8}, 1.0, true},
{DT_BFLOAT16, {16, 8}, 1.3, true},
{DT_BFLOAT16, {1, 129}, 0.7, false},
};
for (const auto& test_case : test_cases) {
test_case.RunTestAndValidate();
}
}
TEST_F(GraphPropertiesTest, Variables) {
GrapplerItem item;
TF_ASSERT_OK(NodeDefBuilder("Var", "Variable")
.Attr("dtype", DT_FLOAT)
.Attr("shape", TensorShape({3, 7}))
.Finalize(item.graph.add_node()));
item.fetch.push_back("Var");
Tensor initial_val(DT_FLOAT, TensorShape({3, 7}));
test::FillIota<float>(&initial_val, 0);
TF_ASSERT_OK(NodeDefBuilder("InitialVal", "Const")
.Attr("dtype", DT_FLOAT)
.Attr("value", initial_val)
.Finalize(item.graph.add_node()));
TF_ASSERT_OK(NodeDefBuilder("InitVar", "Assign")
.Input("Var", 0, DT_FLOAT_REF)
.Input("InitialVal", 0, DT_FLOAT)
.Finalize(item.graph.add_node()));
item.init_ops.push_back("InitVar");
{
GraphProperties static_properties(item);
TF_ASSERT_OK(static_properties.InferStatically(false));
const auto props = static_properties.GetOutputProperties("Var");
EXPECT_EQ(1, props.size());
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT_REF, prop.dtype());
EXPECT_FALSE(prop.shape().unknown_rank());
EXPECT_EQ(2, prop.shape().dim_size());
EXPECT_EQ(3, prop.shape().dim(0).size());
EXPECT_EQ(7, prop.shape().dim(1).size());
}
{
TF_ASSERT_OK(cluster_->Initialize(item));
GraphProperties dynamic_properties(item);
TF_ASSERT_OK(dynamic_properties.InferDynamically(cluster_.get()));
const auto props = dynamic_properties.GetOutputProperties("Var");
EXPECT_EQ(1, props.size());
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT_REF, prop.dtype());
EXPECT_FALSE(prop.shape().unknown_rank());
EXPECT_EQ(2, prop.shape().dim_size());
EXPECT_EQ(3, prop.shape().dim(0).size());
EXPECT_EQ(7, prop.shape().dim(1).size());
}
}
TEST_F(GraphPropertiesTest, ReadVariableOpAfterEnter) {
GrapplerItem item;
TF_ASSERT_OK(NodeDefBuilder("Var", "VarHandleOp")
.Attr("dtype", DT_FLOAT)
.Attr("shape", TensorShape({3, 7}))
.Finalize(item.graph.add_node()));
TF_ASSERT_OK(NodeDefBuilder("Enter", "Enter")
.Attr("T", DT_RESOURCE)
.Attr("frame_name", "while_context")
.Attr("is_constant", true)
.Attr("parallel_iterations", 10)
.Input("Var", 0, DT_RESOURCE)
.Finalize(item.graph.add_node()));
TF_ASSERT_OK(NodeDefBuilder("ReadVariableOpAfterEnter", "ReadVariableOp")
.Attr("dtype", DT_FLOAT)
.Input("Enter", 0, DT_RESOURCE)
.Finalize(item.graph.add_node()));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto props = properties.GetOutputProperties("ReadVariableOpAfterEnter");
EXPECT_EQ(1, props.size());
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_FALSE(prop.shape().unknown_rank());
EXPECT_EQ(2, prop.shape().dim_size());
EXPECT_EQ(3, prop.shape().dim(0).size());
EXPECT_EQ(7, prop.shape().dim(1).size());
}
TEST_F(GraphPropertiesTest, VarHandles) {
GrapplerItem item;
TF_ASSERT_OK(NodeDefBuilder("Var", "VarHandleOp")
.Attr("dtype", DT_FLOAT)
.Attr("shape", TensorShape({3, 7}))
.Finalize(item.graph.add_node()));
TF_ASSERT_OK(NodeDefBuilder("VarRead", "ReadVariableOp")
.Attr("dtype", DT_FLOAT)
.Input("Var", 0, DT_RESOURCE)
.Finalize(item.graph.add_node()));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto props = properties.GetOutputProperties("VarRead");
EXPECT_EQ(1, props.size());
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_FALSE(prop.shape().unknown_rank());
EXPECT_EQ(2, prop.shape().dim_size());
EXPECT_EQ(3, prop.shape().dim(0).size());
EXPECT_EQ(7, prop.shape().dim(1).size());
}
TEST_F(GraphPropertiesTest, WhileLoopWithVarHandleOpInput) {
GrapplerItem item;
string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
"while_loop_var_handle_op.pbtxt");
TF_ASSERT_OK(ReadGraphDefFromFile(filename, &item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
std::vector<string> resource_nodes{
"loop_var", "while/Enter", "while/Merge", "while/Switch",
"while/Identity", "while/NextIteration", "while/Exit"};
for (const string& node : resource_nodes) {
const auto props = properties.GetOutputProperties(node);
EXPECT_GE(props.size(), 1);
EXPECT_EQ("resource: []", PropToString(props[0]));
}
const auto props = properties.GetOutputProperties("while/ReadVariableOp");
EXPECT_EQ(1, props.size());
EXPECT_EQ("int32: []", PropToString(props[0]));
}
TEST_F(GraphPropertiesTest, QueueWithOnlyDequeue_NoShapeAttr) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
auto q1 = ops::FIFOQueue(root.WithOpName("Queue1"), {DataType::DT_FLOAT});
auto dequeue1 =
ops::QueueDequeue(root.WithOpName("Dequeue1"), q1, {DataType::DT_FLOAT});
GrapplerItem item;
TF_ASSERT_OK(root.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto props1 = properties.GetOutputProperties("Dequeue1");
ASSERT_EQ(1, props1.size());
EXPECT_EQ("float: ?", PropToString(props1[0]));
}
TEST_F(GraphPropertiesTest, QueueWithOnlyDequeue_ShapeAttr) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
auto q1 = ops::FIFOQueue(root.WithOpName("Queue1"), {DataType::DT_FLOAT},
ops::FIFOQueue::Attrs().Shapes({{3, 7, 1}}));
auto dequeue1 =
ops::QueueDequeue(root.WithOpName("Dequeue1"), q1, {DataType::DT_FLOAT});
GrapplerItem item;
TF_ASSERT_OK(root.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto props1 = properties.GetOutputProperties("Dequeue1");
ASSERT_EQ(1, props1.size());
EXPECT_EQ("float: [3,7,1]", PropToString(props1[0]));
}
TEST_F(GraphPropertiesTest, QueueWithOnlyDequeue_PartialShapeAttr) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
auto q1 = ops::FIFOQueue(root.WithOpName("Queue1"), {DataType::DT_FLOAT},
ops::FIFOQueue::Attrs().Shapes({{3, 7, -1}}));
auto dequeue1 =
ops::QueueDequeue(root.WithOpName("Dequeue1"), q1, {DataType::DT_FLOAT});
GrapplerItem item;
TF_ASSERT_OK(root.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto props1 = properties.GetOutputProperties("Dequeue1");
ASSERT_EQ(1, props1.size());
EXPECT_EQ("float: [3,7,-1]", PropToString(props1[0]));
}
TEST_F(GraphPropertiesTest, Queues) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
auto q1 = ops::FIFOQueue(root.WithOpName("Queue1"), {DataType::DT_FLOAT});
Output rnd =
ops::RandomNormal(root.WithOpName("rnd"), {3, 7}, DataType::DT_FLOAT);
Output square1 = ops::Square(root.WithOpName("Square1"), rnd);
auto enqueue1 = ops::QueueEnqueue(root.WithOpName("Enqueue1"), q1, {square1});
auto dequeue1 =
ops::QueueDequeue(root.WithOpName("Dequeue1"), q1, {DataType::DT_FLOAT});
auto q2 =
ops::RandomShuffleQueue(root.WithOpName("Queue2"), {DataType::DT_FLOAT});
Output square2 = ops::Square(root.WithOpName("Square2"), dequeue1[0]);
auto enqueue2 = ops::QueueEnqueue(root.WithOpName("Enqueue2"), q2, {square2});
auto dequeue2 =
ops::QueueDequeue(root.WithOpName("Dequeue2"), q2, {DataType::DT_FLOAT});
auto q4 =
ops::RandomShuffleQueue(root.WithOpName("Queue4"), {DataType::DT_FLOAT});
auto enqueue4 = ops::QueueEnqueue(root.WithOpName("Enqueue4"), q4, {square2});
auto enqueue4_2 =
ops::QueueEnqueue(root.WithOpName("Enqueue4_2"), q4, {dequeue2[0]});
auto dequeue4 =
ops::QueueDequeue(root.WithOpName("Dequeue4"), q4, {DataType::DT_FLOAT});
auto q5 = ops::RandomShuffleQueue(
root.WithOpName("Queue5"),
{DataType::DT_FLOAT, DataType::DT_DOUBLE, DataType::DT_FLOAT});
Output rnd2 =
ops::RandomNormal(root.WithOpName("rnd2"), {10}, DataType::DT_DOUBLE);
Output rnd3 =
ops::RandomNormal(root.WithOpName("rnd3"), {1, 2, 3}, DataType::DT_FLOAT);
auto enqueue5 =
ops::QueueEnqueue(root.WithOpName("Enqueue5"), q5, {rnd, rnd2, rnd3});
auto dequeue5 = ops::QueueDequeue(
root.WithOpName("Dequeue5"), q5,
{DataType::DT_FLOAT, DataType::DT_DOUBLE, DataType::DT_FLOAT});
GrapplerItem item;
TF_ASSERT_OK(root.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto props1 = properties.GetOutputProperties("Dequeue1");
ASSERT_EQ(1, props1.size());
EXPECT_EQ("float: [3,7]", PropToString(props1[0]));
const auto props2 = properties.GetOutputProperties("Dequeue2");
ASSERT_EQ(1, props2.size());
EXPECT_EQ("float: [3,7]", PropToString(props2[0]));
const auto props4 = properties.GetOutputProperties("Dequeue4");
ASSERT_EQ(1, props4.size());
EXPECT_EQ("float: [3,7]", PropToString(props4[0]));
const auto props5 = properties.GetOutputProperties("Dequeue5");
ASSERT_EQ(3, props5.size());
EXPECT_EQ("float: [3,7]", PropToString(props5[0]));
EXPECT_EQ("double: [10]", PropToString(props5[1]));
EXPECT_EQ("float: [1,2,3]", PropToString(props5[2]));
}
TEST_F(GraphPropertiesTest, MergeWithoutLoops) {
GrapplerItem item;
string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
"merge_without_loops.pbtxt");
TF_ASSERT_OK(ReadGraphDefFromFile(filename, &item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
std::vector<string> nodes{"cond/Merge", "cond/concat", "cond/concat_1"};
std::vector<string> expected_outputs{"float: [-1,-1,1]", "float: [2,1,1]",
"float: [1,2,1]"};
for (int i = 0; i < nodes.size(); i++) {
const auto props = properties.GetOutputProperties(nodes[i]);
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_EQ(expected_outputs[i], PropToString(prop));
}
const auto props = properties.GetInputProperties("Less");
EXPECT_EQ(2, props.size());
for (int i = 0; i < props.size(); ++i) {
EXPECT_EQ(DT_INT32, props[i].dtype());
EXPECT_TRUE(props[i].has_value());
EXPECT_EQ("int32: []", PropToString(props[i]));
}
}
TEST_F(GraphPropertiesTest, WhileLoop) {
GrapplerItem item;
string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
"while_loop.pbtxt");
TF_ASSERT_OK(ReadGraphDefFromFile(filename, &item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
std::vector<string> nodes{"while/Merge_1", "while/NextIteration_1",
"while/Exit_1"};
for (const string& node : nodes) {
const auto props = properties.GetOutputProperties(node);
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_EQ("float: [-1,2]", PropToString(prop));
}
auto shape_in = properties.GetOutputProperties("ones").at(0).shape();
auto shape_out = properties.GetOutputProperties("while/Exit_1").at(0).shape();
EXPECT_GE(-2, shape_in.dim(0).size());
EXPECT_GE(-2, shape_out.dim(0).size());
EXPECT_NE(shape_in.dim(0).size(), shape_out.dim(0).size());
}
TEST_F(GraphPropertiesTest, NestedLoop) {
GrapplerItem item;
string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
"nested_loop.pbtxt");
TF_ASSERT_OK(ReadGraphDefFromFile(filename, &item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
std::vector<string> outer_nodes{"while/Merge_1", "while/NextIteration_1",
"while/Exit_1"};
std::v |
1,364 | cpp | tensorflow/tensorflow | robust_stats | tensorflow/core/grappler/costs/robust_stats.cc | tensorflow/core/grappler/costs/robust_stats_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_COSTS_ROBUST_STATS_H_
#define TENSORFLOW_CORE_GRAPPLER_COSTS_ROBUST_STATS_H_
#include <vector>
namespace tensorflow {
namespace grappler {
class RobustStats {
public:
explicit RobustStats(const std::vector<double>& values);
explicit RobustStats(std::vector<double>&& values);
double lo() const { return lo_; }
double hi() const { return hi_; }
double mean() const { return mean_; }
private:
void HuberMAD(const std::vector<double>& values);
double lo_;
double hi_;
double mean_;
double stddev_;
};
}
}
#endif
#include "tensorflow/core/grappler/costs/robust_stats.h"
#include <algorithm>
#include <cmath>
#include <utility>
namespace tensorflow {
namespace grappler {
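// Median of an already sorted vector; even-length inputs average the two
// middle elements.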
static double SortedMedian(const std::vector<double> &values) {
const int n = values.size();
if (n == 0) return 0.0;
if (n & 1) {
return values[n / 2];
} else {
return (values[n / 2] + values[n / 2 - 1]) / 2.0;
}
}
static double Median(std::vector<double> &&values) {
const size_t n = values.size();
if (n == 0) return 0;
const auto middle = values.begin() + (n / 2);
std::nth_element(values.begin(), middle, values.end());
if (n & 1) {
return *middle;
}
const auto lower_middle = std::max_element(values.begin(), middle);
if (*lower_middle <= 0 && *middle >= 0) {
return (*lower_middle + *middle) / 2;
}
return *lower_middle + (*middle - *lower_middle) / 2;
}
static std::pair<double, double> ScaledMedianAbsoluteDeviation(
const std::vector<double> &sorted_values) {
double median = SortedMedian(sorted_values);
std::vector<double> deviations;
deviations.reserve(sorted_values.size());
for (double d : sorted_values) {
deviations.push_back(std::abs(d - median));
}
double mad = Median(std::move(deviations)) * 1.4826;
return std::pair<double, double>(median, mad);
}
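// Outlier-resistant summary statistics: lo()/hi() are the extremes and
// mean() is a Huber-style robust mean. Illustrative usage (made-up
// samples):
//   std::vector<double> samples = {9.9, 10.0, 10.1, 250.0};
//   RobustStats stats(samples);
//   // stats.mean() stays near 10 despite the 250.0 outlier;
//   // stats.lo() == 9.9 and stats.hi() == 250.0.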
RobustStats::RobustStats(const std::vector<double> &values)
: RobustStats(std::vector<double>(values)) {}
RobustStats::RobustStats(std::vector<double> &&values) {
std::sort(values.begin(), values.end());
lo_ = values[0];
hi_ = values.back();
HuberMAD(values);
}
double UpdateHuberMean(const std::vector<double> &sorted_values, double mean,
double margin) {
int num_within = 0;
double sum = 0.0;
for (double d : sorted_values) {
if (d < mean - margin) {
sum -= margin;
} else if (d > mean + margin) {
sum += margin;
} else {
sum += d;
++num_within;
}
}
if (num_within > 0) {
return sum / num_within;
} else {
return mean;
}
}
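// Huber M-estimator seeded with the median and the scaled median absolute
// deviation: samples outside mean +/- 1.5 * MAD contribute only the
// clipped margin, and the mean is re-estimated for up to 10 iterations or
// until it stops changing.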
void RobustStats::HuberMAD(const std::vector<double> &sorted_values) {
const std::pair<double, double> median_mad =
ScaledMedianAbsoluteDeviation(sorted_values);
mean_ = median_mad.first;
stddev_ = median_mad.second;
const double c = 1.5;
const double margin = c * stddev_;
if (margin > 0.0) {
for (int k = 0; k < 10; ++k) {
double old_mean = mean_;
mean_ = UpdateHuberMean(sorted_values, mean_, margin);
if (mean_ == old_mean) break;
}
}
}
}
} | #include "tensorflow/core/grappler/costs/robust_stats.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class RobustStatsTest : public ::testing::Test {
public:
void SetUp() override {
for (double d = 1.0; d <= 5.0; d += 1.0) {
values1_.push_back(5.0 - d);
values1_.push_back(5.0 + d);
values2_.push_back(25.0 - 2 * d);
values2_.push_back(25.0 + 2 * d);
values3_.push_back(-3.0 - d);
values3_.push_back(-3.0 + d);
}
values1_.push_back(5.0);
values3_.push_back(197.0);
values3_.push_back(-203.0);
}
std::vector<double> values1_;
std::vector<double> values2_;
std::vector<double> values3_;
};
TEST_F(RobustStatsTest, Simple) {
RobustStats s1(values1_);
EXPECT_EQ(5.0, s1.mean());
EXPECT_EQ(0.0, s1.lo());
EXPECT_EQ(10.0, s1.hi());
RobustStats s2(values2_);
EXPECT_EQ(25.0, s2.mean());
EXPECT_EQ(15.0, s2.lo());
EXPECT_EQ(35.0, s2.hi());
RobustStats s3(values3_);
EXPECT_EQ(-3.0, s3.mean());
EXPECT_EQ(-203.0, s3.lo());
EXPECT_EQ(197.0, s3.hi());
}
}
}
} |
1,365 | cpp | tensorflow/tensorflow | cost_estimator | tensorflow/core/grappler/costs/cost_estimator.cc | tensorflow/core/grappler/costs/cost_estimator_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_COSTS_COST_ESTIMATOR_H_
#define TENSORFLOW_CORE_GRAPPLER_COSTS_COST_ESTIMATOR_H_
#include <cmath>
#include <limits>
#include <string>
#include <unordered_map>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace tensorflow {
class GraphDef;
class CostGraphDef;
namespace grappler {
struct GrapplerItem;
constexpr uint64_t kMemoryUnknown = std::numeric_limits<uint64_t>::max();
constexpr uint64_t kZeroMemory = 0ULL;
struct DeviceInfo {
double gigaops;
double gb_per_sec;
double intermediate_read_gb_per_sec;
double intermediate_write_gb_per_sec;
DeviceInfo()
: gigaops(INFINITY),
gb_per_sec(INFINITY),
intermediate_read_gb_per_sec(INFINITY),
intermediate_write_gb_per_sec(INFINITY) {}
DeviceInfo(const DeviceInfo& input)
: gigaops(input.gigaops),
gb_per_sec(input.gb_per_sec),
intermediate_read_gb_per_sec(input.intermediate_read_gb_per_sec),
intermediate_write_gb_per_sec(input.intermediate_write_gb_per_sec) {}
DeviceInfo(double gigaops, double gb_per_sec,
double intermediate_read_gb_per_sec = INFINITY,
double intermediate_write_gb_per_sec = INFINITY)
: gigaops(gigaops),
gb_per_sec(gb_per_sec),
intermediate_read_gb_per_sec(intermediate_read_gb_per_sec),
intermediate_write_gb_per_sec(intermediate_write_gb_per_sec) {}
};
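// Aggregate cost estimate for an op or a whole graph. Durations are stored
// as nanoseconds (see the Duration typedef) with explicit conversion
// helpers, and memory fields use kMemoryUnknown as a sentinel for "not
// computed".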
struct Costs {
inline Costs();
static inline Costs ZeroCosts(bool inaccurate = false);
struct MilliSeconds : std::chrono::milliseconds {
MilliSeconds() : std::chrono::milliseconds(0) {}
MilliSeconds(double d)
: std::chrono::milliseconds(static_cast<int64_t>(d)) {}
MilliSeconds(const std::chrono::milliseconds& d)
: std::chrono::milliseconds(d) {}
MilliSeconds& operator=(const std::chrono::milliseconds& d) {
std::chrono::milliseconds::operator=(d);
return *this;
}
};
struct MicroSeconds : std::chrono::microseconds {
MicroSeconds() : std::chrono::microseconds(0) {}
MicroSeconds(double d)
: std::chrono::microseconds(static_cast<int64_t>(d)) {}
MicroSeconds(const std::chrono::microseconds& d)
: std::chrono::microseconds(d) {}
MicroSeconds& operator=(const std::chrono::microseconds& d) {
std::chrono::microseconds::operator=(d);
return *this;
}
MilliSeconds asMilliSeconds() const {
return std::chrono::duration_cast<std::chrono::milliseconds>(*this);
}
};
struct NanoSeconds : std::chrono::nanoseconds {
NanoSeconds() : std::chrono::nanoseconds(0) {}
NanoSeconds(double d) : std::chrono::nanoseconds(static_cast<int64_t>(d)) {}
NanoSeconds(const std::chrono::nanoseconds& d)
: std::chrono::nanoseconds(d) {}
NanoSeconds& operator=(const std::chrono::nanoseconds& d) {
std::chrono::nanoseconds::operator=(d);
return *this;
}
MicroSeconds asMicroSeconds() const {
return std::chrono::duration_cast<std::chrono::microseconds>(*this);
}
MilliSeconds asMilliSeconds() const {
return std::chrono::duration_cast<std::chrono::milliseconds>(*this);
}
static NanoSeconds infinity() {
return NanoSeconds(std::chrono::nanoseconds::max());
}
};
typedef NanoSeconds Duration;
Duration execution_time;
Duration compute_time;
Duration memory_time;
Duration intermediate_memory_time;
Duration intermediate_memory_read_time;
Duration intermediate_memory_write_time;
Duration network_time;
uint64_t max_memory;
uint64_t persistent_memory;
uint64_t temporary_memory;
absl::flat_hash_map<int32_t, int64_t> output_tensor_size_bytes;
absl::flat_hash_set<int32_t> persistent_output_ports;
int64_t max_per_op_buffers;
int64_t max_per_op_streaming;
int64_t num_ops_total = 1;
bool inaccurate = false;
int64_t num_ops_with_unknown_shapes = 0;
std::unordered_map<string, uint64> estimated_max_memory_per_device;
};
inline std::ostream& operator<<(std::ostream& os, const Costs::MilliSeconds d) {
os << d.count() << "ms";
return os;
}
inline std::ostream& operator<<(std::ostream& os, const Costs::MicroSeconds d) {
os << d.count() << "us";
return os;
}
inline std::ostream& operator<<(std::ostream& os, const Costs::NanoSeconds d) {
os << d.count() << "ns";
return os;
}
Costs::Costs() {
execution_time = Duration::zero();
compute_time = Duration::zero();
memory_time = Duration::zero();
intermediate_memory_time = Duration::zero();
network_time = Duration::zero();
max_memory = kMemoryUnknown;
persistent_memory = kMemoryUnknown;
temporary_memory = kMemoryUnknown;
max_per_op_buffers = kMemoryUnknown;
max_per_op_streaming = kMemoryUnknown;
}
Costs Costs::ZeroCosts(bool inaccurate) {
Costs costs;
costs.execution_time = Duration::zero();
costs.compute_time = Duration::zero();
costs.memory_time = Duration::zero();
costs.intermediate_memory_time = Duration::zero();
costs.network_time = Duration::zero();
costs.max_memory = kZeroMemory;
costs.persistent_memory = kZeroMemory;
costs.temporary_memory = kZeroMemory;
costs.max_per_op_buffers = kZeroMemory;
costs.max_per_op_streaming = kZeroMemory;
costs.inaccurate = inaccurate;
return costs;
}
Costs CombineCosts(const Costs& left, const Costs& right);
Costs MultiplyCosts(const Costs& costs, int multiplier);
class CostEstimator {
public:
virtual ~CostEstimator() {}
virtual Status Initialize(const GrapplerItem& item) = 0;
virtual Status PredictCosts(const GraphDef& optimized_graph,
RunMetadata* run_metadata, Costs* cost) const = 0;
};
}
}
#endif
#include "tensorflow/core/grappler/costs/cost_estimator.h"
namespace tensorflow {
namespace grappler {
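// CombineCosts sums the time components and op counts of two estimates,
// takes the max of the per-op buffer/streaming peaks, and accumulates
// max_memory when it is known; MultiplyCosts scales only the time
// components, leaving memory figures untouched.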
Costs CombineCosts(const Costs& left, const Costs& right) {
CHECK_NE(left.max_memory, kMemoryUnknown);
CHECK_NE(left.max_per_op_buffers, kMemoryUnknown);
CHECK_NE(left.max_per_op_streaming, kMemoryUnknown);
Costs result = left;
result.execution_time += right.execution_time;
result.compute_time += right.compute_time;
result.memory_time += right.memory_time;
result.network_time += right.network_time;
result.intermediate_memory_time += right.intermediate_memory_time;
result.intermediate_memory_read_time += right.intermediate_memory_read_time;
result.intermediate_memory_write_time += right.intermediate_memory_write_time;
if (right.max_per_op_buffers != kMemoryUnknown) {
result.max_per_op_buffers =
std::max(left.max_per_op_buffers, right.max_per_op_buffers);
}
if (right.max_per_op_streaming != kMemoryUnknown) {
result.max_per_op_streaming =
std::max(left.max_per_op_streaming, right.max_per_op_streaming);
}
result.num_ops_total += right.num_ops_total;
if (right.inaccurate) {
result.inaccurate = true;
}
result.num_ops_with_unknown_shapes += right.num_ops_with_unknown_shapes;
if (right.max_memory != kMemoryUnknown) {
result.max_memory += right.max_memory;
}
return result;
}
Costs MultiplyCosts(const Costs& costs, int multiplier) {
CHECK_GE(multiplier, 0);
if (multiplier == 0) {
return Costs::ZeroCosts();
}
if (multiplier == 1) {
return costs;
}
Costs result = costs;
result.execution_time *= multiplier;
result.compute_time *= multiplier;
result.memory_time *= multiplier;
result.network_time *= multiplier;
result.intermediate_memory_time *= multiplier;
result.intermediate_memory_read_time *= multiplier;
result.intermediate_memory_write_time *= multiplier;
return result;
}
}
} | #include "tensorflow/core/grappler/costs/cost_estimator.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
TEST(CostEstimatorTest, CombineCosts) {
Costs c = Costs::ZeroCosts();
c.execution_time = Costs::NanoSeconds(1);
c.compute_time = Costs::NanoSeconds(2);
c.memory_time = Costs::NanoSeconds(3);
c.intermediate_memory_time = Costs::NanoSeconds(4);
c.intermediate_memory_read_time = Costs::NanoSeconds(5);
c.intermediate_memory_write_time = Costs::NanoSeconds(6);
c.max_memory = 1;
c.max_per_op_buffers = 2;
c.max_per_op_streaming = 3;
c.num_ops_total = 1;
c.inaccurate = false;
c.num_ops_with_unknown_shapes = 0;
Costs sum = CombineCosts(c, c);
EXPECT_EQ(sum.execution_time, Costs::NanoSeconds(2));
EXPECT_EQ(sum.compute_time, Costs::NanoSeconds(4));
EXPECT_EQ(sum.memory_time, Costs::NanoSeconds(6));
EXPECT_EQ(sum.intermediate_memory_time, Costs::NanoSeconds(8));
EXPECT_EQ(sum.intermediate_memory_read_time, Costs::NanoSeconds(10));
EXPECT_EQ(sum.intermediate_memory_write_time, Costs::NanoSeconds(12));
EXPECT_EQ(sum.max_memory, 2);
EXPECT_EQ(sum.max_per_op_buffers, 2);
EXPECT_EQ(sum.max_per_op_streaming, 3);
EXPECT_EQ(sum.num_ops_total, 2);
EXPECT_FALSE(sum.inaccurate);
EXPECT_EQ(sum.num_ops_with_unknown_shapes, 0);
}
TEST(CostEstimatorTest, MultiplyCosts) {
Costs c = Costs::ZeroCosts();
c.execution_time = Costs::NanoSeconds(1);
c.compute_time = Costs::NanoSeconds(2);
c.memory_time = Costs::NanoSeconds(3);
c.intermediate_memory_time = Costs::NanoSeconds(4);
c.intermediate_memory_read_time = Costs::NanoSeconds(5);
c.intermediate_memory_write_time = Costs::NanoSeconds(6);
c.max_memory = 1;
c.max_per_op_buffers = 2;
c.max_per_op_streaming = 3;
c.num_ops_total = 1;
c.inaccurate = false;
c.num_ops_with_unknown_shapes = 0;
Costs product = MultiplyCosts(c, 10);
EXPECT_EQ(product.execution_time, Costs::NanoSeconds(10));
EXPECT_EQ(product.compute_time, Costs::NanoSeconds(20));
EXPECT_EQ(product.memory_time, Costs::NanoSeconds(30));
EXPECT_EQ(product.intermediate_memory_time, Costs::NanoSeconds(40));
EXPECT_EQ(product.intermediate_memory_read_time, Costs::NanoSeconds(50));
EXPECT_EQ(product.intermediate_memory_write_time, Costs::NanoSeconds(60));
EXPECT_EQ(product.max_memory, 1);
EXPECT_EQ(product.max_per_op_buffers, 2);
EXPECT_EQ(product.max_per_op_streaming, 3);
EXPECT_EQ(product.num_ops_total, 1);
EXPECT_FALSE(product.inaccurate);
EXPECT_EQ(product.num_ops_with_unknown_shapes, 0);
}
}
}
} |
1,366 | cpp | tensorflow/tensorflow | analytical_cost_estimator | tensorflow/core/grappler/costs/analytical_cost_estimator.cc | tensorflow/core/grappler/costs/analytical_cost_estimator_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_COSTS_ANALYTICAL_COST_ESTIMATOR_H_
#define TENSORFLOW_CORE_GRAPPLER_COSTS_ANALYTICAL_COST_ESTIMATOR_H_
#include "tensorflow/core/grappler/costs/cost_estimator.h"
#include "tensorflow/core/grappler/costs/op_level_cost_estimator.h"
#include "tensorflow/core/grappler/costs/virtual_scheduler.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
class CostGraphDef;
class GraphDef;
}
namespace tensorflow {
namespace grappler {
class Cluster;
struct GrapplerItem;
class AnalyticalCostEstimator : public CostEstimator {
public:
AnalyticalCostEstimator(Cluster* cluster, bool use_static_shapes,
bool use_aggressive_shape_inference);
AnalyticalCostEstimator(Cluster* cluster,
std::unique_ptr<OpLevelCostEstimator> node_estimator,
std::unique_ptr<ReadyNodeManager> node_manager,
bool use_static_shapes,
bool use_aggressive_shape_inference);
AnalyticalCostEstimator(Cluster* cluster,
std::unique_ptr<OpLevelCostEstimator> node_estimator,
std::unique_ptr<ReadyNodeManager> node_manager,
std::unique_ptr<VirtualPlacer> placer,
bool use_static_shapes,
bool use_aggressive_shape_inference);
~AnalyticalCostEstimator() override {}
Status Initialize(const GrapplerItem& item) override;
Status PredictCosts(const GraphDef& optimized_graph,
RunMetadata* run_metadata, Costs* cost) const override;
const VirtualScheduler* GetScheduler() const { return scheduler_.get(); }
private:
const GrapplerItem* item_;
std::unique_ptr<OpLevelCostEstimator> node_estimator_;
std::unique_ptr<ReadyNodeManager> node_manager_;
std::unique_ptr<VirtualScheduler> scheduler_;
bool use_static_shapes_;
bool use_aggressive_shape_inference_;
};
}
}
#endif
#include "tensorflow/core/grappler/costs/analytical_cost_estimator.h"
#include <limits>
#include <unordered_map>
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/graph/types.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/costs/op_performance_data.pb.h"
#include "tensorflow/core/grappler/costs/utils.h"
#include "tensorflow/core/grappler/costs/virtual_placer.h"
#include "tensorflow/core/grappler/costs/virtual_scheduler.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/util/overflow.h"
namespace tensorflow {
namespace grappler {
namespace {
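// Adds (or refreshes) a CostGraphDef node for the op the node manager is
// currently visiting, wiring input and control edges up by id and
// recording per-output dtype, shape, and an overflow-checked byte size.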
Status AddCostNode(ReadyNodeManager* node_manager, const OpContext& op_context,
int node_id, const Costs& node_costs,
gtl::FlatMap<string, CostGraphDef::Node*>* name_to_cost_node,
gtl::FlatMap<string, int>* name_to_id,
CostGraphDef* cost_graph) {
const string& op_name = op_context.name;
auto it = name_to_cost_node->find(op_name);
CostGraphDef::Node* node;
if (it != name_to_cost_node->end()) {
node = it->second;
node->clear_input_info();
node->clear_output_info();
} else {
node = cost_graph->add_node();
(*name_to_cost_node)[op_name] = node;
node->set_name(op_name);
node->set_id(node_id);
(*name_to_id)[node->name()] = node->id();
}
node->set_device(op_context.device_name);
node->set_compute_cost(node_costs.execution_time.asMicroSeconds().count());
node->set_compute_time(node_costs.compute_time.asMicroSeconds().count());
node->set_memory_time(node_costs.memory_time.asMicroSeconds().count());
node->set_temporary_memory_size(node_costs.temporary_memory);
node->set_persistent_memory_size(node_costs.persistent_memory);
node->set_inaccurate(node_costs.inaccurate);
for (const string& input : node_manager->GetCurrNode()->input()) {
int input_port;
string input_name = ParseNodeName(input, &input_port);
if (name_to_id->find(input_name) == name_to_id->end()) {
if (!IsMerge(*node_manager->GetCurrNode()))
VLOG(1) << "input: " << input
<< " not found for non-Merge node: " << op_name;
continue;
}
if (IsControlInput(input)) {
node->add_control_input(name_to_id->at(input_name));
} else {
auto* input_info = node->add_input_info();
input_info->set_preceding_node(name_to_id->at(input_name));
input_info->set_preceding_port(input_port);
}
}
for (const auto& output : op_context.op_info.outputs()) {
auto output_info = node->add_output_info();
output_info->set_alias_input_port(-1);
output_info->set_dtype(output.dtype());
*output_info->mutable_shape() = output.shape();
int64_t size = DataTypeSize(output.dtype());
for (const auto& dim : output.shape().dim()) {
size = MultiplyWithoutOverflow(size, std::max<int64_t>(1, dim.size()));
if (size < 0) {
return errors::InvalidArgument(
"Integer overflow encountered in dimension size.");
}
}
output_info->set_size(size);
}
return absl::OkStatus();
}
}
AnalyticalCostEstimator::AnalyticalCostEstimator(
Cluster* cluster, bool use_static_shapes,
bool use_aggressive_shape_inference)
: AnalyticalCostEstimator(
cluster, std::make_unique<OpLevelCostEstimator>(),
ReadyNodeManagerFactory("FirstReady"), use_static_shapes,
use_aggressive_shape_inference) {}
AnalyticalCostEstimator::AnalyticalCostEstimator(
Cluster* cluster, std::unique_ptr<OpLevelCostEstimator> node_estimator,
std::unique_ptr<ReadyNodeManager> node_manager, bool use_static_shapes,
bool use_aggressive_shape_inference)
: node_estimator_(std::move(node_estimator)),
node_manager_(std::move(node_manager)),
use_static_shapes_(use_static_shapes),
use_aggressive_shape_inference_(use_aggressive_shape_inference) {
scheduler_ = std::make_unique<VirtualScheduler>(
use_static_shapes_, use_aggressive_shape_inference_, cluster,
node_manager_.get(),
std::make_unique<VirtualPlacer>(cluster->GetDevices()));
}
AnalyticalCostEstimator::AnalyticalCostEstimator(
Cluster* cluster, std::unique_ptr<OpLevelCostEstimator> node_estimator,
std::unique_ptr<ReadyNodeManager> node_manager,
std::unique_ptr<VirtualPlacer> placer, bool use_static_shapes,
bool use_aggressive_shape_inference)
: node_estimator_(std::move(node_estimator)),
node_manager_(std::move(node_manager)),
use_static_shapes_(use_static_shapes),
use_aggressive_shape_inference_(use_aggressive_shape_inference) {
scheduler_ = std::make_unique<VirtualScheduler>(
use_static_shapes_, use_aggressive_shape_inference_, cluster,
node_manager_.get(), std::move(placer));
}
Status AnalyticalCostEstimator::Initialize(const GrapplerItem& item) {
item_ = &item;
return absl::OkStatus();
}
Status AnalyticalCostEstimator::PredictCosts(const GraphDef& optimized_graph,
RunMetadata* run_metadata,
Costs* costs) const {
std::unique_ptr<GrapplerItem> item_storage;
const GrapplerItem* item;
if (&optimized_graph == &item_->graph) {
item = item_;
} else {
GraphDef graph_copy = optimized_graph;
item_storage = std::make_unique<GrapplerItem>(
item_->WithGraph(std::move(graph_copy)));
item = item_storage.get();
}
auto status = scheduler_->Init(item);
if (!status.ok()) {
if (costs) {
costs->execution_time = Costs::Duration::max();
}
return status;
}
gtl::FlatMap<string, CostGraphDef::Node*> name_to_cost_node;
CostGraphDef* cost_graph = nullptr;
if (run_metadata) {
cost_graph = run_metadata->mutable_cost_graph();
for (auto& node : *cost_graph->mutable_node()) {
name_to_cost_node[node.name()] = &node;
}
}
std::vector<string> inaccurate_nodes;
int nodes_executed = 0;
int node_id = 0;
gtl::FlatMap<string, int> name_to_id;
Costs node_costs;
do {
++nodes_executed;
OpContext op_context = scheduler_->GetCurrNode();
node_costs = node_estimator_->PredictCosts(op_context);
if (node_costs.inaccurate) {
inaccurate_nodes.push_back(op_context.name);
if (node_costs.num_ops_with_unknown_shapes > 0)
VLOG(4) << op_context.name << " has "
<< node_costs.num_ops_with_unknown_shapes << " unknown shapes";
}
if (cost_graph) {
Status s =
AddCostNode(node_manager_.get(), op_context, node_id++, node_costs,
&name_to_cost_node, &name_to_id, cost_graph);
if (!s.ok()) {
return s;
}
}
} while (scheduler_->MarkCurrNodeExecuted(node_costs));
VLOG(1) << inaccurate_nodes.size() << " out of " << nodes_executed
<< " nodes have inaccurate time estimation";
if (VLOG_IS_ON(3)) {
for (const auto& node : inaccurate_nodes) {
VLOG(4) << "Node with inaccurate time estimation: " << node;
}
}
if (costs) {
*costs = scheduler_->Summary(run_metadata);
} else if (run_metadata) {
scheduler_->GenerateRunMetadata(run_metadata);
}
if (VLOG_IS_ON(1)) {
bool verbose = VLOG_IS_ON(2);
if (run_metadata) {
VLOG(1) << GetStatsStringFromRunMetadata(*run_metadata, verbose);
} else {
RunMetadata run_metadata;
scheduler_->GenerateRunMetadata(&run_metadata);
VLOG(1) << GetStatsStringFromRunMetadata(run_metadata, verbose);
}
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/costs/virtual_scheduler.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/costs/analytical_cost_estimator.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
class AnalyticalCostEstimatorTest : public ::testing::Test {
protected:
void SetUp() override {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
cpu_device.set_num_cores(4);
cpu_device.set_frequency(2600);
cpu_device.set_bandwidth(24 * 1024 * 1024);
devices["/job:localhost/replica:0/task:0/cpu:0"] = cpu_device;
DeviceProperties gpu_device;
gpu_device.set_type("GPU");
gpu_device.set_num_cores(12);
gpu_device.set_frequency(1100);
gpu_device.set_bandwidth(180 * 1024 * 1024);
(*gpu_device.mutable_environment())["architecture"] = "6";
devices["/job:localhost/replica:0/task:0/device:GPU:0"] = gpu_device;
cluster_.reset(new VirtualCluster(devices));
}
GrapplerItem CreateMiniGraph() {
const int batch = 1;
const int width = 28;
const int height = 28;
const int num_channels = 1;
const int num_labels = 10;
const int kernel_size = 3;
const int conv_filters = 32;
Scope s = Scope::NewRootScope();
auto images = ops::RandomUniform(
s.WithOpName("image"), {batch, width, height, num_channels}, DT_FLOAT);
auto labels = ops::RandomUniform(s.WithOpName("label"), {batch, num_labels},
DT_FLOAT);
auto w = ops::Variable(
s.WithOpName("W"),
{kernel_size, kernel_size, num_channels, conv_filters}, DT_FLOAT);
auto b = ops::Variable(s.WithOpName("B"), {conv_filters}, DT_FLOAT);
auto conv =
ops::Conv2D(s.WithOpName("conv"), images, w, {1, 1, 1, 1}, "SAME");
auto bias = ops::Add(s.WithOpName("bias"), conv, b);
auto relu = ops::Relu(s.WithOpName("relu"), bias);
auto flat_shape = ops::Const(s.WithOpName("flat_shape"),
{batch, width * height * conv_filters});
auto flat = ops::Reshape(s.WithOpName("flat"), relu, flat_shape);
auto w2 =
ops::Variable(s.WithOpName("W2"),
{width * height * conv_filters, num_labels}, DT_FLOAT);
auto b2 = ops::Variable(s.WithOpName("B2"), {num_labels}, DT_FLOAT);
auto matmul = ops::MatMul(s.WithOpName("matmul"), flat, w2);
auto logits = ops::Add(s.WithOpName("logits"), matmul, b2);
auto softmax = ops::Softmax(s.WithOpName("softmax"), logits);
auto lsm = ops::Log(s.WithOpName("lsm"), softmax);
GrapplerItem item;
item.fetch.push_back("lsm");
TF_CHECK_OK(s.ToGraphDef(&item.graph));
return item;
}
std::unique_ptr<VirtualCluster> cluster_;
};
TEST_F(AnalyticalCostEstimatorTest, SimpleTest) {
GrapplerItem item = CreateMiniGraph();
AnalyticalCostEstimator estimator(cluster_.get(), true,
true);
TF_ASSERT_OK(estimator.Initialize(item));
RunMetadata run_metadata;
Costs summary;
TF_ASSERT_OK(estimator.PredictCosts(item.graph, &run_metadata, &summary));
EXPECT_EQ(Costs::NanoSeconds(9158), summary.execution_time);
EXPECT_EQ(15, summary.num_ops_total);
EXPECT_TRUE(summary.inaccurate);
EXPECT_EQ(0, summary.num_ops_with_unknown_shapes);
}
}
} |
1,367 | cpp | tensorflow/tensorflow | op_level_cost_estimator | tensorflow/core/grappler/costs/op_level_cost_estimator.cc | tensorflow/core/grappler/costs/op_level_cost_estimator_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_COSTS_OP_LEVEL_COST_ESTIMATOR_H_
#define TENSORFLOW_CORE_GRAPPLER_COSTS_OP_LEVEL_COST_ESTIMATOR_H_
#include <cstdint>
#include <functional>
#include <map>
#include <numeric>
#include <set>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "tensorflow/core/grappler/costs/cost_estimator.h"
#include "tensorflow/core/grappler/costs/op_context.h"
#include "tensorflow/core/grappler/costs/op_performance_data.pb.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/padding.h"
namespace tensorflow {
namespace grappler {
bool GetTensorShapeProtoFromTensorProto(const TensorProto& tensor_proto,
TensorShapeProto* tensor_shape_proto);
std::vector<int64_t> MaybeGetMinimumShape(
const TensorShapeProto& original_shape, int rank,
bool* found_unknown_shapes);
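// Per-node accounting filled in by the Predict* methods below: compute-op
// counts, bytes accessed per input and output, internal reads/writes, memory
// footprints, and bookkeeping flags that are later folded into a Costs value.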
struct NodeCosts {
bool minimum_cost_op = false;
int64_t num_compute_ops = 0;
std::vector<int64_t> num_input_bytes_accessed;
std::vector<int64_t> num_output_bytes_accessed;
int64_t internal_read_bytes = 0;
int64_t internal_write_bytes = 0;
int64_t num_total_input_bytes() const {
return std::accumulate(num_input_bytes_accessed.begin(),
num_input_bytes_accessed.end(), 0LL);
}
int64_t num_total_read_bytes() const {
return num_total_input_bytes() + internal_read_bytes;
}
int64_t num_total_output_bytes() const {
return std::accumulate(num_output_bytes_accessed.begin(),
num_output_bytes_accessed.end(), 0LL);
}
int64_t num_total_write_bytes() const {
return num_total_output_bytes() + internal_write_bytes;
}
int64_t num_bytes_accessed() const {
return num_total_read_bytes() + num_total_write_bytes();
}
int64_t max_memory = 0;
int64_t persistent_memory = 0;
int64_t temporary_memory = 0;
int64_t num_nodes = 1;
int64_t num_nodes_with_unknown_shapes = 0;
int64_t num_nodes_with_unknown_op_type = 0;
int64_t num_nodes_with_pure_memory_op = 0;
bool inaccurate = false;
bool has_costs = false;
Costs costs;
};
class OpLevelCostEstimator {
public:
OpLevelCostEstimator();
virtual ~OpLevelCostEstimator() {}
virtual Costs PredictCosts(const OpContext& op_context) const;
virtual DeviceInfo GetDeviceInfo(const DeviceProperties& device) const;
protected:
Costs PredictOpCountBasedCost(double operations, const OpInfo& op_info) const;
Costs PredictOpCountBasedCost(double operations, double input_io_bytes,
double output_io_bytes,
const OpInfo& op_info) const;
absl::Status PredictNodeCosts(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictCostOfAnUnknownOp(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictNaryOp(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictConv2D(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictCwiseOp(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictConv2DBackpropInput(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictConv2DBackpropFilter(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictFusedConv2DBiasActivation(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictMatMul(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictSparseTensorDenseMatMul(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictNoOp(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictIdentity(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictVariable(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictBatchMatMul(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictMetadata(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictGatherOrSlice(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictScatter(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictMaxPool(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictMaxPoolGrad(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictAvgPool(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictAvgPoolGrad(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictFusedBatchNorm(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictFusedBatchNormGrad(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictEinsum(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictAssignVariableOps(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictPureMemoryOp(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictSoftmax(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictResizeBilinear(const OpContext& op_context,
NodeCosts* node_costs) const;
absl::Status PredictCropAndResize(const OpContext& op_context,
NodeCosts* node_costs) const;
int64_t GetSoftmaxComputeOps(const OpContext& op_context) const;
absl::Status PredictFusedOp(const OpContext& op_context,
const std::vector<OpContext>& fused_op_contexts,
NodeCosts* node_costs) const;
static double SafeDiv(const double lhs, const double rhs) {
if (rhs > 0) {
return lhs / rhs;
} else {
return 0.0;
}
}
struct MatMulDimensions {
int m;
int n;
int k;
};
struct BatchMatMulDimensions {
std::vector<int> batch_dims;
MatMulDimensions matmul_dims;
};
struct ConvolutionDimensions {
int64_t batch;
int64_t ix;
int64_t iy;
int64_t iz;
int64_t kx;
int64_t ky;
int64_t kz;
int64_t oz;
int64_t ox;
int64_t oy;
int64_t sx;
int64_t sy;
Padding padding;
};
static int64_t CountConv2DOperations(const OpInfo& op_info,
bool* found_unknown_shapes);
static int64_t CountConv2DOperations(const OpInfo& op_info,
ConvolutionDimensions* conv_info,
bool* found_unknown_shapes);
static int64_t CountMatMulOperations(const OpInfo& op_info,
bool* found_unknown_shapes);
static int64_t CountMatMulOperations(const OpInfo& op_info,
MatMulDimensions* mat_mul,
bool* found_unknown_shapes);
static int64_t CountMatMulOperations(const OpInfo& op_info, bool transpose_a,
bool transpose_b,
MatMulDimensions* mat_mul,
bool* found_unknown_shapes);
bool GenerateBatchMatmulContextFromEinsum(const OpContext& einsum_context,
OpContext* batch_matmul_context,
bool* found_unknown_shapes) const;
static int64_t CountBatchMatMulOperations(const OpInfo& op_info,
bool* found_unknown_shapes);
static int64_t CountBatchMatMulOperations(
const OpInfo& op_info, BatchMatMulDimensions* batch_mat_mul,
bool* found_unknown_shapes);
static int64_t CountConv2DBackpropInputOperations(
const OpInfo& op_info, ConvolutionDimensions* returned_conv_dims,
bool* found_unknown_shapes);
static int64_t CountConv2DBackpropFilterOperations(
const OpInfo& op_info, ConvolutionDimensions* returned_conv_dims,
bool* found_unknown_shapes);
static int64_t CalculateTensorElementCount(
const OpInfo::TensorProperties& tensor, bool* found_unknown_shapes);
static int64_t CalculateTensorSize(const OpInfo::TensorProperties& tensor,
bool* found_unknown_shapes);
static int64_t CalculateLargestInputCount(const OpInfo& op_info,
bool* found_unknown_shapes);
static int64_t CalculateInputSize(const OpInfo& op_info,
bool* found_unknown_shapes);
static std::vector<int64_t> CalculateInputTensorSize(
const OpInfo& op_info, bool* found_unknown_shapes);
static int64_t CalculateOutputSize(const OpInfo& op_info,
bool* found_unknown_shapes);
static std::vector<int64_t> CalculateOutputTensorSize(
const OpInfo& op_info, bool* found_unknown_shapes);
static ConvolutionDimensions ConvolutionDimensionsFromInputs(
const TensorShapeProto& original_image_shape,
const TensorShapeProto& original_filter_shape, const OpInfo& op_info,
bool* found_unknown_shapes);
static absl::StatusOr<ConvolutionDimensions> OpDimensionsFromInputs(
const TensorShapeProto& original_image_shape, const OpInfo& op_info,
bool* found_unknown_shapes);
static OpContext FusedChildContext(
const OpContext& parent, const string& op_name,
const OpInfo::TensorProperties& output,
const std::vector<OpInfo::TensorProperties>& inputs);
static OpInfo::TensorProperties DescribeTensor(
DataType type, const std::vector<int64_t>& dims);
static absl::Status PredictDefaultNodeCosts(int64_t num_compute_ops,
const OpContext& op_context,
bool* found_unknown_shapes,
NodeCosts* node_costs);
protected:
std::map<string, int> elementwise_ops_;
typedef std::function<absl::Status(const OpContext& op_context, NodeCosts*)>
CostImpl;
std::map<string, CostImpl> device_cost_impl_;
bool compute_memory_overlap_;
std::set<string> persistent_ops_;
private:
friend class OpLevelCostEstimatorTest;
};
}
}
#endif
#include "tensorflow/core/grappler/costs/op_level_cost_estimator.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <functional>
#include <optional>
#include <string>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "Eigen/Core"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/costs/cost_estimator.h"
#include "tensorflow/core/grappler/costs/op_context.h"
#include "tensorflow/core/grappler/costs/op_performance_data.pb.h"
#include "tensorflow/core/grappler/costs/utils.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/device_properties.pb.h"
#include "tensorflow/core/util/overflow.h"
#include "tensorflow/core/util/padding.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace grappler {
constexpr int kOpsPerMac = 2;
constexpr char kGuaranteeConst[] = "GuaranteeConst";
constexpr char kAddN[] = "AddN";
constexpr char kBitCast[] = "BitCast";
constexpr char kConcatV2[] = "ConcatV2";
constexpr char kConv2d[] = "Conv2D";
constexpr char kConv2dBackpropFilter[] = "Conv2DBackpropFilter";
constexpr char kConv2dBackpropInput[] = "Conv2DBackpropInput";
constexpr char kFusedConv2dBiasActivation[] = "FusedConv2DBiasActivation";
constexpr char kDataFormatVecPermute[] = "DataFormatVecPermute";
constexpr char kDepthToSpace[] = "DepthToSpace";
constexpr char kDepthwiseConv2dNative[] = "DepthwiseConv2dNative";
constexpr char kDepthwiseConv2dNativeBackpropFilter[] =
"DepthwiseConv2dNativeBackpropFilter";
constexpr char kDepthwiseConv2dNativeBackpropInput[] =
"DepthwiseConv2dNativeBackpropInput";
constexpr char kMatMul[] = "MatMul";
constexpr char kXlaEinsum[] = "XlaEinsum";
constexpr char kEinsum[] = "Einsum";
constexpr char kExpandDims[] = "ExpandDims";
constexpr char kFill[] = "Fill";
constexpr char kSparseMatMul[] = "SparseMatMul";
constexpr char kSparseTensorDenseMatMul[] = "SparseTensorDenseMatMul";
constexpr char kPlaceholder[] = "Placeholder";
constexpr char kIdentity[] = "Identity";
constexpr char kIdentityN[] = "IdentityN";
constexpr char kRefIdentity[] = "RefIdentity";
constexpr char kNoOp[] = "NoOp";
constexpr char kReshape[] = "Reshape";
constexpr char kSplit[] = "Split";
constexpr char kSqueeze[] = "Squeeze";
constexpr char kRecv[] = "_Recv";
constexpr char kSend[] = "_Send";
constexpr char kBatchMatMul[] = "BatchMatMul";
constexpr char kBatchMatMulV2[] = "BatchMatMulV2";
constexpr char kOneHot[] = "OneHot";
constexpr char kPack[] = "Pack";
constexpr char kRank[] = "Rank";
constexpr char kRange[] = "Range";
constexpr char kShape[] = "Shape";
constexpr char kShapeN[] = "ShapeN";
constexpr char kSize[] = "Size";
constexpr char kStopGradient[] = "StopGradient";
constexpr char kPreventGradient[] = "PreventGradient";
constexpr char kGather[] = "Gather";
constexpr char kGatherNd[] = "GatherNd";
constexpr char kGatherV2[] = "GatherV2";
constexpr char kScatterAdd[] = "ScatterAdd";
constexpr char kScatterDiv[] = "ScatterDiv";
constexpr char kScatterMax[] = "ScatterMax";
constexpr char kScatterMin[] = "ScatterMin";
constexpr char kScatterMul[] = "ScatterMul";
constexpr char kScatterSub[] = "ScatterSub";
constexpr char kScatterUpdate[] = "ScatterUpdate";
constexpr char kSlice[] = "Slice";
constexpr char kStridedSlice[] = "StridedSlice";
constexpr char kSpaceToDepth[] = "SpaceToDepth";
constexpr char kTranspose[] = "Transpose";
constexpr char kTile[] = "Tile";
constexpr char kMaxPool[] = "MaxPool";
constexpr char kMaxPoolGrad[] = "MaxPoolGrad";
constexpr char kAvgPool[] = "AvgPool";
constexpr char kAvgPoolGrad[] = "AvgPoolGrad";
constexpr char kFusedBatchNorm[] = "FusedBatchNorm";
constexpr char kFusedBatchNormGrad[] = "FusedBatchNormGrad";
constexpr char kQuantizedMatMul[] = "QuantizedMatMul";
constexpr char kQuantizedMatMulV2[] = "QuantizedMatMulV2";
constexpr char kUnpack[] = "Unpack";
constexpr char kSoftmax[] = "Softmax";
constexpr char kResizeBilinear[] = "ResizeBilinear";
constexpr char kCropAndResize[] = "CropAndResize";
constexpr char kSwitch[] = "Switch";
constexpr char kMerge[] = "Merge";
constexpr char kEnter[] = "Enter";
constexpr char kExit[] = "Exit";
constexpr char kNextIteration[] = "NextIteration";
constexpr char kConst[] = "Const";
constexpr char kVariable[] = "Variable";
constexpr char kVariableV2[] = "VariableV2";
constexpr char kAutoReloadVariable[] = "AutoReloadVariable";
constexpr char kVarHandleOp[] = "VarHandleOp";
constexpr char kVarHandlesOp[] = "_VarHandlesOp";
constexpr char kReadVariableOp[] = "ReadVariableOp";
constexpr char kReadVariablesOp[] = "_ReadVariablesOp";
constexpr char kAssignVariableOp[] = "AssignVariableOp";
constexpr char kAssignAddVariableOp[] = "AssignAddVariableOp";
constexpr char kAssignSubVariableOp[] = "AssignSubVariableOp";
static const Costs::Duration kMinComputeTime(1);
static const int64_t kMinComputeOp = 1;
namespace {
std::string GetDataFormat(const OpInfo& op_info) {
std::string data_format = "NHWC";
if (op_info.attr().find("data_format") != op_info.attr().end()) {
data_format = op_info.attr().at("data_format").s();
}
return data_format;
}
std::string GetFilterFormat(const OpInfo& op_info) {
std::string filter_format = "HWIO";
if (op_info.attr().find("filter_format") != op_info.attr().end()) {
filter_format = op_info.attr().at("filter_format").s();
}
return filter_format;
}
Padding GetPadding(const OpInfo& op_info) {
if (op_info.attr().find("padding") != op_info.attr().end() &&
op_info.attr().at("padding").s() == "VALID") {
return Padding::VALID;
}
return Padding::SAME;
}
bool IsTraining(const OpInfo& op_info) {
if (op_info.attr().find("is_training") != op_info.attr().end() &&
op_info.attr().at("is_training").b()) {
return true;
}
return false;
}
std::vector<int64_t> GetStrides(const OpInfo& op_info) {
if (op_info.attr().find("strides") != op_info.attr().end()) {
const auto strides = op_info.attr().at("strides").list().i();
DCHECK(strides.size() == 4)
<< "Attr strides is not a length-4 vector: " << op_info.DebugString();
if (strides.size() != 4) return {1, 1, 1, 1};
return {strides[0], strides[1], strides[2], strides[3]};
}
return {1, 1, 1, 1};
}
std::vector<int64_t> GetKernelSize(const OpInfo& op_info) {
if (op_info.attr().find("ksize") != op_info.attr().end()) {
const auto ksize = op_info.attr().at("ksize").list().i();
DCHECK(ksize.size() == 4)
<< "Attr ksize is not a length-4 vector: " << op_info.DebugString();
if (ksize.size() != 4) return {1, 1, 1, 1};
return {ksize[0], ksize[1], ksize[2], ksize[3]};
}
return {1, 1, 1, 1};
}
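// Computes one spatial output dimension for a convolution or pooling op.
// VALID padding: ceil((input - filter + 1) / stride), written below as
// (input - filter + stride) / stride; SAME padding: ceil(input / stride),
// written as (input + stride - 1) / stride. For example, with input=28,
// filter=3, stride=2: VALID gives (28-3+2)/2 = 13 and SAME gives
// (28+2-1)/2 = 14.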
int64_t GetOutputSize(const int64_t input, const int64_t filter,
const int64_t stride, const Padding& padding) {
if (padding == Padding::VALID) {
return (input - filter + stride) / stride;
} else {
return (input + stride - 1) / stride;
}
}
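// Returns the number of elements in the broadcast output of an element-wise
// op, aligning input shapes from their trailing dimensions as in NumPy-style
// broadcasting. For example, inputs of shape {2, 1, 4} and {3, 1} broadcast
// to {2, 3, 4}, giving 24 elements.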
int64_t CwiseOutputElementCount(const OpInfo& op_info) {
int max_rank = 1;
for (const OpInfo::TensorProperties& input_properties : op_info.inputs()) {
max_rank = std::max(max_rank, input_properties.shape().dim_size());
}
TensorShapeProto output_shape;
output_shape.mutable_dim()->Reserve(max_rank);
for (int i = 0; i < max_rank; ++i) {
output_shape.add_dim();
}
for (const OpInfo::TensorProperties& input_properties : op_info.inputs()) {
const TensorShapeProto& input_shape = input_properties.shape();
for (int i = input_shape.dim_size() - 1; i >= 0; --i) {
int output_shape_dim_index =
i + output_shape.dim_size() - input_shape.dim_size();
output_shape.mutable_dim(output_shape_dim_index)
->set_size(std::max(output_shape.dim(output_shape_dim_index).size(),
input_shape.dim(i).size()));
}
}
int64_t count = 1;
for (int i = 0; i < output_shape.dim_size(); i++) {
count *= output_shape.dim(i).size();
}
return count;
}
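// Returns true if any subscript label appears more than once within a single
// einsum operand string, e.g. "aab" -> true, "abc" -> false.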
bool CheckRepeatedDimensions(const absl::string_view dim_str) {
int str_size = dim_str.size();
for (int idx = 0; idx < str_size - 1; idx++) {
if (dim_str.find(dim_str[idx], idx + 1) != std::string::npos) {
return true;
}
}
return false;
}
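// Validates that an einsum equation is simple enough for the batched-matmul
// based estimate: the "equation" attr exists, it has exactly one "->" and two
// inputs, neither operand uses an ellipsis ("..."), each operand's subscript
// count matches its tensor rank, and no operand repeats an axis label.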
bool IsEinsumCorrectlyFormed(const OpContext& einsum_context) {
const auto& op_info = einsum_context.op_info;
auto it = op_info.attr().find("equation");
if (it == op_info.attr().end()) return false;
const absl::string_view equation = it->second.s();
std::vector<std::string> equation_split = absl::StrSplit(equation, "->");
if (equation_split.empty()) {
LOG(WARNING) << "Einsum with malformed equation";
return false;
}
std::vector<absl::string_view> input_split =
absl::StrSplit(equation_split[0], ',');
if (op_info.inputs_size() != 2 || equation_split.size() != 2) {
VLOG(1) << "Missing accurate estimator for op: " << op_info.op();
return false;
}
const auto& a_input = op_info.inputs(0);
const auto& b_input = op_info.inputs(1);
absl::string_view rhs_str = equation_split[1];
absl::string_view a_input_str = input_split[0];
absl::string_view b_input_str = input_split[1];
if (absl::StrContains(a_input_str, "...") ||
absl::StrContains(b_input_str, "...")) {
VLOG(1) << "Missing accurate estimator for op: " << op_info.op()
<< ", ellipsis not supported";
return false;
}
constexpr int kMatrixRank = 2;
bool a_input_shape_unknown = false;
bool b_input_shape_unknown = false;
std::vector<int64_t> a_input_shape = MaybeGetMinimumShape(
a_input.shape(), std::max(kMatrixRank, a_input.shape().dim_size()),
&a_input_shape_unknown);
std::vector<int64_t> b_input_shape = MaybeGetMinimumShape(
b_input.shape(), std::max(kMatrixRank, b_input.shape().dim_size()),
&b_input_shape_unknown);
if (a_input_str.size() != a_input_shape.size() ||
b_input_str.size() != b_input_shape.size()) {
VLOG(1) << "Missing accurate estimator for op: " << op_info.op()
<< ", equation subscripts don't match tensor rank.";
return false;
}
if (CheckRepeatedDimensions(a_input_str) ||
CheckRepeatedDimensions(b_input_str) ||
CheckRepeatedDimensions(rhs_str)) {
VLOG(1) << "Missing accurate estimator for op: " << op_info.op()
<< ", Subscripts where axis appears more than once for a single "
"input are not yet supported";
return false;
}
return true;
}
}
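// Returns a shape of exactly `rank` dimensions, substituting 1 for any
// dimension whose size is unknown (negative, missing, or of unknown rank) and
// setting *found_unknown_shapes whenever a substitution or rank mismatch
// occurs. Illustrative usage (variable names here are ours, not the API's):
//   bool unknown = false;
//   std::vector<int64_t> dims =
//       MaybeGetMinimumShape(shape_proto, /*rank=*/4, &unknown);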
std::vector<int64_t> MaybeGetMinimumShape(
const TensorShapeProto& original_shape, int rank,
bool* found_unknown_shapes) {
std::vector<int64_t> minimal_shape(rank, 1L);
if (original_shape.dim_size() == 0) {
*found_unknown_shapes |= original_shape.unknown_rank();
return minimal_shape;
}
*found_unknown_shapes |= original_shape.dim_size() != rank;
for (int i = 0; i < std::min(rank, original_shape.dim_size()); ++i) {
if (original_shape.dim(i).size() < 0) {
*found_unknown_shapes = true;
} else {
minimal_shape[i] = original_shape.dim(i).size();
}
}
*found_unknown_shapes |= original_shape.unknown_rank();
return minimal_shape;
}
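// The constructor registers one cost function per op name in
// device_cost_impl_; PredictCosts dispatches through this table and treats
// any op without an entry as an unknown op.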
OpLevelCostEstimator::OpLevelCostEstimator() {
typedef absl::Status (OpLevelCostEstimator::*CostImpl)(
const OpContext& op_context, NodeCosts*) const;
auto wrap = [this](CostImpl impl)
-> std::function<absl::Status(const OpContext&, NodeCosts*)> {
return [this, impl](const OpContext& op_context, NodeCosts* node_costs) {
return (this->*impl)(op_context, node_costs);
};
};
device_cost_impl_.emplace(kConv2d,
wrap(&OpLevelCostEstimator::PredictConv2D));
device_cost_impl_.emplace(
kConv2dBackpropFilter,
wrap(&OpLevelCostEstimator::PredictConv2DBackpropFilter));
device_cost_impl_.emplace(
kConv2dBackpropInput,
wrap(&OpLevelCostEstimator::PredictConv2DBackpropInput));
device_cost_impl_.emplace(
kFusedConv2dBiasActivation,
wrap(&OpLevelCostEstimator::PredictFusedConv2DBiasActivation));
device_cost_impl_.emplace(kDepthwiseConv2dNative,
wrap(&OpLevelCostEstimator::PredictConv2D));
device_cost_impl_.emplace(
kDepthwiseConv2dNativeBackpropFilter,
wrap(&OpLevelCostEstimator::PredictConv2DBackpropFilter));
device_cost_impl_.emplace(
kDepthwiseConv2dNativeBackpropInput,
wrap(&OpLevelCostEstimator::PredictConv2DBackpropInput));
device_cost_impl_.emplace(kMatMul,
wrap(&OpLevelCostEstimator::PredictMatMul));
device_cost_impl_.emplace(kSparseMatMul,
wrap(&OpLevelCostEstimator::PredictMatMul));
device_cost_impl_.emplace(
kSparseTensorDenseMatMul,
wrap(&OpLevelCostEstimator::PredictSparseTensorDenseMatMul));
device_cost_impl_.emplace(kBatchMatMul,
wrap(&OpLevelCostEstimator::PredictBatchMatMul));
device_cost_impl_.emplace(kBatchMatMulV2,
wrap(&OpLevelCostEstimator::PredictBatchMatMul));
device_cost_impl_.emplace(kQuantizedMatMul,
wrap(&OpLevelCostEstimator::PredictMatMul));
device_cost_impl_.emplace(kQuantizedMatMulV2,
wrap(&OpLevelCostEstimator::PredictMatMul));
device_cost_impl_.emplace(kXlaEinsum,
wrap(&OpLevelCostEstimator::PredictEinsum));
device_cost_impl_.emplace(kEinsum,
wrap(&OpLevelCostEstimator::PredictEinsum));
device_cost_impl_.emplace(kNoOp, wrap(&OpLevelCostEstimator::PredictNoOp));
device_cost_impl_.emplace(kGuaranteeConst,
wrap(&OpLevelCostEstimator::PredictNoOp));
device_cost_impl_.emplace(kGather,
wrap(&OpLevelCostEstimator::PredictGatherOrSlice));
device_cost_impl_.emplace(kGatherNd,
wrap(&OpLevelCostEstimator::PredictGatherOrSlice));
device_cost_impl_.emplace(kGatherV2,
wrap(&OpLevelCostEstimator::PredictGatherOrSlice));
device_cost_impl_.emplace(kScatterAdd,
                            wrap(&OpLevelCostEstimator::PredictScatter)); | #include "tensorflow/core/grappler/costs/op_level_cost_estimator.h"
#include <unordered_set>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/device_properties.pb.h"
namespace tensorflow {
namespace grappler {
using ::testing::ElementsAreArray;
namespace {
class TestOpLevelCostEstimator : public OpLevelCostEstimator {
public:
TestOpLevelCostEstimator() {
compute_memory_overlap_ = true;
device_info_ = DeviceInfo();
}
~TestOpLevelCostEstimator() override {}
void SetDeviceInfo(const DeviceInfo& device_info) {
device_info_ = device_info;
}
void SetComputeMemoryOverlap(bool value) { compute_memory_overlap_ = value; }
protected:
DeviceInfo GetDeviceInfo(const DeviceProperties& device) const override {
return device_info_;
}
DeviceInfo device_info_;
};
void ExpectZeroCost(const Costs& cost) {
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(cost.compute_time, Costs::Duration::zero());
EXPECT_EQ(cost.execution_time, Costs::Duration::zero());
EXPECT_EQ(cost.memory_time, Costs::Duration::zero());
}
void DescribeMatrix(int rows, int columns, OpInfo* op_info) {
auto input = op_info->add_inputs();
auto shape = input->mutable_shape();
auto shape_rows = shape->add_dim();
shape_rows->set_size(rows);
auto shape_columns = shape->add_dim();
shape_columns->set_size(columns);
input->set_dtype(DT_FLOAT);
}
void SetCpuDevice(OpInfo* op_info) {
auto device = op_info->mutable_device();
device->set_type("CPU");
device->set_num_cores(10);
device->set_bandwidth(10000000);
device->set_frequency(1000);
}
OpContext DescribeMatMul(int m, int n, int l, int k) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("MatMul");
DescribeMatrix(m, l, &op_context.op_info);
DescribeMatrix(k, n, &op_context.op_info);
return op_context;
}
void DescribeArbitraryRankInput(const std::vector<int>& dims, DataType dtype,
OpInfo* op_info) {
auto input = op_info->add_inputs();
input->set_dtype(dtype);
auto shape = input->mutable_shape();
for (auto d : dims) {
shape->add_dim()->set_size(d);
}
}
void DescribeArbitraryRankOutput(const std::vector<int>& dims, DataType dtype,
OpInfo* op_info) {
auto output = op_info->add_outputs();
output->set_dtype(dtype);
auto shape = output->mutable_shape();
for (auto d : dims) {
shape->add_dim()->set_size(d);
}
}
OpContext DescribeSparseTensorDenseMatMul(const int nnz_a,
const std::vector<int>& dims_b,
const std::vector<int>& dims_out) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("SparseTensorDenseMatMul");
DescribeArbitraryRankInput({nnz_a, 2}, DT_INT64, &op_context.op_info);
DescribeArbitraryRankInput({nnz_a}, DT_FLOAT, &op_context.op_info);
DescribeArbitraryRankInput({2}, DT_INT64, &op_context.op_info);
DescribeArbitraryRankInput(dims_b, DT_FLOAT, &op_context.op_info);
DescribeArbitraryRankOutput(dims_out, DT_FLOAT, &op_context.op_info);
return op_context;
}
OpContext DescribeXlaEinsum(const std::vector<int>& dims_a,
const std::vector<int>& dims_b,
const string& equation) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("XlaEinsum");
AttrValue equation_attribute;
equation_attribute.set_s(equation);
(*op_context.op_info.mutable_attr())["equation"] = equation_attribute;
if (!dims_a.empty())
DescribeArbitraryRankInput(dims_a, DT_FLOAT, &op_context.op_info);
if (!dims_b.empty())
DescribeArbitraryRankInput(dims_b, DT_FLOAT, &op_context.op_info);
return op_context;
}
OpContext DescribeEinsum(const std::vector<int>& dims_a,
const std::vector<int>& dims_b,
const string& equation) {
OpContext op_context = DescribeXlaEinsum(dims_a, dims_b, equation);
op_context.op_info.set_op("Einsum");
return op_context;
}
void DescribeDummyTensor(OpInfo::TensorProperties* tensor) {
}
void DescribeTensor1D(int dim0, OpInfo::TensorProperties* tensor) {
auto shape = tensor->mutable_shape();
shape->add_dim()->set_size(dim0);
tensor->set_dtype(DT_FLOAT);
}
void DescribeTensor4D(int dim0, int dim1, int dim2, int dim3,
OpInfo::TensorProperties* tensor) {
auto shape = tensor->mutable_shape();
shape->add_dim()->set_size(dim0);
shape->add_dim()->set_size(dim1);
shape->add_dim()->set_size(dim2);
shape->add_dim()->set_size(dim3);
tensor->set_dtype(DT_FLOAT);
}
void DescribeTensor5D(int dim0, int dim1, int dim2, int dim3, int dim4,
OpInfo::TensorProperties* tensor) {
auto shape = tensor->mutable_shape();
shape->add_dim()->set_size(dim0);
shape->add_dim()->set_size(dim1);
shape->add_dim()->set_size(dim2);
shape->add_dim()->set_size(dim3);
shape->add_dim()->set_size(dim4);
tensor->set_dtype(DT_FLOAT);
}
OpContext DescribeConvolution(int batch, int ix, int iy, int iz1, int iz2,
int kx, int ky, int oz) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("Conv2D");
DescribeTensor4D(batch, ix, iy, iz1, op_context.op_info.add_inputs());
DescribeTensor4D(kx, ky, iz2, oz, op_context.op_info.add_inputs());
return op_context;
}
OpContext DescribeDepthwiseConv2dNative(int batch, int ix, int iy, int iz1,
int iz2, int kx, int ky, int cm) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("DepthwiseConv2dNative");
DescribeTensor4D(batch, ix, iy, iz1, op_context.op_info.add_inputs());
DescribeTensor4D(kx, ky, iz2, cm, op_context.op_info.add_inputs());
return op_context;
}
OpContext DescribeFusedConv2DBiasActivation(int batch, int ix, int iy, int iz1,
int iz2, int kx, int ky, int ox,
int oy, int oz, bool has_side_input,
const string& data_format,
const string& filter_format) {
const int kVecWidth = 4;
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("FusedConv2DBiasActivation");
auto* attr_data_format = op_context.op_info.mutable_attr();
SetAttrValue(data_format, &(*attr_data_format)["data_format"]);
auto* attr_filter_format = op_context.op_info.mutable_attr();
SetAttrValue(filter_format, &(*attr_filter_format)["filter_format"]);
if (data_format == "NHWC") {
DescribeTensor4D(batch, ix, iy, iz1, op_context.op_info.add_inputs());
} else if (data_format == "NCHW") {
DescribeTensor4D(batch, iz1, ix, iy, op_context.op_info.add_inputs());
} else {
EXPECT_EQ(data_format, "NCHW_VECT_C");
EXPECT_EQ(iz1 % kVecWidth, 0);
DescribeTensor5D(batch, iz1 / kVecWidth, ix, iy, kVecWidth,
op_context.op_info.add_inputs());
}
if (filter_format == "HWIO") {
DescribeTensor4D(kx, ky, iz2, oz, op_context.op_info.add_inputs());
} else if (filter_format == "OIHW") {
DescribeTensor4D(oz, iz2, kx, ky, op_context.op_info.add_inputs());
} else {
EXPECT_EQ(filter_format, "OIHW_VECT_I");
EXPECT_EQ(iz2 % kVecWidth, 0);
DescribeTensor5D(oz, iz2 / kVecWidth, kx, ky, kVecWidth,
op_context.op_info.add_inputs());
}
DescribeTensor1D(oz, op_context.op_info.add_inputs());
auto side_input = op_context.op_info.add_inputs();
if (has_side_input) {
if (data_format == "NHWC") {
DescribeTensor4D(batch, ox, oy, oz, side_input);
} else if (data_format == "NCHW") {
DescribeTensor4D(batch, oz, ox, oy, side_input);
} else {
EXPECT_EQ(data_format, "NCHW_VECT_C");
EXPECT_EQ(oz % kVecWidth, 0);
DescribeTensor5D(batch, oz / kVecWidth, ox, oy, kVecWidth, side_input);
}
}
DescribeTensor1D(1, op_context.op_info.add_inputs());
DescribeTensor1D(1, op_context.op_info.add_inputs());
return op_context;
}
OpContext DescribeUnaryOp(const string& op, int size1) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op(op);
DescribeTensor4D(size1, 1, 1, 1, op_context.op_info.add_inputs());
DescribeTensor4D(size1, 1, 1, 1, op_context.op_info.add_outputs());
return op_context;
}
OpContext DescribeBinaryOp(const string& op, int size1, int size2) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op(op);
DescribeTensor4D(size1, 1, 1, 1, op_context.op_info.add_inputs());
DescribeTensor4D(2 * size1, size2, 1, 1, op_context.op_info.add_inputs());
DescribeTensor4D(2 * size1, size2, 1, 1, op_context.op_info.add_outputs());
return op_context;
}
OpContext DescribeBiasAdd(int size1, int size2) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("BiasAdd");
DescribeTensor4D(1, 1, size2, size1, op_context.op_info.add_inputs());
DescribeTensor1D(size1, op_context.op_info.add_inputs());
DescribeTensor4D(1, 1, size2, size1, op_context.op_info.add_outputs());
return op_context;
}
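// Test-side mirror of the estimator's output-size arithmetic (SAME:
// ceil(x / s), VALID: ceil((x - k + 1) / s)), used to derive expected pooling
// output shapes.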
int GetOutputSize(const int x, const int k, const int s,
const string& padding) {
if (padding == "SAME") {
return (x + s - 1) / s;
} else {
return (x - k + s) / s;
}
}
std::vector<int> GetPoolingOutputSize(const std::vector<int>& input,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const string& data_format,
const string& padding) {
int h_index = 1;
int w_index = 2;
int c_index = 3;
if (data_format == "NCHW") {
h_index = 2;
w_index = 3;
c_index = 1;
}
int n = input[0];
int h = input[h_index];
int w = input[w_index];
int c = input[c_index];
int sx = strides[h_index];
int sy = strides[w_index];
int kx = ksize[h_index];
int ky = ksize[w_index];
int ho = GetOutputSize(h, kx, sx, padding);
int wo = GetOutputSize(w, ky, sy, padding);
std::vector<int> output;
if (data_format == "NHWC") {
output = {n, ho, wo, c};
} else {
output = {n, c, ho, wo};
}
return output;
}
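// Fills `tensor_proto` with `values` of the given dtype and shape, serialized
// either as packed tensor_content or as repeated *_val fields.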
void GetTensorProto(const DataType dtype, const std::vector<int64_t>& shape,
                    const std::vector<int64_t>& values,
const bool tensor_content, TensorProto* tensor_proto) {
tensor_proto->Clear();
TensorProto temp_tensor_proto;
temp_tensor_proto.set_dtype(dtype);
for (const auto& x : shape) {
temp_tensor_proto.mutable_tensor_shape()->add_dim()->set_size(x);
}
for (const auto& x : values) {
if (dtype == DT_INT64) {
temp_tensor_proto.add_int64_val(x);
} else if (dtype == DT_INT32 || dtype == DT_INT16 || dtype == DT_INT8 ||
dtype == DT_UINT8) {
temp_tensor_proto.add_int_val(x);
} else if (dtype == DT_UINT32) {
temp_tensor_proto.add_uint32_val(x);
} else if (dtype == DT_UINT64) {
temp_tensor_proto.add_uint64_val(x);
} else {
CHECK(false) << "Unsupported dtype: " << dtype;
}
}
Tensor tensor(dtype);
CHECK(tensor.FromProto(temp_tensor_proto));
if (tensor_content) {
tensor.AsProtoTensorContent(tensor_proto);
} else {
tensor.AsProtoField(tensor_proto);
}
}
OpContext DescribePoolingOp(const string& op_name, const std::vector<int>& x,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const string& data_format, const string& padding) {
OpContext op_context;
auto& op_info = op_context.op_info;
SetCpuDevice(&op_info);
op_info.set_op(op_name);
const std::vector<int> y =
GetPoolingOutputSize(x, ksize, strides, data_format, padding);
if (op_name == "AvgPool" || op_name == "MaxPool") {
DescribeTensor4D(x[0], x[1], x[2], x[3], op_info.add_inputs());
DescribeTensor4D(y[0], y[1], y[2], y[3], op_info.add_outputs());
} else if (op_name == "AvgPoolGrad") {
DescribeArbitraryRankInput({4}, DT_INT32, &op_info);
auto* tensor_proto = op_info.mutable_inputs(0)->mutable_value();
    GetTensorProto(DT_INT32, {4}, {x[0], x[1], x[2], x[3]},
                   /*tensor_content=*/false, tensor_proto);
DescribeTensor4D(y[0], y[1], y[2], y[3], op_info.add_inputs());
DescribeTensor4D(x[0], x[1], x[2], x[3], op_info.add_outputs());
} else if (op_name == "MaxPoolGrad") {
DescribeTensor4D(x[0], x[1], x[2], x[3], op_info.add_inputs());
DescribeTensor4D(y[0], y[1], y[2], y[3], op_info.add_inputs());
DescribeTensor4D(y[0], y[1], y[2], y[3], op_info.add_inputs());
DescribeTensor4D(x[0], x[1], x[2], x[3], op_info.add_outputs());
}
auto* attr = op_info.mutable_attr();
SetAttrValue(data_format, &(*attr)["data_format"]);
SetAttrValue(padding, &(*attr)["padding"]);
SetAttrValue(strides, &(*attr)["strides"]);
SetAttrValue(ksize, &(*attr)["ksize"]);
return op_context;
}
OpContext DescribeFusedBatchNorm(const bool is_training, const bool is_grad,
const std::vector<int>& x,
const string& data_format) {
OpContext op_context = DescribePoolingOp("MaxPool", x, {1, 1, 1, 1},
{1, 1, 1, 1}, data_format, "SAME");
auto& op_info = op_context.op_info;
if (is_grad) {
op_info.set_op("FusedBatchNormGrad");
} else {
op_info.set_op("FusedBatchNorm");
}
if (is_grad) {
DescribeTensor4D(x[0], x[1], x[2], x[3], op_info.add_inputs());
}
int num_1d_inputs = is_grad ? 3 : 4;
for (int i = 0; i < num_1d_inputs; i++) {
auto* tensor = op_info.add_inputs();
auto* shape = tensor->mutable_shape();
shape->add_dim()->set_size(x[3]);
tensor->set_dtype(DT_FLOAT);
}
for (int i = 0; i < 4; i++) {
auto* tensor = op_info.add_outputs();
auto* shape = tensor->mutable_shape();
shape->add_dim()->set_size(x[3]);
tensor->set_dtype(DT_FLOAT);
}
auto* attr = op_context.op_info.mutable_attr();
attr->erase("ksize");
attr->erase("strides");
attr->erase("padding");
SetAttrValue(is_training, &(*attr)["is_training"]);
return op_context;
}
}
class OpLevelCostEstimatorTest : public ::testing::Test {
protected:
using BatchMatMulDimensions = OpLevelCostEstimator::BatchMatMulDimensions;
Costs PredictCosts(const OpContext& op_context) const {
return estimator_.PredictCosts(op_context);
}
int64_t CountMatMulOperations(const OpInfo& op_info,
bool* found_unknown_shapes) const {
return estimator_.CountMatMulOperations(op_info, found_unknown_shapes);
}
int64_t CountBatchMatMulOperations(const OpInfo& op_info,
bool* found_unknown_shapes) const {
return estimator_.CountBatchMatMulOperations(op_info, found_unknown_shapes);
}
int64_t CountBatchMatMulOperations(const OpInfo& op_info,
BatchMatMulDimensions* batch_mat_mul,
bool* found_unknown_shapes) const {
return estimator_.CountBatchMatMulOperations(op_info, batch_mat_mul,
found_unknown_shapes);
}
void SetComputeMemoryOverlap(bool value) {
estimator_.compute_memory_overlap_ = value;
}
void ValidateOpDimensionsFromInputs(const int n, const int h, const int w,
const int c, const int kx, const int ky,
const int sx, const int sy,
const string& data_format,
const string& padding) {
OpContext op_context;
int ho;
int wo;
if (data_format == "NHWC") {
op_context = DescribePoolingOp("MaxPool", {n, h, w, c}, {1, kx, ky, 1},
{1, sx, sy, 1}, "NHWC", padding);
ho = op_context.op_info.outputs(0).shape().dim(1).size();
wo = op_context.op_info.outputs(0).shape().dim(2).size();
} else {
op_context = DescribePoolingOp("MaxPool", {n, c, h, w}, {1, 1, kx, ky},
{1, 1, sx, sy}, "NCHW", padding);
ho = op_context.op_info.outputs(0).shape().dim(2).size();
wo = op_context.op_info.outputs(0).shape().dim(3).size();
}
bool found_unknown_shapes;
TF_ASSERT_OK_AND_ASSIGN(
auto dims, OpLevelCostEstimator::OpDimensionsFromInputs(
op_context.op_info.inputs(0).shape(), op_context.op_info,
&found_unknown_shapes));
Padding padding_enum;
if (padding == "VALID") {
padding_enum = Padding::VALID;
} else {
padding_enum = Padding::SAME;
}
EXPECT_EQ(n, dims.batch);
EXPECT_EQ(h, dims.ix);
EXPECT_EQ(w, dims.iy);
EXPECT_EQ(c, dims.iz);
EXPECT_EQ(kx, dims.kx);
EXPECT_EQ(ky, dims.ky);
EXPECT_EQ(sx, dims.sx);
EXPECT_EQ(sy, dims.sy);
EXPECT_EQ(ho, dims.ox);
EXPECT_EQ(wo, dims.oy);
EXPECT_EQ(c, dims.oz);
EXPECT_EQ(padding_enum, dims.padding);
}
absl::StatusOr<OpLevelCostEstimator::ConvolutionDimensions>
CallOpDimensionsFromInputs(const int n, const int h, const int w, const int c,
const int kx, const int ky, const int sx,
const int sy, const string& data_format,
const string& padding) {
OpContext op_context;
const std::vector<int> x = {n, h, w, c};
const std::vector<int> ksize = {1, kx, ky, 1};
std::vector<int> strides;
if (data_format == "NHWC") {
strides = {1, sy, sx, 1};
} else {
strides = {1, 1, sy, sx};
}
auto& op_info = op_context.op_info;
SetCpuDevice(&op_info);
op_info.set_op("MaxPool");
DescribeTensor4D(x[0], x[1], x[2], x[3], op_info.add_inputs());
auto* attr = op_info.mutable_attr();
SetAttrValue(data_format, &(*attr)["data_format"]);
SetAttrValue(padding, &(*attr)["padding"]);
SetAttrValue(strides, &(*attr)["strides"]);
SetAttrValue(ksize, &(*attr)["ksize"]);
bool found_unknown_shapes;
return OpLevelCostEstimator::OpDimensionsFromInputs(
op_context.op_info.inputs(0).shape(), op_context.op_info,
&found_unknown_shapes);
}
OpLevelCostEstimator estimator_;
};
class OpLevelBatchMatMulCostEstimatorTest
: public OpLevelCostEstimatorTest,
public ::testing::WithParamInterface<const char*> {
protected:
OpContext DescribeBatchMatMul(const std::vector<int>& dims_a,
const std::vector<int>& dims_b) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op(GetParam());
DescribeArbitraryRankInput(dims_a, DT_FLOAT, &op_context.op_info);
DescribeArbitraryRankInput(dims_b, DT_FLOAT, &op_context.op_info);
return op_context;
}
int64_t CountBatchMatMulOperations(const OpInfo& op_info,
bool* found_unknown_shapes) const {
return OpLevelCostEstimatorTest::CountBatchMatMulOperations(
op_info, found_unknown_shapes);
}
int64_t CountBatchMatMulDimProduct(const OpInfo& op_info,
bool* found_unknown_shapes) const {
BatchMatMulDimensions batch_mat_mul;
batch_mat_mul.matmul_dims.n = 0;
batch_mat_mul.matmul_dims.m = 0;
batch_mat_mul.matmul_dims.k = 0;
OpLevelCostEstimatorTest::CountBatchMatMulOperations(
op_info, &batch_mat_mul, found_unknown_shapes);
int dimension_product = 1;
for (auto dim : batch_mat_mul.batch_dims) dimension_product *= dim;
dimension_product *= batch_mat_mul.matmul_dims.n;
dimension_product *= batch_mat_mul.matmul_dims.m;
dimension_product *= batch_mat_mul.matmul_dims.k;
return dimension_product;
}
};
TEST_F(OpLevelCostEstimatorTest, TestPersistentOpCosts) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
std::unordered_set<string> persistent_ops = {
"Const", "Variable", "VariableV2", "AutoReloadVariable",
"VarHandleOp", "ReadVariableOp",
};
for (const auto& op : persistent_ops) {
op_context.op_info.set_op(op);
auto cost = estimator_.PredictCosts(op_context);
EXPECT_EQ(Costs::Duration(0), cost.memory_time);
EXPECT_EQ(Costs::Duration(1), cost.compute_time);
EXPECT_EQ(Costs::Duration(1), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
}
TEST_F(OpLevelCostEstimatorTest, TestGatherCosts) {
std::vector<std::string> gather_ops = {"Gather", "GatherNd", "GatherV2"};
for (const auto& op : gather_ops) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op(op);
DescribeArbitraryRankInput({10000000, 10}, DT_FLOAT, &op_context.op_info);
DescribeArbitraryRankInput({16}, DT_INT64, &op_context.op_info);
DescribeArbitraryRankOutput({16, 10}, DT_FLOAT, &op_context.op_info);
auto cost = estimator_.PredictCosts(op_context);
EXPECT_EQ(Costs::Duration(130), cost.memory_time);
EXPECT_EQ(Costs::Duration(16), cost.compute_time);
EXPECT_EQ(Costs::Duration(146), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
}
TEST_F(OpLevelCostEstimatorTest, TestGatherCostsWithoutOutput) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("Gather");
DescribeArbitraryRankInput({10000000, 10}, DT_FLOAT, &op_context.op_info);
DescribeArbitraryRankInput({16}, DT_INT64, &op_context.op_info);
auto cost = estimator_.PredictCosts(op_context);
EXPECT_EQ(Costs::Duration(0), cost.memory_time);
EXPECT_EQ(Costs::Duration(0), cost.compute_time);
EXPECT_EQ(Costs::Duration(0), cost.execution_time);
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, TestSliceCosts) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("Slice");
DescribeArbitraryRankInput({10000000, 10}, DT_FLOAT, &op_context.op_info);
DescribeArbitraryRankInput({2}, DT_INT64, &op_context.op_info);
DescribeArbitraryRankInput({2}, DT_INT64, &op_context.op_info);
DescribeArbitraryRankOutput({10, 10}, DT_FLOAT, &op_context.op_info);
auto cost = estimator_.PredictCosts(op_context);
EXPECT_EQ(Costs::Duration(81), cost.memory_time);
EXPECT_EQ(Costs::Duration(10), cost.compute_time);
EXPECT_EQ(Costs::Duration(91), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, TestStridedSliceCosts) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("StridedSlice");
DescribeArbitraryRankInput({10000000, 10}, DT_FLOAT, &op_context.op_info);
DescribeArbitraryRankInput({2}, DT_INT64, &op_context.op_info);
DescribeArbitraryRankInput({2}, DT_INT64, &op_context.op_info);
DescribeArbitraryRankInput({2}, DT_INT64, &op_context.op_info);
DescribeArbitraryRankOutput({10, 10}, DT_FLOAT, &op_context.op_info);
auto cost = estimator_.PredictCosts(op_context);
EXPECT_EQ(Costs::Duration(81), cost.memory_time);
EXPECT_EQ(Costs::Duration(10), cost.compute_time);
EXPECT_EQ(Costs::Duration(91), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, TestScatterOps) {
std::vector<string> scatter_ops = {"ScatterAdd", "ScatterDiv", "ScatterMax",
"ScatterMin", "ScatterMul", "ScatterSub",
"ScatterUpdate"};
for (const auto& op : scatter_ops) {
{
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op(op);
DescribeArbitraryRankInput({10000000, 10}, DT_FLOAT, &op_context.op_info);
DescribeArbitraryRankInput({16}, DT_INT64, &op_context.op_info);
DescribeArbitraryRankInput({16, 10}, DT_FLOAT, &op_context.op_info);
DescribeArbitraryRankOutput({10000000, 10}, DT_FLOAT,
&op_context.op_info);
auto cost = estimator_.PredictCosts(op_context);
EXPECT_EQ(Costs::Duration(205), cost.memory_time);
EXPECT_EQ(Costs::Duration(16), cost.compute_time);
EXPECT_EQ(Costs::Duration(221), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
{
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op(op);
DescribeArbitraryRankInput({10000000, 10}, DT_FLOAT, &op_context.op_info);
DescribeArbitraryRankInput({16}, DT_INT32, &op_context.op_info);
DescribeArbitraryRankInput({}, DT_FLOAT, &op_context.op_info);
DescribeArbitraryRankOutput({10000000, 10}, DT_FLOAT,
&op_context.op_info);
auto cost = estimator_.PredictCosts(op_context);
EXPECT_EQ(Costs::Duration(135), cost.memory_time);
EXPECT_EQ(Costs::Duration(16), cost.compute_time);
EXPECT_EQ(Costs::Duration(151), cost.execution_time);
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
}
}
}
TEST_F(OpLevelCostEstimatorTest, BiasAddExecutionTime) {
auto cost = PredictCosts(DescribeBiasAdd(1000, 10));
EXPECT_EQ(Costs::Duration(8400), cost.memory_time);
EXPECT_EQ(Costs::Duration(1000), cost.compute_time);
EXPECT_EQ(Costs::Duration(9400), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, Conv2DExecutionTime) {
auto cost = PredictCosts(DescribeConvolution(16, 19, 19, 48, 48, 5, 5, 256));
EXPECT_EQ(Costs::Duration(233780), cost.memory_time);
EXPECT_EQ(Costs::Duration(354877440), cost.compute_time);
EXPECT_EQ(Costs::Duration(355111220), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, InvalidConv2DConfig) { |
1,368 | cpp | tensorflow/tensorflow | virtual_scheduler | tensorflow/core/grappler/costs/virtual_scheduler.cc | tensorflow/core/grappler/costs/virtual_scheduler_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_COSTS_VIRTUAL_SCHEDULER_H_
#define TENSORFLOW_CORE_GRAPPLER_COSTS_VIRTUAL_SCHEDULER_H_
#include <functional>
#include <list>
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/grappler/costs/cost_estimator.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/costs/op_context.h"
#include "tensorflow/core/grappler/costs/virtual_placer.h"
#include "tensorflow/core/grappler/grappler_item.h"
namespace tensorflow {
namespace grappler {
ABSL_CONST_INIT extern const char kAttrInputSrc[];
ABSL_CONST_INIT extern const char kAttrSrcDevice[];
ABSL_CONST_INIT extern const char kAttrDstDevice[];
ABSL_CONST_INIT extern const char kAttrTensorName[];
ABSL_CONST_INIT extern const char kChannelDevice[];
ABSL_CONST_INIT extern const char kStreaming[];
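// Per-node bookkeeping maintained while simulating execution: input/output
// edges and their inferred properties, readiness and reference counters, and
// the ready/scheduled/finished timestamps plus accumulated costs for the node.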
struct NodeState {
std::vector<std::pair<const NodeDef*, int>> inputs;
std::unordered_map<int, std::vector<const NodeDef*>> outputs;
std::vector<OpInfo::TensorProperties> input_properties;
std::vector<OpInfo::TensorProperties> output_properties;
string device_name;
int num_inputs_ready;
std::unordered_map<int, int> num_outputs_executed;
Costs::Duration time_ready;
Costs::Duration time_scheduled;
Costs::Duration time_finished;
std::unordered_map<int, Costs::Duration> time_no_references;
Costs node_costs;
Costs TotalNodeCosts() const {
return MultiplyCosts(node_costs, execution_count);
}
int execution_count;
bool shape_incompatible;
NodeState() {
num_inputs_ready = 0;
time_ready = Costs::Duration::max();
time_scheduled = Costs::Duration::max();
time_finished = Costs::Duration::max();
execution_count = 0;
shape_incompatible = false;
}
};
struct DeviceState {
std::vector<const NodeDef*> nodes_executed;
struct NodePairHash {
public:
    std::size_t operator()(
const std::pair<const NodeDef*, int>& element) const {
return std::hash<const NodeDef*>()(element.first);
}
};
std::unordered_set<std::pair<const NodeDef*, int>, NodePairHash>
nodes_in_memory;
std::unordered_set<std::pair<const NodeDef*, int>, NodePairHash>
persistent_nodes;
std::unordered_set<std::pair<const NodeDef*, int>, NodePairHash>
mem_usage_snapshot_at_peak;
std::vector<std::pair<std::string, int64_t>> temporary_memory_usage_trace;
Costs device_costs;
std::map<string, Costs> op_to_cost;
int64_t memory_usage;
int64_t max_memory_usage;
struct ShapeAnnotationStats {
int64_t num_ops_annotated = 0;
int64_t num_ops_executed_more_than_once = 0;
int64_t num_ops_executed = 0;
int64_t num_ops_with_dynamic_shapes = 0;
int64_t num_ops_with_incompatible_shapes = 0;
} shape_annotation_stats;
DeviceState() {
device_costs = Costs::ZeroCosts();
device_costs.num_ops_total = 0;
memory_usage = 0;
max_memory_usage = 0;
}
Costs::Duration GetCurrTime() const { return device_costs.execution_time; }
};
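// Interface for deciding which ready node executes next. The implementations
// below differ in policy: FIFOManager (insertion order), LIFOManager (most
// recently added first), FirstReadyManager (earliest time_ready),
// PriorityReadyManager (caller-supplied per-node priorities), and
// CompositeNodeManager (per-op LIFO queues with dedicated handling for _Send
// and _Recv).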
class ReadyNodeManager {
public:
ReadyNodeManager() {}
virtual ~ReadyNodeManager() {}
virtual Status Init(
const std::unordered_map<const NodeDef*, NodeState>* node_map) {
return absl::OkStatus();
}
virtual void AddNode(const NodeDef* node) = 0;
virtual const NodeDef* GetCurrNode() = 0;
virtual void RemoveCurrNode() = 0;
virtual bool Empty() const = 0;
};
class FIFOManager : public ReadyNodeManager {
public:
FIFOManager() : ReadyNodeManager() {}
~FIFOManager() override {}
void AddNode(const NodeDef* node) override { nodes_.push_back(node); }
const NodeDef* GetCurrNode() override {
CHECK(!nodes_.empty()) << "GetCurrNode(), but there's no ready node";
return nodes_.front();
}
void RemoveCurrNode() override { nodes_.pop_front(); }
bool Empty() const override { return nodes_.empty(); }
private:
std::list<const NodeDef*> nodes_;
};
class LIFOManager : public ReadyNodeManager {
public:
LIFOManager() : ReadyNodeManager() {}
~LIFOManager() override {}
void AddNode(const NodeDef* node) override;
const NodeDef* GetCurrNode() override;
void RemoveCurrNode() override;
bool Empty() const override { return nodes_.empty(); }
private:
std::list<const NodeDef*> nodes_;
std::list<const NodeDef*>::iterator curr_pos_ = nodes_.end();
};
class HeapReadyManager : public ReadyNodeManager {
public:
HeapReadyManager();
Status Init(
const std::unordered_map<const NodeDef*, NodeState>* node_map) override;
~HeapReadyManager() override {}
void AddNode(const NodeDef* node) override;
const NodeDef* GetCurrNode() override;
void RemoveCurrNode() override;
bool Empty() const override;
protected:
virtual std::function<bool(const NodeDef*, const NodeDef*)> Greater() = 0;
std::vector<const NodeDef*> nodes_;
std::function<bool(const NodeDef*, const NodeDef*)> greater_;
const std::unordered_map<const NodeDef*, NodeState>* node_map_;
const NodeDef* curr_node_;
};
class FirstReadyManager : public HeapReadyManager {
public:
FirstReadyManager() : HeapReadyManager() {}
~FirstReadyManager() override {}
protected:
std::function<bool(const NodeDef*, const NodeDef*)> Greater() override;
};
class PriorityReadyManager : public HeapReadyManager {
public:
PriorityReadyManager() : HeapReadyManager() {}
~PriorityReadyManager() override {}
void AddNode(const NodeDef* node) override;
Status SetPriority(const std::unordered_map<string, int>& node_priority);
protected:
std::function<bool(const NodeDef*, const NodeDef*)> Greater() override;
private:
std::unordered_map<string, int> node_priority_;
};
class CompositeNodeManager : public ReadyNodeManager {
public:
CompositeNodeManager();
~CompositeNodeManager() override {}
Status Init(
const std::unordered_map<const NodeDef*, NodeState>* node_map) override;
void AddNode(const NodeDef* node) override;
const NodeDef* GetCurrNode() override;
void RemoveCurrNode() override;
bool Empty() const override;
private:
std::unordered_map<string, LIFOManager> ops_lifo_map_;
FirstReadyManager send_manager_;
FirstReadyManager recv_manager_;
const std::unordered_map<const NodeDef*, NodeState>* node_map_;
const NodeDef* curr_node_;
};
std::unique_ptr<ReadyNodeManager> ReadyNodeManagerFactory(
const string& ready_node_manager);
class SchedulerState {
public:
SchedulerState(const bool use_static_shapes,
const bool use_aggressive_shape_inference, Cluster* cluster,
std::unique_ptr<VirtualPlacer> placer);
SchedulerState(SchedulerState&& arg) = default;
SchedulerState& operator=(SchedulerState&& arg) = delete;
SchedulerState(const SchedulerState&) = delete;
SchedulerState& operator=(const SchedulerState&) = delete;
virtual ~SchedulerState();
Status Init(const GrapplerItem* item,
std::vector<const NodeDef*>* initial_nodes,
bool create_explicit_channel_device = true);
virtual Costs Summary() const;
virtual Costs Summary(RunMetadata* metadata);
void GenerateRunMetadata(RunMetadata* metadata);
const std::unordered_map<string, int64_t> GetPeakMemoryUsage() const;
const std::unordered_map<string, int64_t> GetPersistentMemoryUsage() const;
void enable_mem_usage_tracking() { track_mem_usage_snapshot_ = true; }
const std::unordered_map<string, DeviceState>* GetDeviceStates() const {
return &device_;
}
const std::unordered_map<const NodeDef*, NodeState>* GetNodeStates() const {
return &node_map_;
}
virtual OpContext CreateOpContext(const NodeDef* node) const;
std::vector<const NodeDef*> MarkNodeExecuted(
const NodeDef* node, const Costs& node_costs, const OpContext& op_context,
bool extract_execution_count_attr = true,
const std::string& override_device_name = "");
const GrapplerItem* GetGrapplerItem() { return grappler_item_; }
Costs GetGraphCost() { return graph_costs_; }
Cluster* GetCluster() { return cluster_; }
bool GetUseStaticShape() { return use_static_shapes_; }
bool GetUseAggressiveShapeInference() {
return use_aggressive_shape_inference_;
}
const std::unordered_map<const NodeDef*, NodeState>& GetNodeMap() {
return node_map_;
}
protected:
void SetNodeStateTimeScheduled(const NodeDef* node);
std::unordered_map<string, DeviceState>* GetMutableDeviceState() {
return &device_;
}
private:
void MaybeUpdateInputOutput(const NodeDef* node);
NodeState& GetNodeStateOrCreateIt(const NodeDef* node);
std::pair<const NodeDef*, const NodeDef*> CreateSendRecv(
const NodeDef* from, const NodeDef* to, const NodeDef* input_node,
const string& input_name, bool create_channel_device);
string DeviceName(const NodeDef* node) const;
string SanitizedDeviceName(const NodeDef* node) const;
string ChannelDeviceName(const NodeDef* from, const NodeDef* to) const;
void GetOutputNodes(const NodeDef* node, const Costs::Duration& curr_time,
std::vector<const NodeDef*>* output_nodes);
int64_t GetOrCalculateOutputSize(const NodeState& node_state,
int port_num) const;
std::unordered_map<const NodeDef*, NodeState> node_map_;
std::unordered_map<string, DeviceState> device_;
std::vector<std::unique_ptr<NodeDef>> additional_nodes_;
std::map<string, int> op_counts_;
std::map<string, std::pair<int, bool>> op_costs_;
Costs graph_costs_;
std::map<string, Costs> op_to_cost_;
std::unique_ptr<GraphProperties> graph_properties_;
Cluster* cluster_;
const GrapplerItem* grappler_item_;
bool use_static_shapes_;
bool initialized_;
bool track_mem_usage_snapshot_;
const bool use_aggressive_shape_inference_;
std::unique_ptr<VirtualPlacer> placer_;
};
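// Drives the simulation: Init() seeds the ready-node manager from the
// GrapplerItem, then callers alternate GetCurrNode() and
// MarkCurrNodeExecuted() until the latter returns false.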
class VirtualScheduler {
public:
VirtualScheduler(const bool use_static_shapes,
const bool use_aggressive_shape_inference, Cluster* cluster,
ReadyNodeManager* ready_nodes,
std::unique_ptr<VirtualPlacer> placer);
VirtualScheduler(ReadyNodeManager* ready_nodes,
std::unique_ptr<SchedulerState> scheduler_state);
virtual ~VirtualScheduler();
virtual Status Init(const GrapplerItem* item);
virtual OpContext GetCurrNode();
virtual bool MarkCurrNodeExecuted(const Costs& node_costs);
Costs Summary() const { return scheduler_state_->Summary(); }
Costs Summary(RunMetadata* metadata) {
return scheduler_state_->Summary(metadata);
}
void GenerateRunMetadata(RunMetadata* metadata) {
scheduler_state_->GenerateRunMetadata(metadata);
}
const std::unordered_map<string, int64_t> GetPeakMemoryUsage() const {
return scheduler_state_->GetPeakMemoryUsage();
}
const std::unordered_map<string, int64_t> GetPersistentMemoryUsage() const {
return scheduler_state_->GetPersistentMemoryUsage();
}
const std::unordered_map<string, DeviceState>* GetDeviceStates() const {
return scheduler_state_->GetDeviceStates();
}
const std::unordered_map<const NodeDef*, NodeState>* GetNodeStates() const {
return scheduler_state_->GetNodeStates();
}
void enable_mem_usage_tracking() {
scheduler_state_->enable_mem_usage_tracking();
}
protected:
std::unique_ptr<SchedulerState> scheduler_state_;
ReadyNodeManager* ready_nodes_;
};
}
}
#endif
#include "tensorflow/core/grappler/costs/virtual_scheduler.h"
#include <math.h>
#include <algorithm>
#include <functional>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_replace.h"
#include "tensorflow/core/framework/allocation_description.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_description.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/grappler/clusters/utils.h"
#include "tensorflow/core/grappler/costs/utils.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/transitive_fanin.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace grappler {
const char kAttrInputSrc[] = "input_source_";
const char kAttrSrcDevice[] = "send_device";
const char kAttrDstDevice[] = "recv_device";
const char kAttrTensorName[] = "tensor_name";
const char kChannelDevice[] = "Channel";
const char kStreaming[] = "_streaming";
namespace {
using ::tensorflow::strings::HumanReadableNumBytes;
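// Rounds x to two decimal places.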
float Round2(const float x) {
return ::round(100.0 * x) / 100.0;
}
Costs& FindOrCreateZero(const string& op_name,
std::map<string, Costs>* op_cost) {
auto it = op_cost->find(op_name);
if (it == op_cost->end()) {
it = op_cost->emplace(op_name, Costs::ZeroCosts()).first;
}
return it->second;
}
struct RecvNodeDescriptor {
const NodeDef* node;
const int port_num;
const string device;
RecvNodeDescriptor(const NodeDef* node_, const int port_num_,
const string& device_)
: node(node_), port_num(port_num_), device(device_) {}
};
struct RecvNodeDescriptorHash {
std::size_t operator()(const RecvNodeDescriptor& recv_node) const {
return std::hash<const NodeDef*>()(recv_node.node) ^
std::hash<int>()(recv_node.port_num) ^
std::hash<string>()(recv_node.device);
}
};
struct RecvNodeDescriptorEqual {
bool operator()(const RecvNodeDescriptor& a,
const RecvNodeDescriptor& b) const {
return a.node == b.node && a.port_num == b.port_num && a.device == b.device;
}
};
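// Accumulates per-device statistics for shape-annotated nodes: how many
// annotated ops ran, how often they ran, and whether their annotated shapes
// were incompatible or dynamic.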
void UpdateDeviceAnnotationState(const NodeDef* node,
const NodeState& node_state,
DeviceState* device) {
if (node->attr().count(kOutputShapes) == 0) return;
int64_t execution_count = node->attr().count(kExecutionCount) == 0
? 1
: node->attr().at(kExecutionCount).i();
auto& shape_annotation_stats = device->shape_annotation_stats;
shape_annotation_stats.num_ops_annotated += 1;
shape_annotation_stats.num_ops_executed += execution_count;
shape_annotation_stats.num_ops_executed_more_than_once +=
execution_count > 1 ? 1 : 0;
shape_annotation_stats.num_ops_with_incompatible_shapes +=
node_state.shape_incompatible ? 1 : 0;
shape_annotation_stats.num_ops_with_dynamic_shapes +=
(execution_count > 1 && node->attr().count(kOutputSame) == 0) ? 1 : 0;
}
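// Returns true if the node's _streaming attr marks the given output port as
// streaming.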
bool IsStreamingPort(const NodeDef& node, const int port) {
if (!node.attr().contains(kStreaming)) return false;
auto& attr_list = node.attr().at(kStreaming).list();
bool is_streaming_port = false;
if (port >= 0 && port < attr_list.b().size()) {
is_streaming_port = attr_list.b(port);
}
return is_streaming_port;
}
}
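// Merge nodes are queued at the front so that LIFO retrieval (which takes the
// back of the list) schedules them after the other ready nodes.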
void LIFOManager::AddNode(const NodeDef* node) {
if (IsMerge(*node)) {
nodes_.push_front(node);
} else {
nodes_.push_back(node);
}
}
const NodeDef* LIFOManager::GetCurrNode() {
CHECK(!nodes_.empty()) << "GetCurrNode(), but there's no ready node";
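  // curr_pos_ == end() means no node is currently selected; pick the most
  // recently added node, i.e. the last element of the list.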
if (curr_pos_ == nodes_.end()) {
curr_pos_ = --(nodes_.rbegin().base());
}
return *curr_pos_;
}
void LIFOManager::RemoveCurrNode() {
GetCurrNode();
nodes_.erase(curr_pos_);
curr_pos_ = nodes_.end();
}
HeapReadyManager::HeapReadyManager() : ReadyNodeManager() {
std::make_heap(nodes_.begin(), nodes_.end());
}
Status HeapReadyManager::Init(
const std::unordered_map<const NodeDef*, NodeState>* node_map) {
node_map_ = node_map;
nodes_.clear();
curr_node_ = nullptr;
greater_ = Greater();
return absl::OkStatus();
}
void HeapReadyManager::AddNode(const NodeDef* node) {
nodes_.push_back(node);
std::push_heap(nodes_.begin(), nodes_.end(), greater_);
}
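// Pops the top of the heap immediately and caches it in curr_node_; deferring
// the pop to RemoveCurrNode() would be unsafe because AddNode() may reorder
// the heap in the meantime.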
const NodeDef* HeapReadyManager::GetCurrNode() {
if (curr_node_) return curr_node_;
  CHECK(!nodes_.empty()) << "GetCurrNode(), but there's no ready node";
curr_node_ = nodes_.front();
std::pop_heap(nodes_.begin(), nodes_.end(), greater_);
nodes_.pop_back();
return curr_node_;
}
void HeapReadyManager::RemoveCurrNode() {
if (curr_node_) {
curr_node_ = nullptr;
} else {
std::pop_heap(nodes_.begin(), nodes_.end(), greater_);
nodes_.pop_back();
}
}
bool HeapReadyManager::Empty() const {
return nodes_.empty() && curr_node_ == nullptr;
}
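// Comparator for a min-heap on time_ready: returns true if a should be
// scheduled after b. Equal ready times are ordered by node name to keep the
// schedule deterministic.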
bool FirstReadyCmp(
const std::unordered_map<const NodeDef*, NodeState>* node_map,
const NodeDef* a, const NodeDef* b) {
if (node_map->at(a).time_ready == node_map->at(b).time_ready) {
return a->name().compare(b->name()) > 0;
} else {
return node_map->at(a).time_ready > node_map->at(b).time_ready;
}
}
std::function<bool(const NodeDef*, const NodeDef*)>
FirstReadyManager::Greater() {
auto greater = [this](const NodeDef* a, const NodeDef* b) -> bool {
return FirstReadyCmp(node_map_, a, b);
};
return greater;
}
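// Min-heap on the priority value (a smaller value is scheduled first); ties
// fall back to the first-ready comparison above.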
std::function<bool(const NodeDef*, const NodeDef*)>
PriorityReadyManager::Greater() {
auto greater = [this](const NodeDef* a, const NodeDef* b) -> bool {
auto pri_a = node_priority_.at(a->name());
auto pri_b = node_priority_.at(b->name());
if (pri_a == pri_b) {
return FirstReadyCmp(node_map_, a, b);
}
return pri_a > pri_b;
};
return greater;
}
void PriorityReadyManager::AddNode(const NodeDef* node) {
if (node_priority_.count(node->name()) == 0) {
VLOG(3) << "Priority of node " << node->name() << " not found.";
node_priority_[node->name()] = 0;
}
HeapReadyManager::AddNode(node);
}
Status PriorityReadyManager::SetPriority(
const std::unordered_map<string, int>& node_priority) {
node_priority_ = node_priority;
return absl::OkStatus();
}
CompositeNodeManager::CompositeNodeManager()
: ReadyNodeManager(), send_manager_(), recv_manager_() {}
Status CompositeNodeManager::Init(
const std::unordered_map<const NodeDef*, NodeState>* node_map) {
node_map_ = node_map;
TF_RETURN_IF_ERROR(send_manager_.Init(node_map));
TF_RETURN_IF_ERROR(recv_manager_.Init(node_map));
curr_node_ = nullptr;
return absl::OkStatus();
}
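// Routes _Send ops to send_manager_, _Recv ops to recv_manager_, and all
// other ops to the LIFO queue of their assigned device.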
void CompositeNodeManager::AddNode(const NodeDef* node) {
if (IsSend(*node)) {
send_manager_.AddNode(node);
} else if (IsRecv(*node)) {
recv_manager_.AddNode(node);
} else {
const auto | #include "tensorflow/core/grappler/costs/virtual_scheduler.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include "absl/strings/match.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/allocation_description.pb.h"
#include "tensorflow/core/framework/tensor_description.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/costs/utils.h"
#include "tensorflow/core/grappler/costs/virtual_placer.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kCPU0[] = "/job:localhost/replica:0/task:0/cpu:0";
constexpr char kCPU1[] = "/job:localhost/replica:0/task:0/cpu:1";
constexpr char kChannelFrom0To1[] = "Channel from CPU0 to CPU1";
constexpr char kChannelFrom1To0[] = "Channel from CPU1 to CPU0";
constexpr char kConv2D[] = "Conv2D";
constexpr char kSend[] = "_Send";
constexpr char kRecv[] = "_Recv";
class ReadyNodeManagerTest : public ::testing::Test {
protected:
ReadyNodeManagerTest() {
NodeSetUp("Node1", kConv2D, kCPU0, 6000, &node1_);
NodeSetUp("Node2", kConv2D, kCPU0, 5000, &node2_);
NodeSetUp("Node3", kConv2D, kCPU0, 4000, &node3_);
NodeSetUp("Node4", kConv2D, kCPU0, 3000, &node4_);
NodeSetUp("Node5", kConv2D, kCPU0, 2000, &node5_);
NodeSetUp("Node6", kConv2D, kCPU0, 1000, &node6_);
}
void NodeSetUp(const string& name, const string& op_name,
const string& device_name, const uint64 time_ready,
NodeDef* node) {
node->set_name(name);
node->set_op(op_name);
node->set_device(device_name);
node_states_[node] = NodeState();
node_states_[node].time_ready = time_ready;
node_states_[node].device_name = device_name;
}
NodeDef node1_, node2_, node3_, node4_, node5_, node6_;
std::unordered_map<const NodeDef*, NodeState> node_states_;
};
TEST_F(ReadyNodeManagerTest, GetSingleNodeFIFOManager) {
FIFOManager manager = FIFOManager();
manager.AddNode(&node1_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
}
TEST_F(ReadyNodeManagerTest, RemoveSingleNodeFIFOManager) {
FIFOManager manager = FIFOManager();
manager.AddNode(&node1_);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, GetAndRemoveMultipleFIFOManager) {
FIFOManager manager = FIFOManager();
manager.AddNode(&node1_);
manager.AddNode(&node2_);
manager.AddNode(&node3_);
manager.AddNode(&node4_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, AddAndRemoveMultipleFIFOManager) {
FIFOManager manager = FIFOManager();
manager.AddNode(&node1_);
manager.AddNode(&node2_);
manager.AddNode(&node3_);
manager.AddNode(&node4_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.AddNode(&node5_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.AddNode(&node6_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node5");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, GetSingleNodeLIFOManager) {
LIFOManager manager = LIFOManager();
manager.AddNode(&node1_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
}
TEST_F(ReadyNodeManagerTest, RemoveSingleNodeLIFOManager) {
LIFOManager manager = LIFOManager();
manager.AddNode(&node1_);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, GetAndRemoveMultipleLIFOManager) {
LIFOManager manager = LIFOManager();
manager.AddNode(&node1_);
manager.AddNode(&node2_);
manager.AddNode(&node3_);
manager.AddNode(&node4_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, AddAndRemoveMultipleLIFOManager) {
LIFOManager manager = LIFOManager();
manager.AddNode(&node1_);
manager.AddNode(&node2_);
manager.AddNode(&node3_);
manager.AddNode(&node4_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.AddNode(&node5_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node5");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.AddNode(&node6_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, MergeOrderInLIFOManager) {
LIFOManager manager = LIFOManager();
node3_.set_op("Merge");
manager.AddNode(&node1_);
manager.AddNode(&node2_);
manager.AddNode(&node3_);
manager.AddNode(&node4_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
}
TEST_F(ReadyNodeManagerTest, GetSingleNodeFirstReadyManager) {
FirstReadyManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
manager.AddNode(&node1_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
}
TEST_F(ReadyNodeManagerTest, RemoveSingleNodeFirstReadyManager) {
FirstReadyManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
manager.AddNode(&node1_);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, GetAndRemoveMultipleFirstReadyManager) {
FirstReadyManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
manager.AddNode(&node2_);
manager.AddNode(&node1_);
manager.AddNode(&node4_);
manager.AddNode(&node5_);
manager.AddNode(&node3_);
manager.AddNode(&node6_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node5");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, GetCurrNodeFirstReadyManager) {
FirstReadyManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
manager.AddNode(&node2_);
manager.AddNode(&node1_);
manager.AddNode(&node4_);
manager.AddNode(&node5_);
manager.AddNode(&node3_);
manager.AddNode(&node6_);
EXPECT_EQ("Node6", manager.GetCurrNode()->name());
NodeDef node7;
NodeDef node8;
NodeDef node9;
NodeSetUp("Node7", kConv2D, kCPU0, 5, &node7);
NodeSetUp("Node8", kConv2D, kCPU0, 4, &node8);
NodeSetUp("Node9", kConv2D, kCPU0, 3, &node9);
manager.AddNode(&node7);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.AddNode(&node8);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node8");
manager.AddNode(&node9);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node8");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node9");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node7");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node5");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, DeterminismInFirstReadyManager) {
FirstReadyManager manager1;
TF_EXPECT_OK(manager1.Init(&node_states_));
FirstReadyManager manager2;
TF_EXPECT_OK(manager2.Init(&node_states_));
NodeDef node7;
NodeDef node8;
NodeDef node9;
NodeDef node10;
NodeDef node11;
NodeDef node12;
NodeSetUp("Node7", kConv2D, kCPU0, 1000, &node7);
NodeSetUp("Node8", kConv2D, kCPU0, 1000, &node8);
NodeSetUp("Node9", kConv2D, kCPU0, 1000, &node9);
NodeSetUp("Node10", kConv2D, kCPU0, 1000, &node10);
NodeSetUp("Node11", kConv2D, kCPU0, 1000, &node11);
NodeSetUp("Node12", kConv2D, kCPU0, 1000, &node12);
manager1.AddNode(&node7);
manager1.AddNode(&node8);
manager1.AddNode(&node9);
manager1.AddNode(&node10);
manager1.AddNode(&node11);
manager1.AddNode(&node12);
manager2.AddNode(&node8);
manager2.AddNode(&node11);
manager2.AddNode(&node9);
manager2.AddNode(&node10);
manager2.AddNode(&node7);
manager2.AddNode(&node12);
EXPECT_EQ(manager1.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager1.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_EQ(manager1.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager1.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_EQ(manager1.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager1.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_EQ(manager1.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager1.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_EQ(manager1.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager1.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_EQ(manager1.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager1.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_TRUE(manager1.Empty());
EXPECT_TRUE(manager2.Empty());
}
TEST_F(ReadyNodeManagerTest, GetAndRemoveMultiplePriorityReadyManager) {
PriorityReadyManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
std::unordered_map<string, int> node_priority = {
{"Node1", 1}, {"Node2", 2}, {"Node3", 2}, {"Node4", 4}, {"Node5", 5}};
TF_EXPECT_OK(manager.SetPriority(node_priority));
manager.AddNode(&node3_);
manager.AddNode(&node1_);
manager.AddNode(&node4_);
manager.AddNode(&node5_);
manager.AddNode(&node2_);
manager.AddNode(&node6_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node5");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, RemoveSingleNodeCompositeNodeManager) {
CompositeNodeManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
manager.AddNode(&node1_);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, GetAndRemoveMultipleCompositeNodeManager) {
CompositeNodeManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
manager.AddNode(&node1_);
manager.AddNode(&node2_);
manager.AddNode(&node3_);
manager.AddNode(&node4_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.AddNode(&node5_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node5");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.AddNode(&node6_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, MultiDeviceSendRecvCompositeNodeManager) {
CompositeNodeManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
NodeDef node7;
NodeDef node8;
NodeDef node9;
NodeSetUp("Node7", kConv2D, kCPU1, 1001, &node7);
NodeSetUp("Node8", kConv2D, kCPU1, 2001, &node8);
NodeSetUp("Node9", kConv2D, kCPU1, 3001, &node9);
NodeDef send1;
NodeDef send2;
NodeDef recv1;
NodeDef recv2;
NodeSetUp("Send1", kSend, kChannelFrom0To1, 2002, &send1);
NodeSetUp("Send2", kSend, kChannelFrom1To0, 2005, &send2);
NodeSetUp("Recv1", kRecv, kCPU0, 2003, &recv1);
NodeSetUp("Recv2", kRecv, kCPU1, 2004, &recv2);
manager.AddNode(&node1_);
manager.AddNode(&node2_);
manager.AddNode(&node3_);
manager.AddNode(&node4_);
manager.AddNode(&node5_);
manager.AddNode(&node6_);
manager.AddNode(&node7);
manager.AddNode(&node8);
manager.AddNode(&node9);
manager.AddNode(&send1);
manager.AddNode(&send2);
manager.AddNode(&recv1);
manager.AddNode(&recv2);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node5");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Send1");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Recv1");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Recv2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Send2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node9");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node8");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node7");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, DeterminismInCompositeNodeManager) {
CompositeNodeManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
CompositeNodeManager manager2;
TF_EXPECT_OK(manager2.Init(&node_states_));
NodeDef node7;
NodeDef node8;
NodeDef node9;
NodeDef node10;
NodeDef node11;
NodeDef node12;
NodeSetUp("Node7", kConv2D, kCPU0, 1000, &node7);
NodeSetUp("Node8", kSend, kCPU0, 1000, &node8);
NodeSetUp("Node9", kRecv, kCPU0, 1000, &node9);
NodeSetUp("Node10", kConv2D, kCPU0, 999, &node10);
NodeSetUp("Node11", kRecv, kCPU0, 999, &node11);
NodeSetUp("Node12", kConv2D, kCPU1, 1000, &node12);
manager.AddNode(&node7);
manager.AddNode(&node8);
manager.AddNode(&node9);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node8");
EXPECT_EQ(manager.GetCurrNode()->op(), kSend);
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node9");
EXPECT_EQ(manager.GetCurrNode()->op(), kRecv);
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node7");
EXPECT_EQ(manager.GetCurrNode()->op(), kConv2D);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
manager.AddNode(&node9);
manager.AddNode(&node8);
manager.AddNode(&node7);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node8");
EXPECT_EQ(manager.GetCurrNode()->op(), kSend);
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node9");
EXPECT_EQ(manager.GetCurrNode()->op(), kRecv);
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node7");
EXPECT_EQ(manager.GetCurrNode()->op(), kConv2D);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
manager.AddNode(&node8);
manager.AddNode(&node10);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node10");
EXPECT_EQ(manager.GetCurrNode()->op(), kConv2D);
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node8");
EXPECT_EQ(manager.GetCurrNode()->op(), kSend);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
manager.AddNode(&node11);
manager.AddNode(&node8);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node11");
EXPECT_EQ(manager.GetCurrNode()->op(), kRecv);
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node8");
EXPECT_EQ(manager.GetCurrNode()->op(), kSend);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
manager.AddNode(&node7);
manager.AddNode(&node12);
manager2.AddNode(&node12);
manager2.AddNode(&node7);
EXPECT_EQ(manager.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
class TestVirtualScheduler : public VirtualScheduler {
public:
TestVirtualScheduler(const bool use_static_shapes,
const bool use_aggressive_shape_inference,
ReadyNodeManager* ready_node_manager, Cluster* cluster)
: VirtualScheduler(
use_static_shapes, use_aggressive_shape_inference, cluster,
ready_node_manager,
std::make_unique<VirtualPlacer>(cluster->GetDevices())) {
enable_mem_usage_tracking();
}
FRIEND_TEST(VirtualSchedulerTest, MemoryUsage);
FRIEND_TEST(VirtualSchedulerTest, ControlDependency);
FRIEND_TEST(VirtualSchedulerTest, ComplexDependency);
FRIEND_TEST(VirtualSchedulerTest, Variable);
FRIEND_TEST(VirtualSchedulerTest, InterDeviceTransfer);
};
class VirtualSchedulerTest : public ::testing::Test {
protected:
VirtualSchedulerTest() {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device = GetDummyCPUDevice();
devices[kCPU0] = cpu_device;
devices[kCPU1] = cpu_device;
cluster_ = std::make_unique<VirtualCluster>(devices);
    scheduler_ = std::make_unique<TestVirtualScheduler>(
        /*use_static_shapes=*/true,
        /*use_aggressive_shape_inference=*/true, &first_ready_manager_,
        cluster_.get());
}
DeviceProperties GetDummyCPUDevice() {
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
cpu_device.set_frequency(4000);
cpu_device.set_num_cores(2);
cpu_device.set_bandwidth(2000000);
return cpu_device;
}
void CreateGrapplerItemWithConv2Ds() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto x = ops::RandomUniform(
s.WithOpName("x"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto y = ops::RandomUniform(
s.WithOpName("y"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto z = ops::RandomUniform(
s.WithOpName("z"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto f = ops::RandomUniform(
s.WithOpName("f"), {kernel_, kernel_, depth_in_, depth_out_}, DT_FLOAT);
std::vector<int> strides = {1, 1, 1, 1};
auto c0 = ops::Conv2D(s.WithOpName("c0"), x, f, strides, "SAME");
auto c1 = ops::Conv2D(s.WithOpName("c1"), y, f, strides, "SAME");
auto c2 = ops::Conv2D(s.WithOpName("c2"), z, f, strides, "SAME");
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_conv2d_graph";
grappler_item_->fetch = {"c0", "c1"};
dependency_["c0"] = {"x", "f"};
dependency_["c1"] = {"y", "f"};
}
void CreateGrapplerItemWithConv2DAndVariable() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto x = ops::RandomUniform(
s.WithOpName("x"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto f = ops::Variable(s.WithOpName("f"),
{kernel_, kernel_, depth_in_, depth_out_}, DT_FLOAT);
std::vector<int> strides = {1, 1, 1, 1};
auto y = ops::Conv2D(s.WithOpName("y"), x, f, strides, "SAME");
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_conv2d_var_graph";
grappler_item_->fetch = {"y"};
dependency_["y"] = {"x", "f"};
}
void CreateGrapplerItemWithMatmulChain() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto a = ops::RandomUniform(s.WithOpName("a"), {3200, 3200}, DT_FLOAT);
auto b = ops::RandomUniform(s.WithOpName("b").WithControlDependencies(a),
{3200, 3200}, DT_FLOAT);
auto c = ops::RandomUniform(s.WithOpName("c").WithControlDependencies(b),
{3200, 3200}, DT_FLOAT);
auto d = ops::RandomUniform(s.WithOpName("d").WithControlDependencies(c),
{3200, 3200}, DT_FLOAT);
auto e = ops::RandomUniform(s.WithOpName("e").WithControlDependencies(d),
{3200, 3200}, DT_FLOAT);
auto ab = ops::MatMul(s.WithOpName("ab").WithControlDependencies(e), a, b);
auto abc = ops::MatMul(s.WithOpName("abc"), ab, c);
auto abcd = ops::MatMul(s.WithOpName("abcd"), abc, d);
auto abcde = ops::MatMul(s.WithOpName("abcde"), abcd, e);
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_matmul_sequence_graph";
grappler_item_->fetch = {"abcde"};
dependency_["ab"] = {"a", "b"};
dependency_["abc"] = {"ab", "c"};
dependency_["abcd"] = {"abc", "d"};
dependency_["abcde"] = {"abcd", "e"};
}
void CreateGrapplerItemWithAddN() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto x = ops::RandomUniform(s.WithOpName("x"), {10, 10, 10, 10}, DT_FLOAT);
auto y = ops::RandomUniform(s.WithOpName("y"), {10, 10, 10, 10}, DT_FLOAT);
auto z = ops::RandomUniform(s.WithOpName("z"), {10, 10, 10, 10}, DT_FLOAT);
auto w = ops::RandomUniform(s.WithOpName("w"), {10, 10, 10, 10}, DT_FLOAT);
OutputList input_tensors = {x, y, z, w};
auto add = ops::AddN(s.WithOpName("add"), input_tensors);
auto out = ops::Identity(s.WithOpName("out"), add);
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_addn_graph";
grappler_item_->fetch = {"out"};
dependency_["out"] = {"x", "y", "z", "w", "add"};
}
void CreateGrapplerItemWithUnnecessaryPlaceholderNodes() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto unnecessary = ops::Placeholder(s.WithOpName("unnecessary"), DT_FLOAT);
auto x = ops::Placeholder(s.WithOpName("x"), DT_FLOAT);
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_extra_placeholders";
grappler_item_->fetch = {"x"};
grappler_item_->feed = {{"x", Tensor()}, {"unnecessary", Tensor()}};
}
void CreateGrapplerItemWithControlDependency() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
std::vector<string> input_noop_names = {"x", "y", "z", "w", "u", "v", "t"};
std::vector<Operation> input_tensors;
for (const auto& input : input_noop_names) {
auto x = ops::NoOp(s.WithOpName(input));
input_tensors.push_back(x.operation);
}
auto out =
ops::NoOp(s.WithControlDependencies(input_tensors).WithOpName("out"));
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_control_dependency_graph";
grappler_item_->fetch = {"out"};
dependency_["out"] = input_noop_names;
}
void CreateGrapplerItemWithAddFromOneTensor() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto x = tensorflow::ops::RandomUniform(
s.WithOpName("x"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto y = tensorflow::ops::Add(s.WithOpName("y"), x, x);
Output fetch = ops::Identity(s.WithOpName("fetch"), y);
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_add_from_one_tensor";
grappler_item_->fetch = {"fetch"};
dependency_["fetch"] = {"y"};
dependency_["y"] = {"x"};
}
void CreateGrapplerItemWithSwitchMergeInput() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto x = ops::RandomUniform(
s.WithOpName("x"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto pred = ops::Const(s.WithOpName("pred"), false, {});
auto sw = ops::Switch(s.WithOpName("switch"), x, pred);
auto b = ops::RandomUniform(
s.WithOpName("b"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto a = ops::Add(s.WithOpName("a"), sw.output_true, b);
auto m = ops::Merge(s.WithOpName("m"), {sw.output_false, a.z});
auto z = ops::RandomUniform(
s.WithOpName("z"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto y = ops::Add(s.WithOpName("y"), m.output, z);
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_add_merge_switch";
grappler_item_->fetch = {"y"};
dependency_["y"] = {"m", "z"};
}
void CreateGrapplerItemWithBatchNorm() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto x = ops::RandomUniform(
s.WithOpName("x"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto scale =
ops::Ran |
1,369 | cpp | tensorflow/tensorflow | graph_memory | tensorflow/core/grappler/costs/graph_memory.cc | tensorflow/core/grappler/costs/graph_memory_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_COSTS_GRAPH_MEMORY_H_
#define TENSORFLOW_CORE_GRAPPLER_COSTS_GRAPH_MEMORY_H_
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/costs/cost_estimator.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/grappler_item.h"
namespace tensorflow {
namespace grappler {
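// Infers the peak memory usage of a GrapplerItem per device, either
// statically (by simulating the graph on a virtual cluster) or dynamically
// (from the step stats of a real run).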
class GraphMemory {
public:
struct LiveTensor {
string node;
int output_id;
size_t memory_used;
Costs::Duration allocation_time;
Costs::Duration deallocation_time;
};
struct MemoryUsage {
int64_t used_memory;
std::vector<LiveTensor> live_tensors;
};
explicit GraphMemory(const GrapplerItem& item)
: item_(item), unknown_usage_({-1, {}}) {}
Status InferStatically(
const std::unordered_map<string, DeviceProperties>& devices);
Status InferDynamically(Cluster* cluster);
int64_t GetWorstCaseMemoryUsage() const;
const MemoryUsage& GetPeakMemoryUsage(const string& device) const {
auto it = peak_usage_.find(device);
if (it == peak_usage_.end()) {
return unknown_usage_;
}
return it->second;
}
private:
void InferMemUsageForNodes(const std::vector<const NodeDef*>& nodes,
GraphProperties* properties, int64_t* worst_case,
int64_t* best_case) const;
int64_t InferMemUsageForNeighbors(
const std::vector<OpInfo::TensorProperties>& props) const;
void InferFromTrace(const StepStats& timeline);
const GrapplerItem& item_;
std::unordered_map<string, int64_t> worst_case_memory_usage_;
std::unordered_map<string, MemoryUsage> peak_usage_;
const MemoryUsage unknown_usage_;
};
}
}
#endif
#include "tensorflow/core/grappler/costs/graph_memory.h"
#include <deque>
#include "tensorflow/core/framework/allocation_description.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_description.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
Status GraphMemory::InferStatically(
const std::unordered_map<string, DeviceProperties>& devices) {
VirtualCluster cluster(devices);
TF_RETURN_IF_ERROR(cluster.Provision());
TF_RETURN_IF_ERROR(cluster.Initialize(item_));
RunMetadata metadata;
Status s = cluster.Run(item_, &metadata);
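  // A simulated out-of-memory condition still yields a usable trace, so
  // tolerate RESOURCE_EXHAUSTED and analyze whatever was recorded.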
if (!s.ok() && s.code() != error::RESOURCE_EXHAUSTED) {
return s;
}
InferFromTrace(metadata.step_stats());
return absl::OkStatus();
}
Status GraphMemory::InferDynamically(Cluster* cluster) {
if (!cluster->DetailedStatsEnabled()) {
return errors::Unavailable("Detailed stats collection must be enabled");
}
TF_RETURN_IF_ERROR(cluster->Initialize(item_));
RunMetadata metadata;
TF_RETURN_IF_ERROR(cluster->Run(item_, &metadata));
InferFromTrace(metadata.step_stats());
return absl::OkStatus();
}
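// Returns the largest peak across all devices, or -1 if nothing was inferred.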
int64_t GraphMemory::GetWorstCaseMemoryUsage() const {
int64_t worst_case = -1;
for (const auto& peak_usage : peak_usage_) {
worst_case = std::max(worst_case, peak_usage.second.used_memory);
}
return worst_case;
}
void GraphMemory::InferMemUsageForNodes(
const std::vector<const NodeDef*>& nodes, GraphProperties* properties,
int64_t* worst_case_memory_usage, int64_t* best_case_memory_usage) const {
*worst_case_memory_usage = 0;
*best_case_memory_usage = 0;
for (const auto& node : item_.graph.node()) {
std::vector<OpInfo::TensorProperties> outputs =
properties->GetOutputProperties(node.name());
int64_t node_memory_usage = InferMemUsageForNeighbors(outputs);
*worst_case_memory_usage += node_memory_usage;
std::vector<OpInfo::TensorProperties> inputs =
properties->GetInputProperties(node.name());
node_memory_usage += InferMemUsageForNeighbors(inputs);
*best_case_memory_usage =
std::max(*best_case_memory_usage, node_memory_usage);
}
}
int64_t GraphMemory::InferMemUsageForNeighbors(
const std::vector<OpInfo::TensorProperties>& props) const {
int64_t neighbors_memory_usage = 0;
for (const auto& prop : props) {
DataType dtype = prop.dtype();
int size = DataTypeSize(dtype);
TensorShapeProto shape = prop.shape();
if (shape.unknown_rank()) {
continue;
}
for (int i = 0; i < shape.dim_size(); ++i) {
if (shape.dim(i).size() < 0) {
shape.mutable_dim(i)->set_size(1);
}
}
    int64_t num_elems = TensorShape(shape).num_elements();
neighbors_memory_usage += num_elems * size;
}
return neighbors_memory_usage;
}
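// Looks up the live-tensor record for node_name:output_id, creating it in the
// given device's deque (and indexing it in live_tensors) on first use.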
static GraphMemory::LiveTensor* FindOrCreateLiveTensor(
const string& node_name, int output_id,
std::unordered_map<string, GraphMemory::LiveTensor*>* live_tensors,
std::deque<GraphMemory::LiveTensor>* device_tensors) {
string name = strings::StrCat(node_name, ":", output_id);
GraphMemory::LiveTensor* live;
auto it = live_tensors->find(name);
if (it == live_tensors->end()) {
GraphMemory::LiveTensor temp;
temp.node = node_name;
temp.output_id = output_id;
temp.allocation_time = 0;
temp.deallocation_time = 0;
device_tensors->push_front(temp);
live = &device_tensors->front();
(*live_tensors)[name] = live;
} else {
live = it->second;
}
return live;
}
namespace {
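// An allocation or deallocation event on the simulated timeline. Events are
// sorted by timestamp; peak memory is evaluated only at timestamp boundaries
// so that same-time allocations and deallocations net out.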
struct Event {
Event(int64_t _timestamp, bool _allocated,
const GraphMemory::LiveTensor* _tensor)
: timestamp(_timestamp), allocated(_allocated), tensor(_tensor) {}
int64_t timestamp;
bool allocated;
const GraphMemory::LiveTensor* tensor;
bool operator<(const Event& other) const {
return timestamp < other.timestamp;
}
};
}
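// Reconstructs tensor lifetimes from the step stats: each output is live from
// its producer's start time until just after its last consumer finishes. A
// sweep over the resulting allocation/deallocation events then finds each
// device's peak usage and the set of tensors live at that peak.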
void GraphMemory::InferFromTrace(const StepStats& timeline) {
std::unordered_map<string, string> node_placement;
for (const auto& dev_stats : timeline.dev_stats()) {
for (const auto& node_stats : dev_stats.node_stats()) {
node_placement[node_stats.node_name()] = dev_stats.device();
}
}
std::unordered_map<string, LiveTensor*> live_tensors;
std::unordered_map<string, std::deque<LiveTensor>> live_tensors_per_device;
std::unordered_map<string, const NodeDef*> node_map;
for (const NodeDef& node : item_.graph.node()) {
node_map[node.name()] = &node;
}
for (const auto& dev_stats : timeline.dev_stats()) {
const string& device_name = dev_stats.device();
    // string::find() returns npos (which is nonzero) on failure, so compare
    // against npos explicitly instead of treating the result as a bool.
    const bool is_gpu = (device_name.find("GPU:") != string::npos ||
                         device_name.find("gpu:") != string::npos);
std::deque<LiveTensor>& device_tensors =
live_tensors_per_device[dev_stats.device()];
for (const auto& node_stats : dev_stats.node_stats()) {
for (int i = 0; i < node_stats.output_size(); ++i) {
const auto& output = node_stats.output(i);
LiveTensor* live = FindOrCreateLiveTensor(
node_stats.node_name(), i, &live_tensors, &device_tensors);
live->memory_used = output.tensor_description()
.allocation_description()
.allocated_bytes();
live->allocation_time =
Costs::MicroSeconds(node_stats.all_start_micros());
live->deallocation_time = std::max<Costs::Duration>(
live->deallocation_time,
Costs::NanoSeconds(1) +
Costs::MicroSeconds(node_stats.all_start_micros() +
node_stats.op_end_rel_micros()));
}
auto it = node_map.find(node_stats.node_name());
if (it == node_map.end()) {
continue;
}
const NodeDef* node = it->second;
std::unordered_set<int> swapped_inputs;
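      // Inputs listed in a GPU node's _swap_to_host attribute live in host
      // memory, so they do not extend the device lifetime of their tensors.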
if (is_gpu) {
auto it = node->attr().find("_swap_to_host");
if (it != node->attr().end()) {
const AttrValue& val = it->second;
for (int port_id : val.list().i()) {
swapped_inputs.insert(port_id);
}
}
}
for (int i = 0; i < node->input_size(); ++i) {
if (swapped_inputs.find(i) != swapped_inputs.end()) {
continue;
}
const string& input = node->input(i);
int position;
string input_node = ParseNodeName(input, &position);
if (position < 0) {
continue;
}
LiveTensor* live = FindOrCreateLiveTensor(
input_node, position, &live_tensors,
&live_tensors_per_device[node_placement[input_node]]);
live->deallocation_time = std::max<Costs::Duration>(
live->deallocation_time,
Costs::NanoSeconds(1) +
Costs::MicroSeconds(node_stats.all_start_micros() +
node_stats.op_end_rel_micros()));
}
}
}
for (const auto& live_per_device : live_tensors_per_device) {
std::vector<Event> events;
events.reserve(2 * live_per_device.second.size());
for (const auto& live : live_per_device.second) {
events.emplace_back(static_cast<int64_t>(live.allocation_time.count()),
true, &live);
events.emplace_back(static_cast<int64_t>(live.deallocation_time.count()),
false, &live);
}
std::stable_sort(events.begin(), events.end());
size_t peak = 0;
std::unordered_set<const LiveTensor*> live_at_peak;
size_t current = 0;
std::unordered_set<const LiveTensor*> currently_live;
int events_size = events.size();
for (int i = 0; i < events_size; ++i) {
const auto& event = events[i];
if (event.allocated) {
VLOG(1) << "At time " << event.timestamp << " allocated "
<< event.tensor->memory_used << " for tensor "
<< event.tensor->node << ":" << event.tensor->output_id;
current += event.tensor->memory_used;
currently_live.insert(event.tensor);
} else {
VLOG(1) << "At time " << event.timestamp << " deallocated "
<< event.tensor->memory_used << " for tensor "
<< event.tensor->node << ":" << event.tensor->output_id;
current -= event.tensor->memory_used;
currently_live.erase(event.tensor);
}
if (i + 1 == events_size || event.timestamp != events[i + 1].timestamp) {
if (current > peak) {
peak = current;
live_at_peak = currently_live;
}
}
}
MemoryUsage& peak_mem_usage = peak_usage_[live_per_device.first];
peak_mem_usage.used_memory = peak;
peak_mem_usage.live_tensors.clear();
peak_mem_usage.live_tensors.reserve(live_at_peak.size());
for (const auto& live : live_at_peak) {
peak_mem_usage.live_tensors.push_back(*live);
}
}
}
}
} | #include "tensorflow/core/grappler/costs/graph_memory.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class GraphMemoryTest : public ::testing::Test {
protected:
std::unordered_map<string, DeviceProperties> devices_;
public:
GraphMemoryTest() {
devices_["/CPU:0"].set_type("CPU");
devices_["/CPU:0"].set_num_cores(1);
devices_["/CPU:0"].set_frequency(1);
devices_["/CPU:0"].set_bandwidth(1);
devices_["/GPU:0"].set_type("GPU");
devices_["/GPU:0"].set_num_cores(1);
devices_["/GPU:0"].set_frequency(1);
devices_["/CPU:0"].set_bandwidth(1);
(*devices_["/GPU:0"].mutable_environment())["architecture"] = "3";
}
};
TEST_F(GraphMemoryTest, Basic) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"/CPU:0"});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
item.feed.clear();
GraphMemory memory(item);
Status s = memory.InferStatically(devices_);
TF_CHECK_OK(s);
const GraphMemory::MemoryUsage& mem_usage =
memory.GetPeakMemoryUsage("/CPU:0");
EXPECT_EQ(120, mem_usage.used_memory);
std::set<string> tensors;
for (const auto& t : mem_usage.live_tensors) {
tensors.insert(strings::StrCat(t.node, ":", t.output_id));
}
std::set<string> expected;
expected.insert("Sign:0");
expected.insert("Sign_1:0");
expected.insert("x:0");
EXPECT_EQ(expected, tensors);
}
TEST_F(GraphMemoryTest, UnknownBatchSize) {
TrivialTestGraphInputYielder fake_input(4, 1, -1, false, {"/CPU:0"});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
item.feed.clear();
GraphMemory memory(item);
Status s = memory.InferStatically(devices_);
TF_CHECK_OK(s);
const GraphMemory::MemoryUsage& mem_usage =
memory.GetPeakMemoryUsage("/CPU:0");
EXPECT_EQ(16, mem_usage.used_memory);
std::set<string> tensors;
for (const auto& t : mem_usage.live_tensors) {
tensors.insert(strings::StrCat(t.node, ":", t.output_id));
}
std::set<string> expected;
expected.insert("Const/Const:0");
expected.insert("Sign:0");
expected.insert("x:0");
EXPECT_EQ(expected, tensors);
}
TEST_F(GraphMemoryTest, MultiDevice) {
TrivialTestGraphInputYielder fake_input(4, 2, 1024 * 1024, false,
{"/CPU:0", "/GPU:0"});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
item.feed.clear();
GraphMemory memory(item);
Status s = memory.InferStatically(devices_);
TF_CHECK_OK(s);
const GraphMemory::MemoryUsage& cpu_mem = memory.GetPeakMemoryUsage("/CPU:0");
EXPECT_EQ(16777216, cpu_mem.used_memory);
std::set<string> cpu_tensors;
for (const auto& t : cpu_mem.live_tensors) {
cpu_tensors.insert(strings::StrCat(t.node, ":", t.output_id));
}
std::set<string> cpu_expected;
cpu_expected.insert("Recv_Sign_1_0_on_/CPU_0:0");
cpu_expected.insert("Sign:0");
cpu_expected.insert("x:0");
cpu_expected.insert("AddN:0");
EXPECT_EQ(cpu_expected, cpu_tensors);
const GraphMemory::MemoryUsage& gpu_mem = memory.GetPeakMemoryUsage("/GPU:0");
EXPECT_EQ(16777216, gpu_mem.used_memory);
std::set<string> gpu_tensors;
for (const auto& t : gpu_mem.live_tensors) {
gpu_tensors.insert(strings::StrCat(t.node, ":", t.output_id));
}
std::set<string> gpu_expected;
gpu_expected.insert("Recv_AddN_0_on_/GPU_0:0");
gpu_expected.insert("Sign_1:0");
gpu_expected.insert("AddN_1:0");
gpu_expected.insert("AddN_3:0");
EXPECT_EQ(gpu_expected, gpu_tensors);
}
TEST_F(GraphMemoryTest, GpuSwapping) {
TrivialTestGraphInputYielder fake_input(4, 2, 1024 * 1024, false, {"/GPU:0"});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
item.feed.clear();
{
GraphMemory memory(item);
Status s = memory.InferStatically(devices_);
TF_CHECK_OK(s);
const GraphMemory::MemoryUsage& gpu_mem =
memory.GetPeakMemoryUsage("/GPU:0");
EXPECT_EQ(20971520, gpu_mem.used_memory);
std::set<string> gpu_tensors;
for (const auto& t : gpu_mem.live_tensors) {
gpu_tensors.insert(strings::StrCat(t.node, ":", t.output_id));
}
std::set<string> gpu_expected;
gpu_expected.insert("Sign:0");
gpu_expected.insert("Sign_1:0");
gpu_expected.insert("AddN:0");
gpu_expected.insert("AddN_1:0");
gpu_expected.insert("AddN_2:0");
EXPECT_EQ(gpu_expected, gpu_tensors);
}
{
for (auto& node : *item.graph.mutable_node()) {
if (node.name() == "AddN_1") {
(*node.mutable_attr())["_swap_to_host"].mutable_list()->add_i(0);
}
}
GraphMemory memory(item);
Status s = memory.InferStatically(devices_);
TF_CHECK_OK(s);
const GraphMemory::MemoryUsage& new_gpu_mem =
memory.GetPeakMemoryUsage("/GPU:0");
EXPECT_EQ(20971520, new_gpu_mem.used_memory);
std::set<string> new_gpu_tensors;
for (const auto& t : new_gpu_mem.live_tensors) {
new_gpu_tensors.insert(strings::StrCat(t.node, ":", t.output_id));
}
std::set<string> new_gpu_expected;
new_gpu_expected.insert("AddN:0");
new_gpu_expected.insert("AddN_1:0");
new_gpu_expected.insert("AddN_2:0");
new_gpu_expected.insert("AddN_3:0");
new_gpu_expected.insert("AddN_4:0");
EXPECT_EQ(new_gpu_expected, new_gpu_tensors);
}
}
TEST_F(GraphMemoryTest, CtrlDependencies) {
Scope s = Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a").WithDevice("/CPU:0"), 10.0f, {3});
Output v =
ops::Variable(s.WithOpName("v").WithDevice("/CPU:0"), {3}, DT_FLOAT);
Output assign =
ops::Assign(s.WithOpName("assign").WithDevice("/CPU:0"), v, a);
ops::NoOp init(
s.WithOpName("init").WithDevice("/CPU:0").WithControlDependencies(
assign));
GrapplerItem item;
item.fetch.push_back("init");
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphMemory memory(item);
Status status = memory.InferStatically(devices_);
TF_CHECK_OK(status);
const GraphMemory::MemoryUsage& mem = memory.GetPeakMemoryUsage("/CPU:0");
EXPECT_EQ(36, mem.used_memory);
std::set<string> tensors;
for (const auto& t : mem.live_tensors) {
tensors.insert(strings::StrCat(t.node, ":", t.output_id));
}
std::set<string> expected;
expected.insert("a:0");
expected.insert("v:0");
expected.insert("assign:0");
EXPECT_EQ(expected, tensors);
}
}
}
} |
1,370 | cpp | tensorflow/tensorflow | virtual_cluster | tensorflow/core/grappler/clusters/virtual_cluster.cc | tensorflow/core/grappler/clusters/virtual_cluster_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_CLUSTERS_VIRTUAL_CLUSTER_H_
#define TENSORFLOW_CORE_GRAPPLER_CLUSTERS_VIRTUAL_CLUSTER_H_
#include <unordered_map>
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/costs/analytical_cost_estimator.h"
#include "tensorflow/core/grappler/costs/op_level_cost_estimator.h"
#include "tensorflow/core/grappler/costs/virtual_scheduler.h"
#include "tensorflow/core/protobuf/device_properties.pb.h"
namespace tensorflow {
namespace grappler {
class VirtualCluster : public Cluster {
public:
explicit VirtualCluster(
const std::unordered_map<string, DeviceProperties>& devices);
VirtualCluster(const std::unordered_map<string, DeviceProperties>& devices,
std::unique_ptr<OpLevelCostEstimator> node_estimator,
std::unique_ptr<ReadyNodeManager> node_manager);
explicit VirtualCluster(const DeviceSet* device_set);
~VirtualCluster() override;
string type() const override { return "virtual"; }
Status Provision() override;
Status Initialize(const GrapplerItem& item) override;
Status Run(const GraphDef& graph,
const std::vector<std::pair<string, Tensor>>& feed,
const std::vector<string>& fetch, RunMetadata* metadata) override;
Status Run(const GrapplerItem& item, RunMetadata* metadata) override;
const DeviceSet* GetDeviceSet() const override { return device_set_; }
private:
std::unique_ptr<AnalyticalCostEstimator> estimator_;
const DeviceSet* device_set_ = nullptr;
};
}
}
#endif
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/grappler/clusters/utils.h"
#include "tensorflow/core/grappler/costs/op_level_cost_estimator.h"
namespace tensorflow {
namespace grappler {
VirtualCluster::VirtualCluster(
const std::unordered_map<string, DeviceProperties>& devices)
: VirtualCluster(devices, std::make_unique<OpLevelCostEstimator>(),
ReadyNodeManagerFactory("FirstReady")) {}
VirtualCluster::VirtualCluster(
const std::unordered_map<string, DeviceProperties>& devices,
std::unique_ptr<OpLevelCostEstimator> node_estimator,
std::unique_ptr<ReadyNodeManager> node_manager)
: Cluster(0) {
devices_ = devices;
  estimator_ = std::make_unique<AnalyticalCostEstimator>(
      this, std::move(node_estimator), std::move(node_manager),
      /*use_static_shapes=*/true, /*use_aggressive_shape_inference=*/false);
}
VirtualCluster::VirtualCluster(const DeviceSet* device_set)
: VirtualCluster(std::unordered_map<string, DeviceProperties>()) {
device_set_ = device_set;
for (const auto& device : device_set_->devices()) {
DeviceProperties props = GetDeviceInfo(device->parsed_name());
if (props.type() == "UNKNOWN") continue;
auto attrs = device->attributes();
props.set_memory_size(attrs.memory_limit());
devices_[device->name()] = props;
}
}
VirtualCluster::~VirtualCluster() {}
Status VirtualCluster::Provision() { return absl::OkStatus(); }
Status VirtualCluster::Initialize(const GrapplerItem& item) {
return absl::OkStatus();
}
Status VirtualCluster::Run(const GraphDef& graph,
const std::vector<std::pair<string, Tensor>>& feed,
const std::vector<string>& fetch,
RunMetadata* metadata) {
GrapplerItem item;
item.graph = graph;
item.feed = feed;
item.fetch = fetch;
return Run(item, metadata);
}
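// Runs the item through the analytical cost estimator instead of executing
// it, then fails with RESOURCE_EXHAUSTED if any device's simulated peak
// memory exceeds its declared capacity.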
Status VirtualCluster::Run(const GrapplerItem& item, RunMetadata* metadata) {
if (metadata) {
metadata->clear_step_stats();
metadata->clear_cost_graph();
metadata->clear_partition_graphs();
}
TF_RETURN_IF_ERROR(estimator_->Initialize(item));
  TF_RETURN_IF_ERROR(
      estimator_->PredictCosts(item.graph, metadata, /*costs=*/nullptr));
const std::unordered_map<string, DeviceProperties>& device = GetDevices();
std::unordered_map<string, int64_t> peak_mem_usage =
estimator_->GetScheduler()->GetPeakMemoryUsage();
for (const auto& mem_usage : peak_mem_usage) {
const string& device_name = mem_usage.first;
auto it = device.find(device_name);
if (it == device.end()) {
continue;
}
const DeviceProperties& dev = it->second;
if (dev.memory_size() <= 0) {
continue;
}
int64_t peak_mem = mem_usage.second;
if (peak_mem >= dev.memory_size()) {
      return errors::ResourceExhausted(
          "Graph requires ", peak_mem, " bytes of memory on device ",
          device_name, " to run, but the device only has ", dev.memory_size(),
          " bytes available.");
}
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include <memory>
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
class VirtualClusterTest : public ::testing::Test {
public:
void SetUp() override {
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
cpu_device.set_frequency(1000);
cpu_device.set_num_cores(4);
cpu_device.set_bandwidth(32);
cpu_device.set_l1_cache_size(32 * 1024);
cpu_device.set_l2_cache_size(256 * 1024);
cpu_device.set_l3_cache_size(4 * 1024 * 1024);
cpu_device.set_memory_size(1024 * 1024);
std::unordered_map<string, DeviceProperties> devices;
devices["/job:localhost/replica:0/task:0/cpu:0"] = cpu_device;
cluster_ = std::make_unique<VirtualCluster>(devices);
TF_CHECK_OK(cluster_->Provision());
}
void TearDown() override {
TF_CHECK_OK(cluster_->Shutdown());
cluster_.reset();
}
protected:
std::unique_ptr<VirtualCluster> cluster_;
};
TEST_F(VirtualClusterTest, ClusterType) {
CHECK_EQ("virtual", cluster_->type());
}
TEST_F(VirtualClusterTest, CostModel) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster_->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
EXPECT_LE(4, metadata.cost_graph().node_size());
for (const auto& node : metadata.cost_graph().node()) {
if (node.name().find("Const/Const") != string::npos) {
continue;
}
EXPECT_EQ(1, node.output_info_size());
EXPECT_EQ(40, node.output_info(0).size());
const TensorShapeProto& shape = node.output_info(0).shape();
EXPECT_EQ(2, shape.dim_size());
EXPECT_EQ(10, shape.dim(0).size());
EXPECT_EQ(1, shape.dim(1).size());
if (node.name() == "x") {
EXPECT_EQ(1500, node.compute_cost());
} else {
EXPECT_EQ(2500, node.compute_cost());
}
}
for (const auto& dev_stat : metadata.step_stats().dev_stats()) {
EXPECT_EQ("/job:localhost/replica:0/task:0/cpu:0", dev_stat.device());
for (const auto& node : dev_stat.node_stats()) {
if (node.node_name() == "AddN") {
EXPECT_EQ(2500, node.op_end_rel_micros());
}
}
}
}
TEST_F(VirtualClusterTest, OutOfMemory) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
auto zero = ops::Variable(root.WithOpName("zero"), {1024, 1024}, DT_FLOAT);
auto identity = ops::Identity(root.WithOpName("i"), zero);
auto identity2 = ops::Identity(root.WithOpName("i2"), identity);
GrapplerItem item;
TF_CHECK_OK(root.ToGraphDef(&item.graph));
item.fetch.push_back("i2");
TF_CHECK_OK(cluster_->Initialize(item));
Status s = cluster_->Run(item.graph, item.feed, item.fetch, nullptr);
EXPECT_EQ(error::RESOURCE_EXHAUSTED, s.code());
}
}
}
} |
1,371 | cpp | tensorflow/tensorflow | single_machine | tensorflow/core/grappler/clusters/single_machine.cc | tensorflow/core/grappler/clusters/single_machine_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_CLUSTERS_SINGLE_MACHINE_H_
#define TENSORFLOW_CORE_GRAPPLER_CLUSTERS_SINGLE_MACHINE_H_
#include "tensorflow/cc/training/coordinator.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/public/session.h"
namespace tensorflow {
namespace grappler {
class SingleMachine : public Cluster {
public:
SingleMachine(int timeout_s, int num_cpu_cores, int num_gpus);
~SingleMachine() override;
string type() const override { return "single_machine"; }
Status Provision() override;
Status Shutdown() override;
Status Initialize(const GrapplerItem& item) override;
Status Run(const GraphDef& item,
const std::vector<std::pair<string, Tensor>>& feed,
const std::vector<string>& fetch, RunMetadata* metadata) override;
const DeviceSet* GetDeviceSet() const override { return device_set_.get(); }
Status EnablePeakMemoryStats() override;
Status GetPeakMemoryUsage(
std::unordered_map<string, uint64>* device_peak_memory) const override;
private:
Status RunWithTimeout(const std::vector<std::pair<string, Tensor>>& feed,
const std::vector<string>& fetch,
RunMetadata* run_metadata);
Status RunWithTimeout(const std::vector<std::pair<string, Tensor>>& feed,
const std::vector<string>& fetch,
RunMetadata* run_metadata, int64_t timeout_s);
Status ResetSession();
Status CloseSession(bool use_timeout);
Status ShutdownSession();
void MergeCosts(CostGraphDef* graph_costs, const CostGraphDef& init_costs,
const CostGraphDef& queue_costs);
Status ClearAllocatorStats() const;
std::unique_ptr<Session> session_;
std::vector<QueueRunnerDef> queue_runner_defs_;
string last_graph_id_;
mutex last_graph_mu_;
const GraphDef* last_graph_ TF_GUARDED_BY(last_graph_mu_) = nullptr;
std::vector<string> init_ops_;
int64_t expected_init_time_s_;
std::unique_ptr<Coordinator> coordinator_;
std::unique_ptr<thread::ThreadPool> thread_pool_;
std::unique_ptr<DeviceSet> device_set_;
RunMetadata init_metadata_;
mutex close_mu_;
bool closing_ TF_GUARDED_BY(close_mu_);
bool cpu_allocator_stats_enabled_ = false;
};
}
}
#endif
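// Editorial usage sketch (not part of the original header): the expected
// lifecycle of a SingleMachine cluster, mirroring the unit tests further
// below. `item` is a hypothetical, already-populated GrapplerItem.
//
//   tensorflow::grappler::SingleMachine cluster(
//       /*timeout_s=*/60, /*num_cpu_cores=*/3, /*num_gpus=*/0);
//   TF_CHECK_OK(cluster.Provision());
//   TF_CHECK_OK(cluster.Initialize(item));
//   RunMetadata metadata;
//   TF_CHECK_OK(cluster.Run(item.graph, item.feed, item.fetch, &metadata));
//   TF_CHECK_OK(cluster.Shutdown());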
#include "tensorflow/core/grappler/clusters/single_machine.h"
#include <atomic>
#include <memory>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/cc/training/queue_runner.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/gpu/gpu_id.h"
#include "tensorflow/core/common_runtime/gpu/gpu_id_manager.h"
#include "tensorflow/core/grappler/clusters/utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
namespace tensorflow {
namespace grappler {
static std::atomic<bool> already_provisioned(false);
SingleMachine::SingleMachine(int timeout_s, int num_cpu_cores, int num_gpus)
: Cluster(timeout_s), expected_init_time_s_(0), closing_(false) {
VLOG(1) << "Number of CPU cores: " << num_cpu_cores
<< " Number of GPUs: " << num_gpus;
thread_pool_ = std::make_unique<thread::ThreadPool>(
Env::Default(), SanitizeThreadSuffix("single_machine"), 2);
(*options_.config.mutable_device_count())["CPU"] = 1;
if (num_gpus > 0) {
(*options_.config.mutable_device_count())["GPU"] = num_gpus;
}
CHECK_GE(num_cpu_cores, 1);
options_.config.set_intra_op_parallelism_threads(num_cpu_cores);
options_.config.add_session_inter_op_thread_pool()->set_num_threads(
num_cpu_cores);
if (timeout_s > 0) {
options_.config.set_operation_timeout_in_ms(timeout_s * 1000);
}
}
SingleMachine::~SingleMachine() {
  CloseSession(/*use_timeout=*/false).IgnoreError();
thread_pool_.reset();
}
Status SingleMachine::Provision() {
if (already_provisioned) {
return absl::UnavailableError(
"Can't provision more than one single cluster at a time");
}
TF_RETURN_IF_ERROR(ResetSession());
std::vector<DeviceAttributes> devices;
TF_RETURN_IF_ERROR(session_->ListDevices(&devices));
for (const auto& dev : devices) {
DeviceProperties attr;
if (dev.device_type() == "CPU") {
attr = GetLocalCPUInfo();
} else if (dev.device_type() == "GPU") {
DeviceNameUtils::ParsedName parsed;
if (!DeviceNameUtils::ParseFullName(dev.name(), &parsed)) {
return absl::InvalidArgumentError(
absl::StrCat("Not able to parse GPU device name: ", dev.name()));
}
TfDeviceId tf_device_id(parsed.id);
PlatformDeviceId platform_device_id;
Status s =
GpuIdManager::TfToPlatformDeviceId(tf_device_id, &platform_device_id);
if (!s.ok()) {
return absl::UnavailableError(
absl::StrCat("Unknown TF GPU device with id ", tf_device_id.value(),
": ", s.message()));
}
attr = GetLocalGPUInfo(platform_device_id);
} else if (dev.device_type().find("XLA") == string::npos) {
attr.set_type(dev.device_type());
}
attr.set_memory_size(dev.memory_limit());
devices_[dev.name()] = attr;
}
already_provisioned = true;
if (cpu_allocator_stats_enabled_) {
TF_RETURN_IF_ERROR(ClearAllocatorStats());
}
return absl::OkStatus();
}
Status SingleMachine::Initialize(const GrapplerItem& item) {
mutex_lock l(this->last_graph_mu_);
if (last_graph_ != &item.graph || last_graph_id_ != item.id) {
init_ops_ = item.init_ops;
expected_init_time_s_ = item.expected_init_time;
last_graph_ = nullptr;
queue_runner_defs_ = item.queue_runners;
last_graph_id_ = item.id;
}
return absl::OkStatus();
}
Status SingleMachine::Shutdown() {
TF_RETURN_IF_ERROR(ShutdownSession());
mutex_lock l(this->last_graph_mu_);
last_graph_ = nullptr;
already_provisioned = false;
return absl::OkStatus();
}
Status SingleMachine::Run(const GraphDef& graph_def,
const std::vector<std::pair<string, Tensor>>& feed,
const std::vector<string>& fetch,
RunMetadata* metadata) {
mutex_lock l(this->last_graph_mu_);
if (last_graph_ != &graph_def) {
TF_RETURN_IF_ERROR(ResetSession());
TF_RETURN_IF_ERROR(session_->Create(graph_def));
if (!init_ops_.empty()) {
init_metadata_ = RunMetadata();
int64_t timeout_s = timeout_s_ + expected_init_time_s_;
TF_RETURN_IF_ERROR(
RunWithTimeout({}, init_ops_, &init_metadata_, timeout_s));
      for (auto& node : *init_metadata_.mutable_cost_graph()->mutable_node()) {
node.clear_compute_cost();
}
init_metadata_.clear_step_stats();
}
RunOptions queue_options = run_options_;
if (queue_options.trace_level() >= RunOptions::HARDWARE_TRACE) {
queue_options.set_trace_level(RunOptions::SOFTWARE_TRACE);
}
for (size_t i = 0; i < queue_runner_defs_.size(); ++i) {
std::unique_ptr<QueueRunner> queue_runner;
TF_RETURN_IF_ERROR(QueueRunner::New(queue_runner_defs_[i],
coordinator_.get(), &queue_runner));
TF_RETURN_IF_ERROR(queue_runner->StartAndCollectCostGraph(session_.get(),
queue_options));
TF_RETURN_IF_ERROR(coordinator_->RegisterRunner(std::move(queue_runner)));
TF_RETURN_IF_ERROR(coordinator_->GetStatus());
}
for (int i = 0; i < NumWarmupSteps(); ++i) {
TF_RETURN_IF_ERROR(RunWithTimeout(feed, fetch, nullptr));
}
}
if (metadata) {
TF_RETURN_IF_ERROR(RunWithTimeout(feed, fetch, metadata));
CostGraphDef queue_costs;
TF_RETURN_IF_ERROR(coordinator_->ExportCostGraph(&queue_costs));
MergeCosts(metadata->mutable_cost_graph(), init_metadata_.cost_graph(),
queue_costs);
} else {
TF_RETURN_IF_ERROR(RunWithTimeout(feed, fetch, nullptr));
}
last_graph_ = &graph_def;
return absl::OkStatus();
}
Status SingleMachine::EnablePeakMemoryStats() {
EnableCPUAllocatorStats();
cpu_allocator_stats_enabled_ = true;
return absl::OkStatus();
}
Status SingleMachine::GetPeakMemoryUsage(
std::unordered_map<string, uint64>* device_peak_memory) const {
if (!cpu_allocator_stats_enabled_) {
return Status(absl::StatusCode::kInvalidArgument,
"Tracking allocation for CPU is not enabled.");
}
const DeviceMgr* device_mgr;
TF_RETURN_IF_ERROR(session_->LocalDeviceManager(&device_mgr));
std::vector<Device*> devices = device_mgr->ListDevices();
device_peak_memory->clear();
for (Device* device : devices) {
auto* allocator = device->GetAllocator(AllocatorAttributes());
if (!allocator->TracksAllocationSizes()) {
return Status(absl::StatusCode::kInvalidArgument,
"Tracking allocation is not enabled.");
}
absl::optional<AllocatorStats> stats = allocator->GetStats();
(*device_peak_memory)[device->name()] =
(stats ? stats->peak_bytes_in_use : 0);
}
return absl::OkStatus();
}
Status SingleMachine::RunWithTimeout(
const std::vector<std::pair<string, Tensor>>& feed,
const std::vector<string>& fetch, RunMetadata* run_metadata) {
return RunWithTimeout(feed, fetch, run_metadata, timeout_s_);
}
Status SingleMachine::RunWithTimeout(
const std::vector<std::pair<string, Tensor>>& feed,
const std::vector<string>& fetch, RunMetadata* run_metadata,
int64_t timeout_s) {
{
mutex_lock l(close_mu_);
CHECK(!closing_);
}
auto status = std::make_shared<Status>();
auto local_metadata = std::make_shared<RunMetadata>();
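  // Editorial note: `status` and `local_metadata` are shared_ptrs rather
  // than stack locals so that, if ExecuteWithTimeout gives up waiting, the
  // still-running closure below writes into storage that outlives this
  // frame instead of into dangling references.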
const bool executed_in_time = ExecuteWithTimeout(
[this, status, local_metadata, feed, fetch]() {
*status = session_->Run(run_options_, feed, {}, fetch, nullptr,
local_metadata.get());
},
timeout_s * 1000, thread_pool_.get());
if (!executed_in_time) {
return absl::DeadlineExceededError(absl::StrCat(
"Failed to run the graph after ", timeout_s, " seconds, aborting"));
} else if (run_metadata && status->ok()) {
*run_metadata = *local_metadata;
}
return *status;
}
Status SingleMachine::CloseSession(bool use_timeout) {
if (!session_ || !thread_pool_) {
return absl::OkStatus();
}
{
mutex_lock l(close_mu_);
if (!closing_) {
closing_ = true;
}
}
const bool executed_in_time = ExecuteWithTimeout(
[&]() {
if (this->coordinator_) {
this->coordinator_->RequestStop().IgnoreError();
while (!this->coordinator_->AllRunnersStopped()) {
Env::Default()->SleepForMicroseconds(1000000);
}
this->session_->Close().IgnoreError();
this->coordinator_.reset();
} else {
this->session_->Close().IgnoreError();
}
mutex_lock l2(close_mu_);
closing_ = false;
},
use_timeout ? timeout_s_ * 1000 : -1, thread_pool_.get());
if (!executed_in_time) {
return absl::UnavailableError(
absl::StrCat("Failed to close the previous session after ", timeout_s_,
" seconds, aborting"));
}
return absl::OkStatus();
}
Status SingleMachine::ShutdownSession() {
  TF_RETURN_IF_ERROR(CloseSession(/*use_timeout=*/true));
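  // Editorial note: destroying the thread pool joins any closures that are
  // still executing, which can block indefinitely; the reset is therefore
  // scheduled on a separate thread and waited on with a bounded timeout.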
auto n = std::make_shared<Notification>();
Env::Default()->SchedClosure([this, n]() {
thread_pool_.reset();
n->Notify();
});
int64_t timeout_us = 1000000ll * timeout_s_;
const bool notified = WaitForNotificationWithTimeout(n.get(), timeout_us);
if (!notified) {
return absl::UnavailableError(absl::StrCat(
"The session is still running graphs after ", timeout_s_, " seconds"));
}
return absl::OkStatus();
}
Status SingleMachine::ResetSession() {
if (session_) {
LOG(INFO) << "Cleaning up previous session";
TF_RETURN_IF_ERROR(ShutdownSession());
session_.reset();
}
LOG(INFO) << "Starting new session";
thread_pool_ = std::make_unique<thread::ThreadPool>(
Env::Default(), SanitizeThreadSuffix("single_machine"), 2);
session_.reset(NewSession(options_));
if (!session_) {
return absl::UnknownError("Failed to create session");
}
coordinator_ = std::make_unique<Coordinator>();
device_set_ = std::make_unique<DeviceSet>();
const DeviceMgr* device_mgr;
TF_RETURN_IF_ERROR(session_->LocalDeviceManager(&device_mgr));
for (auto d : device_mgr->ListDevices()) {
device_set_->AddDevice(d);
}
return absl::OkStatus();
}
void SingleMachine::MergeCosts(CostGraphDef* graph_costs,
const CostGraphDef& init_costs,
const CostGraphDef& queue_costs) {
graph_costs->mutable_node()->Reserve(graph_costs->node_size() +
init_costs.node_size() +
queue_costs.node_size());
std::unordered_set<string> nodes_seen;
int queue_costs_id_offset = graph_costs->node_size();
for (const auto& node : graph_costs->node()) {
nodes_seen.insert(node.name());
if (node.id() >= queue_costs_id_offset) {
queue_costs_id_offset = node.id() + 1;
}
}
int init_costs_id_offset = queue_costs_id_offset + queue_costs.node_size();
for (const auto& node : queue_costs.node()) {
if (nodes_seen.find(node.name()) != nodes_seen.end()) {
continue;
}
auto* new_node = graph_costs->add_node();
new_node->MergeFrom(node);
new_node->set_id(node.id() + queue_costs_id_offset);
if (new_node->id() >= init_costs_id_offset) {
init_costs_id_offset = new_node->id() + 1;
}
for (auto& input_info : *new_node->mutable_input_info()) {
input_info.set_preceding_node(input_info.preceding_node() +
queue_costs_id_offset);
}
for (auto& control_input : *new_node->mutable_control_input()) {
control_input += queue_costs_id_offset;
}
}
for (const auto& node : init_costs.node()) {
if (nodes_seen.find(node.name()) != nodes_seen.end()) {
continue;
}
auto* new_node = graph_costs->add_node();
new_node->MergeFrom(node);
new_node->set_id(node.id() + init_costs_id_offset);
for (auto& input_info : *new_node->mutable_input_info()) {
input_info.set_preceding_node(input_info.preceding_node() +
init_costs_id_offset);
}
for (auto& control_input : *new_node->mutable_control_input()) {
control_input += init_costs_id_offset;
}
}
}
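// Editorial worked example for MergeCosts above: if graph_costs holds nodes
// with ids {0, 1}, queue_costs holds one node with id 0, and init_costs
// holds one node with id 0, then queue_costs_id_offset starts at 2, the
// queue node is appended with id 0 + 2 = 2, init_costs_id_offset becomes 3,
// and the init node is appended with id 0 + 3 = 3. Input and control-input
// ids are shifted by the same offsets so references within each merged
// section stay consistent, and nodes whose names already appear in
// graph_costs are skipped as duplicates.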
Status SingleMachine::ClearAllocatorStats() const {
if (!cpu_allocator_stats_enabled_) {
return Status(absl::StatusCode::kInvalidArgument,
"Tracking allocation for CPU is not enabled.");
}
const DeviceMgr* device_mgr;
TF_RETURN_IF_ERROR(session_->LocalDeviceManager(&device_mgr));
std::vector<Device*> devices = device_mgr->ListDevices();
for (Device* device : devices) {
auto* allocator = device->GetAllocator(AllocatorAttributes());
if (!allocator->TracksAllocationSizes()) {
return Status(absl::StatusCode::kInvalidArgument,
"Tracking allocation is not enabled.");
}
if (!allocator->ClearStats()) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("Clearing allocation stats is not supported for ",
device->name()));
}
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/clusters/single_machine.h"
#include <memory>
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/queue_runner.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
class SingleMachineTest : public ::testing::Test {
public:
void SetUp() override {
#if TENSORFLOW_USE_ROCM
int timeout_s = 10;
#else
int timeout_s = 5;
#endif
#ifdef THREAD_SANITIZER
timeout_s *= 5;
#endif
    cluster_ = std::make_unique<SingleMachine>(
        timeout_s, /*num_cpu_cores=*/3, /*num_gpus=*/0);
TF_CHECK_OK(cluster_->EnablePeakMemoryStats());
TF_CHECK_OK(cluster_->Provision());
}
void TearDown() override {
if (cluster_) {
TF_CHECK_OK(cluster_->Shutdown());
}
cluster_.reset();
}
protected:
std::unique_ptr<SingleMachine> cluster_;
};
TEST_F(SingleMachineTest, ClusterType) {
CHECK_EQ("single_machine", cluster_->type());
}
TEST_F(SingleMachineTest, CostModel) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster_->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata;
const int64_t start_micros = Env::Default()->NowMicros();
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
const int64_t run_duration_micros =
Env::Default()->NowMicros() - start_micros;
EXPECT_LE(4, metadata.cost_graph().node_size());
for (const auto& node : metadata.cost_graph().node()) {
if (node.name()[0] == '_' || node.name().find("/_") != string::npos) {
continue;
}
#ifndef INTEL_MKL
EXPECT_EQ(1, node.output_info_size());
#endif
EXPECT_LE(8, node.output_info(0).size());
const TensorShapeProto& shape = node.output_info(0).shape();
EXPECT_EQ(2, shape.dim_size());
EXPECT_EQ(10, shape.dim(0).size());
EXPECT_EQ(1, shape.dim(1).size());
EXPECT_LE(0, node.compute_cost());
EXPECT_GE(run_duration_micros, node.compute_cost());
}
}
TEST_F(SingleMachineTest, Queue) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, true,
cluster_->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
}
TEST_F(SingleMachineTest, MultipleItems) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster_->GetDeviceNames());
for (int i = 0; i < 3; ++i) {
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata1;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata1));
RunMetadata metadata2;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata2));
EXPECT_LE(6, metadata1.cost_graph().node_size());
for (const auto& node : metadata1.cost_graph().node()) {
if (node.name()[0] == '_' || node.name().find("/_") != string::npos ||
node.name() == "queue") {
continue;
}
#ifndef INTEL_MKL
EXPECT_EQ(1, node.output_info_size());
#endif
const TensorShapeProto& shape = node.output_info(0).shape();
EXPECT_EQ(2, shape.dim_size());
EXPECT_EQ(10, shape.dim(0).size());
EXPECT_EQ(1, shape.dim(1).size());
}
for (int i = 0; i < metadata1.cost_graph().node_size(); ++i) {
metadata1.mutable_cost_graph()->mutable_node(i)->set_compute_cost(0);
metadata1.clear_step_stats();
}
for (int i = 0; i < metadata2.cost_graph().node_size(); ++i) {
metadata2.mutable_cost_graph()->mutable_node(i)->set_compute_cost(0);
metadata2.clear_step_stats();
}
string s1;
::tensorflow::protobuf::TextFormat::PrintToString(metadata1, &s1);
string s2;
::tensorflow::protobuf::TextFormat::PrintToString(metadata2, &s2);
EXPECT_EQ(s1, s2);
}
}
TEST_F(SingleMachineTest, GraphOptimizations) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
auto zero = ops::Const(root.WithOpName("zero"), 0.0f, {2, 3});
auto one = ops::Const(root.WithOpName("one"), 1.0f, {2, 3});
auto add = ops::Add(root.WithOpName("add"), zero, one);
auto square = ops::Square(root.WithOpName("square"), add);
auto new_shape = ops::Const(root.WithOpName("new_shape"), {3, -1}, {2});
auto reshaped = ops::Reshape(root.WithOpName("reshaped"), square, new_shape);
auto final_shape = ops::Shape(root.WithOpName("final_shape"), reshaped);
auto expected_shape =
ops::Const(root.WithOpName("expected_shape"), {3, 2}, {2});
auto valid =
ops::Equal(root.WithOpName("valid"), final_shape, expected_shape);
auto all_dims = ops::Const(root.WithOpName("all_dims"), {0}, {1});
auto all_valid = ops::All(root.WithOpName("all_valid"), valid, all_dims);
auto assert_valid = ops::Assert(root.WithOpName("assert_valid"), all_valid,
{final_shape.output});
GrapplerItem item;
TF_CHECK_OK(root.ToGraphDef(&item.graph));
item.fetch.push_back("assert_valid");
for (auto& node : *item.graph.mutable_node()) {
node.set_device("/cpu:0");
}
TF_CHECK_OK(cluster_->Shutdown());
cluster_->DisableOptimizer(true);
TF_CHECK_OK(cluster_->Provision());
RunMetadata metadata;
TF_CHECK_OK(cluster_->Initialize(item));
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
std::set<string> cost_nodes;
for (const auto& node : metadata.cost_graph().node()) {
#ifdef INTEL_MKL
if (node.name()[0] == '_' || node.name().find("/_") != string::npos) {
continue;
}
cost_nodes.insert(node.name());
#else
if (node.name()[0] != '_') {
cost_nodes.insert(node.name());
}
#endif
}
const std::set<string> expected_cost_nodes = {
"zero", "one", "add", "square",
"new_shape", "reshaped", "final_shape", "expected_shape",
"valid", "all_dims", "all_valid", "assert_valid"};
EXPECT_EQ(expected_cost_nodes, cost_nodes);
}
TEST_F(SingleMachineTest, TimeOuts) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
auto q = ops::FIFOQueue(root.WithOpName("queue"), {DataType::DT_INT32});
auto dequeue =
ops::QueueDequeue(root.WithOpName("dequeue"), q, {DataType::DT_INT32});
GrapplerItem item;
TF_CHECK_OK(root.ToGraphDef(&item.graph));
item.fetch.push_back("dequeue");
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata;
Status s1 = cluster_->Run(item.graph, item.feed, item.fetch, &metadata);
EXPECT_TRUE(errors::IsDeadlineExceeded(s1));
Status s2 = cluster_->Run(item.graph, item.feed, item.fetch, &metadata);
EXPECT_TRUE(errors::IsDeadlineExceeded(s2));
}
static void RunInfiniteTFLoop() {
GrapplerItem item;
NodeDef* shp = item.graph.add_node();
shp->set_name("shape");
shp->set_op("Const");
(*shp->mutable_attr())["dtype"].set_type(DT_INT32);
Tensor shp_tensor(DT_INT32, TensorShape({1}));
shp_tensor.flat<int32>()(0) = 1;
shp_tensor.AsProtoTensorContent(
(*shp->mutable_attr())["value"].mutable_tensor());
NodeDef* r = item.graph.add_node();
r->set_name("random");
r->set_op("RandomUniform");
(*r->mutable_attr())["dtype"].set_type(DT_FLOAT);
(*r->mutable_attr())["T"].set_type(DT_INT32);
*r->add_input() = "shape";
NodeDef* e = item.graph.add_node();
e->set_name("while/Enter");
e->set_op("Enter");
(*e->mutable_attr())["T"].set_type(DT_FLOAT);
(*e->mutable_attr())["frame_name"].set_s("while/while/");
*e->add_input() = "random";
NodeDef* m = item.graph.add_node();
m->set_name("while/Merge");
m->set_op("Merge");
(*m->mutable_attr())["T"].set_type(DT_FLOAT);
(*m->mutable_attr())["N"].set_i(2);
*m->add_input() = "while/Enter";
*m->add_input() = "while/NextIteration";
NodeDef* t = item.graph.add_node();
t->set_name("always_true");
t->set_op("Const");
(*t->mutable_attr())["dtype"].set_type(DT_BOOL);
*t->add_input() = "^while/Merge";
Tensor true_tensor(DT_BOOL, TensorShape());
true_tensor.flat<bool>()(0) = true;
true_tensor.AsProtoTensorContent(
(*t->mutable_attr())["value"].mutable_tensor());
NodeDef* c = item.graph.add_node();
c->set_name("while/LoopCond");
c->set_op("LoopCond");
*c->add_input() = "always_true";
NodeDef* s = item.graph.add_node();
s->set_name("while/Switch");
(*s->mutable_attr())["T"].set_type(DT_FLOAT);
s->set_op("Switch");
*s->add_input() = "while/Merge";
*s->add_input() = "while/LoopCond";
NodeDef* i = item.graph.add_node();
i->set_name("while/Identity");
i->set_op("Identity");
(*i->mutable_attr())["T"].set_type(DT_FLOAT);
*i->add_input() = "while/Switch:1";
NodeDef* n = item.graph.add_node();
n->set_name("while/NextIteration");
n->set_op("NextIteration");
(*n->mutable_attr())["T"].set_type(DT_FLOAT);
*n->add_input() = "while/Identity";
NodeDef* x = item.graph.add_node();
x->set_name("while/Exit");
x->set_op("Exit");
(*x->mutable_attr())["T"].set_type(DT_FLOAT);
*x->add_input() = "while/Switch";
item.fetch.push_back("while/Exit");
SingleMachine cluster(5, 3, 0);
TF_CHECK_OK(cluster.Provision());
TF_CHECK_OK(cluster.Initialize(item));
Status s1 = cluster.Run(item.graph, item.feed, item.fetch, nullptr);
if (!errors::IsDeadlineExceeded(s1)) {
LOG(ERROR) << "Expected 'deadline exceeded' error, got " << s1;
_exit(1);
}
Status s2 = cluster.Shutdown();
if (!errors::IsUnavailable(s2)) {
LOG(ERROR) << "Expected 'unavailable' error, got " << s2;
_exit(2);
}
_exit(0);
}
TEST_F(SingleMachineTest, InfiniteLoops) {
#if !(TENSORFLOW_USE_ROCM)
TF_CHECK_OK(cluster_->Shutdown());
EXPECT_EXIT(RunInfiniteTFLoop(), ::testing::ExitedWithCode(0), ".*");
#endif
}
TEST_F(SingleMachineTest, InitializationMemory) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
int batch_size = 10;
Output x =
ops::RandomNormal(s.WithOpName("x"), {batch_size, 1}, DataType::DT_FLOAT);
Output v = ops::Variable(s.WithOpName("v"), TensorShape({batch_size, 1}),
DataType::DT_FLOAT);
Output init = ops::Assign(s.WithOpName("init"), v, x);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.init_ops.push_back(init.name());
item.fetch.push_back(v.name());
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
bool found = false;
for (const auto& node : metadata.cost_graph().node()) {
found |= (node.name() == NodeName(init.name()));
}
EXPECT_TRUE(found);
}
namespace {
template <class T>
inline void SetNodeAttr(const string& key, const T& value, NodeDef* node) {
AttrValue attr_value;
SetAttrValue(value, &attr_value);
auto* attr_map = node->mutable_attr();
(*attr_map)[key] = attr_value;
}
template <>
inline void SetNodeAttr(const string& key, const Tensor& tensor,
NodeDef* node) {
TensorProto tensor_proto;
tensor.AsProtoTensorContent(&tensor_proto);
SetNodeAttr(key, tensor_proto, node);
}
}
TEST_F(SingleMachineTest, PersistentMemory) {
GrapplerItem item;
const DataType key_dtype = DT_INT64;
const DataType data_dtype = DT_INT64;
NodeDef* hashtable_node = item.graph.add_node();
hashtable_node->set_op("HashTable");
hashtable_node->set_name("hash_table");
SetNodeAttr("key_dtype", key_dtype, hashtable_node);
SetNodeAttr("value_dtype", data_dtype, hashtable_node);
NodeDef* keys_node = item.graph.add_node();
keys_node->set_op("Const");
keys_node->set_name("table_keys");
SetNodeAttr("dtype", key_dtype, keys_node);
Tensor keys(key_dtype, TensorShape{2});
keys.vec<int64_t>()(0) = 123;
keys.vec<int64_t>()(1) = 321;
SetNodeAttr("value", keys, keys_node);
NodeDef* values_node = item.graph.add_node();
values_node->set_op("Const");
values_node->set_name("table_values");
SetNodeAttr("dtype", data_dtype, values_node);
Tensor values(data_dtype, TensorShape{2});
values.vec<int64_t>()(0) = 789;
values.vec<int64_t>()(1) = 987;
SetNodeAttr("value", values, values_node);
NodeDef* init_table_node = item.graph.add_node();
init_table_node->set_op("InitializeTable");
init_table_node->set_name("initialize_table");
SetNodeAttr("Tkey", key_dtype, init_table_node);
SetNodeAttr("Tval", data_dtype, init_table_node);
*init_table_node->add_input() = "hash_table";
*init_table_node->add_input() = "table_keys";
*init_table_node->add_input() = "table_values";
item.init_ops.push_back(init_table_node->name());
NodeDef* query_node = item.graph.add_node();
query_node->set_op("Const");
query_node->set_name("query");
SetNodeAttr("dtype", key_dtype, query_node);
Tensor query(key_dtype, TensorShape({}));
query.flat<int64_t>()(0) = 0;
SetNodeAttr("value", query, query_node);
NodeDef* default_value_node = item.graph.add_node();
default_value_node->set_op("Const");
default_value_node->set_name("default_table_value");
SetNodeAttr("dtype", data_dtype, default_value_node);
Tensor dflt(data_dtype, TensorShape({}));
dflt.flat<int64_t>()(0) = 456;
SetNodeAttr("value", dflt, default_value_node);
NodeDef* lookup_node = item.graph.add_node();
lookup_node->set_op("LookupTableFind");
lookup_node->set_name("table_lookup");
SetNodeAttr("Tin", key_dtype, lookup_node);
SetNodeAttr("Tout", data_dtype, lookup_node);
*lookup_node->add_input() = "hash_table";
*lookup_node->add_input() = "query";
*lookup_node->add_input() = "default_table_value";
item.fetch.push_back(lookup_node->name());
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
bool found_table_init = false;
bool found_hashtable = false;
for (const auto& node : metadata.cost_graph().node()) {
if (node.name() == "hash_table") {
found_hashtable = true;
EXPECT_EQ(0, node.persistent_memory_size());
} else if (node.name() == "initialize_table") {
found_table_init = true;
EXPECT_LE(4 * sizeof(int64_t), node.persistent_memory_size());
}
}
EXPECT_TRUE(found_table_init);
EXPECT_TRUE(found_hashtable);
}
GrapplerItem CreateGrapplerItemWithResourceMemory() {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Variable(s.WithOpName("a"), TensorShape({128, 256}),
DataType::DT_FLOAT);
Output a_init =
ops::RandomNormal(s.WithOpName("a/init"), {128, 256}, DataType::DT_FLOAT);
Output a_init_assign = ops::Assign(s.WithOpName("a/init/assign"), a, a_init);
Output b =
ops::VarHandleOp(s.WithOpName("b"), DataType::DT_FLOAT, {256, 512});
Output b_read =
ops::ReadVariableOp(s.WithOpName("b/read"), b, DataType::DT_FLOAT);
Output b_init =
ops::RandomNormal(s.WithOpName("b/init"), {256, 512}, DataType::DT_FLOAT);
auto b_init_assign =
ops::AssignVariableOp(s.WithOpName("b/init/assign"), b, b_init);
ops::FIFOQueue queue(s.WithOpName("queue"), {DataType::DT_STRING});
Output some_string =
ops::Const(s.WithOpName("some_string"), string("nothing"));
ops::QueueEnqueue enqueue(s.WithOpName("enqueue"), queue, {some_string});
ops::QueueDequeue dequeue(s.WithOpName("dequeue"), queue,
{DataType::DT_STRING});
ops::IdentityReader reader(s.WithOpName("identity_reader"));
ops::ReaderRead read(s.WithOpName("read_from_queue"), reader, queue);
Output var_mul = ops::MatMul(s.WithOpName("var_matmul"), a, b_read);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
QueueRunnerDef queue_runner;
queue_runner.set_queue_name("queue");
*queue_runner.add_enqueue_op_name() = "enqueue";
item.queue_runners.push_back(queue_runner);
item.init_ops.push_back("a/init/assign");
item.init_ops.push_back("b/init/assign");
item.fetch.push_back("var_matmul");
item.fetch.push_back("dequeue");
return item;
}
#if defined(PLATFORM_GOOGLE)
TEST_F(SingleMachineTest, ReleaseMemoryAfterDestruction) {
GrapplerItem item = CreateGrapplerItemWithResourceMemory();
TF_CHECK_OK(cluster_->Initialize(item));
std::unordered_map<string, uint64> device_peak_memory_before;
TF_CHECK_OK(cluster_->GetPeakMemoryUsage(&device_peak_memory_before));
EXPECT_EQ(device_peak_memory_before.size(), 1);
EXPECT_LT(device_peak_memory_before.begin()->second, 400);
RunMetadata metadata;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
std::unordered_map<string, uint64> device_peak_memory;
TF_CHECK_OK(cluster_->GetPeakMemoryUsage(&device_peak_memory));
EXPECT_EQ(device_peak_memory.size(), 1);
EXPECT_GT(device_peak_memory.begin()->second, 0);
TF_CHECK_OK(cluster_->Shutdown());
TF_CHECK_OK(cluster_->Provision());
std::unordered_map<string, uint64> device_peak_memory_after;
TF_CHECK_OK(cluster_->GetPeakMemoryUsage(&device_peak_memory_after));
TF_CHECK_OK(cluster_->Shutdown());
EXPECT_EQ(device_peak_memory_before.size(), 1);
EXPECT_EQ(device_peak_memory_after.size(), 1);
EXPECT_LT(device_peak_memory_before.begin()->second, 400);
EXPECT_LT(device_peak_memory_after.begin()->second, 400);
}
TEST_F(SingleMachineTest, PeakMemory) {
GrapplerItem item = CreateGrapplerItemWithResourceMemory();
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
std::unordered_map<string, uint64> device_peak_memory;
TF_CHECK_OK(cluster_->GetPeakMemoryUsage(&device_peak_memory));
ASSERT_NE(
device_peak_memory.find("/job:localhost/replica:0/task:0/device:CPU:0"),
device_peak_memory.end());
uint64 cpu_memory =
device_peak_memory["/job:localhost/replica:0/task:0/device:CPU:0"];
EXPECT_GT(cpu_memory, 0);
TF_CHECK_OK(cluster_->Shutdown());
TF_CHECK_OK(cluster_->Provision());
device_peak_memory.clear();
TF_CHECK_OK(cluster_->GetPeakMemoryUsage(&device_peak_memory));
TF_CHECK_OK(cluster_->Shutdown());
ASSERT_NE(
device_peak_memory.find("/job:localhost/replica:0/task:0/device:CPU:0"),
device_peak_memory.end());
cpu_memory =
device_peak_memory["/job:localhost/replica:0/task:0/device:CPU:0"];
EXPECT_LT(cpu_memory, 200);
}
TEST_F(SingleMachineTest, PeakMemoryStatsNotEnabled) {
GrapplerItem item = CreateGrapplerItemWithResourceMemory();
TF_CHECK_OK(cluster_->Shutdown());
cluster_.reset();
  SingleMachine cluster(/*timeout_s=*/60, /*num_cpu_cores=*/3,
                        /*num_gpus=*/0);
TF_CHECK_OK(cluster.Provision());
TF_CHECK_OK(cluster.Initialize(item));
std::unordered_map<string, uint64> device_peak_memory;
Status s = cluster.GetPeakMemoryUsage(&device_peak_memory);
TF_CHECK_OK(cluster.Shutdown());
ASSERT_FALSE(s.ok());
EXPECT_TRUE(errors::IsInvalidArgument(s));
}
#endif
}
}
} |
1,372 | cpp | tensorflow/tensorflow | structure_verifier | tensorflow/core/grappler/verifiers/structure_verifier.cc | tensorflow/core/grappler/verifiers/structure_verifier_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_VERIFIERS_STRUCTURE_VERIFIER_H_
#define TENSORFLOW_CORE_GRAPPLER_VERIFIERS_STRUCTURE_VERIFIER_H_
#include <string>
#include <vector>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/grappler/verifiers/graph_verifier.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace grappler {
class StructureVerifier : public GraphVerifier {
public:
StructureVerifier() {}
~StructureVerifier() override {}
  string name() const override { return "structure_verifier"; }
Status Verify(const GraphDef& graph) override;
};
}
}
#endif
#include "tensorflow/core/grappler/verifiers/structure_verifier.h"
#include <string>
#include <vector>
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/validate.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/grappler/verifiers/graph_verifier.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace grappler {
Status StructureVerifier::Verify(const GraphDef& graph) {
StatusGroup status_group;
FunctionLibraryDefinition function_library(OpRegistry::Global(),
graph.library());
status_group.Update(tensorflow::graph::ValidateGraphDefAgainstOpRegistry(
graph, function_library));
status_group.Update(tensorflow::graph::VerifyNoDuplicateNodeNames(graph));
std::vector<const NodeDef*> topo_order;
status_group.Update(ComputeTopologicalOrder(graph, &topo_order));
return status_group.as_concatenated_status();
}
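// Editorial usage sketch (not part of the original file): running the
// verifier on an arbitrary GraphDef, as the unit tests below do;
// `graph_def` is a hypothetical, already-built graph.
//
//   StructureVerifier verifier;
//   Status s = verifier.Verify(graph_def);
//   if (!s.ok()) {
//     LOG(ERROR) << "Graph failed structural verification: " << s;
//   }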
}
} | #include "tensorflow/core/grappler/verifiers/structure_verifier.h"
#include <memory>
#include "absl/strings/match.h"
#include "tensorflow/cc/ops/parsing_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class StructureVerifierTest : public ::testing::Test {
protected:
StructureVerifierTest() { verifier_ = std::make_unique<StructureVerifier>(); }
void SetGraph(const string& gdef_ascii) {
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii, &graph_));
}
GraphDef graph_;
std::unique_ptr<StructureVerifier> verifier_;
};
Status Scalars(shape_inference::InferenceContext* c) {
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, c->Scalar());
}
return absl::OkStatus();
}
REGISTER_OP("TestParams").Output("o: float").SetShapeFn(Scalars);
REGISTER_OP("TestInput")
.Output("a: float")
.Output("b: float")
.SetShapeFn(Scalars);
REGISTER_OP("TestMul")
.Input("a: float")
.Input("b: float")
.Output("o: float")
.SetShapeFn(Scalars);
TEST_F(StructureVerifierTest, ValidGraphs) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
ops::ShapeN b(s.WithOpName("b"), {a, a, a});
GraphDef graph;
TF_CHECK_OK(s.ToGraphDef(&graph));
TF_EXPECT_OK(verifier_->Verify(graph));
SetGraph(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }");
TF_EXPECT_OK(verifier_->Verify(graph_));
}
TEST_F(StructureVerifierTest, OpNotRegistered) {
SetGraph(
"node { name: 'input' op: 'OpNotRegistered' }"
"node { name: 't1' op: 'TestMul' input: [ 'input:0', 't2' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'input:1', 't1' ] }");
Status status = verifier_->Verify(graph_);
EXPECT_TRUE(errors::IsNotFound(status));
EXPECT_TRUE(absl::StrContains(status.message(), "Op type not registered"));
}
TEST_F(StructureVerifierTest, DuplicateNodeNames) {
SetGraph(
"node { name: 'A' op: 'TestParams' }"
"node { name: 'A' op: 'TestInput' }");
Status status = verifier_->Verify(graph_);
EXPECT_TRUE(errors::IsAlreadyExists(status));
EXPECT_TRUE(absl::StrContains(status.message(), "Node already exists:"));
}
TEST_F(StructureVerifierTest, GraphWithInvalidCycle) {
SetGraph(
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'input:0', 't2' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'input:1', 't1' ] }");
Status status = verifier_->Verify(graph_);
EXPECT_TRUE(errors::IsInvalidArgument(status));
EXPECT_TRUE(absl::StrContains(
status.message(), "The graph couldn't be sorted in topological order"));
}
}
}
} |
1,373 | cpp | tensorflow/tensorflow | sig_node | tensorflow/core/grappler/graph_analyzer/sig_node.cc | tensorflow/core/grappler/graph_analyzer/sig_node_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_GRAPH_ANALYZER_SIG_NODE_H_
#define TENSORFLOW_CORE_GRAPPLER_GRAPH_ANALYZER_SIG_NODE_H_
#include <map>
#include <memory>
#include <vector>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/graph_analyzer/gen_node.h"
#include "tensorflow/core/grappler/graph_analyzer/hash_tools.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
namespace test {
class SigBaseTest;
}
class SigNode;
using SigNodeMap = std::map<string, std::unique_ptr<SigNode>>;
class SigNode {
public:
friend struct Signature;
explicit SigNode(const NodeDef* node);
const string& name() const { return node_->name(); }
const string& opcode() const { return node_->op(); }
const NodeDef* node_def() const { return node_; }
using TranslationMap =
      std::unordered_map<const GenNode*, SigNode*>;
void CopyLinks(const GenNode& from, const TranslationMap& map);
struct LinkTag {
struct Hasher {
size_t operator()(const LinkTag& tag) const noexcept {
size_t hval = port_hasher(tag.local);
CombineHash(port_hasher(tag.remote), &hval);
return hval;
}
GenNode::Port::Hasher port_hasher;
};
LinkTag(GenNode::Port a_local, GenNode::Port a_remote)
: local(a_local), remote(a_remote) {}
LinkTag() : local(false, 99), remote(false, 99) {}
GenNode::Port local;
GenNode::Port remote;
bool operator==(const LinkTag& other) const {
return local == other.local && remote == other.remote;
}
bool operator<(const LinkTag& other) const {
return local < other.local ||
(local == other.local && remote < other.remote);
}
};
struct Link {
LinkTag tag;
size_t unique_hash;
using PeerVector = std::vector<SigNode*>;
PeerVector peers;
};
using LinkHashMap = std::map<size_t, Link>;
const LinkHashMap& hash_to_link() const { return hash_to_link_; }
struct HashedPeer {
HashedPeer(size_t l, SigNode* p) : link_hash(l), peer(p) {}
struct LessByRank {
bool operator()(const SigNode::HashedPeer& left,
const SigNode::HashedPeer& right) {
return left.peer->unique_rank_ < right.peer->unique_rank_;
}
};
size_t link_hash;
SigNode* peer;
};
using HashedPeerVector = std::vector<HashedPeer>;
const HashedPeerVector& hashed_peers() const { return hashed_peers_; }
bool operator==(const SigNode& other) const;
bool operator!=(const SigNode& other) const { return !(*this == other); }
private:
friend class test::SigBaseTest;
void CopyLinksPass1(const GenNode& from, const TranslationMap& map,
std::map<LinkTag, Link>* link_map);
void CopyLinksPass2(std::map<LinkTag, Link>* link_map);
void ComputeTopoHash0();
void ComputeTopoHash(int distance);
size_t GetTopoHash(int distance) const;
size_t GetHighTopoHash() const {
CHECK(!topo_hash_.empty());
return topo_hash_.back();
}
void ReHighTopoHash() {
CHECK(!topo_hash_.empty());
CombineHash(1, &topo_hash_.back());
}
struct NodeOrderLess {
bool operator()(const SigNode* left, const SigNode* right) {
return left->topo_hash_.back() < right->topo_hash_.back();
}
};
private:
const NodeDef* node_;
uint64_t node_mask_ = 0;
LinkHashMap hash_to_link_;
HashedPeerVector hashed_peers_;
size_t unique_rank_ = ~0;
bool hash_is_final_ = false;
std::vector<size_t> topo_hash_;
uint64_t last_hashed_nodes_ = 0;
uint64_t next_hashed_nodes_ = 0;
};
struct Signature {
friend class test::SigBaseTest;
static constexpr int kMaxGraphSize = 64;
Status Compute();
string ToString() const;
SigNodeMap map;
size_t sig_short = 0;
std::vector<size_t> sig_full;
std::vector<SigNode*> nodes;
size_t Hash() const { return sig_short; }
bool operator==(const Signature& other) const;
private:
void PrepareNodes();
void FindUniqueHashes(size_t* next_node_id_p);
void ComputeOneRound(size_t next_node_id);
void OrderLinks();
};
}
}
}
#endif
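// Editorial usage sketch (not part of the original header): computing and
// comparing the signatures of two subgraphs. The SigNodeMaps are typically
// filled via Subgraph::ExtractForSignature(), as the unit tests below do;
// `sig_a` and `sig_b` are hypothetical names.
//
//   Signature sig_a, sig_b;
//   // ... populate sig_a.map and sig_b.map from two subgraphs ...
//   TF_CHECK_OK(sig_a.Compute());
//   TF_CHECK_OK(sig_b.Compute());
//   bool likely_isomorphic = (sig_a == sig_b);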
#include "tensorflow/core/grappler/graph_analyzer/sig_node.h"
#include <algorithm>
#include "absl/strings/str_format.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
static constexpr bool debug = false;
SigNode::SigNode(const NodeDef* node) : node_(node) {}
void SigNode::CopyLinks(const GenNode& from, const TranslationMap& map) {
hash_to_link_.clear();
hashed_peers_.clear();
std::map<LinkTag, Link> link_map;
CopyLinksPass1(from, map, &link_map);
CopyLinksPass2(&link_map);
}
void SigNode::CopyLinksPass1(const GenNode& from, const TranslationMap& map,
std::map<LinkTag, Link>* link_map) {
LinkTag::Hasher link_hasher;
for (const auto& entry : from.links()) {
for (const auto& target : entry.second) {
auto nodeit = map.find(target.node);
if (nodeit == map.end()) {
continue;
}
LinkTag tag(entry.first, target.port);
size_t hval = link_hasher(tag);
Link& map_entry = (*link_map)[tag];
if (map_entry.peers.empty()) {
map_entry.tag = tag;
map_entry.unique_hash = hval;
}
map_entry.peers.push_back(nodeit->second);
}
}
}
void SigNode::CopyLinksPass2(std::map<LinkTag, Link>* link_map) {
for (auto& entry : *link_map) {
Link* hl_entry_ptr = &hash_to_link_[entry.second.unique_hash];
while (!hl_entry_ptr->peers.empty()) {
CombineHash(1, &entry.second.unique_hash);
hl_entry_ptr = &hash_to_link_[entry.second.unique_hash];
}
for (const auto& peer : entry.second.peers) {
hashed_peers_.emplace_back(HashedPeer(entry.second.unique_hash, peer));
}
hl_entry_ptr->tag = entry.second.tag;
hl_entry_ptr->unique_hash = entry.second.unique_hash;
hl_entry_ptr->peers.swap(entry.second.peers);
}
}
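// Editorial note on CopyLinksPass2 above: when two distinct LinkTags happen
// to share a hash value, the inner while-loop keeps perturbing the hash with
// CombineHash(1, ...) until an unused slot in hash_to_link_ is found, so
// every tag still receives a unique key. For example, three tags that all
// hash to 999 are stored under 999, under 999 perturbed once, and under 999
// perturbed twice; the DuplicateHash unit test below exercises this case.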
void SigNode::ComputeTopoHash0() {
topo_hash_.clear();
last_hashed_nodes_ = next_hashed_nodes_ = node_mask_;
size_t hval = std::hash<string>()(opcode());
for (const auto& entry : hashed_peers_) {
CombineHash(entry.link_hash, &hval);
}
topo_hash_.push_back(hval);
}
void SigNode::ComputeTopoHash(int distance) {
next_hashed_nodes_ = last_hashed_nodes_;
if (debug) {
LOG(INFO) << "DEBUG node " << name() << " mask=" << std::hex
<< next_hashed_nodes_;
}
if (hash_is_final_) {
return;
}
const int64_t topo_hash_size = topo_hash_.size();
  CHECK_EQ(topo_hash_size, distance);
int prev = distance - 1;
size_t hval = topo_hash_[0];
if (!hashed_peers_.empty()) {
size_t last_link_hash = hashed_peers_[0].link_hash;
size_t comm_hash = 0;
for (const auto& entry : hashed_peers_) {
if (entry.link_hash != last_link_hash) {
CombineHash(last_link_hash, &hval);
CombineHash(comm_hash, &hval);
comm_hash = 0;
last_link_hash = entry.link_hash;
}
CombineHashCommutative(entry.peer->GetTopoHash(prev), &comm_hash);
next_hashed_nodes_ |= entry.peer->last_hashed_nodes_;
if (debug) {
LOG(INFO) << "DEBUG node " << name() << " += " << entry.peer->name()
<< " mask=" << std::hex << next_hashed_nodes_;
}
}
CombineHash(last_link_hash, &hval);
CombineHash(comm_hash, &hval);
}
topo_hash_.push_back(hval);
}
size_t SigNode::GetTopoHash(int distance) const {
CHECK(!topo_hash_.empty());
const int64_t topo_hash_size = topo_hash_.size();
if (distance >= topo_hash_size) {
CHECK(hash_is_final_);
return topo_hash_.back();
} else {
return topo_hash_[distance];
}
}
bool SigNode::operator==(const SigNode& other) const {
if (opcode() != other.opcode()) {
return false;
}
if (unique_rank_ != other.unique_rank_) {
return false;
}
if (hashed_peers_.size() != other.hashed_peers_.size()) {
return false;
}
for (auto it1 = hashed_peers_.begin(), it2 = other.hashed_peers_.begin();
it1 != hashed_peers_.end(); ++it1, ++it2) {
if (it1->link_hash != it2->link_hash) {
return false;
}
if (it1->peer->unique_rank_ != it2->peer->unique_rank_) {
return false;
}
}
return true;
}
constexpr int Signature::kMaxGraphSize;
string Signature::ToString() const {
string result;
for (size_t n = 0; n < nodes.size(); ++n) {
result += absl::StrFormat("%d:%s", n, nodes[n]->opcode());
for (const auto& entry : nodes[n]->hashed_peers_) {
const auto& link = nodes[n]->hash_to_link_[entry.link_hash];
if (link.tag.local.IsInbound()) {
result +=
absl::StrFormat("[%s:%s:%d]", string(link.tag.local),
string(link.tag.remote), entry.peer->unique_rank_);
}
}
result.push_back(',');
}
return result;
}
Status Signature::Compute() {
if (map.size() > kMaxGraphSize) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrFormat(
"A graph of %d nodes is too big for signature computation, "
"the maximal supported node count is %d.",
map.size(), kMaxGraphSize));
}
size_t next_node_id = 0;
sig_short = 0;
sig_full.resize(0);
PrepareNodes();
FindUniqueHashes(&next_node_id);
while (next_node_id < map.size()) {
ComputeOneRound(next_node_id);
FindUniqueHashes(&next_node_id);
}
OrderLinks();
return absl::OkStatus();
}
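// Editorial outline of Compute() above: the loop alternates two phases.
// FindUniqueHashes() ranks every not-yet-ranked node whose current
// topological hash is unique, and ComputeOneRound() re-hashes the remaining
// nodes over progressively larger neighborhoods until each node's
// reachable-node mask stops growing. Because FindUniqueHashes() forcibly
// ranks one node even when every hash collides, next_node_id strictly
// increases and the loop terminates.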
void Signature::PrepareNodes() {
nodes.resize(0);
  uint64_t mask = 1;
for (const auto& entry : map) {
SigNode* node = entry.second.get();
node->last_hashed_nodes_ = node->node_mask_ = mask;
mask <<= 1;
node->unique_rank_ = ~0;
node->hash_is_final_ = false;
node->ComputeTopoHash0();
if (node->GetHighTopoHash() <= map.size()) {
node->ReHighTopoHash();
}
nodes.emplace_back(node);
}
}
void Signature::FindUniqueHashes(size_t* next_node_id_p) {
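  // Editorial note: nodes[0 .. *next_node_id_p) already carry final ranks;
  // the stable sort below orders only the remaining nodes by their current
  // hash, so duplicate hashes become adjacent and can be skipped as a run.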
std::stable_sort(nodes.begin() + *next_node_id_p, nodes.end(),
SigNode::NodeOrderLess());
bool found_unique = false;
for (size_t n = *next_node_id_p; n < nodes.size(); ++n) {
size_t cur_hash = nodes[n]->GetHighTopoHash();
if (n + 1 < nodes.size() && nodes[n + 1]->GetHighTopoHash() == cur_hash) {
for (++n;
n + 1 < nodes.size() && nodes[n + 1]->GetHighTopoHash() == cur_hash;
++n) {
}
if (found_unique || n != nodes.size() - 1) {
continue;
}
}
found_unique = true;
size_t id = (*next_node_id_p)++;
nodes[n]->unique_rank_ = id;
size_t last_hash = nodes[n]->GetHighTopoHash();
CombineHash(last_hash, &sig_short);
sig_full.push_back(last_hash);
nodes[n]->topo_hash_.resize(1);
nodes[n]->topo_hash_[0] = id + 1;
nodes[n]->hash_is_final_ = true;
nodes[n]->last_hashed_nodes_ = nodes[n]->node_mask_;
if (n != id) {
std::swap(nodes[id], nodes[n]);
}
}
}
void Signature::ComputeOneRound(size_t next_node_id) {
int debug_i = 0;
for (auto it = nodes.begin() + next_node_id; it != nodes.end(); ++it) {
auto node = *it;
node->topo_hash_.resize(1);
node->last_hashed_nodes_ = node->node_mask_;
node->hash_is_final_ = false;
if (debug) {
LOG(INFO) << "DEBUG distance=" << 0 << " node " << debug_i++ << " "
<< node->name() << " mask=" << std::hex
<< node->last_hashed_nodes_;
}
}
bool stop = false;
for (int distance = 1; !stop; ++distance) {
for (auto it = nodes.begin() + next_node_id; it != nodes.end(); ++it) {
auto node = *it;
if (node->hash_is_final_) {
continue;
}
node->ComputeTopoHash(distance);
if (node->GetHighTopoHash() <= nodes.size()) {
node->ReHighTopoHash();
}
}
stop = true;
debug_i = 0;
for (auto it = nodes.begin() + next_node_id; it != nodes.end(); ++it) {
auto node = *it;
if (debug) {
LOG(INFO) << "DEBUG distance=" << distance << " node " << debug_i++
<< " " << node->name() << " oldmask=" << std::hex
<< node->last_hashed_nodes_ << " mask=" << std::hex
<< node->next_hashed_nodes_;
}
if (node->last_hashed_nodes_ == node->next_hashed_nodes_) {
node->hash_is_final_ = true;
} else {
node->last_hashed_nodes_ = node->next_hashed_nodes_;
stop = false;
}
}
}
}
void Signature::OrderLinks() {
for (const auto& node : nodes) {
if (node->hashed_peers_.empty()) {
continue;
}
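    // Editorial note: cur_link_hash starts at a value guaranteed to differ
    // from the first element's hash, so the first iteration always opens a
    // new run of equal link hashes.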
size_t cur_link_hash = node->hashed_peers_[0].link_hash + 1;
int first_idx = -1;
int idx;
for (idx = 0; idx < static_cast<int64_t>(node->hashed_peers_.size());
++idx) {
auto& entry = node->hashed_peers_[idx];
if (entry.link_hash == cur_link_hash) {
continue;
}
if (idx - first_idx > 1) {
std::sort(node->hashed_peers_.begin() + first_idx,
node->hashed_peers_.begin() + idx,
SigNode::HashedPeer::LessByRank());
}
cur_link_hash = entry.link_hash;
first_idx = idx;
}
if (idx - first_idx > 1) {
std::sort(node->hashed_peers_.begin() + first_idx,
node->hashed_peers_.begin() + idx,
SigNode::HashedPeer::LessByRank());
}
}
}
bool Signature::operator==(const Signature& other) const {
if (sig_short != other.sig_short) {
return false;
}
if (sig_full.size() != other.sig_full.size()) {
return false;
}
for (auto it1 = sig_full.begin(), it2 = other.sig_full.begin();
it1 != sig_full.end(); ++it1, ++it2) {
if (*it1 != *it2) {
return false;
}
}
if (nodes.size() != other.nodes.size()) {
return false;
}
for (auto it1 = nodes.begin(), it2 = other.nodes.begin(); it1 != nodes.end();
++it1, ++it2) {
if (**it1 != **it2) {
return false;
}
}
return true;
}
}
}
} | #include "tensorflow/core/grappler/graph_analyzer/sig_node.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/grappler/graph_analyzer/subgraph.h"
#include "tensorflow/core/grappler/graph_analyzer/test_tools.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
namespace test {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Gt;
using ::testing::Ne;
using ::testing::SizeIs;
TEST(SigNodeLinkTag, Compare) {
SigNode::LinkTag a(GenNode::Port(false, 1), GenNode::Port(false, 2));
SigNode::LinkTag b(GenNode::Port(false, 1), GenNode::Port(false, 2));
SigNode::LinkTag c(GenNode::Port(false, 2), GenNode::Port(false, 1));
SigNode::LinkTag d(GenNode::Port(false, 1), GenNode::Port(false, 3));
SigNode::LinkTag e(GenNode::Port(false, 2), GenNode::Port(false, 2));
EXPECT_TRUE(a == b);
EXPECT_FALSE(a == c);
EXPECT_FALSE(a == e);
EXPECT_FALSE(a < b);
EXPECT_FALSE(b < a);
EXPECT_TRUE(a < c);
EXPECT_FALSE(c < a);
EXPECT_TRUE(a < d);
EXPECT_FALSE(d < a);
}
class SigBaseTest : public ::testing::Test, protected TestGraphs {
protected:
void BuildSigMap(const GraphDef& graph) {
gen_map_.clear();
sig_.map.clear();
CHECK(GenNode::BuildGraphInMap(graph, &gen_map_).ok());
Subgraph::Identity id;
for (const auto& entry : gen_map_) {
id.insert(entry.second.get());
}
Subgraph sg(id);
sg.ExtractForSignature(&sig_.map);
}
static void CopyLinksPass2(
std::map<SigNode::LinkTag, SigNode::Link>* link_map, SigNode* node) {
node->CopyLinksPass2(link_map);
}
static void ComputeTopoHash0(SigNode* node) { node->ComputeTopoHash0(); }
static void ComputeTopoHash(int distance, SigNode* node) {
node->ComputeTopoHash(distance);
}
static size_t GetTopoHash(int distance, SigNode* node) {
return node->GetTopoHash(distance);
}
static size_t GetHighTopoHash(SigNode* node) {
return node->GetHighTopoHash();
}
static void ReHighTopoHash(SigNode* node) { node->ReHighTopoHash(); }
static SigNode::HashedPeerVector& RefHashedPeers(SigNode* node) {
return node->hashed_peers_;
}
static size_t& RefUniqueRank(SigNode* node) { return node->unique_rank_; }
static bool& RefHashIsFinal(SigNode* node) { return node->hash_is_final_; }
static std::vector<size_t>& RefTopoHash(SigNode* node) {
return node->topo_hash_;
}
static uint64_t& RefNodeMask(SigNode* node) { return node->node_mask_; }
static uint64_t& RefLastHashedNodes(SigNode* node) {
return node->last_hashed_nodes_;
}
static uint64_t& RefNextHashedNodes(SigNode* node) {
return node->next_hashed_nodes_;
}
static void PrepareNodes(Signature* signature) { signature->PrepareNodes(); }
static void FindUniqueHashes(size_t* next_node_id_p, Signature* signature) {
signature->FindUniqueHashes(next_node_id_p);
}
static void ComputeOneRound(size_t next_node_id, Signature* signature) {
signature->ComputeOneRound(next_node_id);
}
static void OrderLinks(Signature* signature) { signature->OrderLinks(); }
GenNodeMap gen_map_;
Signature sig_;
};
class SigNodeTest : public SigBaseTest {};
TEST_F(SigNodeTest, DuplicateHash) {
NodeDef node1 = MakeNodeConst("node1");
NodeDef node2 = MakeNodeConst("node2");
NodeDef node3 = MakeNodeShapeN("node3", "node1", "node2");
SigNode sn1(&node1);
SigNode sn2(&node2);
SigNode sn3(&node3);
constexpr size_t kSameHash = 999;
SigNode::Link link1;
link1.tag = SigNode::LinkTag(GenNode::Port(true, 0), GenNode::Port(false, 0));
link1.unique_hash = kSameHash;
link1.peers.emplace_back(&sn1);
SigNode::Link link2;
link2.tag = SigNode::LinkTag(GenNode::Port(true, 1), GenNode::Port(false, 0));
link2.unique_hash = kSameHash;
link2.peers.emplace_back(&sn2);
SigNode::Link link3;
link3.tag = SigNode::LinkTag(GenNode::Port(true, 2), GenNode::Port(false, 0));
link3.unique_hash = kSameHash;
link3.peers.emplace_back(&sn3);
std::map<SigNode::LinkTag, SigNode::Link> link_map;
link_map[link1.tag] = link1;
link_map[link2.tag] = link2;
link_map[link3.tag] = link3;
CopyLinksPass2(&link_map, &sn3);
auto& hl = sn3.hash_to_link();
EXPECT_THAT(hl, SizeIs(3));
std::map<SigNode::LinkTag, SigNode::Link> rehashed;
auto hlit = hl.begin();
ASSERT_THAT(hlit, Ne(hl.end()));
EXPECT_THAT(hlit->second.unique_hash, Eq(hlit->first));
rehashed[hlit->second.tag] = hlit->second;
++hlit;
ASSERT_THAT(hlit, Ne(hl.end()));
EXPECT_THAT(hlit->second.unique_hash, Eq(hlit->first));
rehashed[hlit->second.tag] = hlit->second;
++hlit;
ASSERT_THAT(hlit, Ne(hl.end()));
EXPECT_THAT(hlit->second.unique_hash, Eq(hlit->first));
rehashed[hlit->second.tag] = hlit->second;
ASSERT_THAT(rehashed, SizeIs(3));
auto rhit = rehashed.begin();
ASSERT_THAT(rhit, Ne(rehashed.end()));
EXPECT_TRUE(rhit->second.tag == link1.tag);
EXPECT_THAT(rhit->second.unique_hash, Eq(kSameHash));
EXPECT_THAT(rhit->second.peers, ElementsAre(&sn1));
++rhit;
ASSERT_THAT(rhit, Ne(rehashed.end()));
EXPECT_TRUE(rhit->second.tag == link2.tag);
EXPECT_THAT(rhit->second.unique_hash, Ne(kSameHash));
size_t hash2 = rhit->second.unique_hash;
EXPECT_THAT(rhit->second.peers, ElementsAre(&sn2));
++rhit;
ASSERT_THAT(rhit, Ne(rehashed.end()));
EXPECT_TRUE(rhit->second.tag == link3.tag);
EXPECT_THAT(rhit->second.unique_hash, Ne(kSameHash));
EXPECT_THAT(rhit->second.unique_hash, Ne(hash2));
size_t hash3 = rhit->second.unique_hash;
EXPECT_THAT(rhit->second.peers, ElementsAre(&sn3));
auto& peers = sn3.hashed_peers();
EXPECT_THAT(peers, SizeIs(3));
auto peerit = peers.begin();
ASSERT_THAT(peerit, Ne(peers.end()));
EXPECT_THAT(peerit->link_hash, Eq(kSameHash));
EXPECT_THAT(peerit->peer, Eq(&sn1));
++peerit;
ASSERT_THAT(peerit, Ne(peers.end()));
EXPECT_THAT(peerit->link_hash, Eq(hash2));
EXPECT_THAT(peerit->peer, Eq(&sn2));
++peerit;
ASSERT_THAT(peerit, Ne(peers.end()));
EXPECT_THAT(peerit->link_hash, Eq(hash3));
EXPECT_THAT(peerit->peer, Eq(&sn3));
}
TEST_F(SigNodeTest, GetTopoHash) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
RefTopoHash(&sn1).emplace_back(123);
RefTopoHash(&sn1).emplace_back(456);
EXPECT_THAT(GetTopoHash(0, &sn1), Eq(123));
EXPECT_THAT(GetTopoHash(1, &sn1), Eq(456));
RefHashIsFinal(&sn1) = true;
EXPECT_THAT(GetTopoHash(0, &sn1), Eq(123));
EXPECT_THAT(GetTopoHash(1, &sn1), Eq(456));
EXPECT_THAT(GetTopoHash(2, &sn1), Eq(456));
EXPECT_THAT(GetHighTopoHash(&sn1), Eq(456));
}
TEST_F(SigNodeTest, ReTopoHash) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
RefTopoHash(&sn1).emplace_back(123);
RefTopoHash(&sn1).emplace_back(456);
EXPECT_THAT(GetTopoHash(0, &sn1), Eq(123));
EXPECT_THAT(GetTopoHash(1, &sn1), Eq(456));
ReHighTopoHash(&sn1);
size_t expected_hash = 456;
CombineHash(1, &expected_hash);
EXPECT_THAT(GetTopoHash(0, &sn1), Eq(123));
EXPECT_THAT(GetTopoHash(1, &sn1), Eq(expected_hash));
}
TEST_F(SigNodeTest, ComputeTopoHash0) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
RefUniqueRank(&sn1) = 10;
RefNodeMask(&sn1) = 0x02;
RefTopoHash(&sn1).emplace_back(123);
RefTopoHash(&sn1).emplace_back(456);
RefLastHashedNodes(&sn1) = 0xFF;
RefNextHashedNodes(&sn1) = 0xFF;
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(1, nullptr));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(1, nullptr));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(2, nullptr));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(3, nullptr));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(3, nullptr));
ComputeTopoHash0(&sn1);
EXPECT_THAT(RefLastHashedNodes(&sn1), Eq(0x02));
EXPECT_THAT(RefNextHashedNodes(&sn1), Eq(0x02));
EXPECT_THAT(RefTopoHash(&sn1), SizeIs(1));
size_t exp_hval = std::hash<string>()(sn1.opcode());
CombineHash(1, &exp_hval);
CombineHash(1, &exp_hval);
CombineHash(2, &exp_hval);
CombineHash(3, &exp_hval);
CombineHash(3, &exp_hval);
EXPECT_THAT(GetTopoHash(0, &sn1), Eq(exp_hval));
}
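// Added note: per the expected value built above, the rank-0 topological
// hash combines the hash of the node's opcode with each peer's link hash in
// sequence, so two nodes agree at rank 0 exactly when they share an opcode
// and an identical ordered list of link hashes.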
TEST_F(SigNodeTest, ComputeTopoHashNotFinal) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
NodeDef node2 = MakeNodeConst("node2");
SigNode sn2(&node2);
NodeDef node3 = MakeNodeConst("node3");
SigNode sn3(&node3);
RefUniqueRank(&sn1) = 0;
RefNodeMask(&sn1) = 0x01;
RefUniqueRank(&sn2) = 0;
RefNodeMask(&sn2) = 0x02;
RefUniqueRank(&sn3) = 0;
RefNodeMask(&sn3) = 0x04;
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(10, &sn2));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(10, &sn3));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(20, &sn2));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(30, &sn3));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(30, &sn2));
RefTopoHash(&sn1).emplace_back(123);
RefTopoHash(&sn1).emplace_back(321);
RefTopoHash(&sn2).emplace_back(456);
RefTopoHash(&sn2).emplace_back(654);
RefTopoHash(&sn3).emplace_back(789);
RefTopoHash(&sn3).emplace_back(987);
RefLastHashedNodes(&sn1) = 0x8;
RefLastHashedNodes(&sn2) = 0x10;
RefLastHashedNodes(&sn3) = 0x20;
RefNextHashedNodes(&sn1) = 0x100;
ComputeTopoHash(2, &sn1);
EXPECT_THAT(RefLastHashedNodes(&sn1), Eq(0x8));
EXPECT_THAT(RefNextHashedNodes(&sn1), Eq(0x38));
size_t exp_hash = 123;
size_t comm_hash = 0;
CombineHashCommutative(654, &comm_hash);
CombineHashCommutative(987, &comm_hash);
CombineHash(10, &exp_hash);
CombineHash(comm_hash, &exp_hash);
comm_hash = 0;
CombineHashCommutative(654, &comm_hash);
CombineHash(20, &exp_hash);
CombineHash(comm_hash, &exp_hash);
comm_hash = 0;
CombineHashCommutative(654, &comm_hash);
CombineHashCommutative(987, &comm_hash);
CombineHash(30, &exp_hash);
CombineHash(comm_hash, &exp_hash);
EXPECT_THAT(GetTopoHash(2, &sn1), Eq(exp_hash));
EXPECT_THAT(RefTopoHash(&sn1), SizeIs(3));
}
TEST_F(SigNodeTest, ComputeTopoHashFinal) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
NodeDef node2 = MakeNodeConst("node2");
SigNode sn2(&node2);
NodeDef node3 = MakeNodeConst("node3");
SigNode sn3(&node3);
RefUniqueRank(&sn1) = 0;
RefNodeMask(&sn1) = 0x01;
RefUniqueRank(&sn2) = 0;
RefNodeMask(&sn2) = 0x02;
RefUniqueRank(&sn3) = 0;
RefNodeMask(&sn3) = 0x04;
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(10, &sn2));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(10, &sn3));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(20, &sn2));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(30, &sn3));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(30, &sn2));
RefTopoHash(&sn1).emplace_back(123);
RefTopoHash(&sn1).emplace_back(321);
RefTopoHash(&sn2).emplace_back(456);
RefTopoHash(&sn2).emplace_back(654);
RefTopoHash(&sn3).emplace_back(789);
RefTopoHash(&sn3).emplace_back(987);
RefLastHashedNodes(&sn1) = 0x8;
RefLastHashedNodes(&sn2) = 0x10;
RefLastHashedNodes(&sn3) = 0x20;
RefNextHashedNodes(&sn1) = 0x100;
RefHashIsFinal(&sn1) = true;
ComputeTopoHash(2, &sn1);
EXPECT_THAT(RefLastHashedNodes(&sn1), Eq(0x8));
EXPECT_THAT(RefNextHashedNodes(&sn1), Eq(0x8));
EXPECT_THAT(RefTopoHash(&sn1), SizeIs(2));
EXPECT_THAT(GetTopoHash(2, &sn1), Eq(321));
}
TEST_F(SigNodeTest, EqualsOpcode) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
NodeDef node2 = MakeNodeConst("node2");
SigNode sn2(&node2);
EXPECT_TRUE(sn1 == sn2);
EXPECT_FALSE(sn1 != sn2);
node2.set_op("Mul");
EXPECT_TRUE(sn1 != sn2);
EXPECT_FALSE(sn1 == sn2);
}
TEST_F(SigNodeTest, EqualsRank) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
NodeDef node2 = MakeNodeConst("node2");
SigNode sn2(&node2);
EXPECT_TRUE(sn1 == sn2);
EXPECT_FALSE(sn1 != sn2);
RefUniqueRank(&sn1) = 1;
RefUniqueRank(&sn2) = 2;
EXPECT_TRUE(sn1 != sn2);
EXPECT_FALSE(sn1 == sn2);
}
TEST_F(SigNodeTest, EqualsLinkSize) {
GraphDef graph1;
(*graph1.add_node()) = MakeNodeConst("node1");
(*graph1.add_node()) = MakeNodeMul("node2", "node1", "node1");
GenNodeMap gen_map1;
ASSERT_THAT(GenNode::BuildGraphInMap(graph1, &gen_map1),
Eq(absl::OkStatus()));
Subgraph::Identity id1;
id1.insert(gen_map1["node1"].get());
id1.insert(gen_map1["node2"].get());
Subgraph sg1(id1);
SigNodeMap sig_map1;
sg1.ExtractForSignature(&sig_map1);
GraphDef graph2;
(*graph2.add_node()) = MakeNodeConst("node1");
auto node22 = graph2.add_node();
*node22 = MakeNodeMul("node2", "node1", "node1");
node22->add_input("node2");
GenNodeMap gen_map2;
ASSERT_THAT(GenNode::BuildGraphInMap(graph2, &gen_map2),
Eq(absl::OkStatus()));
Subgraph::Identity id2;
id2.insert(gen_map2["node1"].get());
id2.insert(gen_map2["node2"].get());
Subgraph sg2(id2);
SigNodeMap sig_map2;
sg2.ExtractForSignature(&sig_map2);
EXPECT_TRUE(*sig_map1["node1"] == *sig_map2["node1"]);
EXPECT_FALSE(*sig_map1["node2"] == *sig_map2["node2"]);
EXPECT_FALSE(*sig_map2["node2"] == *sig_map1["node2"]);
}
TEST_F(SigNodeTest, EqualsLinks) {
GraphDef graph1;
(*graph1.add_node()) = MakeNodeConst("node1");
(*graph1.add_node()) = MakeNodeMul("node2", "node1", "node1");
GenNodeMap gen_map1;
ASSERT_THAT(GenNode::BuildGraphInMap(graph1, &gen_map1),
Eq(absl::OkStatus()));
Subgraph::Identity id1;
id1.insert(gen_map1["node1"].get());
id1.insert(gen_map1["node2"].get());
Subgraph sg1(id1);
SigNodeMap sig_map1;
sg1.ExtractForSignature(&sig_map1);
GenNodeMap gen_map2;
ASSERT_THAT(GenNode::BuildGraphInMap(graph1, &gen_map2),
Eq(absl::OkStatus()));
Subgraph::Identity id2;
id2.insert(gen_map2["node1"].get());
id2.insert(gen_map2["node2"].get());
Subgraph sg2(id2);
SigNodeMap sig_map2;
sg2.ExtractForSignature(&sig_map2);
EXPECT_TRUE(*sig_map1["node1"] == *sig_map2["node1"]);
EXPECT_TRUE(*sig_map1["node2"] == *sig_map2["node2"]);
SigNode* sn2 = sig_map2["node2"].get();
++RefHashedPeers(sn2)[0].link_hash;
EXPECT_FALSE(*sig_map1["node2"] == *sig_map2["node2"]);
--RefHashedPeers(sn2)[0].link_hash;
EXPECT_TRUE(*sig_map1["node2"] == *sig_map2["node2"]);
++RefUniqueRank(sig_map2["node1"].get());
EXPECT_FALSE(*sig_map1["node2"] == *sig_map2["node2"]);
}
class SignatureTest : public SigBaseTest {
protected:
static void InitPermutation(size_t size,
std::vector<size_t>* plain_permutation,
std::vector<size_t>* countdown) {
plain_permutation->clear();
countdown->clear();
for (size_t i = 0; i < size; ++i) {
plain_permutation->emplace_back(i);
countdown->emplace_back(size - 1 - i);
}
}
static void BuildPermutation(const std::vector<size_t>& plain_permutation,
const std::vector<size_t>& countdown,
std::vector<size_t>* result) {
*result = plain_permutation;
for (int i = 0; i < result->size(); ++i) {
std::swap((*result)[i], (*result)[i + countdown[i]]);
}
}
static bool CountDown(std::vector<size_t>* countdown) {
int pos;
for (pos = countdown->size() - 2; pos >= 0; --pos) {
if ((*countdown)[pos] > 0) {
--(*countdown)[pos];
break;
}
(*countdown)[pos] = (countdown->size() - 1 - pos);
}
return pos >= 0;
}
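// Illustrative note (not from the original source): countdown acts as a
// mixed-radix counter in the factorial number system. For size 3 it starts
// at {2, 1, 0}; BuildPermutation() swaps element i with element
// i + countdown[i], so {2, 1, 0} yields the permutation {2, 0, 1}, and
// CountDown() then steps the leading digits through (2,1), (2,0), (1,1),
// (1,0), (0,1), (0,0), covering all 3! = 6 orderings exactly once before
// returning false.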
void TestGraphEveryWay(const GraphDef& graph) {
size_t graph_size = graph.node_size();
gen_map_.clear();
sig_.map.clear();
Status result = GenNode::BuildGraphInMap(graph, &gen_map_);
ASSERT_THAT(result, Eq(absl::OkStatus()));
Subgraph::Identity id;
for (const auto& entry : gen_map_) {
id.insert(entry.second.get());
}
Subgraph sg(id);
sg.ExtractForSignature(&sig_.map);
std::vector<size_t> plain_permutation;
std::vector<size_t> countdown;
InitPermutation(graph_size, &plain_permutation, &countdown);
std::set<string> signatures;
std::vector<size_t> permutation;
do {
BuildPermutation(plain_permutation, countdown, &permutation);
constexpr bool kDebugPermutation = false;
if (kDebugPermutation) {
string p;
for (int i = 0; i < permutation.size(); ++i) {
p.push_back('0' + permutation[i]);
}
LOG(INFO) << "Permutation: " << p;
}
std::vector<std::unique_ptr<SigNode>> hold(graph_size);
int idx;
sig_.nodes.clear();
idx = 0;
if (kDebugPermutation) {
LOG(INFO) << " nodes before permutation:";
}
for (auto& entry : sig_.map) {
if (kDebugPermutation) {
LOG(INFO) << " " << entry.second.get();
}
hold[idx++] = std::move(entry.second);
}
idx = 0;
if (kDebugPermutation) {
LOG(INFO) << " nodes after permutation:";
}
for (auto& entry : sig_.map) {
entry.second = std::move(hold[permutation[idx++]]);
if (kDebugPermutation) {
LOG(INFO) << " " << entry.second.get();
}
sig_.nodes.emplace_back(entry.second.get());
RefUniqueRank(entry.second.get()) = idx;
}
OrderLinks(&sig_);
ASSERT_THAT(sig_.Compute(), Eq(absl::OkStatus()));
signatures.insert(sig_.ToString());
EXPECT_THAT(sig_.sig_full, SizeIs(graph_size));
size_t hval = 0;
for (size_t ih : sig_.sig_full) {
EXPECT_THAT(ih, Gt(graph_size));
CombineHash(ih, &hval);
}
EXPECT_THAT(sig_.sig_short, Eq(hval));
idx = 0;
for (auto& entry : sig_.map) {
hold[permutation[idx++]] = std::move(entry.second);
}
idx = 0;
if (kDebugPermutation) {
LOG(INFO) << " nodes after un-permutation:";
}
for (auto& entry : sig_.map) {
entry.second = std::move(hold[idx++]);
if (kDebugPermutation) {
LOG(INFO) << " " << entry.second.get();
}
}
} while (CountDown(&countdown));
for (const auto& s : signatures) {
LOG(INFO) << "Signature: " << s;
}
EXPECT_THAT(signatures, SizeIs(1));
}
};
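// Added summary of the harness above: a signature must be a graph invariant,
// so TestGraphEveryWay() feeds every one of the n! node orderings through
// Signature::Compute() and asserts that the set of distinct serialized
// signatures collapses to exactly one element.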
TEST_F(SignatureTest, PrepareNodes) {
NodeDef node1 = MakeNodeConst("node1");
sig_.map["node1"] = std::make_unique<SigNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
sig_.map["node2"] = std::make_unique<SigNode>(&node2);
NodeDef node3 = MakeNodeConst("node3");
sig_.map["node3"] = std::make_unique<SigNode>(&node3);
PrepareNodes(&sig_);
ASSERT_THAT(sig_.nodes, SizeIs(3));
int idx = 0;
for (const auto& entry : sig_.map) {
EXPECT_THAT(RefNodeMask(entry.second.get()), Eq(1 << idx))
<< " at index " << idx;
EXPECT_THAT(RefUniqueRank(entry.second.get()), Eq(static_cast<size_t>(~0)))
<< " at index " << idx;
EXPECT_THAT(RefHashIsFinal(entry.second.get()), Eq(false))
<< " at index " << idx;
EXPECT_THAT(RefTopoHash(entry.second.get()), SizeIs(1))
<< " at index " << idx;
++idx;
}
}
TEST_F(SignatureTest, FindUniqueHashesAllDifferent) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
NodeDef node2 = MakeNodeConst("node2");
SigNode sn2(&node2);
NodeDef node3 = MakeNodeConst("node3");
SigNode sn3(&node3);
NodeDef node4 = MakeNodeConst("node4");
SigNode sn4(&node4);
RefTopoHash(&sn1).emplace_back(100);
RefTopoHash(&sn1).emplace_back(900);
RefTopoHash(&sn2).emplace_back(200);
RefTopoHash(&sn2).emplace_back(800);
RefTopoHash(&sn3).emplace_back(300);
RefTopoHash(&sn3).emplace_back(700);
RefTopoHash(&sn4).emplace_back(400);
RefTopoHash(&sn4).emplace_back(600);
sig_.nodes.emplace_back(&sn1);
sig_.nodes.emplace_back(&sn2);
sig_.nodes.emplace_back(&sn3);
sig_.nodes.emplace_back(&sn4);
size_t next = 1;
FindUniqueHashes(&next, &sig_);
EXPECT_THAT(next, Eq(4));
EXPECT_THAT(sig_.nodes[0], Eq(&sn1));
EXPECT_THAT(sig_.nodes[1], Eq(&sn4));
EXPECT_THAT(sig_.nodes[2], Eq(&sn3));
EXPECT_THAT(sig_.nodes[3], Eq(&sn2));
EXPECT_THAT(RefHashIsFinal(&sn1), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn2), Eq(true));
EXPECT_THAT(RefHashIsFinal(&sn3), Eq(true));
EXPECT_THAT(RefHashIsFinal(&sn4), Eq(true));
EXPECT_THAT(RefTopoHash(&sn1), SizeIs(2));
ASSERT_THAT(RefTopoHash(&sn2), SizeIs(1));
ASSERT_THAT(RefTopoHash(&sn3), SizeIs(1));
ASSERT_THAT(RefTopoHash(&sn4), SizeIs(1));
EXPECT_THAT(RefTopoHash(&sn2)[0], Eq(4));
EXPECT_THAT(RefTopoHash(&sn3)[0], Eq(3));
EXPECT_THAT(RefTopoHash(&sn4)[0], Eq(2));
EXPECT_THAT(sig_.sig_full, ElementsAre(600, 700, 800));
size_t exp_short_hash = 0;
CombineHash(600, &exp_short_hash);
CombineHash(700, &exp_short_hash);
CombineHash(800, &exp_short_hash);
EXPECT_THAT(sig_.sig_short, Eq(exp_short_hash));
}
TEST_F(SignatureTest, FindUniqueHashesDuplicatesExceptOne) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
NodeDef node2 = MakeNodeConst("node2");
SigNode sn2(&node2);
NodeDef node3 = MakeNodeConst("node3");
SigNode sn3(&node3);
NodeDef node4 = MakeNodeConst("node4");
SigNode sn4(&node4);
NodeDef node5 = MakeNodeConst("node5");
SigNode sn5(&node5);
RefTopoHash(&sn1).emplace_back(100);
RefTopoHash(&sn1).emplace_back(600);
RefTopoHash(&sn2).emplace_back(200);
RefTopoHash(&sn2).emplace_back(600);
RefTopoHash(&sn3).emplace_back(300);
RefTopoHash(&sn3).emplace_back(700);
RefTopoHash(&sn4).emplace_back(400);
RefTopoHash(&sn4).emplace_back(800);
RefTopoHash(&sn5).emplace_back(500);
RefTopoHash(&sn5).emplace_back(800);
sig_.nodes.emplace_back(&sn1);
sig_.nodes.emplace_back(&sn2);
sig_.nodes.emplace_back(&sn3);
sig_.nodes.emplace_back(&sn4);
sig_.nodes.emplace_back(&sn5);
size_t next = 0;
FindUniqueHashes(&next, &sig_);
EXPECT_THAT(next, Eq(1));
EXPECT_THAT(sig_.nodes[0], Eq(&sn3));
EXPECT_THAT(sig_.nodes[1], Eq(&sn2));
EXPECT_THAT(sig_.nodes[2], Eq(&sn1));
EXPECT_THAT(sig_.nodes[3], Eq(&sn4));
EXPECT_THAT(sig_.nodes[4], Eq(&sn5));
EXPECT_THAT(RefHashIsFinal(&sn1), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn2), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn3), Eq(true));
EXPECT_THAT(RefHashIsFinal(&sn4), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn5), Eq(false));
EXPECT_THAT(RefTopoHash(&sn1), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn2), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn3), SizeIs(1));
EXPECT_THAT(RefTopoHash(&sn4), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn5), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn3)[0], Eq(1));
}
TEST_F(SignatureTest, FindUniqueHashesDuplicates) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
NodeDef node2 = MakeNodeConst("node2");
SigNode sn2(&node2);
NodeDef node3 = MakeNodeConst("node3");
SigNode sn3(&node3);
NodeDef node4 = MakeNodeConst("node4");
SigNode sn4(&node4);
NodeDef node5 = MakeNodeConst("node5");
SigNode sn5(&node5);
RefTopoHash(&sn1).emplace_back(100);
RefTopoHash(&sn1).emplace_back(600);
RefTopoHash(&sn2).emplace_back(200);
RefTopoHash(&sn2).emplace_back(600);
RefTopoHash(&sn3).emplace_back(300);
RefTopoHash(&sn3).emplace_back(700);
RefTopoHash(&sn4).emplace_back(400);
RefTopoHash(&sn4).emplace_back(700);
RefTopoHash(&sn5).emplace_back(500);
RefTopoHash(&sn5).emplace_back(700);
sig_.nodes.emplace_back(&sn1);
sig_.nodes.emplace_back(&sn2);
sig_.nodes.emplace_back(&sn3);
sig_.nodes.emplace_back(&sn4);
sig_.nodes.emplace_back(&sn5);
size_t next = 0;
FindUniqueHashes(&next, &sig_);
EXPECT_THAT(next, Eq(1));
EXPECT_THAT(sig_.nodes[0], Eq(&sn5));
EXPECT_THAT(sig_.nodes[1], Eq(&sn2));
EXPECT_THAT(sig_.nodes[2], Eq(&sn3));
EXPECT_THAT(sig_.nodes[3], Eq(&sn4));
EXPECT_THAT(sig_.nodes[4], Eq(&sn1));
EXPECT_THAT(RefHashIsFinal(&sn1), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn2), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn3), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn4), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn5), Eq(true));
EXPECT_THAT(RefTopoHash(&sn1), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn2), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn3), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn4), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn5), SizeIs(1));
EXPECT_THAT(RefTopoHash(&sn5)[0], Eq(1));
}
TEST_F(SignatureTest, ComputeOneRoundCircular) {
BuildSigMap(graph_circular_onedir_);
PrepareNodes(&sig_);
ASSERT_THAT(sig_.nodes, SizeIs(5));
ComputeOneRound(0, &sig_);
size_t hval = GetHighTopoHash(sig_.nodes[0]);
for (int i = 0; i < 5; ++i) {
EXPECT_THAT(GetHighTopoHash(sig_.nodes[i]), Eq(hval)) << " at index " << i;
EXPECT_THAT(RefHashIsFinal(sig_.nodes[i]), Eq(true)) << " at index " << i;
EXPECT_THAT(RefLastHashedNodes(sig_.nodes[i]), Eq(0x1F))
<< " at index " << i;
EXPECT_THAT(RefNextHashedNodes(sig_.nodes[i]), Eq(0x1F))
<< " at index " << i;
EXPECT_THAT(RefTopoHash(sig_.nodes[i]), SizeIs(4)) << " at index " << i;
}
}
TEST_F(SignatureTest, ComputeOneRoundLinear) {
BuildSigMap(graph_linear_);
PrepareNodes(&sig_);
ASSERT_THAT(sig_.nodes, SizeIs(5));
ComputeOneRound(0, &sig_);
std::vector<size_t> hash_size;
for (int i = 0; i < 5; ++i) {
EXPECT_THAT(RefHashIsFinal(sig_.nodes[i]), Eq(true)) << " at index " << i;
EXPECT_THAT(RefLastHashedNodes(sig_.nodes[i]), Eq(0x1F))
<< " at index " << i;
EXPECT_THAT(RefNextHashedNodes(sig_.nodes[i]), Eq(0x1F))
<< " at index " << i;
hash_size.emplace_back(RefTopoHash(sig_.nodes[i]).size());
}
std::sort(hash_size.begin(), hash_size.end());
EXPECT_THAT(hash_size, ElementsAre(4, 5, 5, 6, 6));
}
TEST_F(SignatureTest, ComputeOneRoundSplitLinear) {
BuildSigMap(graph_linear_);
PrepareNodes(&sig_);
ASSERT_THAT(sig_.nodes, SizeIs(5));
std::swap(sig_.nodes[0], sig_.nodes[2]);
ASSERT_THAT(RefNodeMask(sig_.nodes[0]), Eq(0x04));
ASSERT_THAT(RefLastHashedNodes(sig_.nodes[0]), Eq(0x04));
ASSERT_THAT(RefNextHashedNodes(sig_.nodes[0]), Eq(0x04));
RefHashIsFinal(sig_.nodes[0]) = true;
ComputeOneRound(1, &sig_);
EXPECT_THAT(RefLastHashedNodes(sig_.nodes[0]), Eq(0x04));
EXPECT_THAT(RefNextHashedNodes(sig_.nodes[0]), Eq(0x04));
std::vector<size_t> hash_size;
for (int i = 1; i < 5; ++i) {
EXPECT_THAT(RefHashIsFinal(sig_.nodes[i]), Eq(true)) << " at index " << i;
hash_size.emplace_back(RefTopoHash(sig_.nodes[i]).size());
}
std::sort(hash_size.begin(), hash_size.end());
EXPECT_THAT(hash_size, ElementsAre(3, 3, 4, 4));
EXPECT_THAT(RefLastHashedNodes(sig_.nodes[1]), Eq(0x07));
EXPECT_THAT(RefNextHashedNodes(sig_.nodes[1]), Eq(0x07));
EXPECT_THAT(RefLastHashedNodes(sig_.nodes[2]), Eq(0x07));
EXPECT_THAT(RefNextHashedNodes(sig_.nodes[2]), Eq(0x07));
EXPECT_THAT(RefLastHashedNodes(sig_.nodes[3]), Eq(0x1C));
EXPECT_THAT(RefNextHashedNodes(sig_.nodes[3]), Eq(0x1C));
EXPECT_THAT(RefLastHashedNodes(sig_.nodes[4]), Eq(0x1C));
EXPECT_THAT(RefNextHashedNodes(sig_.nodes[4]), Eq(0x1C));
}
TEST_F(SignatureTest, OrderLinks) {
gen_map_.clear();
sig_.map.clear();
Status result = GenNode::BuildGraphInMap(graph_for_link_order_, &gen_map_);
ASSERT_THAT(result, Eq(absl::OkStatus()));
Subgraph::Identity id;
for (const auto& entry : gen_map_) {
id.insert(entry.second.get());
}
Subgraph sg(id);
sg.ExtractForSignature(&sig_.map); |
1,374 | cpp | tensorflow/tensorflow | graph_analyzer | tensorflow/core/grappler/graph_analyzer/graph_analyzer.cc | tensorflow/core/grappler/graph_analyzer/graph_analyzer_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_GRAPH_ANALYZER_GRAPH_ANALYZER_H_
#define TENSORFLOW_CORE_GRAPPLER_GRAPH_ANALYZER_GRAPH_ANALYZER_H_
#include <deque>
#include <vector>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/grappler/graph_analyzer/map_tools.h"
#include "tensorflow/core/grappler/graph_analyzer/sig_node.h"
#include "tensorflow/core/grappler/graph_analyzer/subgraph.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
namespace test {
class GraphAnalyzerTest;
}
class GraphAnalyzer {
public:
GraphAnalyzer(const GraphDef& graph, int subgraph_size);
virtual ~GraphAnalyzer();
Status Run();
std::vector<string> DumpSubgraphs();
Status OutputSubgraphs();
private:
GraphAnalyzer() = delete;
GraphAnalyzer(const GraphAnalyzer&) = delete;
void operator=(const GraphAnalyzer&) = delete;
friend class tensorflow::grappler::graph_analyzer::test::GraphAnalyzerTest;
Status BuildMap();
void FindSubgraphs();
void DropInvalidSubgraphs();
Status CollateResult();
std::vector<string> DumpRawSubgraphs();
void ExtendSubgraph(Subgraph* parent);
void ExtendSubgraphAllOrNone(Subgraph* parent, const GenNode* node);
void ExtendSubgraphPortAllOrNone(Subgraph* parent, const GenNode* node,
GenNode::Port port);
void AddExtendedSubgraph(Subgraph* parent, const Subgraph::Identity& id);
bool HasInvalidMultiInputs(Subgraph* sg);
GraphDef graph_;
int subgraph_size_;
GenNodeMap nodes_;
SubgraphPtrSet result_;
SubgraphPtrSet partial_;
std::deque<Subgraph*> todo_;
struct CollationEntry {
std::shared_ptr<Signature> sig;
size_t count = 0;
};
using CollationMap =
std::unordered_map<Signature*, CollationEntry, HashAtPtr<Signature*>,
EqAtPtr<Signature*> >;
CollationMap collation_map_;
struct ReverseLessByCount {
bool operator()(CollationEntry* left, CollationEntry* right) const {
return left->count > right->count;
}
};
using CollationOrderByCount =
std::multiset<CollationEntry*, ReverseLessByCount>;
CollationOrderByCount ordered_collation_;
};
}
}
}
#endif
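// A minimal usage sketch of the GraphAnalyzer API declared above. This is an
// illustrative addition rather than original source; the wrapper name
// PrintFrequentSubgraphs and the subgraph size of 3 are hypothetical.
//
//   void PrintFrequentSubgraphs(const tensorflow::GraphDef& graph) {
//     tensorflow::grappler::graph_analyzer::GraphAnalyzer analyzer(graph, 3);
//     tensorflow::Status status = analyzer.Run();
//     if (!status.ok()) {
//       LOG(ERROR) << status;
//       return;
//     }
//     // Each entry reads "<count> <signature>", most frequent first.
//     for (const string& line : analyzer.DumpSubgraphs()) {
//       LOG(INFO) << line;
//     }
//   }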
#include <deque>
#include <iostream>
#include "absl/memory/memory.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/grappler/graph_analyzer/gen_node.h"
#include "tensorflow/core/grappler/graph_analyzer/graph_analyzer.h"
#include "tensorflow/core/grappler/graph_analyzer/sig_node.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
GraphAnalyzer::GraphAnalyzer(const GraphDef& graph, int subgraph_size)
: graph_(graph), subgraph_size_(subgraph_size) {}
GraphAnalyzer::~GraphAnalyzer() {}
Status GraphAnalyzer::Run() {
if (subgraph_size_ > Signature::kMaxGraphSize) {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrFormat("Subgraphs of %d nodes are not supported, "
"the maximal supported node count is %d.",
subgraph_size_, Signature::kMaxGraphSize));
}
Status st = BuildMap();
if (!st.ok()) {
return st;
}
FindSubgraphs();
DropInvalidSubgraphs();
st = CollateResult();
if (!st.ok()) {
return st;
}
return absl::OkStatus();
}
Status GraphAnalyzer::BuildMap() {
nodes_.clear();
return GenNode::BuildGraphInMap(graph_, &nodes_);
}
void GraphAnalyzer::FindSubgraphs() {
result_.clear();
if (subgraph_size_ < 1) {
return;
}
partial_.clear();
todo_.clear();
const Subgraph::Identity empty_parent;
for (const auto& node : nodes_) {
if (subgraph_size_ == 1) {
result_.ExtendParent(empty_parent, node.second.get());
} else {
todo_.push_back(partial_.ExtendParent(empty_parent, node.second.get()));
}
}
while (!todo_.empty()) {
ExtendSubgraph(todo_.front());
todo_.pop_front();
}
partial_.clear();
}
void GraphAnalyzer::ExtendSubgraph(Subgraph* parent) {
const int next_parent_id = parent->id().size() + 1;
bool will_complete = (next_parent_id == subgraph_size_);
SubgraphPtrSet& sg_set = will_complete ? result_ : partial_;
const GenNode* last_all_or_none_node = nullptr;
for (SubgraphIterator sit(parent); !sit.AtEnd(); sit.Next()) {
const GenNode* node = sit.GetNode();
GenNode::Port port = sit.GetPort();
const GenNode::LinkTarget& neighbor = sit.GetNeighbor();
if (node->AllInputsOrNone() && port.IsInbound() && !port.IsControl()) {
if (node != last_all_or_none_node) {
ExtendSubgraphAllOrNone(parent, node);
last_all_or_none_node = node;
}
sit.SkipPort();
} else if (neighbor.node->AllInputsOrNone() && !port.IsInbound() &&
!port.IsControl()) {
if (parent->id().find(neighbor.node) == parent->id().end()) {
ExtendSubgraphAllOrNone(parent, neighbor.node);
}
} else if (node->IsMultiInput(port)) {
ExtendSubgraphPortAllOrNone(parent, node, port);
sit.SkipPort();
} else if (neighbor.node->IsMultiInput(neighbor.port)) {
if (parent->id().find(neighbor.node) != parent->id().end()) {
continue;
}
ExtendSubgraphPortAllOrNone(parent, neighbor.node, neighbor.port);
} else {
Subgraph* sg = sg_set.ExtendParent(parent->id(), neighbor.node);
if (!will_complete && sg != nullptr) {
todo_.push_back(sg);
}
}
}
}
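// Added commentary on the dispatch above: each link leaving the current
// subgraph grows it in one of three ways. A node whose op takes a
// variable-size input list non-commutatively (AllInputsOrNone) is pulled in
// together with every node feeding that list; a commutative multi-input port
// pulls in all peers of just that port; any other link adds one neighbor at
// a time. Subgraphs that reach subgraph_size_ land in result_, smaller ones
// are queued on todo_ through partial_.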
void GraphAnalyzer::ExtendSubgraphAllOrNone(Subgraph* parent,
const GenNode* node) {
Subgraph::Identity id = parent->id();
id.insert(node);
auto range_end = node->links().end();
for (auto nbit = node->links().begin(); nbit != range_end; ++nbit) {
auto port = nbit->first;
if (!port.IsInbound() || port.IsControl()) {
continue;
}
for (const auto& link : nbit->second) {
id.insert(link.node);
const int id_size = id.size();
if (id_size > subgraph_size_) {
return;
}
}
}
AddExtendedSubgraph(parent, id);
}
void GraphAnalyzer::ExtendSubgraphPortAllOrNone(Subgraph* parent,
const GenNode* node,
GenNode::Port port) {
auto nbit = node->links().find(port);
if (nbit == node->links().end()) {
return;
}
Subgraph::Identity id = parent->id();
id.insert(node);
for (const auto& link : nbit->second) {
id.insert(link.node);
const int id_size = id.size();
if (id_size > subgraph_size_) {
return;
}
}
AddExtendedSubgraph(parent, id);
}
void GraphAnalyzer::AddExtendedSubgraph(Subgraph* parent,
const Subgraph::Identity& id) {
if (id.size() == parent->id().size()) {
return;
}
auto sg = std::make_unique<Subgraph>(id);
SubgraphPtrSet& spec_sg_set =
(id.size() == subgraph_size_) ? result_ : partial_;
if (spec_sg_set.find(sg) != spec_sg_set.end()) {
return;
}
const int id_size = id.size();
if (id_size != subgraph_size_) {
todo_.push_back(sg.get());
}
spec_sg_set.insert(std::move(sg));
}
void GraphAnalyzer::DropInvalidSubgraphs() {
auto resit = result_.begin();
while (resit != result_.end()) {
if (HasInvalidMultiInputs(resit->get())) {
auto delit = resit;
++resit;
result_.erase(delit);
} else {
++resit;
}
}
}
bool GraphAnalyzer::HasInvalidMultiInputs(Subgraph* sg) {
for (auto const& node : sg->id()) {
if (!node->AllInputsOrNone()) {
continue;
}
bool anyIn = false;
bool anyOut = false;
auto range_end = node->links().end();
for (auto nbit = node->links().begin(); nbit != range_end; ++nbit) {
auto port = nbit->first;
if (!port.IsInbound() || port.IsControl()) {
continue;
}
for (const auto& link : nbit->second) {
if (sg->id().find(link.node) == sg->id().end()) {
anyOut = true;
} else {
anyIn = true;
}
}
}
if (anyIn && anyOut) {
return true;
}
}
for (SubgraphIterator sit(sg); !sit.AtEnd(); sit.Next()) {
if (sit.GetNode()->IsMultiInput(sit.GetPort())) {
bool anyIn = false;
bool anyOut = false;
do {
GenNode* peer = sit.GetNeighbor().node;
if (sg->id().find(peer) == sg->id().end()) {
anyOut = true;
} else {
anyIn = true;
}
} while (sit.NextIfSamePort());
if (anyIn && anyOut) {
return true;
}
}
}
return false;
}
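// Added commentary: the anyIn/anyOut scan flags subgraphs that include only
// part of a multi-input group, i.e. some peers of the port fall inside the
// subgraph and some outside. One reading of the intent is that such a cut
// group does not represent the pattern faithfully, so the subgraph is
// discarded by DropInvalidSubgraphs() above.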
Status GraphAnalyzer::CollateResult() {
ordered_collation_.clear();
collation_map_.clear();
for (const auto& it : result_) {
auto sig = std::make_unique<Signature>();
it->ExtractForSignature(&sig->map);
Status status = sig->Compute();
if (!status.ok()) {
return status;
}
auto& coll_entry = collation_map_[sig.get()];
if (coll_entry.sig == nullptr) {
coll_entry.sig = std::move(sig);
}
++coll_entry.count;
}
for (auto& entry : collation_map_) {
ordered_collation_.insert(&entry.second);
}
result_.clear();
return absl::OkStatus();
}
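// Added commentary: collation deduplicates by signature value. collation_map_
// keys on Signature pointers but hashes and compares through the pointed-to
// object (HashAtPtr/EqAtPtr), so structurally identical subgraphs share one
// CollationEntry whose count is incremented, and ordered_collation_ then
// orders the entries by descending count for output.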
std::vector<string> GraphAnalyzer::DumpRawSubgraphs() {
std::vector<string> result;
for (const auto& it : result_) {
result.emplace_back(it->Dump());
}
return result;
}
std::vector<string> GraphAnalyzer::DumpSubgraphs() {
std::vector<string> result;
for (auto ptr : ordered_collation_) {
result.emplace_back(
absl::StrFormat("%d %s", ptr->count, ptr->sig->ToString()));
}
return result;
}
Status GraphAnalyzer::OutputSubgraphs() {
size_t total = 0;
for (auto ptr : ordered_collation_) {
std::cout << ptr->count << ' ' << ptr->sig->ToString() << '\n';
total += ptr->count;
}
std::cout << "Total: " << total << '\n';
if (std::cout.fail()) {
return Status(absl::StatusCode::kDataLoss, "Failed to write to stdout");
} else {
return absl::OkStatus();
}
}
}
}
} | #include "tensorflow/core/grappler/graph_analyzer/graph_analyzer.h"
#include <algorithm>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "tensorflow/core/grappler/graph_analyzer/test_tools.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
namespace test {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Ne;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
class GraphAnalyzerTest : public ::testing::Test, protected TestGraphs {
protected:
Status BuildMap() { return gran_->BuildMap(); }
void FindSubgraphs() { gran_->FindSubgraphs(); }
void DropInvalidSubgraphs() { gran_->DropInvalidSubgraphs(); }
Status CollateResult() { return gran_->CollateResult(); }
void ExtendSubgraph(Subgraph* parent) { gran_->ExtendSubgraph(parent); }
void ExtendSubgraphPortAllOrNone(Subgraph* parent, GenNode* node,
GenNode::Port port) {
gran_->ExtendSubgraphPortAllOrNone(parent, node, port);
}
void ExtendSubgraphAllOrNone(Subgraph* parent, GenNode* node) {
gran_->ExtendSubgraphAllOrNone(parent, node);
}
std::vector<string> DumpRawSubgraphs() { return gran_->DumpRawSubgraphs(); }
std::vector<string> DumpPartials() {
std::vector<string> result;
for (const auto& it : gran_->partial_) {
result.emplace_back(it->Dump());
}
return result;
}
const GenNodeMap& GetNodes() { return gran_->nodes_; }
GenNode* GetNode(const string& name) { return gran_->nodes_.at(name).get(); }
SubgraphPtrSet& GetResult() { return gran_->result_; }
SubgraphPtrSet& GetPartial() { return gran_->partial_; }
std::deque<Subgraph*>& GetTodo() { return gran_->todo_; }
std::unique_ptr<GraphAnalyzer> gran_;
};
TEST_F(GraphAnalyzerTest, BuildMap) {
gran_ = std::make_unique<GraphAnalyzer>(graph_3n_self_control_, 1);
Status st = BuildMap();
EXPECT_THAT(st, Eq(absl::OkStatus()));
auto& map = GetNodes();
EXPECT_THAT(map.find("node1"), Ne(map.end()));
EXPECT_THAT(map.find("node2"), Ne(map.end()));
EXPECT_THAT(map.find("node3"), Ne(map.end()));
}
TEST_F(GraphAnalyzerTest, BuildMapError) {
(*graph_3n_self_control_.add_node()) = MakeNodeConst("node1");
gran_ = std::make_unique<GraphAnalyzer>(graph_3n_self_control_, 1);
Status st = BuildMap();
ASSERT_THAT(st, Eq(Status(absl::StatusCode::kInvalidArgument,
"Duplicate node name 'node1'.")));
}
TEST_F(GraphAnalyzerTest, FindSubgraphs0) {
gran_ = std::make_unique<GraphAnalyzer>(graph_3n_self_control_, 0);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
FindSubgraphs();
auto& subgraphs = GetResult();
EXPECT_THAT(subgraphs, SizeIs(0));
EXPECT_THAT(DumpRawSubgraphs(), ElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, FindSubgraphs1) {
gran_ = std::make_unique<GraphAnalyzer>(graph_3n_self_control_, 1);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
FindSubgraphs();
auto& subgraphs = GetResult();
EXPECT_THAT(subgraphs, SizeIs(3));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: BroadcastGradientArgs(node3)",
"1: Const(node1)",
"1: Sub(node2)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, FindSubgraphsTooLarge) {
gran_ = std::make_unique<GraphAnalyzer>(graph_3n_self_control_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
FindSubgraphs();
EXPECT_THAT(DumpRawSubgraphs(), ElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessBackwardsBaseIn) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("add2")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessBackwardsBaseOut) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto parent = std::make_unique<Subgraph>(Subgraph::Identity());
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("add2")}));
ExtendSubgraphPortAllOrNone(parent.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessBackwardsIncomplete) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 5);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("add2")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(GetTodo(), SizeIs(1));
}
TEST_F(GraphAnalyzerTest, MultiInputTooLargeBackwards) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 3);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("add2")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputNothingAddedBackwards) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root = std::make_unique<Subgraph>(
Subgraph::Identity({GetNode("add2"), GetNode("const2_1"),
GetNode("const2_2"), GetNode("const2_3")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessForwardsBaseOut) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("const2_1")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessBackwardsFull) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("add2")}));
ExtendSubgraph(root.get());
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre(
"1: AddN(add2), Sub(sub)"
));
EXPECT_THAT(GetTodo(), SizeIs(1));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessForwardsFull) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("const2_1")}));
ExtendSubgraph(root.get());
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, DropInvalidSubgraphsMulti) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 3);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("const1_1"),
GetNode("const1_2"),
GetNode("add1"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("add1"),
GetNode("add2"),
GetNode("sub"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("const1_1"),
GetNode("add1"),
GetNode("sub"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("add2"),
GetNode("const2_1"),
GetNode("const2_2"),
})));
DropInvalidSubgraphs();
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add1), AddN(add2), Sub(sub)",
"1: AddN(add1), Const(const1_1), Const(const1_2)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSuccessBackwards) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("pass2")}));
ExtendSubgraphAllOrNone(root.get(), GetNode("pass2"));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass2)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSuccessBackwardsNoControl) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 5);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("pass1")}));
ExtendSubgraphAllOrNone(root.get(), GetNode("pass1"));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre(
"1: Const(const1_1), Const(const1_2), IdentityN(pass1)"
));
EXPECT_THAT(GetTodo(), SizeIs(1));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSeparateControl) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 5);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("pass1")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("pass1"),
GenNode::Port(true, -1));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre(
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass1)"
));
EXPECT_THAT(GetTodo(), SizeIs(1));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputTooLargeBackwards) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 3);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("pass2")}));
ExtendSubgraphAllOrNone(root.get(), GetNode("pass2"));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputNothingAddedBackwards) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root = std::make_unique<Subgraph>(
Subgraph::Identity({GetNode("pass2"), GetNode("const2_1"),
GetNode("const2_2"), GetNode("const2_3")}));
ExtendSubgraphAllOrNone(root.get(), GetNode("pass2"));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSuccessForwardsBaseOut) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("const2_1")}));
ExtendSubgraphAllOrNone(root.get(), GetNode("pass2"));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass2)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSuccessBackwardsFull) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("pass2")}));
ExtendSubgraph(root.get());
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass2)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre(
"1: IdentityN(pass2), Sub(sub)"
));
EXPECT_THAT(GetTodo(), SizeIs(1));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSuccessForwardsFull) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("const2_1")}));
ExtendSubgraph(root.get());
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass2)",
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass1)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, DropInvalidSubgraphsAllOrNone) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 3);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("const1_1"),
GetNode("const1_2"),
GetNode("pass1"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("pass1"),
GetNode("pass2"),
GetNode("sub"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("const1_1"),
GetNode("pass1"),
GetNode("sub"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("pass2"),
GetNode("const2_1"),
GetNode("const2_2"),
})));
DropInvalidSubgraphs();
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: IdentityN(pass1), IdentityN(pass2), Sub(sub)",
"1: Const(const1_1), Const(const1_2), IdentityN(pass1)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
}
}
}
} |
1,375 | cpp | tensorflow/tensorflow | gen_node | tensorflow/core/grappler/graph_analyzer/gen_node.cc | tensorflow/core/grappler/graph_analyzer/gen_node_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_GRAPH_ANALYZER_GEN_NODE_H_
#define TENSORFLOW_CORE_GRAPPLER_GRAPH_ANALYZER_GEN_NODE_H_
#include <map>
#include <memory>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
class GenNode;
using GenNodeMap = std::unordered_map<string, std::unique_ptr<GenNode>>;
class GenNode {
public:
explicit GenNode(const NodeDef* node);
const string& name() const { return node_->name(); }
const string& opcode() const { return node_->op(); }
const NodeDef* node_def() const { return node_; }
Status ParseInputs(const GenNodeMap* map);
static Status BuildGraphInMap(const GraphDef& source, GenNodeMap* map);
class Port {
public:
Port(bool inbound, int32_t id) : value_(id << 1) {
if (inbound) {
value_ |= 1;
}
}
Port(const Port&) = default;
Port& operator=(const Port&) = default;
bool IsInbound() const { return (value_ & 0x1); }
bool IsControl() const { return (value_ < 0); }
int32_t Id() const {
return (value_ >> 1);
}
using IntPort = int32_t;
IntPort Encoded() const { return value_; }
static Port Decode(IntPort encoded) { return Port(encoded); }
bool operator==(const Port& other) const { return value_ == other.value_; }
bool operator<(const Port& other) const { return value_ < other.value_; }
struct Hasher {
size_t operator()(const Port& port) const noexcept {
return hasher(port.Encoded());
}
std::hash<int32_t> hasher;
};
explicit operator string() const;
private:
explicit Port(IntPort value) : value_(value) {}
IntPort value_;
};
struct LinkTarget {
GenNode* node;
Port port;
LinkTarget(GenNode* a_node, Port a_port) : node(a_node), port(a_port) {}
};
using LinkTargetVector = std::vector<LinkTarget>;
using LinkMap = std::unordered_map<Port, LinkTargetVector, Port::Hasher>;
const LinkMap& links() const { return links_; }
bool IsMultiInput(Port port) const;
bool AllInputsOrNone() const { return all_inputs_or_none_; }
private:
const NodeDef* node_;
const OpDef* op_;
bool all_inputs_or_none_ = false;
LinkMap links_;
};
}
}
}
#endif
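// An illustrative sketch of the Port encoding declared above (an added
// example, not part of the original file). The id occupies the upper bits
// and the inbound flag occupies bit 0, so control ports, which use negative
// ids, stay negative after encoding and IsControl() can test the sign:
//
//   using Port = tensorflow::grappler::graph_analyzer::GenNode::Port;
//   Port in3(true, 3);    // Encoded() == (3 << 1) | 1 == 7
//   Port out0(false, 0);  // Encoded() == 0
//   Port ctl(true, -1);   // IsControl() == true, Id() == -1
//   assert(Port::Decode(in3.Encoded()) == in3);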
#include "tensorflow/core/grappler/graph_analyzer/gen_node.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/grappler/graph_analyzer/hash_tools.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
GenNode::GenNode(const NodeDef* node) : node_(node), op_(nullptr) {}
Status GenNode::BuildGraphInMap(const GraphDef& source, GenNodeMap* map) {
for (const auto& n : source.node()) {
const string& name = n.name();
if (map->find(name) != map->end()) {
return Status(absl::StatusCode::kInvalidArgument,
"Duplicate node name '" + name + "'.");
}
(*map)[name] = std::make_unique<GenNode>(&n);
}
for (const auto& mapit : *map) {
Status st = mapit.second->ParseInputs(map);
if (!st.ok()) {
return st;
}
}
return absl::OkStatus();
}
Status GenNode::ParseInputs(const GenNodeMap* map) {
all_inputs_or_none_ = false;
Status st = OpRegistry::Global()->LookUpOpDef(opcode(), &op_);
if (!st.ok()) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrFormat("Node '%s' contains an undefined operation '%s': %s",
name(), opcode(), st.message()));
}
int n_inputs = node_->input_size();
int n_named_inputs = op_->input_arg_size();
int n_multi_inputs = 0;
for (const auto& inarg : op_->input_arg()) {
if (!inarg.number_attr().empty() || !inarg.type_list_attr().empty()) {
++n_multi_inputs;
}
}
bool is_commutative = grappler::IsCommutative(*node_);
if (n_multi_inputs > 1 || (n_multi_inputs > 0 && n_named_inputs > 1)) {
is_commutative = false;
}
if (is_commutative) {
n_named_inputs = 1;
all_inputs_or_none_ = false;
} else if (n_multi_inputs > 0) {
all_inputs_or_none_ = true;
}
for (int i = 0; i < n_inputs; ++i) {
int other_position;
string other_name = ParseNodeName(node_->input(i), &other_position);
auto other_it = map->find(other_name);
if (other_it == map->end()) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrFormat(
"Node '%s' input %d refers to a non-existing node '%s'.", name(),
i, other_name));
}
GenNode* other_node = other_it->second.get();
int this_position = other_position < 0 ? -1 : (is_commutative ? 0 : i);
if (this_position >= 0 && n_multi_inputs == 0 &&
this_position >= n_named_inputs) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrFormat(
"Node '%s' has a non-control input from '%s' at index %d but its "
"operation '%s' defines only %d inputs.",
name(), other_name, this_position, op_->name(), n_named_inputs));
}
Port this_port(true, this_position);
Port other_port(false, other_position);
links_[this_port].emplace_back(LinkTarget(other_node, other_port));
other_node->links_[other_port].emplace_back(LinkTarget(this, this_port));
}
return absl::OkStatus();
}
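// Added note on the link layout produced above: for a commutative op every
// data input is recorded under the single inbound port i0, which is what
// makes Mul(node1, node2) and Mul(node2, node1) link up identically. For a
// non-commutative op with a variable-arity input the inputs keep positional
// ports i0, i1, ..., and all_inputs_or_none_ marks the node so the analyzer
// later treats the whole input list as a unit.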
bool GenNode::IsMultiInput(Port port) const {
if (!port.IsInbound()) {
return false;
}
auto it = links_.find(port);
if (it == links_.end()) {
return false;
}
return (it->second.size() > 1);
}
GenNode::Port::operator string() const {
string result = this->IsInbound() ? "i" : "o";
if (this->IsControl()) {
result.append("C");
} else {
result.append(absl::StrFormat("%d", this->Id()));
}
return result;
}
}
}
} | #include "tensorflow/core/grappler/graph_analyzer/gen_node.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "tensorflow/core/grappler/graph_analyzer/test_tools.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
namespace test {
namespace {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Ne;
TEST(GenNodeTest, Port) {
{
GenNode::Port p(true, 100);
EXPECT_THAT(p.IsInbound(), Eq(true));
EXPECT_THAT(p.IsControl(), Eq(false));
EXPECT_THAT(p.Id(), Eq(100));
GenNode::Port p2 = GenNode::Port::Decode(p.Encoded());
EXPECT_THAT(p2.IsInbound(), Eq(true));
EXPECT_THAT(p2.IsControl(), Eq(false));
EXPECT_THAT(p2.Id(), Eq(100));
}
{
GenNode::Port p(false, 0);
EXPECT_THAT(p.IsInbound(), Eq(false));
EXPECT_THAT(p.IsControl(), Eq(false));
EXPECT_THAT(p.Id(), Eq(0));
GenNode::Port p2 = GenNode::Port::Decode(p.Encoded());
EXPECT_THAT(p2.IsInbound(), Eq(false));
EXPECT_THAT(p2.IsControl(), Eq(false));
EXPECT_THAT(p2.Id(), Eq(0));
}
{
GenNode::Port p(true, -100);
EXPECT_THAT(p.IsInbound(), Eq(true));
EXPECT_THAT(p.IsControl(), Eq(true));
EXPECT_THAT(p.Id(), Eq(-100));
GenNode::Port p2 = GenNode::Port::Decode(p.Encoded());
EXPECT_THAT(p2.IsInbound(), Eq(true));
EXPECT_THAT(p2.IsControl(), Eq(true));
EXPECT_THAT(p2.Id(), Eq(-100));
}
{
GenNode::Port p(false, -1);
EXPECT_THAT(p.IsInbound(), Eq(false));
EXPECT_THAT(p.IsControl(), Eq(true));
EXPECT_THAT(p.Id(), Eq(-1));
GenNode::Port p2 = GenNode::Port::Decode(p.Encoded());
EXPECT_THAT(p2.IsInbound(), Eq(false));
EXPECT_THAT(p2.IsControl(), Eq(true));
EXPECT_THAT(p2.Id(), Eq(-1));
}
}
TEST(GenNodeTest, ParseNodeNoInputs) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
auto gn1 = map["node1"].get();
ASSERT_THAT(gn1->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre());
}
TEST(GenNodeTest, ParseNodeWithControl) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeSub("node3", "node1", "node2");
node3.add_input("^node1");
node3.add_input("^node2");
map["node3"] = std::make_unique<GenNode>(&node3);
auto gn1 = map["node1"].get();
auto gn2 = map["node2"].get();
auto gn3 = map["node3"].get();
ASSERT_THAT(gn3->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre(
"o0: node3[i0]",
"oC: node3[iC]"
));
EXPECT_THAT(DumpLinkMap(gn2->links()), ElementsAre(
"o0: node3[i1]",
"oC: node3[iC]"
));
EXPECT_THAT(DumpLinkMap(gn3->links()), ElementsAre(
"i0: node1[o0]",
"i1: node2[o0]",
"iC: node1[oC], node2[oC]"
));
EXPECT_THAT(gn3->IsMultiInput(GenNode::Port(true, 0)), Eq(false));
EXPECT_THAT(gn3->IsMultiInput(GenNode::Port(true, -1)), Eq(true));
EXPECT_FALSE(gn1->AllInputsOrNone());
EXPECT_FALSE(gn2->AllInputsOrNone());
EXPECT_FALSE(gn3->AllInputsOrNone());
}
TEST(GenNodeTest, ParseNodeCommutative) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeMul("node3", "node1", "node2");
map["node3"] = std::make_unique<GenNode>(&node3);
auto gn1 = map["node1"].get();
auto gn2 = map["node2"].get();
auto gn3 = map["node3"].get();
ASSERT_THAT(gn3->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre(
"o0: node3[i0]"
));
EXPECT_THAT(DumpLinkMap(gn2->links()), ElementsAre(
"o0: node3[i0]"
));
EXPECT_THAT(DumpLinkMap(gn3->links()), ElementsAre(
"i0: node1[o0], node2[o0]"
));
EXPECT_THAT(gn3->IsMultiInput(GenNode::Port(true, 0)), Eq(true));
EXPECT_FALSE(gn3->AllInputsOrNone());
}
TEST(GenNodeTest, ParseNodeMultiInputCommutative) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeAddN("node3", "node1", "node2");
map["node3"] = std::make_unique<GenNode>(&node3);
auto gn1 = map["node1"].get();
auto gn2 = map["node2"].get();
auto gn3 = map["node3"].get();
ASSERT_THAT(gn3->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre(
"o0: node3[i0]"
));
EXPECT_THAT(DumpLinkMap(gn2->links()), ElementsAre(
"o0: node3[i0]"
));
EXPECT_THAT(DumpLinkMap(gn3->links()), ElementsAre(
"i0: node1[o0], node2[o0]"
));
EXPECT_THAT(gn2->IsMultiInput(GenNode::Port(false, 0)), Eq(false));
EXPECT_THAT(gn3->IsMultiInput(GenNode::Port(true, 0)), Eq(true));
EXPECT_FALSE(gn3->AllInputsOrNone());
}
TEST(GenNodeTest, ParseNodeMultiInputNotCommutative) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeShapeN("node3", "node1", "node2");
map["node3"] = std::make_unique<GenNode>(&node3);
auto gn1 = map["node1"].get();
auto gn2 = map["node2"].get();
auto gn3 = map["node3"].get();
ASSERT_THAT(gn3->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre(
"o0: node3[i0]"
));
EXPECT_THAT(DumpLinkMap(gn2->links()), ElementsAre(
"o0: node3[i1]"
));
EXPECT_THAT(DumpLinkMap(gn3->links()), ElementsAre(
"i0: node1[o0]",
"i1: node2[o0]"
));
EXPECT_THAT(gn3->IsMultiInput(GenNode::Port(true, 0)), Eq(false));
EXPECT_TRUE(gn3->AllInputsOrNone());
}
TEST(GenNodeTest, ParseNodeMultiInputList) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeIdentityN("node3", "node1", "node2");
map["node3"] = std::make_unique<GenNode>(&node3);
auto gn1 = map["node1"].get();
auto gn2 = map["node2"].get();
auto gn3 = map["node3"].get();
ASSERT_THAT(gn3->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre(
"o0: node3[i0]"
));
EXPECT_THAT(DumpLinkMap(gn2->links()), ElementsAre(
"o0: node3[i1]"
));
EXPECT_THAT(DumpLinkMap(gn3->links()), ElementsAre(
"i0: node1[o0]",
"i1: node2[o0]"
));
EXPECT_THAT(gn3->IsMultiInput(GenNode::Port(true, 0)), Eq(false));
EXPECT_TRUE(gn3->AllInputsOrNone());
}
TEST(GenNodeTest, ParseNodeMultiMultiInput) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeConst("node3");
map["node3"] = std::make_unique<GenNode>(&node3);
NodeDef node4 = MakeNodeConst("node4");
map["node4"] = std::make_unique<GenNode>(&node4);
NodeDef node5 =
MakeNodeQuantizedConcat("node5", "node1", "node2", "node3", "node4");
map["node5"] = std::make_unique<GenNode>(&node5);
auto gn1 = map["node1"].get();
auto gn2 = map["node2"].get();
auto gn3 = map["node3"].get();
auto gn4 = map["node4"].get();
auto gn5 = map["node5"].get();
ASSERT_THAT(gn5->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre(
"o0: node5[i0]"
));
EXPECT_THAT(DumpLinkMap(gn2->links()), ElementsAre(
"o0: node5[i1]"
));
EXPECT_THAT(DumpLinkMap(gn3->links()), ElementsAre(
"o0: node5[i2]"
));
EXPECT_THAT(DumpLinkMap(gn4->links()), ElementsAre(
"o0: node5[i3]"
));
EXPECT_THAT(DumpLinkMap(gn5->links()), ElementsAre(
"i0: node1[o0]",
"i1: node2[o0]",
"i2: node3[o0]",
"i3: node4[o0]"
));
EXPECT_THAT(gn5->IsMultiInput(GenNode::Port(true, 1)), Eq(false));
EXPECT_THAT(gn5->IsMultiInput(GenNode::Port(true, 2)), Eq(false));
EXPECT_TRUE(gn5->AllInputsOrNone());
}
TEST(GenNodeTest, ParseNodeMultiOutput) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeBroadcastGradientArgs("node3", "node1", "node2");
map["node3"] = std::make_unique<GenNode>(&node3);
NodeDef node4 = MakeNodeSub("node4", "node3:1", "node3:0");
map["node4"] = std::make_unique<GenNode>(&node4);
auto gn4 = map["node4"].get();
ASSERT_THAT(gn4->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn4->links()), ElementsAre(
"i0: node3[o1]",
"i1: node3[o0]"
));
}
TEST(GenNodeTest, ParseNodeUndefinedOp) {
GenNodeMap map;
NodeDef node1;
node1.set_name("node1");
node1.set_op("Zzzx");
map["node1"] = std::make_unique<GenNode>(&node1);
const OpDef* opdef;
Status nested_error = OpRegistry::Global()->LookUpOpDef("Zzzx", &opdef);
auto gn = map["node1"].get();
ASSERT_THAT(
gn->ParseInputs(&map),
Eq(Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("Node 'node1' contains an undefined operation 'Zzzx': ",
nested_error.message()))));
}
TEST(GenNodeTest, ParseNodeUnexpectedInputs) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
node1.add_input("node1");
auto gn1 = map["node1"].get();
EXPECT_THAT(gn1->ParseInputs(&map),
Eq(Status(absl::StatusCode::kInvalidArgument,
"Node 'node1' has a non-control "
"input from 'node1' at index 0 but its operation "
"'Const' defines only 0 inputs.")));
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeSub("node3", "node1", "node2");
map["node3"] = std::make_unique<GenNode>(&node3);
node3.add_input("node1");
auto gn3 = map["node3"].get();
EXPECT_THAT(gn3->ParseInputs(&map),
Eq(Status(absl::StatusCode::kInvalidArgument,
"Node 'node3' has a non-control "
"input from 'node1' at index 2 but its operation "
"'Sub' defines only 2 inputs.")));
}
TEST(GenNodeTest, ParseNodeControlInputsAlwaysOk) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
node1.add_input("^node1");
auto gn1 = map["node1"].get();
ASSERT_THAT(gn1->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre(
"iC: node1[oC]",
"oC: node1[iC]"
));
}
TEST(GenNodeTest, ParseNodeInvalidInput) {
GenNodeMap map;
NodeDef node1 = MakeNodeAddN("node1", "node2", "node3");
map["node1"] = std::make_unique<GenNode>(&node1);
node1.add_input("node1");
auto gn1 = map["node1"].get();
ASSERT_THAT(
gn1->ParseInputs(&map),
Eq(Status(
absl::StatusCode::kInvalidArgument,
"Node 'node1' input 0 refers to a non-existing node 'node2'.")));
}
TEST(GenNodeTest, BuildGraphInMap) {
GraphDef graph;
(*graph.add_node()) = MakeNodeConst("node1");
(*graph.add_node()) = MakeNodeSub("node2", "node3:1", "node3:0");
(*graph.add_node()) =
MakeNodeBroadcastGradientArgs("node3", "node1", "node2");
GenNodeMap map;
ASSERT_THAT(GenNode::BuildGraphInMap(graph, &map), Eq(absl::OkStatus()));
ASSERT_THAT(map.find("node1"), Ne(map.end()));
ASSERT_THAT(map.find("node2"), Ne(map.end()));
ASSERT_THAT(map.find("node3"), Ne(map.end()));
EXPECT_THAT(map["node1"]->name(), Eq("node1"));
EXPECT_THAT(map["node2"]->name(), Eq("node2"));
EXPECT_THAT(map["node3"]->name(), Eq("node3"));
EXPECT_THAT(DumpLinkMap(map["node1"]->links()), ElementsAre(
"o0: node3[i0]"
));
EXPECT_THAT(DumpLinkMap(map["node2"]->links()), ElementsAre(
"i0: node3[o1]",
"i1: node3[o0]",
"o0: node3[i1]"
));
EXPECT_THAT(DumpLinkMap(map["node3"]->links()), ElementsAre(
"i0: node1[o0]",
"i1: node2[o0]",
"o0: node2[i1]",
"o1: node2[i0]"
));
}
TEST(GenNodeTest, BuildGraphInMapDuplicateNode) {
GraphDef graph;
(*graph.add_node()) = MakeNodeConst("node1");
(*graph.add_node()) = MakeNodeConst("node1");
GenNodeMap map;
ASSERT_THAT(GenNode::BuildGraphInMap(graph, &map),
Eq(Status(absl::StatusCode::kInvalidArgument,
"Duplicate node name 'node1'.")));
}
TEST(GenNodeTest, BuildGraphInMapParseError) {
GraphDef graph;
(*graph.add_node()) = MakeNodeConst("node1");
(*graph.add_node()) = MakeNodeSub("node2", "node3:1", "node3:0");
GenNodeMap map;
ASSERT_THAT(
GenNode::BuildGraphInMap(graph, &map),
Eq(Status(
absl::StatusCode::kInvalidArgument,
"Node 'node2' input 0 refers to a non-existing node 'node3'.")));
}
}
}
}
}
} |
1,376 | cpp | tensorflow/tensorflow | function_api_info | tensorflow/core/grappler/optimizers/function_api_info.cc | tensorflow/core/grappler/optimizers/function_api_info_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_FUNCTION_API_INFO_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_FUNCTION_API_INFO_H_
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace grappler {
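// Parsed per-function API metadata: which interface a function implements
// (attr "api_implements"), its preferred device (attr "api_preferred_device"),
// and whether it is the inference, forward, or backward half of a pair (attrs
// "forward_function_name" / "backward_function_name").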
class FunctionApiInfo {
public:
FunctionApiInfo();
virtual ~FunctionApiInfo();
enum FunctionType {
INFERENCE,
FORWARD,
BACKWARD,
};
Status Init(const FunctionDef& function_def);
const string& interface_name() const;
const string& preferred_device() const;
  FunctionType function_type() const;
const string& pairing_function_name() const;
const DataTypeVector& input_arg_dtypes() const;
const DataTypeVector& output_arg_dtypes() const;
private:
string interface_name_;
string preferred_device_;
FunctionType function_type_;
string pairing_function_name_;
DataTypeVector input_arg_dtypes_;
DataTypeVector output_arg_dtypes_;
FunctionApiInfo(const FunctionApiInfo&) = delete;
void operator=(const FunctionApiInfo&) = delete;
};
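// API info for a whole function library, indexed both by function name and by
// interface name. A minimal usage sketch (`graph_def` is a placeholder for
// whatever GraphDef the caller already has):
//
//   FunctionLibraryApiInfo api_info;
//   TF_RETURN_IF_ERROR(api_info.Init(graph_def.library()));
//   std::vector<string> equivalents;
//   TF_RETURN_IF_ERROR(
//       api_info.GetEquivalentImplementations("DoStuffCpu", &equivalents));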
class FunctionLibraryApiInfo {
public:
FunctionLibraryApiInfo();
virtual ~FunctionLibraryApiInfo();
Status Init(const FunctionDefLibrary& function_library);
Status GetEquivalentImplementations(
const string& function_name, std::vector<string>* other_functions) const;
const FunctionApiInfo* GetApiInfo(const string& function_name) const;
bool empty() const { return func_info_.empty(); }
std::size_t size() const { return func_info_.size(); }
private:
std::unordered_map<string, std::unique_ptr<FunctionApiInfo>> func_info_;
absl::flat_hash_map<string, std::vector<string>> intf_to_inference_funcs_;
absl::flat_hash_map<string, std::vector<string>> intf_to_forward_funcs_;
absl::flat_hash_map<string, std::vector<string>> intf_to_backward_funcs_;
FunctionLibraryApiInfo(const FunctionLibraryApiInfo&) = delete;
void operator=(const FunctionLibraryApiInfo&) = delete;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/function_api_info.h"
#include <string>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace grappler {
FunctionApiInfo::FunctionApiInfo() {}
FunctionApiInfo::~FunctionApiInfo() {}
Status FunctionApiInfo::Init(const FunctionDef& function_def) {
function_type_ = FunctionApiInfo::FunctionType::INFERENCE;
for (const auto& attr : function_def.attr()) {
if (attr.first == "api_preferred_device") {
preferred_device_ = attr.second.s();
}
if (attr.first == "api_implements") {
interface_name_ = attr.second.s();
}
if (attr.first == "forward_function_name") {
function_type_ = FunctionApiInfo::FunctionType::BACKWARD;
pairing_function_name_ = attr.second.s();
}
if (attr.first == "backward_function_name") {
function_type_ = FunctionApiInfo::FunctionType::FORWARD;
pairing_function_name_ = attr.second.s();
}
}
input_arg_dtypes_.reserve(function_def.signature().input_arg_size());
for (const auto& input_arg : function_def.signature().input_arg()) {
input_arg_dtypes_.emplace_back(input_arg.type());
}
output_arg_dtypes_.reserve(function_def.signature().output_arg_size());
for (const auto& output_arg : function_def.signature().output_arg()) {
output_arg_dtypes_.emplace_back(output_arg.type());
}
if (interface_name_.empty() && !preferred_device_.empty()) {
return errors::InvalidArgument(
"Function '", function_def.signature().name(),
"' has a preferred device, but does not implement an interface");
}
return absl::OkStatus();
}
const string& FunctionApiInfo::preferred_device() const {
return preferred_device_;
}
const string& FunctionApiInfo::interface_name() const {
return interface_name_;
}
FunctionApiInfo::FunctionType FunctionApiInfo::function_type() const {
return function_type_;
}
const string& FunctionApiInfo::pairing_function_name() const {
return pairing_function_name_;
}
const DataTypeVector& FunctionApiInfo::input_arg_dtypes() const {
return input_arg_dtypes_;
}
const DataTypeVector& FunctionApiInfo::output_arg_dtypes() const {
return output_arg_dtypes_;
}
FunctionLibraryApiInfo::FunctionLibraryApiInfo() {}
FunctionLibraryApiInfo::~FunctionLibraryApiInfo() {}
namespace {
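// Two ArgDefs are interchangeable iff their type information and ref-ness
// match exactly.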
bool IsSameArgDef(const OpDef::ArgDef& arg1, const OpDef::ArgDef& arg2) {
if (arg1.type() != arg2.type()) return false;
if (arg1.type_attr() != arg2.type_attr()) return false;
if (arg1.number_attr() != arg2.number_attr()) return false;
if (arg1.type_list_attr() != arg2.type_list_attr()) return false;
if (arg1.is_ref() != arg2.is_ref()) return false;
return true;
}
bool IsSameSignature(const FunctionDef& f1, const FunctionDef& f2,
const bool check_inputs, const bool check_outputs) {
const auto& sig1 = f1.signature();
const auto& sig2 = f2.signature();
if (check_inputs) {
if (sig1.input_arg_size() != sig2.input_arg_size()) return false;
for (int k = 0; k < sig1.input_arg_size(); ++k) {
if (!IsSameArgDef(sig1.input_arg(k), sig2.input_arg(k))) return false;
}
}
if (check_outputs) {
if (f1.ret().size() != f2.ret().size()) return false;
if (sig1.output_arg_size() != sig2.output_arg_size()) return false;
for (int k = 0; k < sig1.output_arg_size(); ++k) {
if (!IsSameArgDef(sig1.output_arg(k), sig2.output_arg(k))) return false;
}
}
return true;
}
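// Implementations of one interface must be interchangeable at the call site:
// inference functions must match on inputs and outputs, forward functions
// only on inputs, and backward functions only on outputs.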
Status ValidateSignature(const string& interface_name,
const std::vector<const FunctionDef*>& equiv_funcs,
const FunctionApiInfo::FunctionType function_type) {
if (equiv_funcs.size() < 2) return absl::OkStatus();
for (size_t k = 1; k < equiv_funcs.size(); ++k) {
const bool check_input =
(function_type == FunctionApiInfo::FunctionType::INFERENCE ||
function_type == FunctionApiInfo::FunctionType::FORWARD);
const bool check_output =
(function_type == FunctionApiInfo::FunctionType::INFERENCE ||
function_type == FunctionApiInfo::FunctionType::BACKWARD);
if (!IsSameSignature(*equiv_funcs[0], *equiv_funcs[k], check_input,
check_output)) {
return errors::InvalidArgument(
"Functions '", equiv_funcs[0]->signature().name(), "' and '",
equiv_funcs[k]->signature().name(), "' both implement '",
interface_name, "' but their signatures do not match.");
}
}
return absl::OkStatus();
}
Status ValidateSignatures(
const std::unordered_map<string, std::vector<const FunctionDef*>>&
intf_to_func,
const FunctionApiInfo::FunctionType function_type) {
for (const auto& item : intf_to_func)
TF_RETURN_IF_ERROR(
ValidateSignature(item.first, item.second, function_type));
return absl::OkStatus();
}
}
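// Scans the library once, recording API info for every annotated function and
// validating that all implementations of an interface have compatible
// signatures.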
Status FunctionLibraryApiInfo::Init(
const FunctionDefLibrary& function_library) {
std::unordered_map<string, std::vector<const FunctionDef*>> infer_funcs;
std::unordered_map<string, std::vector<const FunctionDef*>> fwd_funcs;
std::unordered_map<string, std::vector<const FunctionDef*>> bwd_funcs;
for (const auto& function : function_library.function()) {
    auto func_info = std::make_unique<FunctionApiInfo>();
TF_RETURN_IF_ERROR(func_info->Init(function));
if (func_info->interface_name().empty()) continue;
const string& function_name = function.signature().name();
const string& interface_name = func_info->interface_name();
VLOG(3) << "Got " << func_info->function_type()
<< " function: " << function_name
<< " with interface: " << interface_name;
switch (func_info->function_type()) {
case FunctionApiInfo::FunctionType::INFERENCE:
intf_to_inference_funcs_[interface_name].emplace_back(function_name);
infer_funcs[interface_name].emplace_back(&function);
break;
case FunctionApiInfo::FunctionType::FORWARD:
intf_to_forward_funcs_[interface_name].emplace_back(function_name);
fwd_funcs[interface_name].emplace_back(&function);
break;
case FunctionApiInfo::FunctionType::BACKWARD:
intf_to_backward_funcs_[interface_name].emplace_back(function_name);
bwd_funcs[interface_name].emplace_back(&function);
break;
default:
return errors::InvalidArgument("Unrecognized function type: ",
func_info->function_type());
}
func_info_[function_name] = std::move(func_info);
}
TF_RETURN_IF_ERROR(ValidateSignatures(
infer_funcs, FunctionApiInfo::FunctionType::INFERENCE));
TF_RETURN_IF_ERROR(
ValidateSignatures(fwd_funcs, FunctionApiInfo::FunctionType::FORWARD));
TF_RETURN_IF_ERROR(
ValidateSignatures(bwd_funcs, FunctionApiInfo::FunctionType::BACKWARD));
return absl::OkStatus();
}
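// Fills `other_functions` with the names of the other implementations of the
// same interface (and of the same function type). An unknown function name
// yields an empty result rather than an error.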
Status FunctionLibraryApiInfo::GetEquivalentImplementations(
const string& function_name, std::vector<string>* other_functions) const {
const auto func_it = func_info_.find(function_name);
if (func_it == func_info_.end()) return absl::OkStatus();
const FunctionApiInfo* func_info = func_it->second.get();
absl::flat_hash_map<string, std::vector<string>>::const_iterator it;
switch (func_info->function_type()) {
case FunctionApiInfo::FunctionType::INFERENCE:
it = intf_to_inference_funcs_.find(func_info->interface_name());
break;
case FunctionApiInfo::FunctionType::FORWARD:
it = intf_to_forward_funcs_.find(func_info->interface_name());
break;
case FunctionApiInfo::FunctionType::BACKWARD:
it = intf_to_backward_funcs_.find(func_info->interface_name());
break;
default:
return errors::InvalidArgument("Unrecognized function type: ",
func_info->function_type());
}
for (const auto& func_name : it->second) {
if (func_name == function_name) continue;
other_functions->emplace_back(func_name);
}
return absl::OkStatus();
}
const FunctionApiInfo* FunctionLibraryApiInfo::GetApiInfo(
const string& function_name) const {
const auto it = func_info_.find(function_name);
if (it == func_info_.end()) return nullptr;
return it->second.get();
}
}
} | #include "tensorflow/core/grappler/optimizers/function_api_info.h"
#include <string>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
void SetArg(const string& name, const string& type_name,
OpDef::ArgDef* arg_def) {
arg_def->set_name(name);
arg_def->set_type_attr(type_name);
}
typedef std::pair<string, string> ArgSpec;
void SetArgs(const std::vector<ArgSpec>& input_args_spec,
const std::vector<ArgSpec>& output_args_spec, OpDef* sig) {
for (const auto& arg_spec : input_args_spec)
SetArg(arg_spec.first, arg_spec.second, sig->add_input_arg());
for (const auto& arg_spec : output_args_spec)
SetArg(arg_spec.first, arg_spec.second, sig->add_output_arg());
}
void PopulateFunction(const string& name, const string& api_interface_name,
const string& preferred_device,
const std::vector<ArgSpec>& input_args,
const std::vector<ArgSpec>& output_args,
const string& forward_function_name,
const string& backward_function_name,
FunctionDef* func_def) {
OpDef* sig = func_def->mutable_signature();
sig->set_name(name);
SetArgs(input_args, output_args, sig);
auto* func_attr = func_def->mutable_attr();
if (!api_interface_name.empty())
(*func_attr)["api_implements"].set_s(api_interface_name);
if (!preferred_device.empty())
(*func_attr)["api_preferred_device"].set_s(preferred_device);
if (!forward_function_name.empty())
(*func_attr)["forward_function_name"].set_s(forward_function_name);
if (!backward_function_name.empty())
(*func_attr)["backward_function_name"].set_s(backward_function_name);
}
void PopulateSampleLibrary(const bool mismatch_args,
FunctionDefLibrary* func_lib) {
const std::vector<ArgSpec> func_args{{"in1", "float32"}, {"in2", "int32"}};
const std::vector<ArgSpec> func_wrong_args{{"in1", "int32"},
{"in2", "int32"}};
const std::vector<ArgSpec> output_args{{"out", "float32"}};
PopulateFunction("DoStuffCpu", "DoStuff", "CPU", func_args, output_args, "",
"", func_lib->add_function());
PopulateFunction("DoStuffGpu", "DoStuff", "GPU",
mismatch_args ? func_wrong_args : func_args, output_args, "",
"", func_lib->add_function());
PopulateFunction("DoThings", "DoThings", "", func_args, output_args, "", "",
func_lib->add_function());
PopulateFunction("OneOff", "", "", func_args, output_args, "", "",
func_lib->add_function());
PopulateFunction("AnotherOneOff", "", "", func_args, output_args, "", "",
func_lib->add_function());
}
void PopulateComplexLibrary(FunctionDefLibrary* func_lib) {
const std::vector<ArgSpec> input_args{{"in1", "float32"}, {"in2", "int32"}};
const std::vector<ArgSpec> output_args{{"out", "float32"}};
const std::vector<ArgSpec> output_with_state{
{"out", "float32"}, {"state1", "int32"}, {"state2", "int32"}};
PopulateFunction("DoStuffCpu", "DoStuff", "CPU", input_args, output_args, "",
"DoStuffCpu_gradient", func_lib->add_function());
PopulateFunction("DoStuffCpu_gradient", "DoStuff", "CPU", output_args,
input_args, "DoStuffCpu", "", func_lib->add_function());
PopulateFunction("DoStuffGpu", "DoStuff", "GPU", input_args,
output_with_state, "", "DoStuffGpu_gradient",
func_lib->add_function());
PopulateFunction("DoStuffGpu_gradient", "DoStuff", "GPU", output_with_state,
input_args, "DoStuffGpu", "", func_lib->add_function());
}
bool CheckEquivImpl(const FunctionLibraryApiInfo& lib_api_info,
const string& func_name,
const std::vector<string>& expected_other) {
std::vector<string> other_impl;
Status status =
lib_api_info.GetEquivalentImplementations(func_name, &other_impl);
EXPECT_EQ(status, absl::OkStatus());
const std::unordered_set<string> actual(other_impl.begin(), other_impl.end());
const std::unordered_set<string> expected(expected_other.begin(),
expected_other.end());
return actual == expected;
}
string GetInterfaceName(const FunctionLibraryApiInfo& lib_api_info,
const string& func_name) {
auto* info = lib_api_info.GetApiInfo(func_name);
CHECK_NOTNULL(info);
return info->interface_name();
}
string GetPreferredDevice(const FunctionLibraryApiInfo& lib_api_info,
const string& func_name) {
auto* info = lib_api_info.GetApiInfo(func_name);
CHECK_NOTNULL(info);
return info->preferred_device();
}
TEST(FunctionApiInfoTest, ParseTags) {
FunctionDefLibrary func_lib;
  PopulateSampleLibrary(/*mismatch_args=*/false, &func_lib);
FunctionLibraryApiInfo lib_api_info;
TF_ASSERT_OK(lib_api_info.Init(func_lib));
EXPECT_EQ("DoStuff", GetInterfaceName(lib_api_info, "DoStuffCpu"));
EXPECT_EQ("DoStuff", GetInterfaceName(lib_api_info, "DoStuffGpu"));
EXPECT_EQ("DoThings", GetInterfaceName(lib_api_info, "DoThings"));
EXPECT_EQ("CPU", GetPreferredDevice(lib_api_info, "DoStuffCpu"));
EXPECT_EQ("GPU", GetPreferredDevice(lib_api_info, "DoStuffGpu"));
EXPECT_EQ("", GetPreferredDevice(lib_api_info, "DoThings"));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoStuffCpu", {"DoStuffGpu"}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoStuffGpu", {"DoStuffCpu"}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "Undefined", {}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "OneOff", {}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "AnotherOneOff", {}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoThings", {}));
}
TEST(FunctionApiInfoTest, ComplexFunctionLib) {
FunctionDefLibrary func_lib;
PopulateComplexLibrary(&func_lib);
FunctionLibraryApiInfo lib_api_info;
TF_ASSERT_OK(lib_api_info.Init(func_lib));
EXPECT_EQ("DoStuff", GetInterfaceName(lib_api_info, "DoStuffCpu"));
EXPECT_EQ("DoStuff", GetInterfaceName(lib_api_info, "DoStuffCpu_gradient"));
EXPECT_EQ("DoStuff", GetInterfaceName(lib_api_info, "DoStuffGpu"));
EXPECT_EQ("DoStuff", GetInterfaceName(lib_api_info, "DoStuffGpu_gradient"));
EXPECT_EQ("CPU", GetPreferredDevice(lib_api_info, "DoStuffCpu"));
EXPECT_EQ("CPU", GetPreferredDevice(lib_api_info, "DoStuffCpu_gradient"));
EXPECT_EQ("GPU", GetPreferredDevice(lib_api_info, "DoStuffGpu"));
EXPECT_EQ("GPU", GetPreferredDevice(lib_api_info, "DoStuffGpu_gradient"));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoStuffCpu", {"DoStuffGpu"}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoStuffGpu", {"DoStuffCpu"}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoStuffCpu_gradient",
{"DoStuffGpu_gradient"}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoStuffGpu_gradient",
{"DoStuffCpu_gradient"}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "Undefined", {}));
}
TEST(FunctionApiInfoTest, MismatchedArguments) {
FunctionDefLibrary func_lib;
  PopulateSampleLibrary(/*mismatch_args=*/true, &func_lib);
FunctionLibraryApiInfo lib_api_info;
const Status ret = lib_api_info.Init(func_lib);
EXPECT_FALSE(ret.ok());
}
}
}
} |
1,377 | cpp | tensorflow/tensorflow | memory_optimizer | tensorflow/core/grappler/optimizers/memory_optimizer.cc | tensorflow/core/grappler/optimizers/memory_optimizer_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_MEMORY_OPTIMIZER_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_MEMORY_OPTIMIZER_H_
#include <string>
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace grappler {
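// Reduces peak device memory usage by (a) recomputing cheap subgraphs instead
// of keeping their outputs live until the backward pass, (b) swapping tensors
// out to host memory, and (c) rewriting large AddN/AccumulateNV2 aggregations
// into incremental accumulation, depending on optimization_level.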
class MemoryOptimizer : public GraphOptimizer {
public:
explicit MemoryOptimizer(
RewriterConfig::MemOptType optimization_level,
const string& recomputation_targets_name_scope = "gradients/")
: optimization_level_(optimization_level),
recomputation_targets_name_scope_(recomputation_targets_name_scope) {}
~MemoryOptimizer() override {}
  string name() const override { return "memory_optimizer"; }
bool UsesFunctionLibrary() const override { return false; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* pruned_graph) override;
private:
RewriterConfig::MemOptType optimization_level_;
string recomputation_targets_name_scope_;
};
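// A minimal usage sketch (`cluster` and `item` are placeholders for whatever
// the caller already has):
//
//   MemoryOptimizer optimizer(RewriterConfig::SWAPPING_HEURISTICS);
//   GraphDef optimized_graph;
//   TF_RETURN_IF_ERROR(optimizer.Optimize(cluster, item, &optimized_graph));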
}
}
#endif
#include "tensorflow/core/grappler/optimizers/memory_optimizer.h"
#include <algorithm>
#include <queue>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/costs/graph_memory.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/costs/utils.h"
#include "tensorflow/core/grappler/graph_topology_view.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/static_schedule.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/grappler/utils/traversal.h"
#include "tensorflow/core/lib/math/math_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace grappler {
namespace {
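// Name prefixes for recomputed node copies and their trigger NoOps, plus the
// node attr that lets users manually request recomputation.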
const char* kRecomputedNodePrefix = "Recomputed";
const char* kRecomputeTriggerNodePrefix = "RecomputeTrigger";
const char* kRecomputeHint = "_recompute_hint";
std::unordered_set<string> GetCheapToRecomputeOps() {
std::unordered_set<string> cheap_ops = {"Add",
"AddN",
"BiasAdd",
"Cast",
"Fill",
"FloorDiv",
"FloorMod",
"FusedBatchNorm",
"LeakyRelu",
"Mul",
"Neg",
"RealDiv",
"Reciprocal",
"Relu",
"Relu6",
"Reshape",
"Rsqrt",
"Sigmoid",
"Sqrt",
"Square",
"SquaredDifference",
"Sub",
"Tile",
"Transpose"};
return cheap_ops;
}
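// A candidate feeds at least one target (gradient) node but takes no input
// from a target, so its output can be regenerated just before the backward
// pass consumes it.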
std::unordered_set<const NodeDef*> FindCandidateRecomputeNodes(
const NodeMap& node_map, const GraphDef* graph,
const std::function<bool(const NodeDef&)>& is_candidate,
const std::function<bool(const NodeDef&)>& is_target) {
std::unordered_set<const NodeDef*> candidate_recompute_nodes;
for (const auto& node : graph->node()) {
if (!is_candidate(node)) {
continue;
}
bool has_target_output = false;
for (const NodeDef* output : node_map.GetOutputs(node.name())) {
if (is_target(*output)) {
has_target_output = true;
break;
}
}
if (!has_target_output) {
continue;
}
bool has_target_input = false;
for (const string& input_name : node.input()) {
const NodeDef* input_node = node_map.GetNode(input_name);
if (is_target(*input_node)) {
has_target_input = true;
break;
}
}
if (has_target_input) {
continue;
}
candidate_recompute_nodes.insert(&node);
}
return candidate_recompute_nodes;
}
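// Grows the seed set in *expanded_nodes into the connected subgraph of nodes
// accepted by is_candidate, following inputs and/or outputs as requested.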
void connected_subgraph(const NodeMap& node_map, bool collect_inputs,
bool collect_outputs,
const std::function<bool(const NodeDef&)>& is_candidate,
std::unordered_set<const NodeDef*>* expanded_nodes) {
std::queue<const NodeDef*> to_visit;
for (const NodeDef* starting_node : *expanded_nodes) {
to_visit.push(starting_node);
}
expanded_nodes->clear();
while (!to_visit.empty()) {
const NodeDef* current_node = to_visit.front();
to_visit.pop();
if (!expanded_nodes->insert(current_node).second) {
continue;
}
if (collect_inputs) {
for (const string& input_name_raw : current_node->input()) {
const NodeDef* input_node = node_map.GetNode(input_name_raw);
if (expanded_nodes->count(input_node) == 0 &&
is_candidate(*input_node)) {
to_visit.push(input_node);
}
}
}
if (collect_outputs) {
for (const NodeDef* output : node_map.GetOutputs(current_node->name())) {
if (expanded_nodes->count(output) == 0 && is_candidate(*output)) {
to_visit.push(output);
}
}
}
}
}
struct RecomputedSubGraph {
std::unordered_set<const NodeDef*> recomputed_source_nodes;
std::unordered_set<NodeDef*> target_nodes;
};
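// Groups candidate nodes into connected subgraphs, recording for each the
// source nodes to recompute and the target nodes that consume them.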
std::vector<RecomputedSubGraph> GetOpGroupsToRecompute(
const GraphDef* graph, const NodeMap& node_map,
const std::function<bool(const NodeDef&)>& should_recompute,
const std::function<bool(const NodeDef&)>& is_target) {
std::unordered_set<const NodeDef*> visited_nodes;
std::vector<RecomputedSubGraph> subgraphs_to_recompute;
std::unordered_set<const NodeDef*> candidate_recompute_nodes =
FindCandidateRecomputeNodes(node_map, graph, should_recompute, is_target);
for (const NodeDef* recompute_node : candidate_recompute_nodes) {
if (visited_nodes.count(recompute_node) > 0) {
continue;
}
RecomputedSubGraph current_recomputation;
std::unordered_set<const NodeDef*> unpruned_recompute_nodes;
unpruned_recompute_nodes.insert(recompute_node);
    connected_subgraph(node_map, /*collect_inputs=*/true,
                       /*collect_outputs=*/true, should_recompute,
                       &unpruned_recompute_nodes);
visited_nodes.insert(unpruned_recompute_nodes.begin(),
unpruned_recompute_nodes.end());
for (const NodeDef* unpruned_recompute_node : unpruned_recompute_nodes) {
bool inserted_feed = false;
for (NodeDef* output :
node_map.GetOutputs(unpruned_recompute_node->name())) {
if (is_target(*output)) {
current_recomputation.target_nodes.insert(output);
if (!inserted_feed) {
current_recomputation.recomputed_source_nodes.insert(
unpruned_recompute_node);
inserted_feed = true;
}
}
}
}
    connected_subgraph(
        node_map, /*collect_inputs=*/true, /*collect_outputs=*/false,
        [&unpruned_recompute_nodes](const NodeDef& node) {
          return unpruned_recompute_nodes.count(&node) != 0;
        },
        &current_recomputation.recomputed_source_nodes);
if (current_recomputation.target_nodes.empty()) {
continue;
}
subgraphs_to_recompute.push_back(current_recomputation);
}
return subgraphs_to_recompute;
}
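// Maps each recomputed source node to the most constraining topological
// component among the targets it feeds, directly or through other recomputed
// nodes; this bounds how late its recomputation may be triggered.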
std::unordered_map<const NodeDef*, int> GetMaxDownstreamComponents(
const std::unordered_set<const NodeDef*>& recomputed_source_nodes,
const std::unordered_set<NodeDef*>& target_nodes, const NodeMap& node_map,
const std::unordered_map<const NodeDef*, int>& components) {
std::unordered_map<const NodeDef*, int> recomputed_node_components;
for (const NodeDef* original_recompute_node : recomputed_source_nodes) {
int max_target_component = -1;
for (NodeDef* output :
node_map.GetOutputs(original_recompute_node->name())) {
if (target_nodes.count(output) != 0) {
int current_target_component = components.find(output)->second;
if (current_target_component > max_target_component) {
max_target_component = current_target_component;
}
}
}
if (max_target_component > -1) {
recomputed_node_components[original_recompute_node] =
max_target_component;
}
}
std::vector<const NodeDef*> recomputed_source_nodes_topological(
recomputed_source_nodes.begin(), recomputed_source_nodes.end());
std::sort(recomputed_source_nodes_topological.begin(),
recomputed_source_nodes_topological.end(),
[&components](const NodeDef* first, const NodeDef* second) {
return components.find(first)->second <
components.find(second)->second;
});
for (const NodeDef* original_recompute_node :
recomputed_source_nodes_topological) {
int max_component;
auto recomputed_component_iterator =
recomputed_node_components.find(original_recompute_node);
if (recomputed_component_iterator != recomputed_node_components.end()) {
max_component = recomputed_component_iterator->second;
} else {
max_component = -1;
}
for (NodeDef* output :
node_map.GetOutputs(original_recompute_node->name())) {
if (recomputed_source_nodes.count(output) == 0) {
continue;
}
auto child_component_iterator = recomputed_node_components.find(output);
CHECK(child_component_iterator != recomputed_node_components.end());
int child_component = child_component_iterator->second;
if (child_component > max_component) {
max_component = child_component;
}
}
CHECK_GE(max_component, 0);
recomputed_node_components[original_recompute_node] = max_component;
}
return recomputed_node_components;
}
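// Builds one NoOp "RecomputeTrigger" per recomputed node, chains the triggers
// together, and hangs them off gradient-side inputs so each recomputation is
// deferred until the backward pass actually reaches it. Returns the
// original-node -> trigger mapping.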
std::unordered_map<const NodeDef*, const NodeDef*>
AddRecomputeControlDependencyNodes(
const std::unordered_set<const NodeDef*>& recomputed_source_nodes,
const std::unordered_set<NodeDef*>& target_nodes, const NodeMap& node_map,
const std::unordered_map<const NodeDef*, int>& components,
const std::unordered_map<const NodeDef*, int>&
recomputed_node_max_feed_components,
GraphDef* graph) {
std::vector<const NodeDef*> recomputed_source_nodes_topological(
recomputed_source_nodes.begin(), recomputed_source_nodes.end());
std::sort(recomputed_source_nodes_topological.begin(),
recomputed_source_nodes_topological.end(),
[&recomputed_node_max_feed_components](const NodeDef* first,
const NodeDef* second) {
int first_component =
recomputed_node_max_feed_components.find(first)->second;
int second_component =
recomputed_node_max_feed_components.find(second)->second;
return first_component > second_component
|| (first_component == second_component &&
first->name() > second->name());
});
std::vector<const NodeDef*> target_inputs_topological;
for (const NodeDef* target_node : target_nodes) {
for (const string& target_input_name_raw : target_node->input()) {
const NodeDef* target_input = node_map.GetNode(target_input_name_raw);
if (target_input == nullptr ||
recomputed_source_nodes.count(target_input) != 0 ||
components.find(target_node)->second ==
components.find(target_input)->second) {
continue;
}
target_inputs_topological.push_back(target_input);
}
}
std::sort(target_inputs_topological.begin(), target_inputs_topological.end(),
[&components](const NodeDef* first, const NodeDef* second) {
return components.find(first)->second >
components.find(second)->second;
});
auto target_input_iterator = target_inputs_topological.begin();
NodeDef* current_trigger_node = nullptr;
std::unordered_map<const NodeDef*, const NodeDef*> triggers;
for (const NodeDef* original_recomputed_node :
recomputed_source_nodes_topological) {
NodeDef* new_trigger_node = graph->add_node();
new_trigger_node->set_name(AddPrefixToNodeName(
original_recomputed_node->name(), kRecomputeTriggerNodePrefix));
new_trigger_node->set_op("NoOp");
new_trigger_node->set_device(original_recomputed_node->device());
if (current_trigger_node != nullptr) {
*new_trigger_node->add_input() =
strings::StrCat("^", current_trigger_node->name());
}
current_trigger_node = new_trigger_node;
triggers[original_recomputed_node] = current_trigger_node;
for (;
target_input_iterator != target_inputs_topological.end() &&
components.find(*target_input_iterator)->second >
recomputed_node_max_feed_components.find(original_recomputed_node)
->second;
++target_input_iterator) {
*current_trigger_node->add_input() =
strings::StrCat("^", (*target_input_iterator)->name());
VLOG(2) << " Recomputation trigger " << current_trigger_node->name()
<< " depends on " << (*target_input_iterator)->name();
}
}
return triggers;
}
string RecomputedOrOriginalNodeName(
const std::unordered_set<string>& recomputed_node_names,
const string& original_node_name) {
if (recomputed_node_names.find(original_node_name) ==
recomputed_node_names.end()) {
return original_node_name;
} else {
return AddPrefixToNodeName(original_node_name, kRecomputedNodePrefix);
}
}
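// Materializes the recomputation: copies each source node under the
// "Recomputed/" prefix, wires the copies to their trigger NoOps, and repoints
// the targets' inputs at the copies.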
void RecomputeSubgraph(
const std::unordered_set<const NodeDef*>& recomputed_source_nodes,
const std::unordered_set<NodeDef*>& target_nodes, const NodeMap& node_map,
const std::unordered_map<const NodeDef*, int>& components,
GraphDef* graph) {
std::unordered_set<string> recomputed_node_names;
VLOG(1) << "Recomputing a " << recomputed_source_nodes.size()
<< " node subgraph";
std::unordered_map<const NodeDef*, int> recomputed_node_components =
GetMaxDownstreamComponents(recomputed_source_nodes, target_nodes,
node_map, components);
for (const NodeDef* original_node : recomputed_source_nodes) {
VLOG(2) << " " << original_node->name();
recomputed_node_names.insert(original_node->name());
}
std::unordered_map<const NodeDef*, const NodeDef*> triggers =
AddRecomputeControlDependencyNodes(recomputed_source_nodes, target_nodes,
node_map, components,
recomputed_node_components, graph);
for (const NodeDef* original_node : recomputed_source_nodes) {
NodeDef* copied_node = graph->add_node();
copied_node->set_name(
AddPrefixToNodeName(original_node->name(), kRecomputedNodePrefix));
copied_node->set_op(original_node->op());
*copied_node->mutable_attr() = original_node->attr();
copied_node->set_device(original_node->device());
for (const string& original_input_name : original_node->input()) {
*copied_node->add_input() = RecomputedOrOriginalNodeName(
recomputed_node_names, original_input_name);
}
*copied_node->add_input() =
strings::StrCat("^", triggers[original_node]->name());
}
for (NodeDef* target_node : target_nodes) {
for (string& target_input_name : *target_node->mutable_input()) {
target_input_name = RecomputedOrOriginalNodeName(recomputed_node_names,
target_input_name);
}
}
}
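// Top-level recomputation pass: selects subgraphs either via the cheap-op
// heuristic or via explicit "_recompute_hint" annotations (never recomputing
// fed nodes or targets), then rewrites the graph in place.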
void RecomputationRewritingPass(RewriterConfig::MemOptType optimization_level,
const string& recomputation_targets_name_scope,
GraphDef* graph, const GrapplerItem& item) {
TF_CHECK_OK(TopologicalSort(graph));
NodeMap node_map(graph);
std::vector<RecomputedSubGraph> recomputed_subgraphs;
std::unordered_set<string> feeds;
for (const auto& feed : item.feed) {
feeds.insert(NodeName(feed.first));
}
std::function<bool(const NodeDef&)> is_target =
[&recomputation_targets_name_scope](const NodeDef& node) {
return absl::StartsWith(node.name(),
recomputation_targets_name_scope) ||
               node.name().find("/" + recomputation_targets_name_scope) !=
                   string::npos;
};
if (optimization_level == RewriterConfig::RECOMPUTATION_HEURISTICS ||
optimization_level == RewriterConfig::HEURISTICS) {
std::unordered_set<string> cheap_to_recompute_ops =
GetCheapToRecomputeOps();
recomputed_subgraphs = GetOpGroupsToRecompute(
graph, node_map,
[&cheap_to_recompute_ops, &feeds, &is_target](const NodeDef& node) {
return !is_target(node) && feeds.count(node.name()) == 0 &&
(cheap_to_recompute_ops.count(node.op()) > 0 ||
node.attr().count(kRecomputeHint) > 0);
},
is_target);
} else if (optimization_level == RewriterConfig::MANUAL) {
recomputed_subgraphs = GetOpGroupsToRecompute(
graph, node_map,
[&feeds, &is_target](const NodeDef& node) {
return !is_target(node) && feeds.count(node.name()) == 0 &&
node.attr().count(kRecomputeHint) > 0;
},
is_target);
}
if (!recomputed_subgraphs.empty()) {
std::unordered_map<const NodeDef*, int> topological_numbering;
for (int node_number = 0; node_number < graph->node().size();
++node_number) {
topological_numbering[graph->mutable_node(node_number)] =
graph->node().size() - node_number - 1;
}
for (const RecomputedSubGraph& subgraph : recomputed_subgraphs) {
RecomputeSubgraph(subgraph.recomputed_source_nodes, subgraph.target_nodes,
node_map, topological_numbering, graph);
}
}
}
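// Rewrites large AddN/AccumulateNV2 nodes on memory-constrained devices into
// a zero-initialized TemporaryVariable that the inputs are accumulated into,
// so they need not all be live at once. Returns true if the graph changed.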
bool SchedulingPass(Cluster* cluster, std::unique_ptr<GraphMemory>* memory_ptr,
GrapplerItem* item) {
MutableGraphView view(&item->graph);
std::unordered_map<string, std::unordered_set<NodeDef*>> addn_list;
for (NodeDef& node : *item->graph.mutable_node()) {
if (!IsAddN(node) && node.op() != "AccumulateNV2") {
continue;
}
if (view.NumFanins(node, false) <= 2) {
continue;
}
for (const auto& input : view.GetFanins(node, false)) {
if (input.node->device() == node.device()) {
string tensor_name =
strings::StrCat(input.node->name(), ":", input.port_id);
addn_list[tensor_name].insert(&node);
}
}
}
if (addn_list.empty()) {
return false;
}
if ((*memory_ptr) == nullptr) {
memory_ptr->reset(new GraphMemory(*item));
Status s = (*memory_ptr)->InferStatically(cluster->GetDevices());
if (!s.ok()) {
memory_ptr->reset();
VLOG(1) << "Failed to infer memory usage: " << s.message();
return false;
}
}
const GraphMemory& memory = **memory_ptr;
std::unordered_set<NodeDef*> addn_to_rewrite;
for (const auto& device : cluster->GetDevices()) {
const string& name = device.first;
const DeviceProperties& prop = device.second;
if (prop.memory_size() <= 0) {
VLOG(1) << "Available memory unknown for device " << name;
continue;
}
const GraphMemory::MemoryUsage& mem_usage = memory.GetPeakMemoryUsage(name);
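    // Skip devices whose peak usage stays within 80% of capacity.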
if (mem_usage.used_memory <= prop.memory_size() * 0.8) {
continue;
}
for (const auto& live : mem_usage.live_tensors) {
string tensor_name = strings::StrCat(live.node, ":", live.output_id);
auto it = addn_list.find(tensor_name);
if (it != addn_list.end()) {
addn_to_rewrite.insert(it->second.begin(), it->second.end());
}
}
}
if (addn_to_rewrite.empty()) {
return false;
}
GraphProperties properties(*item);
  Status s = properties.InferStatically(false, false, false);
if (!s.ok()) {
VLOG(1) << "Failed to infer shapes: " << s.message();
return false;
}
GraphTopologyView graph_topology;
Status initialized_topology = graph_topology.InitializeFromGraph(item->graph);
if (!initialized_topology.ok()) {
VLOG(1) << "Failed to initialize graph topology view: "
<< initialized_topology.message();
return false;
}
bool updated_graph = false;
for (NodeDef* node : addn_to_rewrite) {
if (!properties.HasOutputProperties(node->name())) {
VLOG(1) << "Missing properties for " << node->name();
continue;
}
const TensorShapeProto& shape =
properties.GetOutputProperties(node->name())[0].shape();
PartialTensorShape shp(shape);
if (!shp.IsFullyDefined()) {
VLOG(1) << "Shape not fully known for " << node->name();
continue;
}
DataType dtype = node->attr().at("T").type();
if (dtype != DT_HALF && dtype != DT_FLOAT && dtype != DT_DOUBLE &&
dtype != DT_INT64) {
VLOG(1) << "Unsupported dtype for " << node->name();
continue;
}
std::unordered_map<const NodeDef*, int> topo_order;
DfsTraversal(graph_topology, {node}, TraversalDirection::kFollowInputs,
DfsCallbacks::PostOrder([&topo_order](const NodeDef* n) {
int topo_index = static_cast<int>(topo_order.size());
topo_order[n] = topo_index;
}));
std::vector<int> input_topo_index;
for (int i = 0; i < node->input_size(); ++i) {
const string& input = node->input(i);
      const string input_node_name = NodeName(input);
      const NodeDef* input_node = view.GetNode(input_node_name);
      input_topo_index.push_back(topo_order.at(input_node));
}
int min_input_topo_index = INT_MAX;
int min_input_id = -1;
for (int i = 0; i < node->input_size(); ++i) {
if (IsControlInput(node->input(i))) {
break;
}
const int current = input_topo_index[i];
if (current < min_input_topo_index) {
min_input_topo_index = current;
min_input_id = i;
}
}
CHECK_LE(0, min_input_id);
std::vector<string> pre_ctrl_deps;
std::vector<string> post_ctrl_deps;
for (int i = node->input_size() - 1; i >= 0; --i) {
if (!IsControlInput(node->input(i))) {
break;
}
if (input_topo_index[i] < min_input_topo_index) {
pre_ctrl_deps.push_back(node->input(i));
} else {
post_ctrl_deps.push_back(node->input(i));
}
}
const string& device = node->device();
const string tmp_var_name = strings::StrCat(node->name(), "/tmp_var");
if (view.GetNode(tmp_var_name) != nullptr) {
VLOG(1) << "Temporary variable already exists " << tmp_var_name;
return false;
}
NodeDef* tmp_var = item->graph.add_node();
tmp_var->set_name(tmp_var_name);
tmp_var->set_op("TemporaryVariable");
tmp_var->set_device(device);
(*tmp_var->mutable_attr())["dtype"].set_type(dtype);
*(*tmp_var->mutable_attr())["shape"].mutable_shape() = shape;
(*tmp_var->mutable_attr())["var_name"].set_s(tmp_var->name());
for (const string& ctrl_dep : pre_ctrl_deps) {
*tmp_var->add_input() = ctrl_dep;
}
*tmp_var->add_input() =
AsControlDependency(NodeName(node->input(min_input_id)));
NodeDef* zeros = item->graph.add_node();
zeros->set_name(strings::StrCat(node->name(), "/tmp_var_zeros"));
zeros->set_op("ZerosLike");
zeros->set_device(device);
(*zeros->mutable_attr())["T"].set_type(dtype);
*zeros->add_input() = node->input(min_input_id);
NodeDef* initialize = item->graph.add_node();
initialize->set_name(strings::StrCat(node->name(), "/tmp_var_initializer"));
initialize->set_op("Assign");
initialize->set_device(device);
(*initialize->mutable_attr())["T"].set_type(dtype);
(*initialize->mutable_attr())["use_locking"].set_b(false);
(*initialize->mutable_attr())["validate_shape"].set_b(false);
*initialize->add_input() = tmp_var->name();
*initialize->add_input() = zeros->name();
std::vector<NodeDef*> accumulates;
for (int i = 0; i < node->input_size(); ++i) {
const string& input = node->input(i);
if (!IsControlInput(input)) {
NodeDef* accumulate = item->graph.add_node(); | #include "tensorflow/core/grappler/optimizers/memory_optimizer.h"
#include <memory>
#include <unordered_map>
#include <utility>
#include <vector>
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/device_properties.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
class RecomputeSubgraphTest : public GrapplerTest {};
TEST_F(RecomputeSubgraphTest, SimpleSubgraph) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Variable(s.WithOpName("a"), {2, 3, 4}, DT_FLOAT);
Output b = ops::Identity(s.WithOpName("b"), a);
Output c = ops::Identity(s.WithOpName("c"), b);
Output d = ops::AddN(s.WithOpName("gradients/d"), {c});
Output e = ops::AddN(s.WithOpName("gradients/e"), {d, b});
Output f = ops::AddN(s.WithOpName("gradients/f"), {e, a});
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
EXPECT_EQ(6, item.graph.node_size());
NodeMap pre_transform_node_map(&item.graph);
(*pre_transform_node_map.GetNode("b")->mutable_attr())["_recompute_hint"]
.set_i(0);
MemoryOptimizer optimizer(RewriterConfig::MANUAL);
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
NodeMap post_transform_node_map(&output);
EXPECT_EQ(8, output.node_size());
NodeDef* transformed_e = post_transform_node_map.GetNode(e.name());
EXPECT_EQ(2, transformed_e->input_size());
EXPECT_EQ("gradients/d", transformed_e->input(0));
EXPECT_EQ("Recomputed/b", transformed_e->input(1));
NodeDef* recomputed_b = post_transform_node_map.GetNode("Recomputed/b");
EXPECT_EQ(2, recomputed_b->input_size());
EXPECT_EQ("a", recomputed_b->input(0));
EXPECT_EQ("^RecomputeTrigger/b", recomputed_b->input(1));
NodeDef* recompute_trigger =
post_transform_node_map.GetNode("RecomputeTrigger/b");
EXPECT_EQ(1, recompute_trigger->input_size());
EXPECT_EQ("^gradients/d", recompute_trigger->input(0));
}
TEST_F(RecomputeSubgraphTest, NoFeedsRecomputed) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Variable(s.WithOpName("a"), {2, 3, 4}, DT_FLOAT);
Output b = ops::Identity(s.WithOpName("b"), a);
Output c = ops::Identity(s.WithOpName("c"), b);
Output d = ops::AddN(s.WithOpName("gradients/d"), {c});
Output e = ops::AddN(s.WithOpName("gradients/e"), {d, b});
Output f = ops::AddN(s.WithOpName("gradients/f"), {e, a});
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.feed.emplace_back("b", Tensor());
EXPECT_EQ(6, item.graph.node_size());
NodeMap pre_transform_node_map(&item.graph);
(*pre_transform_node_map.GetNode("b")->mutable_attr())["_recompute_hint"]
.set_i(0);
MemoryOptimizer optimizer(RewriterConfig::MANUAL);
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(6, output.node_size());
}
TEST_F(RecomputeSubgraphTest, TwoInputSubgraphs) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Variable(s.WithOpName("a"), {2, 3, 4}, DT_FLOAT);
Output b = ops::Variable(s.WithOpName("b"), {2, 3, 4}, DT_FLOAT);
Output d = ops::AddN(
s.WithOpName("some_name_scope/gradients/two_subgraph_inputs"), {a, b});
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
EXPECT_EQ(3, item.graph.node_size());
NodeMap pre_transform_node_map(&item.graph);
(*pre_transform_node_map.GetNode("a")->mutable_attr())["_recompute_hint"]
.set_i(0);
(*pre_transform_node_map.GetNode("b")->mutable_attr())["_recompute_hint"]
.set_i(0);
MemoryOptimizer optimizer(RewriterConfig::MANUAL,
"some_name_scope/gradients");
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
NodeMap post_transform_node_map(&output);
EXPECT_EQ(7, output.node_size());
EXPECT_NE(post_transform_node_map.GetNode("Recomputed/a"), nullptr);
EXPECT_NE(post_transform_node_map.GetNode("Recomputed/b"), nullptr);
EXPECT_NE(post_transform_node_map.GetNode("RecomputeTrigger/a"), nullptr);
EXPECT_NE(post_transform_node_map.GetNode("RecomputeTrigger/b"), nullptr);
}
TEST_F(RecomputeSubgraphTest, MultiNode) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Variable(s.WithOpName("Conv"), {2, 3, 4}, DT_FLOAT);
Output b = ops::Identity(s.WithOpName("BN"), a);
Output c = ops::Identity(s.WithOpName("ReLU"), b);
Output d = ops::Identity(s.WithOpName("Conv1"), c);
Output trigger = ops::AddN(s.WithOpName("gradients/BN1Grad"), {d});
Output e = ops::AddN(s.WithOpName("gradients/Conv1Grad"), {trigger, c});
Output f = ops::AddN(s.WithOpName("gradients/ReLUGrad"), {e, c});
Output g = ops::AddN(s.WithOpName("gradients/BNGrad"), {f, a});
Output h = ops::AddN(s.WithOpName("gradients/ConvGrad"), {g});
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
EXPECT_EQ(9, item.graph.node_size());
NodeMap pre_transform_node_map(&item.graph);
pre_transform_node_map.GetNode("BN")->set_op("FusedBatchNorm");
pre_transform_node_map.GetNode("ReLU")->set_op("Relu");
MemoryOptimizer optimizer(RewriterConfig::RECOMPUTATION_HEURISTICS);
GraphDef first_pass_output;
Status first_pass_status =
optimizer.Optimize(nullptr, item, &first_pass_output);
TF_EXPECT_OK(first_pass_status);
NodeMap post_transform_node_map(&first_pass_output);
EXPECT_EQ(13, first_pass_output.node_size());
NodeDef* transformed_e = post_transform_node_map.GetNode(e.name());
EXPECT_EQ(2, transformed_e->input_size());
EXPECT_EQ("gradients/BN1Grad", transformed_e->input(0));
EXPECT_EQ("Recomputed/ReLU", transformed_e->input(1));
NodeDef* transformed_f = post_transform_node_map.GetNode(f.name());
EXPECT_EQ(2, transformed_f->input_size());
EXPECT_EQ("gradients/Conv1Grad", transformed_f->input(0));
EXPECT_EQ("Recomputed/ReLU", transformed_f->input(1));
NodeDef* transformed_g = post_transform_node_map.GetNode(g.name());
EXPECT_EQ(2, transformed_g->input_size());
EXPECT_EQ("gradients/ReLUGrad", transformed_g->input(0));
EXPECT_EQ("Conv", transformed_g->input(1));
NodeDef* recomputed_b = post_transform_node_map.GetNode("Recomputed/BN");
EXPECT_EQ(2, recomputed_b->input_size());
EXPECT_EQ("Conv", recomputed_b->input(0));
EXPECT_EQ("^RecomputeTrigger/BN", recomputed_b->input(1));
NodeDef* recompute_trigger_b =
post_transform_node_map.GetNode("RecomputeTrigger/BN");
EXPECT_EQ(1, recompute_trigger_b->input_size());
EXPECT_EQ("^RecomputeTrigger/ReLU", recompute_trigger_b->input(0));
NodeDef* recomputed_c = post_transform_node_map.GetNode("Recomputed/ReLU");
EXPECT_EQ(2, recomputed_c->input_size());
EXPECT_EQ("Recomputed/BN", recomputed_c->input(0));
EXPECT_EQ("^RecomputeTrigger/ReLU", recomputed_c->input(1));
NodeDef* recompute_trigger_c =
post_transform_node_map.GetNode("RecomputeTrigger/ReLU");
EXPECT_EQ(1, recompute_trigger_c->input_size());
EXPECT_EQ("^gradients/BN1Grad", recompute_trigger_c->input(0));
}
class MemoryOptimizerTest : public GrapplerTest {
public:
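  // Builds a two-device virtual cluster (CPU + GPU, 1 MiB of memory each) so
  // the optimizer's memory-based heuristics have concrete devices to plan
  // against.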
static std::unique_ptr<VirtualCluster> CreateVirtualCluster() {
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
cpu_device.set_frequency(1000);
cpu_device.set_num_cores(4);
cpu_device.set_bandwidth(32);
cpu_device.set_memory_size(1024 * 1024);
DeviceProperties gpu_device;
gpu_device.set_type("GPU");
gpu_device.set_frequency(1000);
gpu_device.set_num_cores(24);
gpu_device.set_bandwidth(128);
gpu_device.set_memory_size(1024 * 1024);
gpu_device.mutable_environment()->insert({"architecture", "6"});
std::unordered_map<string, DeviceProperties> devices;
devices["/job:localhost/replica:0/task:0/cpu:0"] = cpu_device;
devices["/job:localhost/replica:0/task:0/gpu:0"] = gpu_device;
    return std::make_unique<VirtualCluster>(devices);
}
};
TEST_F(MemoryOptimizerTest, SimpleSwapping) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a =
ops::Variable(s.WithOpName("a").WithDevice("/gpu:0"), {10, 10}, DT_FLOAT);
Output b = ops::AddN(s.WithOpName("b").WithDevice("/gpu:0"), {a});
Output c = ops::AddN(s.WithOpName("c").WithDevice("/gpu:0"), {b});
Output d = ops::AddN(s.WithOpName("d").WithDevice("/gpu:0"), {c});
Output e = ops::AddN(s.WithOpName("e").WithDevice("/gpu:0"), {b, d});
Output constant = ops::Const(s.WithOpName("constant"), 0.0f, {10, 10});
Output init = ops::Assign(s.WithOpName("init"), a, constant);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"e"};
EXPECT_EQ(7, item.graph.node_size());
EXPECT_EQ(NodeName(e.name()), item.graph.node(4).name());
AttrValue& val =
(*item.graph.mutable_node(4)->mutable_attr())["_swap_to_host"];
val.mutable_list()->add_i(0);
std::unique_ptr<VirtualCluster> cluster(CreateVirtualCluster());
MemoryOptimizer optimizer(RewriterConfig::MANUAL);
GraphDef output;
Status status = optimizer.Optimize(cluster.get(), item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(9, output.node_size());
const NodeDef& new_e = output.node(6);
EXPECT_EQ(NodeName(e.name()), new_e.name());
EXPECT_EQ(2, new_e.input_size());
EXPECT_EQ(NodeName(d.name()), new_e.input(1));
EXPECT_EQ("swap_in_e_0", new_e.input(0));
const NodeDef& swap_out = output.node(7);
EXPECT_EQ("swap_out_e_0", swap_out.name());
EXPECT_EQ("_CopyFromGpuToHost", swap_out.op());
const NodeDef& swap_in = output.node(8);
EXPECT_EQ("swap_in_e_0", swap_in.name());
EXPECT_EQ("_CopyFromHostToGpu", swap_in.op());
EXPECT_EQ(NodeName(b.name()), swap_out.input(0));
EXPECT_EQ(NodeName(swap_out.name()), swap_in.input(0));
EXPECT_EQ("^c", swap_in.input(1));
const NodeDef& new_c = output.node(4);
EXPECT_EQ(NodeName(c.name()), new_c.name());
EXPECT_EQ("^swap_out_e_0", new_c.input(1));
GrapplerItem item_copy = item.WithGraph(std::move(output));
status = optimizer.Optimize(cluster.get(), item_copy, &output);
TF_EXPECT_OK(status);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
item.fetch = {"e"};
item.init_ops = {init.name()};
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
#endif
}
TEST_F(MemoryOptimizerTest, SwappingHeuristics) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output v = ops::Variable(s.WithOpName("v").WithDevice("/gpu:0"),
{128, 128, 8}, DT_FLOAT);
Output a = ops::Identity(s.WithOpName("a").WithDevice("/gpu:0"), v);
Output b = ops::Square(s.WithOpName("b").WithDevice("/gpu:0"), v);
Output c = ops::Sqrt(s.WithOpName("c").WithDevice("/gpu:0"), a);
Output d = ops::Identity(s.WithOpName("d").WithDevice("/gpu:0"), b);
Output axis = ops::Const(s.WithOpName("axis"), 0);
Output e =
ops::Concat(s.WithOpName("e").WithDevice("/gpu:0"), {a, b, c, d}, axis);
Output f = ops::Square(s.WithOpName("f").WithDevice("/gpu:0"), a);
Output g = ops::Sqrt(s.WithOpName("g").WithDevice("/gpu:0"), b);
Output h = ops::Exp(s.WithOpName("h").WithDevice("/gpu:0"), c);
Output i = ops::Log(s.WithOpName("i").WithDevice("/gpu:0"), d);
Output constant = ops::Const(s.WithOpName("constant"), 0.0f, {128, 128, 8});
Output init = ops::Assign(s.WithOpName("init"), v, constant);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"e", "f", "g", "h", "i"};
item.init_ops = {init.name()};
std::unique_ptr<VirtualCluster> cluster(CreateVirtualCluster());
MemoryOptimizer optimizer(RewriterConfig::SWAPPING_HEURISTICS);
GraphDef output;
Status status = optimizer.Optimize(cluster.get(), item, &output);
TF_EXPECT_OK(status);
for (const auto& node : output.node()) {
if (node.name() == "e") {
EXPECT_EQ(5, node.input_size());
EXPECT_EQ("a", node.input(0));
EXPECT_EQ("swap_in_e_1", node.input(1));
EXPECT_EQ("swap_in_e_2", node.input(2));
EXPECT_EQ("swap_in_e_3", node.input(3));
EXPECT_EQ("axis", node.input(4));
}
}
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorEqual<float>(tensors_expected[i], tensors[i]);
}
#endif
}
TEST_F(MemoryOptimizerTest, UnswappableInputs) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output v = ops::Variable(s.WithOpName("v").WithDevice("/gpu:0"),
{128, 128, 8}, DT_FLOAT);
Output a = ops::Square(s.WithOpName("a").WithDevice("/gpu:0"), v);
Output b = ops::Identity(s.WithOpName("b").WithDevice("/gpu:0"), {a});
Output c = ops::Identity(s.WithOpName("c").WithDevice("/gpu:0"), {a});
Output index = ops::Const(s.WithOpName("index"), {0});
Output indices = ops::Tile(s.WithOpName("indices"), index, {128});
Output d =
ops::ScatterAdd(s.WithOpName("d").WithDevice("/gpu:0"), v, indices, c);
Output axis = ops::Const(s.WithOpName("axis"), 0);
Output e =
ops::Concat(s.WithOpName("e").WithDevice("/gpu:0"), {b, c, d}, axis);
Output constant = ops::Const(s.WithOpName("constant"), 0.0f, {128, 128, 8});
Output init = ops::Assign(s.WithOpName("init"), v, constant);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"e"};
item.init_ops = {init.name()};
std::unique_ptr<VirtualCluster> cluster(CreateVirtualCluster());
MemoryOptimizer optimizer(RewriterConfig::SWAPPING_HEURISTICS);
GraphDef output;
Status status = optimizer.Optimize(cluster.get(), item, &output);
TF_EXPECT_OK(status);
for (const auto& node : output.node()) {
if (node.name() == "e") {
EXPECT_EQ(5, node.input_size());
EXPECT_EQ("d", node.input(2));
EXPECT_EQ("^swap_out_d_2", node.input(4));
}
}
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
#endif
}
TEST_F(MemoryOptimizerTest, AccumulationRewrites) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::RandomNormal(s.WithOpName("a").WithDevice("/cpu:0"),
{128, 128, 8}, DT_FLOAT);
Output b = ops::RandomNormal(s.WithOpName("b").WithDevice("/cpu:0"),
{128, 128, 8}, DT_FLOAT);
Output c = ops::RandomNormal(s.WithOpName("c").WithDevice("/cpu:0"),
{128, 128, 8}, DT_FLOAT);
Output d = ops::AddN(s.WithOpName("d").WithDevice("/cpu:0"), {a, b, c});
Output e = ops::Square(s.WithOpName("e").WithDevice("/cpu:0"), d);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"e"};
std::unique_ptr<VirtualCluster> cluster(CreateVirtualCluster());
MemoryOptimizer optimizer(RewriterConfig::SCHEDULING_HEURISTICS);
GraphDef output;
Status status = optimizer.Optimize(cluster.get(), item, &output);
TF_EXPECT_OK(status);
int count = 0;
for (const auto& node : output.node()) {
if (node.name() == "d") {
EXPECT_EQ("DestroyTemporaryVariable", node.op());
count++;
} else if (node.name() == "d/tmp_var_initializer") {
EXPECT_EQ("Assign", node.op());
count++;
} else if (node.name() == "d/tmp_var") {
EXPECT_EQ("TemporaryVariable", node.op());
count++;
} else if (node.name() == "e") {
EXPECT_EQ("Square", node.op());
EXPECT_EQ("d", node.input(0));
count++;
}
}
EXPECT_EQ(4, count);
std::vector<string> fetch = {"a", "b", "c", "e"};
auto tensors = EvaluateNodes(output, fetch, {});
EXPECT_EQ(4, tensors.size());
for (int i = 0; i < tensors[0].NumElements(); ++i) {
float actual = tensors[3].flat<float>()(i);
float expected = 0.0f;
for (int j = 0; j < 3; ++j) {
expected += tensors[j].flat<float>()(i);
}
expected *= expected;
EXPECT_NEAR(actual, expected, 1e-4);
}
}
class RelaxAllocatorConstraintsTest : public GrapplerTest {};
TEST_F(RelaxAllocatorConstraintsTest, SameDevice) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output constant = ops::Const(s.WithOpName("constant").WithDevice("/cpu:0"),
-3.14f, {128, 128});
Output variable = ops::Variable(s.WithOpName("variable").WithDevice("/cpu:0"),
{128, 128}, DT_FLOAT);
Output assign = ops::Assign(s.WithOpName("assign").WithDevice("/cpu:0"),
variable, constant);
Output exp = ops::Exp(s.WithOpName("exp").WithDevice("/cpu:0"), assign);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
MemoryOptimizer optimizer(RewriterConfig::MANUAL);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto node = output.node(2);
EXPECT_EQ("assign", node.name());
EXPECT_EQ(1, node.attr().count("_grappler_relax_allocator_constraints"));
EXPECT_EQ(true, node.attr().at("_grappler_relax_allocator_constraints").b());
item.fetch = {"exp"};
item.init_ops = {"variable"};
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
TEST_F(RelaxAllocatorConstraintsTest, DifferentDevice) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output constant = ops::Const(s.WithOpName("constant").WithDevice("/cpu:0"),
-3.14f, {128, 128});
Output variable = ops::Variable(s.WithOpName("variable").WithDevice("/cpu:0"),
{128, 128}, DT_FLOAT);
Output assign = ops::Assign(s.WithOpName("assign").WithDevice("/cpu:0"),
variable, constant);
Output exp = ops::Exp(s.WithOpName("exp").WithDevice("/gpu:0"), assign);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
MemoryOptimizer optimizer(RewriterConfig::MANUAL);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto node = output.node(2);
EXPECT_EQ("assign", node.name());
EXPECT_EQ(0, node.attr().count("_grappler_relax_allocator_constraints"));
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
item.fetch = {"exp"};
item.init_ops = {"variable"};
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
#endif
}
TEST_F(RelaxAllocatorConstraintsTest, SameDeviceType) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output constant = ops::Const(s.WithOpName("constant").WithDevice("/cpu:0"),
-3.14f, {128, 128});
Output variable = ops::Variable(s.WithOpName("variable").WithDevice("/cpu:0"),
{128, 128}, DT_FLOAT);
Output assign = ops::Assign(s.WithOpName("assign").WithDevice("/cpu:0"),
variable, constant);
Output exp = ops::Exp(s.WithOpName("exp").WithDevice("/cpu:1"), assign);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
MemoryOptimizer optimizer(RewriterConfig::MANUAL);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto node = output.node(2);
EXPECT_EQ("assign", node.name());
EXPECT_EQ(1, node.attr().count("_grappler_relax_allocator_constraints"));
EXPECT_TRUE(node.attr().at("_grappler_relax_allocator_constraints").b());
}
TEST_F(RelaxAllocatorConstraintsTest, SendNode) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output constant = ops::Const(s.WithOpName("constant").WithDevice("/cpu:0"),
-3.14f, {128, 128});
Output variable = ops::Variable(s.WithOpName("variable").WithDevice("/cpu:0"),
{128, 128}, DT_FLOAT);
Output assign = ops::Assign(s.WithOpName("assign").WithDevice("/cpu:0"),
variable, constant);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
NodeDef* send = item.graph.add_node();
send->set_name("send");
send->set_op("_Send");
send->add_input("assign");
MemoryOptimizer optimizer(RewriterConfig::MANUAL);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto node = output.node(2);
EXPECT_EQ("assign", node.name());
EXPECT_EQ(0, node.attr().count("_grappler_relax_allocator_constraints"));
}
TEST_F(RelaxAllocatorConstraintsTest, AssignNodeInFanout) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output constant0 = ops::Const(s.WithOpName("constant0").WithDevice("/cpu:0"),
-42.0f, {128, 128});
Output variable0 = ops::Variable(
s.WithOpName("variable0").WithDevice("/cpu:0"), {128, 128}, DT_FLOAT);
Output assign0 = ops::Assign(s.WithOpName("assign0").WithDevice("/cpu:0"),
variable0, constant0);
Output assign2 = ops::Assign(s.WithOpName("assign2").WithDevice("/cpu:0"),
variable0, constant0);
Output assign3 = ops::Assign(s.WithOpName("assign3").WithDevice("/cpu:0"),
variable0, constant0);
Output assign4 = ops::Assign(s.WithOpName("assign4").WithDevice("/cpu:0"),
variable0, constant0);
Output rank_cpu =
ops::Rank(s.WithOpName("rank_cpu").WithDevice("/cpu:0"), assign3);
Output exp_cpu =
ops::Exp(s.WithOpName("exp_cpu").WithDevice("/cpu:0"), assign4);
Output rank_gpu = ops::Rank(s.WithOpName("rank_gpu")
.WithDevice("/gpu:0")
.WithControlDependencies(assign2),
assign0);
Output id_gpu = ops::Identity(s.WithOpName("id_gpu"), rank_cpu);
Output id_gpu2 = ops::Identity(s.WithOpName("id_gpu2"), exp_cpu);
Output variable_gpu = ops::Variable(
s.WithOpName("variable_gpu").WithDevice("/gpu:0"), {128, 128}, DT_FLOAT);
Output assign_gpu = ops::Assign(
s.WithOpName("assign_gpu").WithDevice("/gpu:0"), variable_gpu, exp_cpu);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"assign0", "assign_gpu", "rank_gpu", "id_gpu", "id_gpu2"};
MemoryOptimizer optimizer(RewriterConfig::MANUAL);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto node = output.node(3);
EXPECT_EQ("assign0", node.name());
EXPECT_EQ(0, node.attr().count("_grappler_relax_allocator_constraints"));
node = output.node(4);
EXPECT_EQ("assign2", node.name());
EXPECT_EQ(1, node.attr().count("_grappler_relax_allocator_constraints"));
  EXPECT_TRUE(node.attr().at("_grappler_relax_allocator_constraints").b());
node = output.node(5);
EXPECT_EQ("assign3", node.name());
EXPECT_EQ(1, node.attr().count("_grappler_relax_allocator_constraints"));
  EXPECT_TRUE(node.attr().at("_grappler_relax_allocator_constraints").b());
node = output.node(6);
EXPECT_EQ("assign4", node.name());
EXPECT_EQ(0, node.attr().count("_grappler_relax_allocator_constraints"));
node = output.node(12);
EXPECT_EQ("assign_gpu", node.name());
EXPECT_EQ(1, node.attr().count("_grappler_relax_allocator_constraints"));
  EXPECT_TRUE(node.attr().at("_grappler_relax_allocator_constraints").b());
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
item.init_ops = {"exp_cpu", "variable_gpu"};
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
for (int i = 0; i < tensors_expected.size(); ++i) {
if (i == 2 || i == 3) {
test::ExpectTensorEqual<int>(tensors_expected[i], tensors[i]);
} else {
test::ExpectTensorEqual<float>(tensors_expected[i], tensors[i]);
}
}
#endif
}
}
}
} |
1,378 | cpp | tensorflow/tensorflow | dependency_optimizer | tensorflow/core/grappler/optimizers/dependency_optimizer.cc | tensorflow/core/grappler/optimizers/dependency_optimizer_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DEPENDENCY_OPTIMIZER_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DEPENDENCY_OPTIMIZER_H_
#include <memory>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace grappler {
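// Optimizes the dependency structure of a GraphDef: converts eligible nodes
// into NoOps, bypasses/removes Identity and NoOp nodes, and prunes redundant
// control edges via transitive reduction. Typical usage (sketch, mirroring
// the unit tests):
//   DependencyOptimizer optimizer;
//   GraphDef output;
//   TF_RETURN_IF_ERROR(optimizer.Optimize(cluster, item, &output));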
class DependencyOptimizer : public GraphOptimizer {
public:
DependencyOptimizer() {}
explicit DependencyOptimizer(RewriterConfig::Toggle opt_level) {}
~DependencyOptimizer() override {}
  string name() const override { return "dependency_optimizer"; }
bool UsesFunctionLibrary() const override { return false; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override;
private:
bool BypassingNodeIsBeneficial(
const NodeDef& node, const std::vector<NodeDef*>& input_nodes,
const std::vector<NodeDef*>& output_nodes) const;
int NumEdgesIfBypassed(const NodeDef& node,
const std::vector<NodeDef*>& output_nodes) const;
bool SafeToRemoveIdentity(const NodeDef& node) const;
bool SafeToConvertToNoOp(const NodeDef& node) const;
void CleanControlInputs();
void BuildNodeToIdx();
void OptimizeNode(int node_idx, SetVector<int>* nodes_to_simplify,
std::set<int>* nodes_to_delete);
Status TransitiveReduction();
Status OptimizeDependencies();
void GroupCrossDeviceControlEdges(bool host_granularity);
bool fetch_nodes_known_;
std::unordered_set<string> nodes_to_preserve_;
std::unique_ptr<NodeMap> node_map_;
std::unordered_map<const NodeDef*, int> node_to_idx_;
GraphDef* optimized_graph_;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/dependency_optimizer.h"
#include <algorithm>
#include <deque>
#include <map>
#include <memory>
#include <set>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/match.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/constant_folding.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace grappler {
namespace {
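// Removes a single control input from `node` if present, updating `node_map`.
// Returns true if an input was removed. Relies on control inputs being packed
// at the end of the input list.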
bool RemoveControlInput(NodeDef* node, const string& control_input_to_remove,
NodeMap* node_map) {
for (int pos = node->input_size() - 1; pos >= 0; --pos) {
const string& input = node->input(pos);
if (input[0] != '^') break;
if (input == control_input_to_remove) {
node->mutable_input()->SwapElements(pos, node->input_size() - 1);
node->mutable_input()->RemoveLast();
node_map->RemoveOutput(NodeName(input), node->name());
return true;
}
}
return false;
}
}
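// Returns true if it is safe to remove/bypass the given Identity(N) node: it
// must not be in the preserve set, the fetch set must be known, it must not
// forward a Variable or _Recv output, and it must not anchor control
// dependencies that guard a Switch output.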
bool DependencyOptimizer::SafeToRemoveIdentity(const NodeDef& node) const {
if (!IsIdentity(node) && !IsIdentityN(node)) {
return true;
}
if (nodes_to_preserve_.find(node.name()) != nodes_to_preserve_.end()) {
return false;
}
if (!fetch_nodes_known_) {
return false;
}
if (node.input_size() < 1) {
return false;
}
const NodeDef* input = node_map_->GetNode(NodeName(node.input(0)));
if (input == nullptr) {
VLOG(1) << "node = " << node.name() << " input = " << node.input(0);
return false;
}
if (IsVariable(*input) || IsRecv(*input)) {
return false;
}
for (const auto& consumer : node_map_->GetOutputs(node.name())) {
if (node.input_size() > 1 && (IsRetval(*consumer) || IsMerge(*consumer))) {
return false;
}
if (IsSwitch(*input)) {
for (const string& consumer_input : consumer->input()) {
if (consumer_input == AsControlDependency(node.name())) {
return false;
}
}
}
}
return true;
}
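// Returns true if `node` can be replaced by a NoOp: it has no regular (data)
// fanouts, is not preserved, has no side effects (variable reads and gathers
// excepted), and is not one of a small set of ops (Switch/Merge, frame ops,
// Assert, _Retval, TPU ops, ...) whose presence carries meaning beyond their
// outputs.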
bool DependencyOptimizer::SafeToConvertToNoOp(const NodeDef& node) const {
if (HasRegularOutputs(node, *node_map_)) {
VLOG(3) << "Not safe to convert '" << node.name()
<< " to NoOp. Node has outputs.";
return false;
}
if (!fetch_nodes_known_) {
VLOG(3) << "Not safe to convert '" << node.name()
<< " to NoOp. Fetches unknown.";
return false;
}
if (nodes_to_preserve_.find(node.name()) != nodes_to_preserve_.end()) {
VLOG(3) << "Not safe to convert to NoOp: " << node.name()
<< " is in preserve set.";
return false;
}
if (IsMerge(node) || IsSwitch(node) || ModifiesFrameInfo(node)) {
VLOG(3) << "Not safe to convert '" << node.name()
<< " to NoOp. Node modifies frame info.";
return false;
}
static const absl::flat_hash_set<string>* gather_ops =
new absl::flat_hash_set<string>{"Gather", "GatherV2", "GatherNd",
"ResourceGather", "ResourceGatherNd"};
const bool is_variable_read =
IsReadVariableOp(node) || IsReadVariablesOp(node) ||
gather_ops->find(node.op()) != gather_ops->end();
if (!is_variable_read && !IsFreeOfSideEffect(node)) {
VLOG(3) << "Not safe to convert '" << node.name()
<< " to NoOp. Node has side effect.";
return false;
}
if (absl::StartsWith(node.op(), "Submodel")) {
return false;
}
const OpDef* op_def = nullptr;
Status status = OpRegistry::Global()->LookUpOpDef(node.op(), &op_def);
if (!status.ok() || op_def->output_arg_size() == 0) {
return false;
}
const std::unordered_set<string> do_not_rewrite_ops{
"Assert", "CheckNumerics", "_Retval",
"_Arg", "_ParallelConcatUpdate", "TPUExecute",
"TPUCompile", "ControlTrigger"};
if (do_not_rewrite_ops.find(node.op()) != do_not_rewrite_ops.end()) {
return false;
}
if (!SafeToRemoveIdentity(node)) {
return false;
}
return true;
}
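// Estimates how many edges the graph would have if `node` were bypassed,
// i.e. if every consumer were connected directly to every relevant input.
// Multi-input IdentityN nodes are counted port by port; for all other nodes
// the answer is simply num_inputs * num_outputs.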
int DependencyOptimizer::NumEdgesIfBypassed(
const NodeDef& node, const std::vector<NodeDef*>& output_nodes) const {
const bool is_multi_input_identity_n =
IsIdentityN(node) && !IsIdentityNSingleInput(node);
const int num_outputs = output_nodes.size();
const int num_inputs = node.input_size();
if (is_multi_input_identity_n) {
int num_edges_if_bypassed(0);
for (const string& input_node_name : node.input()) {
if (IsControlInput(input_node_name)) {
num_edges_if_bypassed += num_outputs;
} else {
++num_edges_if_bypassed;
}
}
for (auto consumer : output_nodes) {
for (int j = 0; j < consumer->input_size(); ++j) {
const TensorId consumer_input = ParseTensorName(consumer->input(j));
if (consumer_input.node() == node.name()) {
if (IsControlInput(consumer_input)) {
num_edges_if_bypassed += num_inputs;
} else {
++num_edges_if_bypassed;
}
}
}
}
return num_edges_if_bypassed;
} else {
return num_inputs * num_outputs;
}
}
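// Heuristic: bypassing `node` is beneficial only if it does not increase the
// total number of edges or the number of cross-device transitions.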
bool DependencyOptimizer::BypassingNodeIsBeneficial(
const NodeDef& node, const std::vector<NodeDef*>& input_nodes,
const std::vector<NodeDef*>& output_nodes) const {
const bool is_identity = IsIdentity(node) || IsIdentityNSingleInput(node);
const bool is_multi_input_identity_n =
IsIdentityN(node) && !IsIdentityNSingleInput(node);
const int num_outputs = output_nodes.size();
const int num_inputs = node.input_size();
if (NumEdgesIfBypassed(node, output_nodes) > num_inputs + num_outputs) {
return false;
}
if ((num_inputs == 1 && num_outputs > 1 &&
input_nodes[0]->device() != node.device()) ||
(num_inputs > 1 && num_outputs == 1 &&
output_nodes[0]->device() != node.device())) {
return false;
}
const string& node_dev = node.device();
int num_cross_in = 0;
for (NodeDef* input_node : input_nodes) {
num_cross_in += static_cast<int>(input_node->device() != node_dev);
}
int num_cross_out = 0;
for (NodeDef* output_node : output_nodes) {
num_cross_out += static_cast<int>(output_node->device() != node_dev);
}
const int num_cross_before = num_cross_in + num_cross_out;
int num_cross_after = 0;
for (NodeDef* input_node : input_nodes) {
for (NodeDef* output_node : output_nodes) {
num_cross_after +=
static_cast<int>(input_node->device() != output_node->device());
}
}
if (num_cross_after > num_cross_before) {
return false;
}
if ((is_identity || is_multi_input_identity_n) && num_cross_in > 0 &&
num_cross_out > 0 && num_cross_after > 0) {
return false;
}
return true;
}
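// Core rewrite for a single node: strips obsolete control edges hanging off
// input-free constants, converts eligible nodes to NoOps, and bypasses
// NoOp/Identity nodes by rerouting their consumers, queueing every affected
// node for further simplification.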
void DependencyOptimizer::OptimizeNode(int node_idx,
SetVector<int>* nodes_to_simplify,
std::set<int>* nodes_to_delete) {
NodeDef* node = optimized_graph_->mutable_node(node_idx);
const bool is_noop = IsNoOp(*node);
const bool is_identity = IsIdentity(*node) || IsIdentityNSingleInput(*node);
const bool is_multi_input_identity =
IsIdentityN(*node) && !IsIdentityNSingleInput(*node);
const string node_name = node->name();
if (IsConstant(*node) && node->input_size() == 0) {
const auto output_nodes = node_map_->GetOutputs(node_name);
for (NodeDef* fanout : output_nodes) {
bool optimize_fanout = false;
bool data_connection = false;
for (int i = fanout->input_size() - 1; i >= 0; --i) {
const TensorId input_tensor = ParseTensorName(fanout->input(i));
if (input_tensor.node() == node_name) {
if (input_tensor.index() < 0) {
fanout->mutable_input()->SwapElements(i, fanout->input_size() - 1);
fanout->mutable_input()->RemoveLast();
optimize_fanout = true;
} else {
data_connection = true;
}
}
}
if (optimize_fanout) {
nodes_to_simplify->PushBack(node_to_idx_[fanout]);
if (!data_connection) {
node_map_->RemoveOutput(node_name, fanout->name());
}
}
}
if (node_map_->GetOutputs(node_name).empty() && fetch_nodes_known_ &&
nodes_to_preserve_.find(node_name) == nodes_to_preserve_.end()) {
nodes_to_delete->insert(node_to_idx_[node]);
}
return;
}
if (!is_noop && SafeToConvertToNoOp(*node)) {
VLOG(2) << "***** Replacing " << node_name << " (" << node->op()
<< ") with NoOp.";
std::unordered_set<string> ctrl_inputs;
int pos = 0;
while (pos < node->input_size()) {
const string old_input = node->input(pos);
if (IsControlInput(old_input)) {
if (!ctrl_inputs.insert(old_input).second) {
node->mutable_input()->SwapElements(pos, node->input_size() - 1);
node->mutable_input()->RemoveLast();
} else {
++pos;
}
continue;
}
const string ctrl_input = ConstantFolding::AddControlDependency(
old_input, optimized_graph_, node_map_.get());
ctrl_inputs.insert(ctrl_input);
node->set_input(pos, ctrl_input);
node_map_->UpdateInput(node_name, old_input, ctrl_input);
const NodeDef* old_input_node = node_map_->GetNode(old_input);
nodes_to_simplify->PushBack(node_to_idx_[old_input_node]);
++pos;
}
ChangeToNoOp(node);
EraseRegularNodeAttributes(node);
DedupControlInputs(node);
nodes_to_simplify->PushBack(node_to_idx_[node]);
return;
}
if (is_noop || ((is_identity || is_multi_input_identity) &&
SafeToRemoveIdentity(*node))) {
const int num_inputs = node->input_size();
std::vector<NodeDef*> input_nodes;
for (int i = 0; i < num_inputs; ++i) {
NodeDef* input_node = node_map_->GetNode(node->input(i));
if (input_node == nullptr) {
LOG(ERROR) << "Invalid input " << node->input(i);
return;
}
input_nodes.push_back(input_node);
}
const auto& output_node_set = node_map_->GetOutputs(node_name);
const std::vector<NodeDef*> output_nodes(output_node_set.begin(),
output_node_set.end());
if (!BypassingNodeIsBeneficial(*node, input_nodes, output_nodes)) {
return;
}
VLOG(2) << "***** Rerouting input around\n" << node->DebugString();
for (auto consumer : output_nodes) {
bool updated_consumer = false;
VLOG(2) << "consumer before:\n" << consumer->DebugString();
for (int i = 0; i < num_inputs; ++i) {
const NodeDef* input = input_nodes[i];
if ((is_identity && i == 0) ||
(is_multi_input_identity && !IsControlInput(node->input(i)))) {
string new_input;
const string& input_to_forward = node->input(i);
CHECK(!IsControlInput(input_to_forward));
for (int j = 0; j < consumer->input_size(); ++j) {
const TensorId old_input = ParseTensorName(consumer->input(j));
if (old_input.node() == node_name) {
if (old_input.index() == i) {
new_input = input_to_forward;
node_map_->UpdateInput(consumer->name(),
string(old_input.node()), new_input);
consumer->set_input(j, new_input);
} else if (old_input.index() == -1) {
new_input = AsControlDependency(NodeName(input_to_forward));
node_map_->UpdateInput(consumer->name(),
string(old_input.node()), new_input);
consumer->set_input(j, new_input);
}
}
}
updated_consumer = true;
} else {
if (node_map_->GetOutputs(input->name()).count(consumer) == 0) {
consumer->add_input(AsControlDependency(input->name()));
node_map_->AddOutput(input->name(), consumer->name());
nodes_to_simplify->PushBack(node_to_idx_[input]);
updated_consumer = true;
}
}
}
updated_consumer |= RemoveControlInput(
consumer, AsControlDependency(node_name), node_map_.get());
if (updated_consumer) {
nodes_to_simplify->PushBack(node_to_idx_[consumer]);
}
VLOG(2) << "consumer after:\n" << consumer->DebugString();
}
node_map_->RemoveOutputs(node_name);
if (fetch_nodes_known_ &&
nodes_to_preserve_.find(node_name) == nodes_to_preserve_.end()) {
nodes_to_delete->insert(node_idx);
node_map_->RemoveInputs(node_name);
node->clear_input();
}
}
}
void DependencyOptimizer::CleanControlInputs() {
for (int i = 0; i < optimized_graph_->node_size(); ++i) {
DedupControlInputs(optimized_graph_->mutable_node(i));
}
}
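// Drives OptimizeNode() to a fixed point over a worklist of candidate nodes,
// then erases the nodes that became dead (only when the fetch set is known).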
Status DependencyOptimizer::OptimizeDependencies() {
SetVector<int> nodes_to_simplify;
std::set<int> nodes_to_delete;
for (int i = 0; i < optimized_graph_->node_size(); ++i) {
const NodeDef& node = optimized_graph_->node(i);
if (IsNoOp(node) || IsIdentity(node) || IsIdentityN(node) ||
IsConstant(node) || SafeToConvertToNoOp(node)) {
nodes_to_simplify.PushBack(i);
}
}
while (!nodes_to_simplify.Empty()) {
int node_to_simplify = nodes_to_simplify.PopBack();
while (nodes_to_delete.find(node_to_simplify) != nodes_to_delete.end()) {
node_to_simplify = nodes_to_simplify.PopBack();
}
OptimizeNode(node_to_simplify, &nodes_to_simplify, &nodes_to_delete);
}
if (fetch_nodes_known_) {
VLOG(1) << "Deleted " << nodes_to_delete.size() << " out of "
<< optimized_graph_->node_size() << " nodes.";
EraseNodesFromGraph(nodes_to_delete, optimized_graph_);
    node_map_ = std::make_unique<NodeMap>(optimized_graph_);
BuildNodeToIdx();
}
return absl::OkStatus();
}
namespace {
enum DistanceFromSource : uint8 { ZERO = 0, ONE = 1, TWO_OR_GREATER = 2 };
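// BFS from `source` over its fanouts, recording for each node inside
// `target_range` a lower bound on the longest-path distance from `source`,
// saturated at 2. A control edge source->target is redundant iff the target
// is reachable via some other path of length >= 2.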
void LongestPathsLowerBounds(
int source, const std::pair<int, int>& target_range,
const std::vector<std::vector<int>>& outputs,
std::vector<DistanceFromSource>* longest_distance) {
std::deque<int> queue;
queue.emplace_front(source);
while (!queue.empty()) {
int node = queue.front();
queue.pop_front();
for (int fanout : outputs[node]) {
if (fanout >= target_range.first && fanout <= target_range.second &&
(*longest_distance)[fanout] != TWO_OR_GREATER) {
(*longest_distance)[fanout] =
(*longest_distance)[fanout] == ZERO ? ONE : TWO_OR_GREATER;
queue.emplace_front(fanout);
}
}
}
}
}
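// Transitive reduction over control edges: removes a control edge from
// source to target whenever target is already reachable from source through
// a longer path, leaving the execution order unchanged.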
Status DependencyOptimizer::TransitiveReduction() {
const int num_nodes = optimized_graph_->node_size();
int num_controls = 0;
std::vector<std::vector<int>> outputs(num_nodes);
std::vector<gtl::InlinedVector<std::pair<int, int>, 2>> control_outputs(
num_nodes);
std::vector<std::pair<int, int>> target_range(num_nodes, {num_nodes, -1});
for (int node_idx = 0; node_idx < num_nodes; ++node_idx) {
const NodeDef& node = optimized_graph_->node(node_idx);
if (ModifiesFrameInfo(node) || !HasOpDef(node)) {
continue;
}
for (int input_slot = 0; input_slot < node.input_size(); ++input_slot) {
const string& input = node.input(input_slot);
const NodeDef* input_node = node_map_->GetNode(input);
if (ModifiesFrameInfo(*input_node) || IsMerge(*input_node)) {
continue;
}
const int input_node_idx = node_to_idx_[input_node];
outputs[input_node_idx].push_back(node_idx);
target_range[input_node_idx].first =
std::min(target_range[input_node_idx].first, node_idx);
if (IsControlInput(input)) {
++num_controls;
control_outputs[input_node_idx].emplace_back(node_idx, input_slot);
target_range[input_node_idx].second =
std::max(target_range[input_node_idx].second, node_idx);
}
}
}
int num_controls_removed = 0;
std::vector<DistanceFromSource> longest_distance(num_nodes);
  using InputSlotAndSource = std::pair<int, int>;
absl::flat_hash_map<
int, std::set<InputSlotAndSource, std::greater<InputSlotAndSource>>>
control_edges_to_remove;
for (int source = 0; source < num_nodes; ++source) {
if (target_range[source].first >= target_range[source].second ||
target_range[source].second <= source) {
continue;
}
std::fill(longest_distance.begin() + target_range[source].first,
longest_distance.begin() + target_range[source].second + 1, ZERO);
LongestPathsLowerBounds(source, target_range[source], outputs,
&longest_distance);
for (const auto& control_output : control_outputs[source]) {
const int target = control_output.first;
if (longest_distance[target] == TWO_OR_GREATER) {
const int input_slot = control_output.second;
control_edges_to_remove[target].emplace(input_slot, source);
}
}
}
for (const auto& it : control_edges_to_remove) {
const int target = it.first;
NodeDef* target_node = optimized_graph_->mutable_node(target);
for (const InputSlotAndSource& slot_and_source : it.second) {
const int input_slot = slot_and_source.first;
const int source = slot_and_source.second;
const NodeDef& source_node = optimized_graph_->node(source);
CHECK_LT(input_slot, target_node->input_size());
target_node->mutable_input()->SwapElements(input_slot,
target_node->input_size() - 1);
node_map_->RemoveOutput(source_node.name(), target_node->name());
target_node->mutable_input()->RemoveLast();
++num_controls_removed;
}
}
VLOG(1) << "Removed " << num_controls_removed << " out of " << num_controls
<< " control dependencies";
return absl::OkStatus();
}
void DependencyOptimizer::BuildNodeToIdx() {
node_to_idx_.clear();
for (int i = 0; i < optimized_graph_->node_size(); ++i) {
const NodeDef& node = optimized_graph_->node(i);
node_to_idx_[&node] = i;
}
}
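// Coalesces multiple cross-device (or cross-host, when host_granularity is
// true) control edges into a single NoOp placed on the source device, so at
// most one cross-device control edge remains per source device and consumer.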
void DependencyOptimizer::GroupCrossDeviceControlEdges(bool host_granularity) {
VLOG(1)
<< "DependencyOptimizer::GroupCrossDeviceControlEdges host_granularity="
<< host_granularity;
const int num_nodes = optimized_graph_->node_size();
for (int i = 0; i < num_nodes; ++i) {
NodeDef* node = optimized_graph_->mutable_node(i);
if (node->device().empty()) continue;
string rest, node_device = node->device();
if (host_granularity) {
DeviceNameUtils::SplitDeviceName(node->device(), &node_device, &rest);
}
std::map<string, NodeDef*> noops;
int num_noops = 0;
for (int j = 0; j < node->input_size(); ++j) {
if (IsControlInput(node->input(j))) {
const NodeDef* input = node_map_->GetNode(node->input(j));
if (input == nullptr || input->device().empty()) continue;
string input_device = input->device();
if (host_granularity) {
DeviceNameUtils::SplitDeviceName(input->device(), &input_device,
&rest);
}
if (input_device != node_device) {
VLOG(2) << "Cross-device " << node->name() << " " << input->device()
<< " -> " << node->device();
auto emplace_result = noops.emplace(input_device, nullptr);
if (!emplace_result.second &&
emplace_result.first->second == nullptr) {
VLOG(2) << "Duplicate input device from " << node->name();
string group_name;
NodeDef* noop;
do {
group_name = AddPrefixToNodeName(
node->name(),
strings::StrCat("GroupCrossDeviceControlEdges_", num_noops));
noop = node_map_->GetNode(group_name);
++num_noops;
} while (noop != nullptr);
noop = optimized_graph_->add_node();
noop->set_name(group_name);
noop->set_device(input->device());
noop->set_op("NoOp");
node_map_->AddNode(noop->name(), noop);
emplace_result.first->second = noop;
VLOG(1) << "GroupCrossDeviceControlEdges: Added "
<< SummarizeNodeDef(*noop);
}
}
}
}
int pos = 0;
while (pos < node->input_size()) {
const string& input_name = node->input(pos);
if (IsControlInput(input_name)) {
NodeDef* input = node_map_->GetNode(input_name);
if (input == nullptr) {
++pos;
} else {
string input_device = input->device();
if (host_granularity) {
DeviceNameUtils::SplitDeviceName(input->device(), &input_device,
&rest);
}
auto it = noops.find(input_device); | #include "tensorflow/core/grappler/optimizers/dependency_optimizer.h"
#include "absl/strings/match.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/optimizers/constant_folding.h"
#include "tensorflow/core/grappler/optimizers/model_pruner.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class DependencyOptimizerTest : public GrapplerTest {};
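// Asserts that two graphs are identical node-by-node (names, ops, and inputs
// in order); used when the optimizer is expected to leave the graph alone.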
void VerifyGraphsEqual(const GraphDef& original_graph,
const GraphDef& optimized_graph, const string& func) {
EXPECT_EQ(original_graph.node_size(), optimized_graph.node_size()) << func;
for (int i = 0; i < original_graph.node_size(); ++i) {
const NodeDef& original = original_graph.node(i);
const NodeDef& optimized = optimized_graph.node(i);
EXPECT_EQ(original.name(), optimized.name()) << func;
EXPECT_EQ(original.op(), optimized.op()) << func;
EXPECT_EQ(original.input_size(), optimized.input_size()) << func;
for (int j = 0; j < original.input_size(); ++j) {
EXPECT_EQ(original.input(j), optimized.input(j)) << func;
}
}
}
TEST_F(DependencyOptimizerTest, NoOp) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"CPU:0"});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
VerifyGraphsEqual(item.graph, output, __FUNCTION__);
}
TEST_F(DependencyOptimizerTest, DependenciesDrivenByConstants) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
Output y = ops::Const(s.WithOpName("y"), {1.0f, 2.0f}, {1, 2});
Output z = ops::Const(s.WithOpName("z"), {1.0f, 2.0f}, {1, 2});
Output add = ops::Add(s.WithOpName("add"), x, y);
Output id1 =
ops::Identity(s.WithOpName("id1").WithControlDependencies(x), add);
Output id2 = ops::Identity(
s.WithOpName("id2").WithControlDependencies(y).WithControlDependencies(z),
add);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("id1");
item.fetch.push_back("id2");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(5, output.node_size());
for (const NodeDef& node : item.graph.node()) {
if (node.name() == "id1" || node.name() == "id2") {
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("add", node.input(0));
}
}
}
TEST_F(DependencyOptimizerTest, ChangeToNoop) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x"), {1, 2}, DT_FLOAT);
Output y = ops::RandomUniform(s.WithOpName("y"), {1, 2}, DT_FLOAT);
Output add = ops::Add(s.WithOpName("add"), x, y);
Output id1 =
ops::Identity(s.WithOpName("id1").WithControlDependencies(add), x);
Output id2 =
ops::Identity(s.WithOpName("id2").WithControlDependencies(add), y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("id1");
item.fetch.push_back("id2");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), output.node_size());
int found = 0;
for (int i = 0; i < item.graph.node_size(); ++i) {
const NodeDef& node = item.graph.node(i);
EXPECT_NE("add", node.name());
if (node.name() == "id1") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("^y", node.input(1));
++found;
} else if (node.name() == "id2") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("y", node.input(0));
EXPECT_EQ("^x", node.input(1));
++found;
}
}
EXPECT_EQ(2, found);
}
TEST_F(DependencyOptimizerTest, FullTypeForKeptNoop) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x"), {1, 2}, DT_FLOAT);
Output y = ops::RandomUniform(s.WithOpName("y"), {1, 2}, DT_FLOAT);
Output add = ops::Add(s.WithOpName("add"), x, y);
Output id1 =
ops::Identity(s.WithOpName("id1").WithControlDependencies(add), x);
Output id2 =
ops::Identity(s.WithOpName("id2").WithControlDependencies(add), y);
Output id3 =
ops::Identity(s.WithOpName("id3").WithControlDependencies(add), y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("id1");
item.fetch.push_back("id2");
item.fetch.push_back("id3");
for (int i = 0; i < item.graph.node_size(); ++i) {
NodeDef* node = item.graph.mutable_node(i);
if (node->name() == "add") {
FullTypeDef t;
t.set_type_id(TFT_PRODUCT);
t.add_args()->set_type_id(TFT_TENSOR);
t.mutable_args(0)->add_args()->set_type_id(TFT_FLOAT);
*node->mutable_experimental_type() = t;
break;
}
}
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), output.node_size());
int found = 0;
for (int i = 0; i < item.graph.node_size(); ++i) {
const NodeDef& node = item.graph.node(i);
if (node.name() == "id1") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("^add", node.input(1));
++found;
} else if (node.name() == "id2") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("y", node.input(0));
EXPECT_EQ("^add", node.input(1));
++found;
} else if (node.name() == "id3") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("y", node.input(0));
EXPECT_EQ("^add", node.input(1));
++found;
} else if (node.name() == "add") {
EXPECT_EQ(node.op(), "NoOp");
FullTypeDef t = node.experimental_type();
EXPECT_TRUE((t.type_id() == TFT_UNSET) ||
((t.type_id() == TFT_PRODUCT) && (t.args_size() == 0)));
++found;
}
}
EXPECT_EQ(4, found);
}
TEST_F(DependencyOptimizerTest, ChangeToNoop_RepeatedInput) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x"), {1, 2}, DT_FLOAT);
Output add = ops::Add(s.WithOpName("add"), x, x);
Output id1 =
ops::Identity(s.WithOpName("id1").WithControlDependencies(add), x);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"id1"};
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), output.node_size());
int found = 0;
for (int i = 0; i < item.graph.node_size(); ++i) {
const NodeDef& node = item.graph.node(i);
EXPECT_NE("add", node.name());
if (node.name() == "id1") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("x", node.input(0));
++found;
}
}
EXPECT_EQ(1, found);
}
TEST_F(DependencyOptimizerTest, ChangeToNoop_SwitchIdentity) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
ops::Variable v_in(scope.WithOpName("v_in"), {3}, DT_FLOAT);
ops::Variable v_ctrl(scope.WithOpName("v_ctrl"), {}, DT_BOOL);
ops::Switch s(scope.WithOpName("switch"), v_in, v_ctrl);
Output neg = ops::Neg(scope.WithOpName("neg"), s.output_true);
Output c1 = ops::Const(scope.WithOpName("c1").WithControlDependencies(neg),
{1.0f, 2.0f}, {1, 2});
Output ctrl_dep_id = ops::Identity(
scope.WithOpName("ConstantFoldingCtrl/switch_1"), s.output_true);
Output c2 =
ops::Const(scope.WithOpName("c2").WithControlDependencies(ctrl_dep_id),
{1.0f, 2.0f}, {1, 2});
Output neg1 = ops::Neg(scope.WithOpName("neg1"), s.output_false);
Output neg2 = ops::Neg(scope.WithOpName("neg2"), ctrl_dep_id);
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
item.fetch.push_back("c1");
item.fetch.push_back("c2");
item.fetch.push_back("neg1");
item.fetch.push_back("neg2");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size() - 1, output.node_size());
for (int i = 0; i < output.node_size(); ++i) {
const NodeDef& node = output.node(i);
EXPECT_NE("neg", node.name());
if (node.name() == "c1") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("^ConstantFoldingCtrl/switch_1", node.input(0));
}
}
}
TEST_F(DependencyOptimizerTest, ChangeToNoop_NoFetch) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x"), {1, 2}, DT_FLOAT);
Output y = ops::RandomUniform(s.WithOpName("y"), {1, 2}, DT_FLOAT);
Output add = ops::Add(s.WithOpName("add"), x, y);
Output id1 =
ops::Identity(s.WithOpName("id1").WithControlDependencies(add), x);
Output id2 =
ops::Identity(s.WithOpName("id2").WithControlDependencies(add), y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
TF_CHECK_OK(TopologicalSort(&item.graph));
VerifyGraphsEqual(item.graph, output, __FUNCTION__);
}
TEST_F(DependencyOptimizerTest, RemoveNoOps_EmptyInputOrOutput) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s, {1, 2}, DT_FLOAT);
auto noop1 = ops::NoOp(s);
auto noop2 = ops::NoOp(s.WithControlDependencies(x));
Output id = ops::Identity(s.WithControlDependencies({noop1.operation}), x);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("Identity");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), output.node_size());
for (const NodeDef& node : output.node()) {
if (node.name() == "NoOp" || node.name() == "NoOp_1") {
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "Identity") {
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("RandomUniform", node.input(0));
}
}
}
TEST_F(DependencyOptimizerTest, RemoveNoOps_DeviceBoundaries) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x").WithDevice("/CPU:0"), {1, 2},
DT_FLOAT);
Output y = ops::RandomUniform(s.WithOpName("y").WithDevice("/CPU:0"), {1, 2},
DT_FLOAT);
auto noop = ops::NoOp(s.WithControlDependencies(x).WithDevice("/CPU:1"));
auto noop_1 = ops::NoOp(
s.WithControlDependencies(x).WithControlDependencies(y).WithDevice(
"/CPU:0"));
Output id = ops::Identity(
s.WithControlDependencies({noop.operation}).WithDevice("/CPU:1"), x);
Output id_1 = ops::Identity(
s.WithControlDependencies({noop.operation, noop_1.operation})
.WithDevice("/CPU:1"),
y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("Identity");
item.fetch.push_back("Identity_1");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
TF_CHECK_OK(TopologicalSort(&item.graph));
VerifyGraphsEqual(item.graph, output, __FUNCTION__);
}
TEST_F(DependencyOptimizerTest, RemoveIdentityOps_DeviceBoundaries) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x").WithDevice("/CPU:0"), {1, 2},
DT_FLOAT);
Output y = ops::RandomUniform(s.WithOpName("y").WithDevice("/CPU:0"), {1, 2},
DT_FLOAT);
auto id_a = ops::Identity(s.WithOpName("id_a").WithDevice("/CPU:1"), x);
auto id_b = ops::Identity(
s.WithOpName("id_b").WithControlDependencies(y).WithDevice("/CPU:0"), x);
Output id =
ops::Identity(s.WithControlDependencies(id_a).WithDevice("/CPU:1"), id_b);
Output id_1 = ops::Identity(s.WithDevice("/CPU:1"), id_a);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("Identity");
item.fetch.push_back("Identity_1");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
TF_CHECK_OK(TopologicalSort(&item.graph));
VerifyGraphsEqual(item.graph, output, __FUNCTION__);
}
TEST_F(DependencyOptimizerTest, RemoveIdentityOps_IdenticalDevices) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x").WithDevice("/CPU:0"), {1, 2},
DT_FLOAT);
auto id_a = ops::Identity(s.WithOpName("id_a").WithDevice("/CPU:1"), x);
Output id =
ops::Identity(s.WithControlDependencies(id_a).WithDevice("/CPU:0"), id_a);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("Identity");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size() - 1, output.node_size());
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.name(), "id_a");
if (node.name() == "Identity") {
EXPECT_EQ(node.input(0), "x");
}
}
}
TEST_F(DependencyOptimizerTest, RemoveNoOps_SingleInputOrOutput) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x"), {1, 2}, DT_FLOAT);
Output y = ops::RandomUniform(s.WithOpName("y"), {1, 2}, DT_FLOAT);
auto noop = ops::NoOp(s.WithControlDependencies(x));
auto noop_1 =
ops::NoOp(s.WithControlDependencies(x).WithControlDependencies(y));
Output id = ops::Identity(s.WithControlDependencies({noop.operation}), x);
Output id_1 = ops::Identity(
s.WithControlDependencies({noop.operation, noop_1.operation}), y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("Identity");
item.fetch.push_back("Identity_1");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), output.node_size());
for (const NodeDef& node : output.node()) {
if (node.name() == "NoOp" || node.name() == "NoOp_1") {
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "Identity") {
EXPECT_EQ("x", node.input(0));
} else if (node.name() == "Identity_1") {
EXPECT_EQ("y", node.input(0));
EXPECT_EQ("^x", node.input(1));
}
}
}
TEST_F(DependencyOptimizerTest, RemoveIdentity) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x"), {1, 2}, DT_FLOAT);
Output y = ops::RandomUniform(s.WithOpName("y"), {1, 2}, DT_FLOAT);
Output z = ops::RandomUniform(s.WithOpName("z"), {1, 2}, DT_FLOAT);
auto id_a = ops::Identity(s.WithOpName("id_a"), x);
auto id_b = ops::Identity(
s.WithOpName("id_b").WithControlDependencies(y).WithControlDependencies(
z),
x);
auto id_c = ops::Identity(s.WithOpName("id_c").WithControlDependencies(y), x);
Output a_a = ops::Identity(s.WithOpName("a_a"), id_a);
Output a_b = ops::Identity(s.WithOpName("a_b"), id_a);
Output a_c =
ops::Identity(s.WithOpName("a_c").WithControlDependencies(id_a), z);
Output a_d =
ops::Identity(s.WithOpName("a_d").WithControlDependencies(id_a), z);
Output b_a = ops::Identity(s.WithOpName("b_a"), id_b);
Output c_a = ops::Identity(s.WithOpName("c_a"), id_c);
Output c_b =
ops::Identity(s.WithOpName("c_b").WithControlDependencies(id_c), z);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"a_a", "a_b", "a_c", "a_d", "b_a", "c_a", "c_b"};
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size() - 3, output.node_size());
int found = 0;
for (const NodeDef& node : output.node()) {
EXPECT_NE("id_a", node.name());
EXPECT_NE("id_b", node.name());
EXPECT_NE("id_c", node.name());
if (node.name() == "a_a" || node.name() == "a_b") {
ASSERT_EQ(1, node.input_size());
EXPECT_EQ("x", node.input(0));
++found;
}
if (node.name() == "a_c" || node.name() == "a_d") {
ASSERT_EQ(2, node.input_size());
EXPECT_EQ("z", node.input(0));
EXPECT_EQ("^x", node.input(1));
++found;
}
if (node.name() == "b_a") {
ASSERT_EQ(3, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("^y", node.input(1));
EXPECT_EQ("^z", node.input(2));
++found;
}
if (node.name() == "c_a") {
ASSERT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("^y", node.input(1));
++found;
}
if (node.name() == "c_b") {
ASSERT_EQ(3, node.input_size());
EXPECT_EQ("z", node.input(0));
EXPECT_EQ("^x", node.input(1));
EXPECT_EQ("^y", node.input(2));
++found;
}
}
EXPECT_EQ(found, 7);
}
TEST_F(DependencyOptimizerTest, RemoveIdentity_RepeatedInputs) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
ops::Variable x(scope.WithOpName("x"), {}, DT_BOOL);
ops::Variable y(scope.WithOpName("y"), {}, DT_BOOL);
ops::Switch sw(scope.WithOpName("switch"), x, x);
Output id0 = ops::Identity(scope.WithOpName("id0"), sw.output_true);
Output id1 = ops::Identity(scope.WithOpName("id1"), sw.output_false);
Output or0 = ops::LogicalOr(scope.WithOpName("or0"), id0, id0);
Output or1 = ops::LogicalOr(scope.WithOpName("or1"), id0, y);
Output or2 = ops::LogicalOr(
scope.WithOpName("or2").WithControlDependencies(id1), y, y);
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
item.fetch.push_back("or0");
item.fetch.push_back("or1");
item.fetch.push_back("or2");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size() - 1, output.node_size());
int found = 0;
for (const NodeDef& node : output.node()) {
EXPECT_NE("id0", node.name());
if (node.name() == "or0") {
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("switch:1", node.input(0));
EXPECT_EQ("switch:1", node.input(1));
++found;
}
if (node.name() == "or1") {
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("switch:1", node.input(0));
EXPECT_EQ("y", node.input(1));
++found;
}
if (node.name() == "or2") {
EXPECT_EQ(3, node.input_size());
EXPECT_EQ("y", node.input(0));
EXPECT_EQ("y", node.input(1));
EXPECT_EQ("^id1", node.input(2));
++found;
}
}
EXPECT_EQ(found, 3);
}
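// The direct control edge x -> neg2 is redundant because neg2 already
// depends on x through neg1, so transitive reduction should drop it.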
TEST_F(DependencyOptimizerTest, Transitive_Reduction_Simple) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
Output x = ops::Square(s.WithOpName("x"), c);
Output neg1 = ops::Neg(s.WithOpName("neg1"), x);
Output neg2 =
ops::Neg(s.WithOpName("neg2").WithControlDependencies({x}), neg1);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("neg2");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(4, output.node_size());
EXPECT_EQ("neg2", output.node(3).name());
EXPECT_EQ(1, output.node(3).input_size());
EXPECT_EQ("neg1", output.node(3).input(0));
}
TEST_F(DependencyOptimizerTest, ChangeToNoop_Identity) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
ops::Variable v_in(scope.WithOpName("v_in"), {3}, DT_FLOAT);
Output id_after_var = ops::Identity(scope.WithOpName("id_after_var"), v_in);
ops::Variable v_ctrl(scope.WithOpName("v_ctrl"), {}, DT_BOOL);
ops::Switch s(
scope.WithOpName("switch").WithControlDependencies(id_after_var), v_in,
v_ctrl);
Output id0 = ops::Identity(scope.WithOpName("id0"), s.output_true);
Output grappler_added_id = ops::Identity(
scope.WithOpName("ConstantFoldingCtrl/switch_1"), s.output_true);
Output c1 = ops::Const(scope.WithOpName("c1")
.WithControlDependencies(id_after_var)
.WithControlDependencies(grappler_added_id),
{1.0f, 2.0f}, {1, 2});
Output id1 = ops::Identity(scope.WithOpName("id1"), c1);
Output id2 = ops::Identity(scope.WithOpName("id2"), id0);
Output fetch =
ops::Identity(scope.WithOpName("fetch").WithControlDependencies(id1), c1);
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
item.fetch.push_back("c1");
item.fetch.push_back("id2");
item.fetch.push_back("fetch");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size() - 2, output.node_size());
bool found = false;
for (int i = 0; i < output.node_size(); ++i) {
const NodeDef& node = output.node(i);
EXPECT_NE("id0", node.name());
EXPECT_NE("id1", node.name());
if (node.name() == "c1") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("^ConstantFoldingCtrl/switch_1", node.input(0));
found = true;
}
}
EXPECT_TRUE(found);
}
TEST_F(DependencyOptimizerTest, IdentityInputs) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output b = ops::Placeholder(scope.WithOpName("b"), DT_BOOL);
Output x = ops::RandomUniform(scope.WithOpName("x"), {1, 2}, DT_FLOAT);
auto s = ops::Switch(scope.WithOpName("s"), x, b);
auto id_f = ops::Identity(scope.WithOpName("id_f"), s.output_false);
auto id_t = ops::Identity(scope.WithOpName("id_t"), s.output_true);
Output out1 = ops::Identity(scope.WithOpName("out1"), id_f);
Output out2 = ops::Identity(scope.WithOpName("out2"), id_t);
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
item.fetch = {"out1", "out2"};
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(6, output.node_size());
EXPECT_EQ("out1", output.node(4).name());
EXPECT_EQ(1, output.node(4).input_size());
EXPECT_EQ("s", output.node(4).input(0));
EXPECT_EQ("out2", output.node(5).name());
EXPECT_EQ(1, output.node(5).input_size());
EXPECT_EQ("s:1", output.node(5).input(0));
}
TEST_F(DependencyOptimizerTest, RemoveIdentityN_SwitchInput) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output b = ops::Placeholder(scope.WithOpName("b"), DT_BOOL);
Output x = ops::RandomUniform(scope.WithOpName("x"), {1, 2}, DT_FLOAT);
auto s = ops::Switch(scope.WithOpName("s"), x, b);
auto id_f = ops::IdentityN(scope.WithOpName("id_f"), {s.output_false});
auto id_t = ops::IdentityN(scope.WithOpName("id_t"), {s.output_true});
auto id_b =
ops::IdentityN(scope.WithOpName("id_b"), {s.output_false, s.output_true});
Output out1 = ops::Identity(scope.WithOpName("out1"), id_f[0]);
Output out2 = ops::Identity(scope.WithOpName("out2"), id_t[0]);
Output out3 = ops::Identity(scope.WithOpName("out3"), id_b[0]);
Output out4 = ops::Identity(scope.WithOpName("out4"), id_b[1]);
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
item.fetch = {"out1", "out2", "out3", "out4"};
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(8, output.node_size());
auto out1_node = output.node(7);
EXPECT_EQ("out1", out1_node.name());
EXPECT_EQ(1, out1_node.input_size());
EXPECT_EQ("s", out1_node.input(0));
auto out2_node = output.node(4);
EXPECT_EQ("out2", out2_node.name());
EXPECT_EQ(1, out2_node.input_size());
EXPECT_EQ("s:1", out2_node.input(0));
auto out3_node = output.node(5);
EXPECT_EQ("out3", out3_node.name());
EXPECT_EQ(1, out3_node.input_size());
EXPECT_EQ("s", out3_node.input(0));
auto out4_node = output.node(6);
EXPECT_EQ("out4", out4_node.name());
EXPECT_EQ(1, out4_node.input_size());
EXPECT_EQ("s:1", out4_node.input(0));
}
TEST_F(DependencyOptimizerTest, DoNotRemoveIdentityNWithControlDependency) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output input1 = ops::Placeholder(scope.WithOpName("input1"), DT_BOOL);
Output input2 = ops::Const(scope.WithOpName("input2"), {1, 2});
auto id_n = ops::IdentityN(scope.WithOpName("id_n"), {input1, input2});
Output out1 = ops::Identity(scope.WithOpName("out1"), id_n[0]);
Output out2 = ops::Identity(scope.WithOpName("out2"), id_n[1]);
auto out3 =
ops::NoOp(scope.WithOpName("out3").WithControlDependencies(id_n[1]));
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
item.fetch = {"out1", "out2", "out3"};
DependencyOptimizer optimizer;
GraphDef optimized_graph_def;
Status status = optimizer.Optimize(nullptr, item, &optimized_graph_def);
TF_EXPECT_OK(status);
EXPECT_EQ(6, optimized_graph_def.node_size());
}
TEST_F(DependencyOptimizerTest,
Identity_DeviceCrossing_ConsumerOnDifferentDevice) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x_on_1 =
ops::Const(s.WithOpName("x_on_1").WithDevice("/gpu:1"), {1.0f}, {});
Output one_on_3 =
ops::Const(s.WithOpName("one_on_3").WithDevice("/gpu:3"), {1.0f}, {});
Output x_on_2 =
ops::Identity(s.WithOpName("x_on_2").WithDevice("/gpu:2"), x_on_1);
Output result =
ops::Add(s.WithOpName("result").WithDevice("/gpu:3"), x_on_2, one_on_3);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"result"};
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
VerifyGraphsEqual(item.graph, output, __FUNCTION__);
}
TEST_F(DependencyOptimizerTest, Identity_DeviceCrossing_ConsumerOnSameDevice) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x_on_1 =
ops::Const(s.WithOpName("x_on_1").WithDevice("/gpu:1"), {1.0f}, {});
Output one_on_2 =
ops::Const(s.WithOpName("one_on_2").WithDevice("/gpu:2"), {1.0f}, {});
Output x_on_2 =
ops::Identity(s.WithOpName("x_on_2").WithDevice("/gpu:2"), x_on_1);
Output result =
ops::Add(s.WithOpName("result").WithDevice("/gpu:2"), x_on_2, one_on_2);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"result"};
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(3, output.node_size());
for (const auto& node : output.node()) {
EXPECT_NE("x_on_2", node.name());
if (node.name() == "result") {
EXPECT_EQ("x_on_1", node.input(0));
}
}
}
TEST_F(DependencyOptimizerTest, RemoveGreaterEqualWithNoOp) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Placeholder(s.WithOpName("x"), DT_FLOAT,
ops::Placeholder::Shape({}));
Output y = ops::P |
1,379 | cpp | tensorflow/tensorflow | model_pruner | tensorflow/core/grappler/optimizers/model_pruner.cc | tensorflow/core/grappler/optimizers/model_pruner_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_MODEL_PRUNER_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_MODEL_PRUNER_H_
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
namespace tensorflow {
namespace grappler {
class ModelPruner : public GraphOptimizer {
public:
ModelPruner() {}
~ModelPruner() override {}
  string name() const override { return "model_pruner"; }
bool UsesFunctionLibrary() const override { return false; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/model_pruner.h"
#include <unordered_set>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/transitive_fanin.h"
namespace tensorflow {
namespace grappler {
namespace {
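// An Identity is "trivial" (safe to forward through) only if it has no
// control-edge fanins or fanouts, does not consume a Switch output, and does
// not feed a Merge; in those cases it participates in control-flow semantics
// and must be kept.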
bool IsTrivialIdentity(const NodeDef& node, const GraphView& graph_view) {
for (const auto input :
graph_view.GetFanins(node, true)) {
if (input.port_id == Graph::kControlSlot) {
return false;
} else if (IsSwitch(*input.node)) {
return false;
}
}
for (const auto output :
graph_view.GetFanouts(node, true)) {
if (output.port_id == Graph::kControlSlot) {
return false;
} else if (IsMerge(*output.node)) {
return false;
}
}
return true;
}
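// Ops that can be pruned outright: StopGradient, trivial Identity(N),
// input-free NoOps, dead input-free Consts, and AddN with at most one data
// input (a pass-through).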
bool IsTrivialOp(const NodeDef& node, const GraphView& graph_view) {
if (IsStopGradient(node)) {
return true;
}
if (IsIdentity(node) || IsIdentityNSingleInput(node)) {
return IsTrivialIdentity(node, graph_view);
}
if (IsNoOp(node) && node.input().empty()) {
return true;
}
if (IsConstant(node) && node.input().empty() &&
graph_view.NumFanouts(node, false) == 0) {
return true;
}
return IsAddN(node) && NumNonControlInputs(node) <= 1;
}
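// Bypassing a node replaces (in + out) edges with (in * out) edges, so
// removal only pays off when in * out <= in + out.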
bool RemovalIncreasesEdgeCount(const NodeDef& node,
const GraphView& graph_view) {
int in_degree =
graph_view.NumFanins(node, true);
int out_degree =
graph_view.NumFanouts(node, true);
return in_degree * out_degree > in_degree + out_degree;
}
bool IsOutputPortRefValue(const NodeDef& node, int port_id,
const OpRegistryInterface& op_registry) {
const OpRegistrationData* op_reg_data = nullptr;
Status s = op_registry.LookUp(node.op(), &op_reg_data);
if (s.ok()) {
DataType output_type;
s = OutputTypeForNode(node, op_reg_data->op_def, port_id, &output_type);
if (s.ok() && IsRefType(output_type)) {
return true;
}
}
return false;
}
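// A node may be removed if it is a trivially dead NoOp/Const, or if bypassing
// it does not grow the edge count, cross a device boundary, rewire a function
// call node, or forward a ref-typed output.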
bool CanRemoveNode(const NodeDef& node, const GraphView& graph_view,
const absl::flat_hash_set<string>& function_names,
const OpRegistryInterface& op_registry) {
if (IsNoOp(node) &&
(node.input().empty() ||
graph_view.NumFanouts(node, true) == 0)) {
return true;
}
if (IsConstant(node) && node.input().empty() &&
graph_view.NumFanouts(node, false) == 0) {
return true;
}
if (RemovalIncreasesEdgeCount(node, graph_view)) {
return false;
}
for (const auto input :
graph_view.GetFanins(node, true)) {
if (node.device() != input.node->device()) {
return false;
} else if (input.port_id == Graph::kControlSlot) {
continue;
} else if (function_names.find(input.node->op()) != function_names.end()) {
return false;
} else if (IsOutputPortRefValue(*input.node, input.port_id, op_registry)) {
return false;
}
}
for (const auto output :
graph_view.GetFanouts(node, false)) {
if (function_names.find(output.node->op()) != function_names.end()) {
return false;
}
}
return true;
}
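// Recursively rebuilds new_node's input list, routing around nodes slated for
// deletion; inputs reached through a control edge are forwarded as control
// dependencies.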
void ForwardInputsInternal(
const NodeDef& node,
const absl::flat_hash_set<const NodeDef*>& nodes_to_delete,
bool add_as_control, NodeDef* new_node,
const absl::flat_hash_map<string, const NodeDef*>& optimized_nodes,
const GraphView& graph_view) {
auto itr = optimized_nodes.find(node.name());
if (itr != optimized_nodes.end()) {
for (const string& input : itr->second->input()) {
*new_node->add_input() =
add_as_control ? AsControlDependency(NodeName(input)) : input;
}
return;
}
for (const auto& input : node.input()) {
const NodeDef* input_node = graph_view.GetNode(NodeName(input));
if (input_node == nullptr) {
*new_node->add_input() =
add_as_control ? AsControlDependency(NodeName(input)) : input;
continue;
}
if (nodes_to_delete.find(input_node) != nodes_to_delete.end()) {
ForwardInputsInternal(*input_node, nodes_to_delete,
add_as_control || IsControlInput(input), new_node,
optimized_nodes, graph_view);
} else {
*new_node->add_input() =
add_as_control ? AsControlDependency(NodeName(input)) : input;
}
}
}
void ForwardInputs(const NodeDef& original_node,
const absl::flat_hash_set<const NodeDef*>& nodes_to_delete,
NodeDef* new_node,
absl::flat_hash_map<string, const NodeDef*>* optimized_nodes,
const GraphView& graph_view) {
ForwardInputsInternal(original_node, nodes_to_delete,
false, new_node, *optimized_nodes,
graph_view);
if (!new_node->name().empty()) {
(*optimized_nodes)[new_node->name()] = new_node;
}
int pos = 0;
for (int i = 0; i < new_node->input_size(); ++i) {
if (!IsControlInput(new_node->input(i))) {
new_node->mutable_input()->SwapElements(pos, i);
++pos;
}
}
DedupControlInputs(new_node);
}
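// Walks backwards from the terminal (preserved) nodes and records, for each
// IdentityN encountered, the set of output ports that actually lie on a path
// to a terminal node. Only those ports need to stay on the IdentityN itself.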
absl::flat_hash_map<string, absl::flat_hash_set<int>> IdentityNTerminalPorts(
const NodeMap& node_map, const std::vector<string>& terminal_nodes,
int graph_size) {
std::vector<string> to_visit;
to_visit.reserve(graph_size);
absl::flat_hash_set<string> visited(terminal_nodes.begin(),
terminal_nodes.end());
for (const string& terminal_node : terminal_nodes) {
NodeDef* node = node_map.GetNode(terminal_node);
if (node == nullptr) {
continue;
}
for (const string& input : node->input()) {
to_visit.push_back(input);
}
}
absl::flat_hash_set<string> identity_n_fanouts;
while (!to_visit.empty()) {
string curr = to_visit.back();
to_visit.pop_back();
NodeDef* curr_node = node_map.GetNode(curr);
if (curr_node == nullptr ||
visited.find(curr_node->name()) != visited.end()) {
continue;
}
if (IsIdentityN(*curr_node)) {
if (identity_n_fanouts.find(curr) == identity_n_fanouts.end()) {
identity_n_fanouts.emplace(curr);
int pos = NodePositionIfSameNode(curr, curr_node->name());
if (pos >= 0) {
to_visit.push_back(curr_node->input(pos));
}
for (const string& input : curr_node->input()) {
if (IsControlInput(input) &&
identity_n_fanouts.find(input) == identity_n_fanouts.end()) {
to_visit.push_back(input);
}
}
}
} else {
for (const string& input : curr_node->input()) {
to_visit.push_back(input);
}
visited.emplace(curr_node->name());
}
}
absl::flat_hash_map<string, absl::flat_hash_set<int>> identity_n_ports;
for (const auto& fanout : identity_n_fanouts) {
int pos;
string node_name = ParseNodeName(fanout, &pos);
if (node_name.empty() || pos < 0) {
continue;
}
if (identity_n_ports.find(node_name) == identity_n_ports.end()) {
identity_n_ports[node_name] = {pos};
} else {
identity_n_ports[node_name].emplace(pos);
}
}
return identity_n_ports;
}
string NewIdentityFromIdentityN(int pos, const NodeDef& identity_n,
GraphDef* graph, NodeMap* node_map) {
string new_node_name =
strings::StrCat(identity_n.name(), "-", pos, "-grappler-ModelPruner");
if (node_map->NodeExists(new_node_name)) {
return "";
}
NodeDef* new_node = graph->add_node();
Status status = NodeDefBuilder(new_node_name, "Identity")
.Input(identity_n.input(pos), 0,
identity_n.attr().at("T").list().type(pos))
.Device(identity_n.device())
.Finalize(new_node);
if (!status.ok()) {
return "";
}
node_map->AddNode(new_node->name(), new_node);
node_map->AddOutput(NodeName(new_node->input(0)), new_node->name());
return new_node->name();
}
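// Shrinks an IdentityN down to its terminal ports: every non-terminal port is
// peeled off into a standalone Identity node, consumers are re-pointed at the
// new nodes or the compacted ports, and the node's inputs and "T" attr are
// truncated in place.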
Status RewriteIdentityNAndInputsOutputs(
NodeDef* node, int num_non_control_inputs,
const absl::flat_hash_set<int>& terminal_ports, GraphDef* graph,
NodeMap* node_map) {
struct NodeOutputUpdate {
string input;
string output;
};
absl::flat_hash_map<int, int> terminal_input_pos;
absl::flat_hash_map<int, string> new_identities;
int new_idx = 0;
for (int i = 0; i < num_non_control_inputs; i++) {
if (terminal_ports.find(i) != terminal_ports.end()) {
terminal_input_pos[i] = new_idx++;
} else {
string identity = NewIdentityFromIdentityN(i, *node, graph, node_map);
if (identity.empty()) {
return errors::Internal(
"Could not create Identity node from IdentityN node ", node->name(),
" at port ", i);
}
new_identities[i] = identity;
}
}
std::vector<NodeOutputUpdate> updates;
for (NodeDef* output : node_map->GetOutputs(node->name())) {
for (int i = 0; i < output->input_size(); i++) {
string input = output->input(i);
if (IsControlInput(input)) {
continue;
}
TensorId input_tensor = ParseTensorName(input);
if (input_tensor.node() == node->name()) {
if (terminal_ports.find(input_tensor.index()) == terminal_ports.end()) {
string new_identity = new_identities[input_tensor.index()];
output->set_input(i, new_identity);
updates.push_back({new_identity, output->name()});
} else {
int new_pos = terminal_input_pos[input_tensor.index()];
string updated_input_name =
new_pos > 0 ? strings::StrCat(node->name(), ":", new_pos)
: node->name();
output->set_input(i, updated_input_name);
}
}
}
}
for (const NodeOutputUpdate& update : updates) {
node_map->AddOutput(update.input, update.output);
}
const int num_inputs = node->input_size();
int curr_pos = 0;
auto mutable_inputs = node->mutable_input();
auto mutable_types =
node->mutable_attr()->at("T").mutable_list()->mutable_type();
for (int i = 0; i < num_non_control_inputs; i++) {
if (terminal_input_pos.find(i) != terminal_input_pos.end()) {
mutable_inputs->SwapElements(i, curr_pos);
mutable_types->SwapElements(i, curr_pos);
curr_pos++;
}
}
mutable_types->Truncate(curr_pos);
for (int i = num_non_control_inputs; i < num_inputs; i++) {
mutable_inputs->SwapElements(i, curr_pos++);
}
mutable_inputs->DeleteSubrange(curr_pos, num_inputs - curr_pos);
return absl::OkStatus();
}
Status SplitIdentityNInputs(GraphDef* graph,
const std::vector<string>& terminal_nodes,
bool* updated_graph) {
NodeMap node_map(graph);
for (auto const& terminal :
IdentityNTerminalPorts(node_map, terminal_nodes, graph->node_size())) {
NodeDef* node = node_map.GetNode(terminal.first);
if (node == nullptr) {
continue;
}
const int num_non_control_inputs = NumNonControlInputs(*node);
const int terminal_second_size = terminal.second.size();
if (node->attr().count("T") == 0 ||
node->attr().at("T").list().type_size() != num_non_control_inputs ||
terminal_second_size >= num_non_control_inputs) {
continue;
}
TF_RETURN_IF_ERROR(RewriteIdentityNAndInputsOutputs(
node, num_non_control_inputs, terminal.second, graph, &node_map));
*updated_graph = true;
}
return absl::OkStatus();
}
}
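// Optimize() proceeds in two phases: first the graph is cut down to the
// transitive fanin of the preserved nodes (splitting partially-consumed
// IdentityN nodes along the way), then trivial nodes are bypassed wherever
// doing so is safe and does not increase the edge count.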
Status ModelPruner::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) {
const std::unordered_set<string> nodes_to_preserve = item.NodesToPreserve();
std::unique_ptr<GraphDef> pruned_graph_release;
GraphDef* pruned_graph;
if (!nodes_to_preserve.empty()) {
pruned_graph_release.reset(new GraphDef());
pruned_graph = pruned_graph_release.get();
pruned_graph->mutable_node()->Reserve(item.graph.node_size());
std::vector<string> terminal_nodes(nodes_to_preserve.begin(),
nodes_to_preserve.end());
std::sort(terminal_nodes.begin(), terminal_nodes.end());
TF_RETURN_IF_ERROR(
SetTransitiveFaninGraph(item.graph, pruned_graph, terminal_nodes));
bool did_split_identity_n = false;
TF_RETURN_IF_ERROR(SplitIdentityNInputs(pruned_graph, terminal_nodes,
&did_split_identity_n));
if (did_split_identity_n) {
GraphDef fanin_split_identity_n_graph;
TF_RETURN_IF_ERROR(SetTransitiveFaninGraph(
*pruned_graph, &fanin_split_identity_n_graph, terminal_nodes));
pruned_graph->Swap(&fanin_split_identity_n_graph);
}
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
} else {
pruned_graph = const_cast<GraphDef*>(&item.graph);
}
GraphView graph_view(pruned_graph);
absl::flat_hash_set<string> function_names;
for (const auto& function : item.graph.library().function()) {
function_names.insert(function.signature().name());
}
OpRegistryInterface* op_registry = OpRegistry::Global();
absl::flat_hash_set<const NodeDef*> nodes_to_delete;
for (int i = 0; i < pruned_graph->node_size(); ++i) {
NodeDef* node = pruned_graph->mutable_node(i);
DedupControlInputs(node);
if (!IsTrivialOp(*node, graph_view)) {
VLOG(3) << node->name() << " is not trivial.";
continue;
}
if (nodes_to_preserve.find(node->name()) != nodes_to_preserve.end()) {
continue;
}
if (CanRemoveNode(*node, graph_view, function_names, *op_registry)) {
nodes_to_delete.insert(node);
} else {
VLOG(3) << node->name() << " cannot be removed";
}
}
if (nodes_to_delete.empty() && nodes_to_preserve.empty()) {
return errors::Aborted("Nothing to do.");
}
optimized_graph->Clear();
*optimized_graph->mutable_library() = item.graph.library();
*optimized_graph->mutable_versions() = item.graph.versions();
if (nodes_to_delete.empty()) {
optimized_graph->mutable_node()->Swap(pruned_graph->mutable_node());
return absl::OkStatus();
}
const bool fetches_are_known = !item.fetch.empty();
absl::flat_hash_map<string, const NodeDef*> optimized_nodes;
optimized_graph->mutable_node()->Reserve(pruned_graph->node_size());
for (const auto& node : pruned_graph->node()) {
if (!fetches_are_known ||
nodes_to_delete.find(&node) == nodes_to_delete.end()) {
NodeDef* new_node = optimized_graph->add_node();
*new_node = node;
new_node->clear_input();
ForwardInputs(node, nodes_to_delete, new_node, &optimized_nodes,
graph_view);
}
}
VLOG(1) << "Pruned " << nodes_to_delete.size()
<< " nodes from the graph. The graph now contains "
<< optimized_graph->node_size() << " nodes.";
if (optimized_graph->node_size() > item.graph.node_size()) {
return errors::Internal("Pruning increased graph size.");
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/model_pruner.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/no_op.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kDeviceCPU0[] = "/device:CPU:0";
constexpr char kDeviceGPU0[] = "/device:GPU:0";
class ModelPrunerTest : public GrapplerTest {};
TEST_F(ModelPrunerTest, NoPruning) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"CPU:0"});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ModelPruner pruner;
GraphDef output;
TF_ASSERT_OK(pruner.Optimize(nullptr, item, &output));
CompareGraphs(item.graph, output);
}
TEST_F(ModelPrunerTest, StopGradientPruning) {
GrapplerItem item;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Sqrt(s.WithOpName("b"), {a});
Output c = ops::StopGradient(s.WithOpName("c"), b);
Output d = ops::StopGradient(s.WithOpName("d"), c);
Output e = ops::Sqrt(s.WithOpName("e"), {d});
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
}
ModelPruner pruner;
GraphDef output;
TF_ASSERT_OK(pruner.Optimize(nullptr, item, &output));
GraphDef expected;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Sqrt(s.WithOpName("b"), {a});
Output c = ops::StopGradient(s.WithOpName("c"), b);
Output d = ops::StopGradient(s.WithOpName("d"), b);
Output e = ops::Sqrt(s.WithOpName("e"), {b});
TF_ASSERT_OK(s.ToGraphDef(&expected));
}
CompareGraphs(expected, output);
std::vector<string> fetch = {"e"};
auto expected_tensors = EvaluateNodes(item.graph, fetch);
auto actual_tensors = EvaluateNodes(output, fetch);
ASSERT_EQ(expected_tensors.size(), 1);
ASSERT_EQ(actual_tensors.size(), 1);
test::ExpectTensorEqual<float>(actual_tensors[0], expected_tensors[0]);
}
TEST_F(ModelPrunerTest, IdentityPruning) {
GrapplerItem item;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Sqrt(s.WithOpName("b"), {a});
Output c = ops::Identity(s.WithOpName("c").WithControlDependencies(b), b);
Output d = ops::Identity(s.WithOpName("d"), c);
Output e = ops::Sqrt(s.WithOpName("e"), {d});
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
}
item.fetch.push_back("e");
ModelPruner pruner;
GraphDef output;
TF_ASSERT_OK(pruner.Optimize(nullptr, item, &output));
GraphDef expected;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Sqrt(s.WithOpName("b"), {a});
Output e = ops::Sqrt(s.WithOpName("e"), {b});
TF_ASSERT_OK(s.ToGraphDef(&expected));
}
CompareGraphs(expected, output);
auto actual_tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(actual_tensors.size(), 1);
auto expected_tensors = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(expected_tensors.size(), 1);
test::ExpectTensorEqual<float>(actual_tensors[0], expected_tensors[0]);
}
TEST_F(ModelPrunerTest, IdentityNInputPruning) {
GrapplerItem item;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Const(s.WithOpName("a"), 2.0f, {10, 10});
Output b = ops::Sqrt(s.WithOpName("b"), {a});
Output c = ops::Const(s.WithOpName("c"), 3.0f, {10, 10});
Output d = ops::Const(s.WithOpName("d"), 4.0f, {10, 10});
auto e =
ops::IdentityN(s.WithOpName("e").WithControlDependencies(d), {a, b, c});
auto f = ops::IdentityN(s.WithOpName("f"), {e[2], e[1], e[0]});
Output g = ops::Sqrt(s.WithOpName("g"), {f[1]});
Output h = ops::Sqrt(s.WithOpName("h"), {f[2]});
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
}
item.fetch = {"g", "h"};
ModelPruner pruner;
GraphDef output;
TF_ASSERT_OK(pruner.Optimize(nullptr, item, &output));
GraphDef expected;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Const(s.WithOpName("a"), 2.0f, {10, 10});
Output b = ops::Sqrt(s.WithOpName("b"), {a});
auto e = ops::IdentityN(s.WithOpName("e"), {a, b});
auto f = ops::IdentityN(s.WithOpName("f"), {e[1], e[0]});
Output g = ops::Sqrt(s.WithOpName("g"), {f[0]});
Output h = ops::Sqrt(s.WithOpName("h"), {f[1]});
TF_ASSERT_OK(s.ToGraphDef(&expected));
}
CompareGraphs(expected, output);
auto actual_tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(actual_tensors.size(), 2);
auto expected_tensors = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(expected_tensors.size(), 2);
for (int i = 0; i < actual_tensors.size(); i++) {
test::ExpectTensorEqual<float>(actual_tensors[i], expected_tensors[i]);
}
}
TEST_F(ModelPrunerTest, IdentityNInputPruningWithIdentityNInFetch) {
GrapplerItem item;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Const(s.WithOpName("a"), 2.0f, {10, 10});
Output b = ops::Sqrt(s.WithOpName("b"), {a});
Output c = ops::Const(s.WithOpName("c"), 3.0f, {10, 10});
Output d = ops::Const(s.WithOpName("d"), 4.0f, {10, 10});
auto e =
ops::IdentityN(s.WithOpName("e").WithControlDependencies(d), {a, b, c});
auto f = ops::IdentityN(s.WithOpName("f"), {e[0], e[1], e[2]});
auto g = ops::IdentityN(s.WithOpName("g"), {f[1]});
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
}
item.fetch = {"g"};
ModelPruner pruner;
GraphDef output;
TF_ASSERT_OK(pruner.Optimize(nullptr, item, &output));
GraphDef expected;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Const(s.WithOpName("a"), 2.0f, {10, 10});
Output b = ops::Sqrt(s.WithOpName("b"), {a});
auto e = ops::IdentityN(s.WithOpName("e"), {b});
auto g = ops::IdentityN(s.WithOpName("g"), {e[0]});
TF_ASSERT_OK(s.ToGraphDef(&expected));
}
CompareGraphs(expected, output);
auto actual_tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(actual_tensors.size(), 1);
auto expected_tensors = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(expected_tensors.size(), 1);
test::ExpectTensorEqual<float>(actual_tensors[0], expected_tensors[0]);
}
TEST_F(ModelPrunerTest, NoOpPruning) {
GrapplerItem item;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::AddN(s.WithOpName("b"), {a});
Output c = ops::AddN(s.WithOpName("c"), {b});
Output d = ops::AddN(s.WithOpName("d").WithControlDependencies(b), {c});
Output e = ops::AddN(s.WithOpName("e"), {d});
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
}
ModelPruner pruner;
GraphDef output;
TF_ASSERT_OK(pruner.Optimize(nullptr, item, &output));
GraphDef expected;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::AddN(s.WithOpName("b"), {a});
Output c = ops::AddN(s.WithOpName("c"), {a});
Output d = ops::AddN(s.WithOpName("d"), {a});
Output e = ops::AddN(s.WithOpName("e"), {a});
TF_ASSERT_OK(s.ToGraphDef(&expected));
}
CompareGraphs(expected, output);
std::vector<string> fetch = {"e"};
auto actual_tensors = EvaluateNodes(output, fetch);
ASSERT_EQ(actual_tensors.size(), 1);
auto expected_tensors = EvaluateNodes(item.graph, fetch);
ASSERT_EQ(expected_tensors.size(), 1);
test::ExpectTensorEqual<float>(actual_tensors[0], expected_tensors[0]);
}
TEST_F(ModelPrunerTest, PreserveIdentities) {
GrapplerItem item;
{
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
ops::Variable v_in(scope.WithOpName("v_in"), {3}, DT_FLOAT);
ops::Variable v_ctrl(scope.WithOpName("v_ctrl"), {}, DT_BOOL);
ops::Switch s(scope.WithOpName("switch"), v_in, v_ctrl);
Output id0 = ops::Identity(scope.WithOpName("id0"), s.output_true);
Output id1 =
ops::Identity(scope.WithOpName("id1").WithControlDependencies(v_ctrl),
s.output_false);
Output id2 = ops::Identity(scope.WithOpName("id2"), id0);
Output id3 = ops::Identity(
scope.WithOpName("id3").WithControlDependencies(id0), id1);
auto merge = ops::Merge(scope.WithOpName("merge"), {id0, id1});
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
}
item.fetch = {"id2", "id3", "merge"};
ModelPruner pruner;
GraphDef output;
TF_ASSERT_OK(pruner.Optimize(nullptr, item, &output));
CompareGraphs(item.graph, output);
auto v_in_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3}));
Tensor v_ctrl_t(DT_BOOL, TensorShape({}));
v_ctrl_t.flat<bool>()(0) = true;
auto actual_tensors = EvaluateNodes(output, {"merge", "id2"},
{{"v_in", v_in_t}, {"v_ctrl", v_ctrl_t}});
ASSERT_EQ(actual_tensors.size(), 2);
auto expected_tensors = EvaluateNodes(
item.graph, {"merge", "id2"}, {{"v_in", v_in_t}, {"v_ctrl", v_ctrl_t}});
ASSERT_EQ(expected_tensors.size(), 2);
for (int i = 0; i < actual_tensors.size(); i++) {
test::ExpectTensorEqual<float>(actual_tensors[i], expected_tensors[i]);
}
}
TEST_F(ModelPrunerTest, PruningSkipsRefOutputs) {
GrapplerItem item;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Variable(s.WithOpName("a"), {}, DT_INT64);
Output b = ops::Identity(s.WithOpName("b"), a);
Output c = ops::Identity(s.WithOpName("c"), b);
Output d = ops::Identity(s.WithOpName("d"), c);
Output e = ops::Identity(s.WithOpName("e"), d);
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
}
ModelPruner pruner;
GraphDef output;
TF_ASSERT_OK(pruner.Optimize(nullptr, item, &output));
GraphDef expected;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Variable(s.WithOpName("a"), {}, DT_INT64);
Output b = ops::Identity(s.WithOpName("b"), a);
Output c = ops::Identity(s.WithOpName("c"), b);
Output d = ops::Identity(s.WithOpName("d"), b);
Output e = ops::Identity(s.WithOpName("e"), b);
TF_ASSERT_OK(s.ToGraphDef(&expected));
}
CompareGraphs(expected, output);
std::vector<string> fetch = {"e"};
auto a_t = GenerateRandomTensor<DT_INT64>(TensorShape({}));
auto actual_tensors = EvaluateNodes(output, fetch, {{"a", a_t}});
ASSERT_EQ(actual_tensors.size(), 1);
auto expected_tensors = EvaluateNodes(item.graph, fetch, {{"a", a_t}});
ASSERT_EQ(expected_tensors.size(), 1);
test::ExpectTensorEqual<int64_t>(actual_tensors[0], expected_tensors[0]);
}
TEST_F(ModelPrunerTest, PruningPreservesFetch) {
GrapplerItem item;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Sqrt(s.WithOpName("b"), {a});
Output c = ops::Identity(s.WithOpName("c"), b);
Output d = ops::Identity(s.WithOpName("d"), c);
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
}
item.fetch = {"c"};
ModelPruner pruner;
GraphDef output;
TF_ASSERT_OK(pruner.Optimize(nullptr, item, &output));
GraphDef expected;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Sqrt(s.WithOpName("b"), {a});
Output c = ops::Identity(s.WithOpName("c"), b);
TF_ASSERT_OK(s.ToGraphDef(&expected));
}
CompareGraphs(expected, output);
auto actual_tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(actual_tensors.size(), 1);
auto expected_tensors = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(expected_tensors.size(), 1);
test::ExpectTensorEqual<float>(actual_tensors[0], expected_tensors[0]);
}
TEST_F(ModelPrunerTest, PruningPreservesCrossDeviceIdentity) {
GrapplerItem item;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output c =
ops::Const(s.WithOpName("c").WithDevice(kDeviceCPU0), 0.0f, {10, 10});
Output i1 = ops::Identity(s.WithOpName("i1").WithDevice(kDeviceGPU0), c);
Output a1 = ops::Identity(s.WithOpName("a1").WithDevice(kDeviceGPU0), i1);
Output a2 = ops::Identity(s.WithOpName("a2").WithDevice(kDeviceGPU0), i1);
Output i2 = ops::Identity(s.WithOpName("i2").WithDevice(kDeviceCPU0), c);
Output a3 = ops::Identity(s.WithOpName("a3").WithDevice(kDeviceGPU0), i2);
Output a4 = ops::Identity(s.WithOpName("a4").WithDevice(kDeviceGPU0), i2);
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
}
item.fetch = {"a1", "a2", "a3", "a4"};
ModelPruner pruner;
GraphDef output;
TF_ASSERT_OK(pruner.Optimize(nullptr, item, &output));
GraphDef expected;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output c =
ops::Const(s.WithOpName("c").WithDevice(kDeviceCPU0), 0.0f, {10, 10});
Output i1 = ops::Identity(s.WithOpName("i1").WithDevice(kDeviceGPU0), c);
Output a1 = ops::Identity(s.WithOpName("a1").WithDevice(kDeviceGPU0), i1);
Output a2 = ops::Identity(s.WithOpName("a2").WithDevice(kDeviceGPU0), i1);
Output a3 = ops::Identity(s.WithOpName("a3").WithDevice(kDeviceGPU0), c);
Output a4 = ops::Identity(s.WithOpName("a4").WithDevice(kDeviceGPU0), c);
TF_ASSERT_OK(s.ToGraphDef(&expected));
}
CompareGraphs(expected, output);
if (GetNumAvailableGPUs() > 0) {
auto actual_tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(actual_tensors.size(), 4);
auto expected_tensors = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(expected_tensors.size(), 4);
for (int i = 0; i < actual_tensors.size(); i++) {
test::ExpectTensorNear<float>(actual_tensors[i], expected_tensors[i],
1e-6);
}
}
}
TEST_F(ModelPrunerTest, PruneNoOpsWithoutInputs) {
GrapplerItem item;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
auto n1 = ops::NoOp(s.WithOpName("no_op1"));
Output c1 = ops::Const(s.WithOpName("c1"), 0.0f, {1, 1});
auto n2 = ops::NoOp(s.WithOpName("no_op2").WithControlDependencies(c1));
Output id1 = ops::Identity(
s.WithOpName("id1").WithControlDependencies({n1, n2}), c1);
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
}
item.fetch = {"id1"};
ModelPruner pruner;
GraphDef output;
TF_ASSERT_OK(pruner.Optimize(nullptr, item, &output));
GraphDef expected;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output c1 = ops::Const(s.WithOpName("c1"), 0.0f, {1, 1});
auto n2 = ops::NoOp(s.WithOpName("no_op2").WithControlDependencies(c1));
Output id1 =
ops::Identity(s.WithOpName("id1").WithControlDependencies({n2}), c1);
TF_ASSERT_OK(s.ToGraphDef(&expected));
}
CompareGraphs(expected, output);
}
TEST_F(ModelPrunerTest, PruneConstantsWithoutInputsAndOutputs) {
GrapplerItem item;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output c0 = ops::Const(s.WithOpName("c0"), 0.0f, {1, 1});
Output c1 = ops::Const(s.WithOpName("c1"), 1.0f, {1, 1});
Output c2 = ops::Const(s.WithOpName("c2").WithControlDependencies({c0}),
2.0f, {1, 1});
Output c3 = ops::Const(s.WithOpName("c3"), 3.0f, {1, 1});
Output id1 = ops::Identity(s.WithOpName("id1")
.WithControlDependencies({c2})
.WithControlDependencies({c3}),
c0);
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
}
item.fetch = {"id1"};
ModelPruner pruner;
GraphDef output;
Status status = pruner.Optimize(nullptr, item, &output);
TF_ASSERT_OK(status);
GraphDef expected;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output c0 = ops::Const(s.WithOpName("c0"), 0.0f, {1, 1});
Output c2 = ops::Const(s.WithOpName("c2").WithControlDependencies({c0}),
2.0f, {1, 1});
Output id1 =
ops::Identity(s.WithOpName("id1").WithControlDependencies({c2}), c0);
TF_ASSERT_OK(s.ToGraphDef(&expected));
}
CompareGraphs(expected, output);
}
}
}
} |
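A minimal usage sketch for the ModelPruner entry above, assuming the standard Grappler headers are on the include path; PruneAndReport is a hypothetical helper, not part of the TensorFlow sources.

// Hypothetical driver, for illustration only.
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/model_pruner.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/logging.h"

namespace tensorflow {
namespace grappler {

Status PruneAndReport(const GrapplerItem& item, GraphDef* output) {
  ModelPruner pruner;
  // ModelPruner never dereferences the cluster, so nullptr is accepted here.
  TF_RETURN_IF_ERROR(pruner.Optimize(/*cluster=*/nullptr, item, output));
  LOG(INFO) << "Pruning removed "
            << item.graph.node_size() - output->node_size() << " nodes.";
  return absl::OkStatus();
}

}  // namespace grappler
}  // namespace tensorflow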
1,380 | cpp | tensorflow/tensorflow | evaluation_utils | tensorflow/core/grappler/optimizers/evaluation_utils.cc | tensorflow/core/grappler/optimizers/evaluation_utils_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_EVALUATION_UTILS_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_EVALUATION_UTILS_H_
#define EIGEN_USE_THREADS
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
namespace Eigen {
class ThreadPoolInterface;
class ThreadPoolWrapper;
}
namespace tensorflow {
namespace grappler {
class DeviceSimple : public DeviceBase {
public:
DeviceSimple();
  ~DeviceSimple() override;
Status MakeTensorFromProto(const TensorProto& tensor_proto,
const AllocatorAttributes alloc_attrs,
Tensor* tensor) override;
Allocator* GetAllocator(AllocatorAttributes attr) override {
return cpu_allocator();
}
const std::string& device_type() const override { return device_type_; }
private:
DeviceBase::CpuWorkerThreads eigen_worker_threads_;
std::unique_ptr<Eigen::ThreadPoolDevice> eigen_device_;
const std::string device_type_ = DEVICE_CPU;
};
Status EvaluateNode(const NodeDef& node,
const gtl::InlinedVector<TensorValue, 4>& inputs,
DeviceBase* cpu_device, ResourceMgr* resource_mgr,
gtl::InlinedVector<TensorValue, 4>* output);
}
}
#endif
#include "tensorflow/core/grappler/optimizers/evaluation_utils.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/denormal.h"
#include "tensorflow/core/platform/setround.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace grappler {
using TensorVector = gtl::InlinedVector<TensorValue, 4>;
const int kDeviceSimpleThreads = 2;
DeviceSimple::DeviceSimple() : DeviceBase(Env::Default()) {
eigen_worker_threads_.num_threads = kDeviceSimpleThreads;
eigen_worker_threads_.workers = new thread::ThreadPool(
Env::Default(), "evaluation_utils", eigen_worker_threads_.num_threads);
eigen_device_.reset(new Eigen::ThreadPoolDevice(
eigen_worker_threads_.workers->AsEigenThreadPool(),
eigen_worker_threads_.num_threads));
set_tensorflow_cpu_worker_threads(&eigen_worker_threads_);
set_eigen_cpu_device(eigen_device_.get());
}
DeviceSimple::~DeviceSimple() {
eigen_device_.reset();
delete eigen_worker_threads_.workers;
}
Status DeviceSimple::MakeTensorFromProto(const TensorProto& tensor_proto,
const AllocatorAttributes alloc_attrs,
Tensor* tensor) {
Tensor parsed(tensor_proto.dtype());
if (!parsed.FromProto(cpu_allocator(), tensor_proto)) {
return errors::InvalidArgument("Cannot parse tensor from tensor_proto.");
}
*tensor = parsed;
return absl::OkStatus();
}
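// Evaluates a single node eagerly on the host: instantiates the CPU kernel,
// populates a minimal OpKernelContext whose outputs are pinned to host
// memory, runs Compute(), and releases the outputs to the caller, who takes
// ownership of the tensors.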
Status EvaluateNode(const NodeDef& node, const TensorVector& inputs,
DeviceBase* cpu_device, ResourceMgr* resource_mgr,
TensorVector* output) {
Status status;
std::unique_ptr<DeviceBase> device;
if (cpu_device == nullptr) {
device.reset(new DeviceSimple());
cpu_device = device.get();
}
std::unique_ptr<OpKernel> op_kernel(
CreateOpKernel(DEVICE_CPU, cpu_device, cpu_device->GetAllocator({}), node,
TF_GRAPH_DEF_VERSION, &status));
TF_RETURN_IF_ERROR(status);
OpKernelContext::Params params;
params.device = cpu_device;
params.frame_iter = FrameAndIter(0, 0);
params.inputs = inputs;
params.op_kernel = op_kernel.get();
params.resource_manager = resource_mgr;
gtl::InlinedVector<AllocatorAttributes, 4> output_attrs;
const int num_outputs = op_kernel->num_outputs();
for (int i = 0; i < num_outputs; i++) {
AllocatorAttributes attr;
attr.set_on_host(true);
output_attrs.push_back(attr);
}
params.output_attr_array = output_attrs.data();
OpKernelContext op_context(¶ms);
op_kernel->Compute(&op_context);
for (int i = 0; i < num_outputs; i++) {
output->push_back(op_context.release_output(i));
}
return op_context.status();
}
}
} | #include "tensorflow/core/platform/cpu_info.h"
#define EIGEN_USE_THREADS
#include "unsupported/Eigen/CXX11/ThreadPool"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/grappler/optimizers/evaluation_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
TEST(EvaluationUtilsTest, DeviceSimple_BasicProperties) {
DeviceSimple dsimple;
ASSERT_TRUE(dsimple.has_eigen_cpu_device());
const Eigen::ThreadPoolInterface* pool =
dsimple.eigen_cpu_device()->getPool();
ASSERT_NE(pool, nullptr);
}
TEST(EvaluationUtilsTest, DeviceSimple_MakeTensorFromProto) {
DeviceSimple dsimple;
TensorProto proto;
Tensor tensor;
EXPECT_FALSE(dsimple.MakeTensorFromProto(proto, {}, &tensor).ok());
Tensor original(tensorflow::DT_INT16, TensorShape{4, 2});
original.flat<int16>().setRandom();
original.AsProtoTensorContent(&proto);
TF_ASSERT_OK(dsimple.MakeTensorFromProto(proto, {}, &tensor));
ASSERT_EQ(tensor.dtype(), original.dtype());
ASSERT_EQ(tensor.shape(), original.shape());
auto buf0 = original.flat<int16>();
auto buf1 = tensor.flat<int16>();
ASSERT_EQ(buf0.size(), buf1.size());
for (int i = 0; i < buf0.size(); ++i) {
EXPECT_EQ(buf0(i), buf1(i));
}
}
}
} |
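A minimal sketch of driving EvaluateNode from the entry above on a lone Const node; EvaluateLoneConst is hypothetical, and passing a null device relies on the fallback to an internally allocated DeviceSimple shown in the implementation.

// Hypothetical driver, for illustration only.
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/grappler/optimizers/evaluation_utils.h"
#include "tensorflow/core/lib/core/errors.h"

namespace tensorflow {
namespace grappler {

Status EvaluateLoneConst() {
  Tensor value(DT_FLOAT, TensorShape({}));
  value.scalar<float>()() = 42.0f;
  NodeDef node;
  TF_RETURN_IF_ERROR(NodeDefBuilder("my_const", "Const")
                         .Attr("dtype", DT_FLOAT)
                         .Attr("value", value)
                         .Finalize(&node));
  gtl::InlinedVector<TensorValue, 4> inputs;  // Const consumes no inputs.
  gtl::InlinedVector<TensorValue, 4> outputs;
  // A null cpu_device makes EvaluateNode allocate a DeviceSimple internally.
  TF_RETURN_IF_ERROR(EvaluateNode(node, inputs, /*cpu_device=*/nullptr,
                                  /*resource_mgr=*/nullptr, &outputs));
  // The released outputs are owned by the caller and must be deleted.
  for (const auto& out : outputs) delete out.tensor;
  return absl::OkStatus();
}

}  // namespace grappler
}  // namespace tensorflow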
1,381 | cpp | tensorflow/tensorflow | custom_graph_optimizer_registry | tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc | tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_CUSTOM_GRAPH_OPTIMIZER_REGISTRY_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_CUSTOM_GRAPH_OPTIMIZER_REGISTRY_H_
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
namespace tensorflow {
namespace grappler {
struct ConfigList {
ConfigList() {}
ConfigList(bool disable_model_pruning,
std::unordered_map<string, RewriterConfig_Toggle> config)
: disable_model_pruning(disable_model_pruning),
toggle_config(std::move(config)) {}
bool operator==(const ConfigList& other) const {
return (disable_model_pruning == other.disable_model_pruning) &&
(toggle_config == other.toggle_config);
}
  // Default-initialized so the no-argument constructor does not leave an
  // indeterminate bool behind for operator== and copies.
  bool disable_model_pruning = false;
std::unordered_map<string, RewriterConfig_Toggle> toggle_config;
};
class CustomGraphOptimizerRegistry {
public:
static std::unique_ptr<CustomGraphOptimizer> CreateByNameOrNull(
const string& name);
static std::vector<string> GetRegisteredOptimizers();
typedef std::function<CustomGraphOptimizer*()> Creator;
static void RegisterOptimizerOrDie(const Creator& optimizer_creator,
const string& name);
};
class CustomGraphOptimizerRegistrar {
public:
explicit CustomGraphOptimizerRegistrar(
const CustomGraphOptimizerRegistry::Creator& creator,
const string& name) {
CustomGraphOptimizerRegistry::RegisterOptimizerOrDie(creator, name);
}
};
#define REGISTER_GRAPH_OPTIMIZER_AS(MyCustomGraphOptimizerClass, name) \
namespace { \
static ::tensorflow::grappler::CustomGraphOptimizerRegistrar \
MyCustomGraphOptimizerClass##_registrar( \
[]() { return new MyCustomGraphOptimizerClass; }, (name)); \
}
#define REGISTER_GRAPH_OPTIMIZER(MyCustomGraphOptimizerClass) \
REGISTER_GRAPH_OPTIMIZER_AS(MyCustomGraphOptimizerClass, \
#MyCustomGraphOptimizerClass)
class PluginGraphOptimizerRegistry {
public:
static std::vector<std::unique_ptr<CustomGraphOptimizer>> CreateOptimizers(
const std::set<string>& device_types);
typedef std::function<CustomGraphOptimizer*()> Creator;
static ConfigList GetPluginConfigs(bool use_plugin_optimizers,
const std::set<string>& device_types);
static void RegisterPluginOptimizerOrDie(const Creator& optimizer_creator,
const std::string& device_type,
ConfigList& configs);
static void PrintPluginConfigsIfConflict(
const std::set<string>& device_types);
static bool IsConfigsConflict(ConfigList& user_config,
ConfigList& plugin_config);
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include <string>
#include <unordered_map>
#include "absl/base/call_once.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace grappler {
namespace {
typedef std::unordered_map<string, CustomGraphOptimizerRegistry::Creator>
RegistrationMap;
RegistrationMap* registered_optimizers = nullptr;
RegistrationMap* GetRegistrationMap() {
if (registered_optimizers == nullptr)
registered_optimizers = new RegistrationMap;
return registered_optimizers;
}
typedef std::unordered_map<string, PluginGraphOptimizerRegistry::Creator>
PluginRegistrationMap;
PluginRegistrationMap* GetPluginRegistrationMap() {
static PluginRegistrationMap* registered_plugin_optimizers =
new PluginRegistrationMap;
return registered_plugin_optimizers;
}
typedef std::unordered_map<string, ConfigList> PluginConfigMap;
PluginConfigMap* GetPluginConfigMap() {
static PluginConfigMap* plugin_config_map = new PluginConfigMap;
return plugin_config_map;
}
const ConfigList& DefaultPluginConfigs() {
static ConfigList* default_plugin_configs = new ConfigList(
false,
{{"implementation_selector", RewriterConfig::ON},
{"function_optimization", RewriterConfig::ON},
{"common_subgraph_elimination", RewriterConfig::ON},
{"arithmetic_optimization", RewriterConfig::ON},
{"debug_stripper", RewriterConfig::ON},
{"constant_folding", RewriterConfig::ON},
{"shape_optimization", RewriterConfig::ON},
{"auto_mixed_precision", RewriterConfig::ON},
{"auto_mixed_precision_onednn_bfloat16", RewriterConfig::ON},
{"auto_mixed_precision_mkl", RewriterConfig::ON},
{"auto_mixed_precision_cpu", RewriterConfig::ON},
{"pin_to_host_optimization", RewriterConfig::ON},
{"layout_optimizer", RewriterConfig::ON},
{"remapping", RewriterConfig::ON},
{"loop_optimization", RewriterConfig::ON},
{"dependency_optimization", RewriterConfig::ON},
{"auto_parallel", RewriterConfig::ON},
{"memory_optimization", RewriterConfig::ON},
{"scoped_allocator_optimization", RewriterConfig::ON}});
return *default_plugin_configs;
}
}
std::unique_ptr<CustomGraphOptimizer>
CustomGraphOptimizerRegistry::CreateByNameOrNull(const string& name) {
const auto it = GetRegistrationMap()->find(name);
if (it == GetRegistrationMap()->end()) return nullptr;
return std::unique_ptr<CustomGraphOptimizer>(it->second());
}
std::vector<string> CustomGraphOptimizerRegistry::GetRegisteredOptimizers() {
std::vector<string> optimizer_names;
optimizer_names.reserve(GetRegistrationMap()->size());
for (const auto& opt : *GetRegistrationMap())
optimizer_names.emplace_back(opt.first);
return optimizer_names;
}
void CustomGraphOptimizerRegistry::RegisterOptimizerOrDie(
const Creator& optimizer_creator, const string& name) {
const auto it = GetRegistrationMap()->find(name);
if (it != GetRegistrationMap()->end()) {
LOG(FATAL) << "CustomGraphOptimizer is registered twice: " << name;
}
GetRegistrationMap()->insert({name, optimizer_creator});
}
std::vector<std::unique_ptr<CustomGraphOptimizer>>
PluginGraphOptimizerRegistry::CreateOptimizers(
const std::set<string>& device_types) {
std::vector<std::unique_ptr<CustomGraphOptimizer>> optimizer_list;
for (auto it = GetPluginRegistrationMap()->begin();
it != GetPluginRegistrationMap()->end(); ++it) {
if (device_types.find(it->first) == device_types.end()) continue;
static absl::once_flag plugin_optimizer_flag;
absl::call_once(plugin_optimizer_flag, [&]() {
LOG(INFO) << "Plugin optimizer for device_type " << it->first
<< " is enabled.";
});
optimizer_list.emplace_back(
std::unique_ptr<CustomGraphOptimizer>(it->second()));
}
return optimizer_list;
}
void PluginGraphOptimizerRegistry::RegisterPluginOptimizerOrDie(
const Creator& optimizer_creator, const std::string& device_type,
ConfigList& configs) {
auto ret = GetPluginConfigMap()->insert({device_type, configs});
if (!ret.second) {
LOG(FATAL) << "PluginGraphOptimizer with device_type "
<< device_type << " is registered twice.";
}
GetPluginRegistrationMap()->insert({device_type, optimizer_creator});
}
void PluginGraphOptimizerRegistry::PrintPluginConfigsIfConflict(
const std::set<string>& device_types) {
bool init = false, conflict = false;
ConfigList plugin_configs;
for (const auto& device_type : device_types) {
const auto it = GetPluginConfigMap()->find(device_type);
if (it == GetPluginConfigMap()->end()) continue;
auto cur_plugin_configs = it->second;
if (!init) {
plugin_configs = cur_plugin_configs;
init = true;
} else {
if (!(plugin_configs == cur_plugin_configs)) {
conflict = true;
break;
}
}
}
if (!conflict) return;
LOG(WARNING) << "Plugins have conflicting configs. Potential performance "
"regression may happen.";
for (const auto& device_type : device_types) {
const auto it = GetPluginConfigMap()->find(device_type);
if (it == GetPluginConfigMap()->end()) continue;
auto cur_plugin_configs = it->second;
    string logs;
strings::StrAppend(&logs, "disable_model_pruning\t\t",
cur_plugin_configs.disable_model_pruning, "\n");
for (auto const& pair : cur_plugin_configs.toggle_config) {
strings::StrAppend(&logs, pair.first, string(32 - pair.first.size(), ' '),
(pair.second != RewriterConfig::OFF), "\n");
}
LOG(WARNING) << "Plugin's configs for device_type " << device_type << ":\n"
<< logs;
}
}
ConfigList PluginGraphOptimizerRegistry::GetPluginConfigs(
bool use_plugin_optimizers, const std::set<string>& device_types) {
if (!use_plugin_optimizers) return DefaultPluginConfigs();
ConfigList ret_plugin_configs = DefaultPluginConfigs();
for (const auto& device_type : device_types) {
const auto it = GetPluginConfigMap()->find(device_type);
if (it == GetPluginConfigMap()->end()) continue;
auto cur_plugin_configs = it->second;
if (cur_plugin_configs.disable_model_pruning == true)
ret_plugin_configs.disable_model_pruning = true;
for (auto& pair : cur_plugin_configs.toggle_config) {
if (cur_plugin_configs.toggle_config[pair.first] == RewriterConfig::OFF)
ret_plugin_configs.toggle_config[pair.first] = RewriterConfig::OFF;
}
}
return ret_plugin_configs;
}
bool PluginGraphOptimizerRegistry::IsConfigsConflict(
ConfigList& user_config, ConfigList& plugin_config) {
if (plugin_config == DefaultPluginConfigs()) return false;
if (user_config.disable_model_pruning != plugin_config.disable_model_pruning)
return true;
for (auto& pair : user_config.toggle_config) {
if ((user_config.toggle_config[pair.first] == RewriterConfig::ON) &&
(plugin_config.toggle_config[pair.first] == RewriterConfig::OFF))
return true;
}
return false;
}
}
} | #include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
static const char* kTestOptimizerName = "Test";
static const char* kTestPluginOptimizerName = "TestPlugin";
class TestGraphOptimizer : public CustomGraphOptimizer {
public:
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
string name() const override { return kTestOptimizerName; }
bool UsesFunctionLibrary() const override { return false; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override {
return absl::OkStatus();
}
};
REGISTER_GRAPH_OPTIMIZER_AS(TestGraphOptimizer, "StaticRegister");
TEST(CustomGraphOptimizerRegistryTest, DynamicRegistration) {
std::vector<string> optimizers =
CustomGraphOptimizerRegistry::GetRegisteredOptimizers();
std::unique_ptr<const CustomGraphOptimizer> test_optimizer;
ASSERT_EQ(
0, std::count(optimizers.begin(), optimizers.end(), "DynamicRegister"));
test_optimizer =
CustomGraphOptimizerRegistry::CreateByNameOrNull("DynamicRegister");
EXPECT_EQ(nullptr, test_optimizer);
CustomGraphOptimizerRegistry::RegisterOptimizerOrDie(
[]() { return new TestGraphOptimizer; }, "DynamicRegister");
optimizers = CustomGraphOptimizerRegistry::GetRegisteredOptimizers();
ASSERT_EQ(
1, std::count(optimizers.begin(), optimizers.end(), "DynamicRegister"));
test_optimizer =
CustomGraphOptimizerRegistry::CreateByNameOrNull("DynamicRegister");
ASSERT_NE(nullptr, test_optimizer);
EXPECT_EQ(kTestOptimizerName, test_optimizer->name());
}
TEST(CustomGraphOptimizerRegistryTest, StaticRegistration) {
const std::vector<string> optimizers =
CustomGraphOptimizerRegistry::GetRegisteredOptimizers();
EXPECT_EQ(1,
std::count(optimizers.begin(), optimizers.end(), "StaticRegister"));
std::unique_ptr<const CustomGraphOptimizer> test_optimizer =
CustomGraphOptimizerRegistry::CreateByNameOrNull("StaticRegister");
ASSERT_NE(nullptr, test_optimizer);
EXPECT_EQ(kTestOptimizerName, test_optimizer->name());
}
TEST(GraphOptimizerRegistryTest, CrashesOnDuplicateRegistration) {
const auto creator = []() { return new TestGraphOptimizer; };
EXPECT_DEATH(CustomGraphOptimizerRegistry::RegisterOptimizerOrDie(
creator, "StaticRegister"),
"twice");
}
class TestPluginGraphOptimizer : public CustomGraphOptimizer {
public:
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
string name() const override { return kTestPluginOptimizerName; }
bool UsesFunctionLibrary() const override { return false; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override {
return absl::OkStatus();
}
};
TEST(PluginGraphOptimizerRegistryTest, CrashesOnDuplicateRegistration) {
const auto creator = []() { return new TestPluginGraphOptimizer; };
ConfigList config_list;
PluginGraphOptimizerRegistry::RegisterPluginOptimizerOrDie(creator, "GPU",
config_list);
PluginGraphOptimizerRegistry::RegisterPluginOptimizerOrDie(creator, "CPU",
config_list);
EXPECT_DEATH(PluginGraphOptimizerRegistry::RegisterPluginOptimizerOrDie(
creator, "GPU", config_list),
"twice");
}
}
}
} |
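A sketch of the static registration macro and the lookup path from the entry above; MySweepOptimizer is illustrative and not a real TensorFlow optimizer.

// Hypothetical example, for illustration only.
#include <memory>

#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"

namespace tensorflow {
namespace grappler {

class MySweepOptimizer : public CustomGraphOptimizer {
 public:
  Status Init(const tensorflow::RewriterConfig_CustomGraphOptimizer* config)
      override {
    return absl::OkStatus();
  }
  string name() const override { return "my_sweep"; }
  bool UsesFunctionLibrary() const override { return false; }
  Status Optimize(Cluster* cluster, const GrapplerItem& item,
                  GraphDef* optimized_graph) override {
    *optimized_graph = item.graph;  // No-op rewrite, for illustration.
    return absl::OkStatus();
  }
};

// Expands to a static CustomGraphOptimizerRegistrar keyed by the class name.
REGISTER_GRAPH_OPTIMIZER(MySweepOptimizer);

std::unique_ptr<CustomGraphOptimizer> LookUpMySweep() {
  // Returns nullptr if "MySweepOptimizer" was never registered.
  return CustomGraphOptimizerRegistry::CreateByNameOrNull("MySweepOptimizer");
}

}  // namespace grappler
}  // namespace tensorflow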
1,382 | cpp | tensorflow/tensorflow | auto_parallel | tensorflow/core/grappler/optimizers/auto_parallel.cc | tensorflow/core/grappler/optimizers/auto_parallel_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_AUTO_PARALLEL_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_AUTO_PARALLEL_H_
#include "tensorflow/core/framework/variable.pb.h"
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace grappler {
class AutoParallel : public GraphOptimizer {
public:
  explicit AutoParallel(int num_replicas) : num_replicas_(num_replicas) {
    CHECK_GE(num_replicas_, 2);
  }
~AutoParallel() override {}
  string name() const override { return "autoparallel"; }
bool UsesFunctionLibrary() const override { return false; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* output) override;
private:
GraphDef graph_;
std::map<string, NodeDef*> all_nodes_;
std::set<string> apply_gradients_nodes_;
std::set<string> replica_nodes_;
std::set<string> shared_nodes_;
const GrapplerItem* item_;
int num_replicas_;
int num_gpus_;
Status Initialize(const GrapplerItem& item);
NodeDef* AddNodeDivConst();
NodeDef* AddNodeDiv(const string& name, const string& input_a,
const string& input_b);
NodeDef* AddNodeControl(const string& name, const std::set<string>& deps,
GraphDef* graph);
bool NotSharedNode(const string& name);
void AddSharedNodes(GraphDef* graph);
void AddOneReplica(GraphDef* graph, int number);
void BuildGraph(GraphDef* graph);
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/auto_parallel.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/transitive_fanin.h"
#include "tensorflow/core/lib/strings/strcat.h"
namespace tensorflow {
namespace grappler {
const char kAutoParallelPrefix[] = "AutoParallel";
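// Emits a scalar float Const holding num_replicas_; each replica's gradient
// is divided by this value before the apply op, so the per-replica updates to
// the shared variables sum to an averaged gradient.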
NodeDef* AutoParallel::AddNodeDivConst() {
NodeDef* node = graph_.add_node();
node->set_name(strings::StrCat(kAutoParallelPrefix, "-Div-Const"));
node->set_op("Const");
AttrValue attr_data_type;
attr_data_type.set_type(DT_FLOAT);
node->mutable_attr()->insert({"dtype", attr_data_type});
AttrValue attr_tensor;
auto tensor = attr_tensor.mutable_tensor();
tensor->add_float_val(static_cast<float>(num_replicas_));
tensor->set_dtype(DT_FLOAT);
node->mutable_attr()->insert({"value", attr_tensor});
return node;
}
NodeDef* AutoParallel::AddNodeDiv(const string& name, const string& input_a,
const string& input_b) {
NodeDef* node = graph_.add_node();
node->set_name(strings::StrCat(kAutoParallelPrefix, "-Div-", name));
node->set_op("RealDiv");
node->add_input(input_a);
node->add_input(input_b);
AttrValue attr_type;
attr_type.set_type(DT_FLOAT);
node->mutable_attr()->insert({"T", attr_type});
return node;
}
NodeDef* AutoParallel::AddNodeControl(const string& name,
const std::set<string>& deps,
GraphDef* graph) {
NodeDef* node = graph->add_node();
node->set_name(name);
node->set_op("NoOp");
for (const auto& dep : deps) {
node->add_input(strings::StrCat("^", dep));
}
return node;
}
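// Partitions the training graph: nodes reachable from the fetches become
// per-replica nodes unless they are variables, init ops, or sit in the input
// pipeline upstream of the first dequeue; those stay shared across replicas.
// Gradient inputs of the recognized apply ops are rewired through the
// division node added above.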
Status AutoParallel::Initialize(const GrapplerItem& item) {
num_gpus_ = GetNumAvailableGPUs();
LOG(INFO) << "Number of GPUs: " << num_gpus_;
item_ = &item;
graph_ = item.graph;
LOG(INFO) << "Original graph size: " << graph_.node_size();
if (item.fetch.empty()) {
return Status(absl::StatusCode::kInvalidArgument,
"No fetch nodes provided.");
}
if (item.MainVariables().empty()) {
return Status(absl::StatusCode::kInvalidArgument, "No variables provided.");
}
for (const auto& init : item.init_ops) {
VLOG(1) << "Init node: " << init;
}
for (const auto& fetch : item.fetch) {
VLOG(1) << "Fetch node: " << fetch;
}
for (const auto& var : item.MainVariables()) {
VLOG(2) << "Variable: " << var->name();
}
const std::set<string> apply_gradients_ops = {"ApplyGradientDescent",
"ApplyProximalGradientDescent",
"ApplyAdadelta",
"ApplyAdagrad",
"ApplyProximalAdagrad",
"ApplyAdagradDA",
"ApplyFtrl",
"ApplyMomentum",
"ApplyAdam",
"ApplyRMSProp",
"ApplyCenteredRMSProp"};
for (int i = 0; i < graph_.node_size(); i++) {
all_nodes_.insert(
std::make_pair(graph_.node(i).name(), graph_.mutable_node(i)));
if (apply_gradients_ops.find(graph_.node(i).op()) !=
apply_gradients_ops.end()) {
apply_gradients_nodes_.insert(graph_.node(i).name());
VLOG(2) << "Apply gradients node: " << graph_.node(i).name();
}
}
auto div_const_node = AddNodeDivConst();
all_nodes_.insert(std::make_pair(div_const_node->name(), div_const_node));
std::map<string, int> gradient_pos = {{"ApplyGradientDescent", 2},
{"ApplyProximalGradientDescent", 4},
{"ApplyAdadelta", 6},
{"ApplyAdagrad", 3},
{"ApplyProximalAdagrad", 5},
{"ApplyAdagradDA", 3},
{"ApplyFtrl", 3},
{"ApplyMomentum", 3},
{"ApplyAdam", 9},
{"ApplyRMSProp", 7},
{"ApplyCenteredRMSProp", 8}};
for (const auto& apply_gradient_node_name : apply_gradients_nodes_) {
auto apply_gradients_op = all_nodes_[apply_gradient_node_name]->op();
auto apply_gradients_node = all_nodes_[apply_gradient_node_name];
auto div_node = AddNodeDiv(
apply_gradient_node_name,
apply_gradients_node->input(gradient_pos[apply_gradients_op]),
div_const_node->name());
all_nodes_.insert(std::make_pair(div_node->name(), div_node));
*apply_gradients_node->mutable_input(gradient_pos[apply_gradients_op]) =
div_node->name();
}
LOG(INFO) << "Graph size after adding div nodes: " << all_nodes_.size();
std::vector<const NodeDef*> train_nodes;
TF_RETURN_IF_ERROR(ComputeTransitiveFanin(graph_, item.fetch, &train_nodes));
LOG(INFO) << "Number of training nodes: " << train_nodes.size();
const NodeDef* dequeue_node = nullptr;
for (const auto& train_node : train_nodes) {
if (IsDequeueOp(*train_node)) {
dequeue_node = train_node;
break;
}
}
std::vector<const NodeDef*> input_nodes;
if (dequeue_node) {
LOG(INFO) << "Dequeue node: " << dequeue_node->name();
TF_RETURN_IF_ERROR(ComputeTransitiveFanin(graph_, {dequeue_node->name()},
{}, &input_nodes));
}
LOG(INFO) << "Number of input nodes: " << input_nodes.size();
std::set<string> dont_replicate_nodes;
for (const auto& variable : item.MainVariables()) {
dont_replicate_nodes.insert(variable->name());
}
for (const auto& init : item.init_ops) {
dont_replicate_nodes.insert(NodeName(init));
}
for (const auto& input_node : input_nodes) {
if (input_node->name() != dequeue_node->name()) {
dont_replicate_nodes.insert(input_node->name());
}
}
for (const auto& node : train_nodes) {
if (dont_replicate_nodes.find(node->name()) == dont_replicate_nodes.end()) {
replica_nodes_.insert(node->name());
}
}
LOG(INFO) << "Number of replica nodes: " << replica_nodes_.size();
for (const auto& node : all_nodes_) {
if (replica_nodes_.find(node.first) == replica_nodes_.end()) {
shared_nodes_.insert(node.first);
}
}
LOG(INFO) << "Number of shared nodes: " << shared_nodes_.size();
return absl::OkStatus();
}
bool AutoParallel::NotSharedNode(const string& name) {
return shared_nodes_.find(name) == shared_nodes_.end();
}
void AutoParallel::AddSharedNodes(GraphDef* graph) {
string prefix = strings::StrCat(kAutoParallelPrefix, "-Replica-", 0);
for (const auto& node : shared_nodes_) {
auto new_node = graph->add_node();
*new_node = *all_nodes_[node];
for (int i = 0; i < new_node->input_size(); i++) {
if (NotSharedNode(NodeName(new_node->input(i)))) {
string new_name = AddPrefixToNodeName(new_node->input(i), prefix);
*new_node->mutable_input(i) = new_name;
}
}
}
}
void AutoParallel::AddOneReplica(GraphDef* graph, int number) {
string prefix = strings::StrCat(kAutoParallelPrefix, "-Replica-", number);
for (const auto& node : replica_nodes_) {
auto new_node = graph->add_node();
*new_node = *all_nodes_[node];
if (NotSharedNode(new_node->name())) {
new_node->set_name(AddPrefixToNodeName(new_node->name(), prefix));
if (num_gpus_ > 0) {
new_node->set_device(strings::StrCat("/gpu:", number % num_gpus_));
}
for (int i = 0; i < new_node->input_size(); i++) {
if (NotSharedNode(NodeName(new_node->input(i)))) {
string new_name = AddPrefixToNodeName(new_node->input(i), prefix);
*new_node->mutable_input(i) = new_name;
}
}
}
}
}
void AutoParallel::BuildGraph(GraphDef* graph) {
AddSharedNodes(graph);
for (int i = 0; i < num_replicas_; i++) {
AddOneReplica(graph, i);
}
std::set<string> fetches;
for (size_t i = 0; i < item_->fetch.size(); i++) {
for (int j = 0; j < num_replicas_; j++) {
string prefix = strings::StrCat(kAutoParallelPrefix, "-Replica-", j);
string fetch = AddPrefixToNodeName(item_->fetch[i], prefix);
fetches.insert(fetch);
}
}
string name_control =
strings::StrCat(kAutoParallelPrefix, "-Control-", "Fetch");
auto control = AddNodeControl(name_control, fetches, graph);
for (const auto& fetch : item_->fetch) {
AddNodeControl(fetch, {control->name()}, graph);
}
*graph->mutable_library() = item_->graph.library();
*graph->mutable_versions() = item_->graph.versions();
LOG(INFO) << "Parallelized graph size: " << graph->node_size();
}
Status AutoParallel::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* output) {
TF_RETURN_IF_ERROR(Initialize(item));
BuildGraph(output);
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/auto_parallel.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class AutoParallelTest : public ::testing::Test {};
TEST_F(AutoParallelTest, SimpleParallel) {
tensorflow::Scope s = tensorflow::Scope::DisabledShapeInferenceScope();
Output constant_a = ops::Const(s.WithOpName("constant_a"), 1.0f, {1});
Output constant_b = ops::Const(s.WithOpName("constant_b"), 1, {1});
Output var = ops::Variable(s.WithOpName("var"), {1}, DT_FLOAT);
Output assign = ops::Assign(s.WithOpName("assign"), {var}, {constant_a});
Output identity = ops::Identity(s.WithOpName("identity"), {var});
Output fifo_queue = ops::FIFOQueue(s.WithOpName("fifo_queue"), {DT_FLOAT});
auto dequeue = ops::QueueDequeueMany(s.WithOpName("dequeue"), {fifo_queue},
{constant_b}, {DT_FLOAT});
Output add = ops::AddN(s.WithOpName("add"), {constant_a, dequeue[0]});
Output learning_rate = ops::Const(s.WithOpName("learning_rate"), 0.01f, {1});
Output apply_gradient = ops::ApplyGradientDescent(
s.WithOpName("apply_gradient"), {var}, {learning_rate}, {add});
GrapplerItem item;
item.init_ops.push_back("assign");
item.fetch.push_back("apply_gradient");
TF_CHECK_OK(s.ToGraphDef(&item.graph));
AutoParallel parallel(2);
GraphDef output;
Status status = parallel.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(21, output.node_size());
const NodeDef& node_assign = output.node(0);
EXPECT_EQ("assign", node_assign.name());
EXPECT_EQ("AutoParallel-Replica-0/constant_a", node_assign.input(1));
const NodeDef& node_constant_b = output.node(1);
EXPECT_EQ("constant_b", node_constant_b.name());
const NodeDef& node_fifo_queue = output.node(2);
EXPECT_EQ("fifo_queue", node_fifo_queue.name());
const NodeDef& node_identity = output.node(3);
EXPECT_EQ("identity", node_identity.name());
EXPECT_EQ("var", node_identity.input(0));
const NodeDef& node_var = output.node(4);
EXPECT_EQ("var", node_var.name());
const NodeDef& node_div_const0 = output.node(5);
EXPECT_EQ("AutoParallel-Replica-0/AutoParallel-Div-Const",
node_div_const0.name());
const NodeDef& node_div0 = output.node(6);
EXPECT_EQ("AutoParallel-Replica-0/AutoParallel-Div-apply_gradient",
node_div0.name());
const NodeDef& node_add0 = output.node(7);
EXPECT_EQ("AutoParallel-Replica-0/add", node_add0.name());
const NodeDef& node_gradient0 = output.node(8);
EXPECT_EQ("AutoParallel-Replica-0/apply_gradient", node_gradient0.name());
const NodeDef& node_constant_a0 = output.node(9);
EXPECT_EQ("AutoParallel-Replica-0/constant_a", node_constant_a0.name());
const NodeDef& node_dequeue0 = output.node(10);
EXPECT_EQ("AutoParallel-Replica-0/dequeue", node_dequeue0.name());
const NodeDef& node_learning_rate0 = output.node(11);
EXPECT_EQ("AutoParallel-Replica-0/learning_rate", node_learning_rate0.name());
const NodeDef& node_div_const1 = output.node(12);
EXPECT_EQ("AutoParallel-Replica-1/AutoParallel-Div-Const",
node_div_const1.name());
const NodeDef& node_div1 = output.node(13);
EXPECT_EQ("AutoParallel-Replica-1/AutoParallel-Div-apply_gradient",
node_div1.name());
const NodeDef& node_add1 = output.node(14);
EXPECT_EQ("AutoParallel-Replica-1/add", node_add1.name());
const NodeDef& node_gradient1 = output.node(15);
EXPECT_EQ("AutoParallel-Replica-1/apply_gradient", node_gradient1.name());
const NodeDef& node_constant_a1 = output.node(16);
EXPECT_EQ("AutoParallel-Replica-1/constant_a", node_constant_a1.name());
const NodeDef& node_dequeue1 = output.node(17);
EXPECT_EQ("AutoParallel-Replica-1/dequeue", node_dequeue1.name());
const NodeDef& node_learning_rate1 = output.node(18);
EXPECT_EQ("AutoParallel-Replica-1/learning_rate", node_learning_rate1.name());
const NodeDef& node_fetch = output.node(19);
EXPECT_EQ("AutoParallel-Control-Fetch", node_fetch.name());
EXPECT_EQ("^AutoParallel-Replica-0/apply_gradient", node_fetch.input(0));
EXPECT_EQ("^AutoParallel-Replica-1/apply_gradient", node_fetch.input(1));
const NodeDef& node_gradient = output.node(20);
EXPECT_EQ("apply_gradient", node_gradient.name());
EXPECT_EQ("^AutoParallel-Control-Fetch", node_gradient.input(0));
}
TEST_F(AutoParallelTest, SimpleParallelNoDequeue) {
tensorflow::Scope s = tensorflow::Scope::DisabledShapeInferenceScope();
Output constant_a = ops::Const(s.WithOpName("constant_a"), 1.0f, {1});
Output constant_c = ops::Const(s.WithOpName("constant_c"), 1.0f, {1});
Output constant_b = ops::Const(s.WithOpName("constant_b"), 1, {1});
Output var = ops::Variable(s.WithOpName("var"), {1}, DT_FLOAT);
Output assign = ops::Assign(s.WithOpName("assign"), {var}, {constant_a});
Output add = ops::AddN(s.WithOpName("add"), {constant_a, constant_c});
Output learning_rate = ops::Const(s.WithOpName("learning_rate"), 0.01f, {1});
Output apply_gradient = ops::ApplyGradientDescent(
s.WithOpName("apply_gradient"), {var}, {learning_rate}, {add});
GrapplerItem item;
item.init_ops.push_back("assign");
item.fetch.push_back("apply_gradient");
item.init_ops.push_back("assign");
TF_CHECK_OK(s.ToGraphDef(&item.graph));
AutoParallel parallel(2);
GraphDef output;
Status status = parallel.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
}
}
}
} |
1,383 | cpp | tensorflow/tensorflow | implementation_selector | tensorflow/core/grappler/optimizers/implementation_selector.cc | tensorflow/core/grappler/optimizers/implementation_selector_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_IMPLEMENTATION_SELECTOR_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_IMPLEMENTATION_SELECTOR_H_
#include <string>
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/function_api_info.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace grappler {
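// Grappler pass that swaps function call nodes between registered
// implementations of the same "api_implements" interface, picking the one
// whose "api_preferred_device" matches the device the call is placed on. It
// also constant-folds DeviceIndex ops that feed Case/StatelessCase branches.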
class ImplementationSelector : public CustomGraphOptimizer {
public:
ImplementationSelector() = default;
~ImplementationSelector() override = default;
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
string name() const override {
return "implementation_selector";
}
bool UsesFunctionLibrary() const override { return false; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override;
private:
Status LoadFunctions(const GraphDef& graph);
Status MaybeOptimizeFunctionCall(utils::MutableNodeView* node_view) const;
Status SelectImplementation(GraphDef* graph) const;
Status SelectDeviceIndex(GraphDef* graph) const;
std::unique_ptr<FunctionLibraryApiInfo> lib_info_;
ImplementationSelector(const ImplementationSelector&) = delete;
void operator=(const ImplementationSelector&) = delete;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/implementation_selector.h"
#include <string>
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/function_api_info.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace grappler {
constexpr char kConstOp[] = "Const";
constexpr char kCaseOp[] = "Case";
constexpr char kStatelessCaseOp[] = "StatelessCase";
constexpr char kDeviceIndexOp[] = "DeviceIndex";
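// Given a backward (gradient) function call node, returns the input string of
// the forward call that produced its side outputs: either the fanin of a
// trailing Identity node, or the last regular input itself when it comes
// directly from a (Stateful)PartitionedCall; returns "" otherwise.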
string FindForwardNode(utils::MutableNodeView* backward_node) {
const int last_input_index = backward_node->NumRegularFanins() - 1;
const utils::MutableFanoutView& input =
backward_node->GetRegularFanin(last_input_index);
if (IsIdentity(*input.node_view()->node())) {
return input.node_view()->node()->input(0);
} else if (IsPartitionedCall(*input.node_view()->node()) ||
IsStatefulPartitionedCall(*input.node_view()->node())) {
return backward_node->node()->input(last_input_index);
} else {
return "";
}
}
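// After a forward function swap, updates the "T" attribute of any Identity
// nodes hanging off the forward call so they match the new output dtypes.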
void UpdateForwardIdentityNodeDtype(utils::MutableNodeView* forward_node,
const DataTypeVector& dtypes) {
const auto& fanouts_vector = forward_node->GetRegularFanouts();
for (int pos = 0, pos_limit = fanouts_vector.size(); pos < pos_limit; ++pos) {
const auto& fanouts_at_pos = fanouts_vector[pos];
for (const auto& fanout : fanouts_at_pos) {
if ("Identity" == fanout.node_view()->GetOp()) {
(*fanout.node_view()->node()->mutable_attr())["T"].set_type(
dtypes[pos]);
VLOG(3) << "Updated DTYPE for Identity node: "
<< fanout.node_view()->node()->DebugString();
}
}
}
}
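// Rewrites a function call node to invoke `funcName` instead: repoints the
// "f" attr and the Tin/Tout dtype lists, and for backward functions adjusts
// the regular input list (dropping surplus side inputs, or appending extra
// outputs of the forward node) while preserving trailing control inputs.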
Status UpdateNodeDef(utils::MutableNodeView* node_view, const string& funcName,
const FunctionApiInfo& apiInfo) {
NodeDef* node_def = node_view->node();
VLOG(3) << "Node def before swap is: " << node_def->DebugString();
node_def->mutable_attr()->find("f")->second.mutable_func()->set_name(
funcName);
auto tin = node_def->mutable_attr()->find("Tin");
tin->second.mutable_list()->clear_type();
for (const auto& tin_dtype : apiInfo.input_arg_dtypes()) {
tin->second.mutable_list()->add_type(tin_dtype);
}
auto tout = node_def->mutable_attr()->find("Tout");
tout->second.mutable_list()->clear_type();
for (const auto& tout_dtype : apiInfo.output_arg_dtypes()) {
tout->second.mutable_list()->add_type(tout_dtype);
}
if (apiInfo.function_type() == FunctionApiInfo::BACKWARD) {
std::vector<std::string> control_deps;
for (int i = node_def->input_size() - 1; i >= 0; --i) {
if (!IsControlInput(node_def->input(i))) break;
control_deps.push_back(node_def->input(i));
node_def->mutable_input()->RemoveLast();
}
const int prev_input_size = node_def->input_size();
const int diff = prev_input_size - apiInfo.input_arg_dtypes().size();
if (diff >= 0) {
for (int i = 0; i < diff; ++i) node_def->mutable_input()->RemoveLast();
} else {
const string last_input = FindForwardNode(node_view);
const std::vector<string> name_index = ::absl::StrSplit(last_input, ':');
if (name_index.size() != 2) {
return errors::InvalidArgument(
"Invalid format of input node name: ", last_input,
" Expected: {forward_node_name}:{index}");
}
const absl::string_view node_name = name_index[0];
int last_index;
if (!::absl::SimpleAtoi(name_index[1], &last_index)) {
return errors::InvalidArgument(
"The index of input node is expected to be number, got: ",
name_index[1]);
}
for (int i = 1; i <= -diff; ++i)
node_def->add_input(strings::StrCat(node_name, ":", i + last_index));
}
for (std::string& control : control_deps)
node_def->add_input(std::move(control));
} else if (apiInfo.function_type() == FunctionApiInfo::FORWARD) {
UpdateForwardIdentityNodeDtype(node_view, apiInfo.output_arg_dtypes());
}
VLOG(3) << "Node def after swap is: " << node_def->DebugString();
return absl::OkStatus();
}
Status ImplementationSelector::LoadFunctions(const GraphDef& graph) {
lib_info_ = std::make_unique<FunctionLibraryApiInfo>();
TF_RETURN_IF_ERROR(lib_info_->Init(graph.library()));
return absl::OkStatus();
}
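// Checks whether a node calls (via a func-valued attr or directly by op name)
// a function that has registered equivalents, and if one of them prefers the
// node's device type, swaps the call to that implementation.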
Status ImplementationSelector::MaybeOptimizeFunctionCall(
utils::MutableNodeView* node_view) const {
NodeDef* node_def = node_view->node();
std::vector<string> function_attribute_names;
for (const auto& attr : node_def->attr()) {
if (attr.second.has_func() &&
lib_info_->GetApiInfo(attr.second.func().name()) != nullptr) {
function_attribute_names.emplace_back(attr.first);
}
}
if (function_attribute_names.empty() &&
lib_info_->GetApiInfo(node_def->op()) == nullptr) {
return absl::OkStatus();
}
DeviceNameUtils::ParsedName parsed_name;
if (!DeviceNameUtils::ParseFullName(node_def->device(), &parsed_name) ||
!parsed_name.has_type) {
return errors::Internal("Could not parse device name:", node_def->device());
}
VLOG(2) << "Op " << node_def->name() << " runs on " << node_def->device()
<< " = (" << parsed_name.type << ")";
for (const auto& attr_name : function_attribute_names) {
string function_name = node_def->attr().at(attr_name).func().name();
if (::absl::StrContains(function_name, "_specialized_for_")) continue;
std::vector<string> equiv_func_names;
TF_RETURN_IF_ERROR(lib_info_->GetEquivalentImplementations(
function_name, &equiv_func_names));
for (const auto& func_name : equiv_func_names) {
const auto& func_api_info = lib_info_->GetApiInfo(func_name);
if (func_api_info->preferred_device() == parsed_name.type) {
VLOG(2) << "Swapping: " << function_name << " TO: " << func_name;
TF_RETURN_IF_ERROR(UpdateNodeDef(node_view, func_name, *func_api_info));
break;
}
}
}
if (lib_info_->GetApiInfo(node_def->op()) != nullptr &&
!::absl::StrContains(node_def->op(), "_specialized_for_")) {
std::vector<string> equiv_func_names;
TF_RETURN_IF_ERROR(lib_info_->GetEquivalentImplementations(
node_def->op(), &equiv_func_names));
for (const string& func_name : equiv_func_names) {
const auto func_api_info = lib_info_->GetApiInfo(func_name);
if (func_api_info->preferred_device() == parsed_name.type) {
node_def->set_op(func_name);
break;
}
}
}
return absl::OkStatus();
}
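// Maps `device`'s type to its position in the DeviceIndex node's
// "device_names" list; if the type is not listed, returns the list size
// (i.e. the Case op's default branch).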
Status FindDeviceIndex(const utils::MutableNodeView* device_index_node,
const string& device, int* index) {
DeviceNameUtils::ParsedName parsed_name;
if (!DeviceNameUtils::ParseFullName(device, &parsed_name) ||
!parsed_name.has_type) {
return errors::Internal("Could not parse device name:", device);
}
const auto& device_list =
device_index_node->GetAttr("device_names")->list().s();
auto it = absl::c_find(device_list, parsed_name.type);
if (it != device_list.end()) {
*index = it - device_list.begin();
} else {
*index = device_list.size();
}
return absl::OkStatus();
}
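// Replaces a DeviceIndex node in place with an int32 Const holding the
// resolved branch index.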
void RewriteDeviceIndexOp(utils::MutableNodeView* device_index_node,
int index) {
auto node = device_index_node->node();
node->set_op(kConstOp);
EraseRegularNodeAttributes(node);
(*node->mutable_attr())["dtype"].set_type(DT_INT32);
auto* tensor = (*node->mutable_attr())["value"].mutable_tensor();
tensor->set_dtype(DT_INT32);
tensor->add_int_val(index);
VLOG(2) << "Node after rewriting:" << node->DebugString();
}
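// Rewrites every DeviceIndex node that feeds a Case/StatelessCase op into a
// constant, resolved against the device the consuming Case op is placed on.
// Nodes whose device cannot be resolved are skipped rather than reported.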
Status ImplementationSelector::SelectDeviceIndex(GraphDef* graph) const {
Status status;
VLOG(2) << "graph before rewriting device index:" << graph->DebugString();
utils::MutableGraphView graph_view(graph, &status);
TF_RETURN_IF_ERROR(status);
const int num_nodes = graph_view.NumNodes();
for (int k = 0; k < num_nodes; ++k) {
auto* node_view = graph_view.GetNode(k);
if (node_view->GetOp() != kDeviceIndexOp) {
continue;
}
VLOG(2) << "Found a node to rewrite the device index";
for (const auto& fanouts : node_view->GetRegularFanouts()) {
for (const auto& fanout : fanouts) {
if (fanout.node_view()->GetOp() != kCaseOp &&
fanout.node_view()->GetOp() != kStatelessCaseOp)
continue;
int index;
Status status =
FindDeviceIndex(node_view, fanout.node_view()->GetDevice(), &index);
if (status.ok()) {
RewriteDeviceIndexOp(node_view, index);
}
}
}
}
return absl::OkStatus();
}
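// Walks all nodes and applies MaybeOptimizeFunctionCall; a no-op when the
// graph has no function library or no annotated functions were loaded.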
Status ImplementationSelector::SelectImplementation(GraphDef* graph) const {
if (!graph->has_library()) {
VLOG(2) << "Skipping graph since it does not have function def";
return absl::OkStatus();
}
if (lib_info_->empty()) {
VLOG(2) << "Skipping optimization since lib_info is empty";
return absl::OkStatus();
}
Status status;
utils::MutableGraphView graph_view(graph, &status);
TF_RETURN_IF_ERROR(status);
const int num_nodes = graph_view.NumNodes();
for (int k = 0; k < num_nodes; ++k) {
TF_RETURN_IF_ERROR(MaybeOptimizeFunctionCall(graph_view.GetNode(k)));
}
return absl::OkStatus();
}
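// Entry point: loads the function API info, rewrites DeviceIndex ops (errors
// there only downgrade to a log message), then selects implementations.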
Status ImplementationSelector::Optimize(Cluster* cluster,
const GrapplerItem& item,
GraphDef* optimized_graph) {
auto status = LoadFunctions(item.graph);
if (!status.ok()) {
VLOG(2) << "Skipping optimization due to error while loading function "
<< "libraries: " << status;
return errors::Aborted("Skipped Optimization");
}
*optimized_graph = item.graph;
status = SelectDeviceIndex(optimized_graph);
if (!status.ok()) {
*optimized_graph = item.graph;
VLOG(2) << "Could not rewrite device index due to error:" << status;
}
return SelectImplementation(optimized_graph);
}
}
} | #include "tensorflow/core/grappler/optimizers/implementation_selector.h"
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char CpuDevice[] = "/device:CPU:0";
constexpr char GpuDevice[] = "/device:GPU:0";
constexpr char TpuDevice[] = "/device:TPU_REPLICATED_CORE";
class ImplementationSelectorTest : public GrapplerTest {};
TEST_F(ImplementationSelectorTest, NoUpdate) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {CpuDevice});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
std::unique_ptr<CustomGraphOptimizer> optimizer(new ImplementationSelector);
ASSERT_NE(nullptr, optimizer);
TF_ASSERT_OK(optimizer->Init());
GraphDef output;
const Status status = optimizer->Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), output.node_size());
}
TEST_F(ImplementationSelectorTest, SelectDeviceIndex) {
using test::function::NDef;
ImplementationSelector optimizer;
GraphDef output;
GrapplerItem item;
AttrValue device_names;
device_names.mutable_list()->add_s("CPU");
device_names.mutable_list()->add_s("GPU");
item.graph = test::function::GDef(
{NDef("x", "DeviceIndex", {}, {{"device_names", device_names}},
CpuDevice),
NDef("case", "Case", {"x"}, {{"T", DT_FLOAT}}, GpuDevice)});
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(1, node.attr().at("value").tensor().int_val(0));
}
}
}
TEST_F(ImplementationSelectorTest, SelectDeviceIndexStatelessCase) {
using test::function::NDef;
ImplementationSelector optimizer;
GraphDef output;
GrapplerItem item;
AttrValue device_names;
device_names.mutable_list()->add_s("CPU");
device_names.mutable_list()->add_s("GPU");
item.graph = test::function::GDef(
{NDef("x", "DeviceIndex", {}, {{"device_names", device_names}},
CpuDevice),
NDef("case", "StatelessCase", {"x"}, {{"T", DT_FLOAT}}, GpuDevice)});
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(1, node.attr().at("value").tensor().int_val(0));
}
}
}
TEST_F(ImplementationSelectorTest, SelectDeviceIndexMultiOps) {
using test::function::NDef;
ImplementationSelector optimizer;
GraphDef output;
GrapplerItem item;
AttrValue device_names;
device_names.mutable_list()->add_s("CPU");
device_names.mutable_list()->add_s("TPU_REPLICATED_CORE");
device_names.mutable_list()->add_s("GPU");
item.graph = test::function::GDef(
{NDef("x", "DeviceIndex", {}, {{"device_names", device_names}},
CpuDevice),
NDef("case", "Case", {"x"}, {{"T", DT_FLOAT}}, GpuDevice),
NDef("y", "DeviceIndex", {}, {{"device_names", device_names}},
GpuDevice),
NDef("case_y", "Case", {"y"}, {{"T", DT_FLOAT}}, TpuDevice)});
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(2, node.attr().at("value").tensor().int_val(0));
}
if (node.name() == "y") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(1, node.attr().at("value").tensor().int_val(0));
}
}
}
TEST_F(ImplementationSelectorTest, SelectDeviceIndexNotFound) {
using test::function::NDef;
ImplementationSelector optimizer;
GraphDef output;
GrapplerItem item;
AttrValue device_names;
device_names.mutable_list()->add_s("CPU");
device_names.mutable_list()->add_s("GPU");
item.graph = test::function::GDef(
{NDef("x", "DeviceIndex", {}, {{"device_names", device_names}},
CpuDevice),
NDef("case", "Case", {"x"}, {{"T", DT_FLOAT}}, TpuDevice)});
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(2, node.attr().at("value").tensor().int_val(0));
}
}
}
TEST_F(ImplementationSelectorTest, SelectDeviceIndexError) {
using test::function::NDef;
ImplementationSelector optimizer;
GraphDef output;
GrapplerItem item;
AttrValue device_names;
device_names.mutable_list()->add_s("CPU");
device_names.mutable_list()->add_s("GPU");
item.graph = test::function::GDef(
{NDef("x", "DeviceIndex", {}, {{"device_names", device_names}},
CpuDevice),
NDef("case", "Case", {"x"}, {{"T", DT_FLOAT}}, "")});
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
EXPECT_EQ("DeviceIndex", node.op());
}
}
}
TEST_F(ImplementationSelectorTest, TwoTypesOfSwapImplementation) {
using test::function::NDef;
ImplementationSelector optimizer;
GraphDef output;
GrapplerItem item;
AttrValue device_names;
device_names.mutable_list()->add_s("CPU");
device_names.mutable_list()->add_s("TPU_REPLICATED_CORE");
device_names.mutable_list()->add_s("GPU");
auto cpu_def = test::function::XTimesTwo();
auto* func_attr = cpu_def.mutable_attr();
(*func_attr)["api_implements"].set_s("times_two");
(*func_attr)["api_preferred_device"].set_s("CPU");
auto gpu_def = test::function::XAddX();
auto* func2_attr = gpu_def.mutable_attr();
(*func2_attr)["api_implements"].set_s("times_two");
(*func2_attr)["api_preferred_device"].set_s("GPU");
item.graph = test::function::GDef(
{NDef("x", "DeviceIndex", {}, {{"device_names", device_names}},
CpuDevice),
NDef("case", "Case", {"x"}, {{"T", DT_FLOAT}}, GpuDevice),
NDef("y", "DeviceIndex", {}, {{"device_names", device_names}},
GpuDevice),
NDef("case_y", "Case", {"y"}, {{"T", DT_FLOAT}}, TpuDevice),
NDef("y1", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, GpuDevice),
NDef("z1", "Identity", {"y1"}, {{"T", DT_FLOAT}}, GpuDevice),
NDef("y2", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, CpuDevice),
NDef("z2", "Identity", {"y2"}, {{"T", DT_FLOAT}}, CpuDevice)},
{cpu_def, gpu_def});
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(2, node.attr().at("value").tensor().int_val(0));
}
if (node.name() == "y") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(1, node.attr().at("value").tensor().int_val(0));
}
if (node.name() == "y1") {
EXPECT_EQ("XAddX", node.op());
} else if (node.name() == "y2") {
EXPECT_EQ("XTimesTwo", node.op());
}
}
}
TEST_F(ImplementationSelectorTest, NoSwapWithImplementsOnly) {
using test::function::NDef;
ImplementationSelector optimizer;
GraphDef output;
GrapplerItem item;
AttrValue device_names;
device_names.mutable_list()->add_s("CPU");
device_names.mutable_list()->add_s("TPU_REPLICATED_CORE");
device_names.mutable_list()->add_s("GPU");
auto cpu_def = test::function::XTimesTwo();
auto* func_attr = cpu_def.mutable_attr();
(*func_attr)["api_implements"].set_s("times_two");
auto gpu_def = test::function::XAddX();
auto* func2_attr = gpu_def.mutable_attr();
(*func2_attr)["api_implements"].set_s("times_two");
item.graph = test::function::GDef(
{NDef("x", "DeviceIndex", {}, {{"device_names", device_names}},
CpuDevice),
NDef("case", "Case", {"x"}, {{"T", DT_FLOAT}}, GpuDevice),
NDef("y", "DeviceIndex", {}, {{"device_names", device_names}},
GpuDevice),
NDef("case_y", "Case", {"y"}, {{"T", DT_FLOAT}}, TpuDevice),
NDef("y1", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, GpuDevice),
NDef("z1", "Identity", {"y1"}, {{"T", DT_FLOAT}}, GpuDevice),
NDef("y2", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, CpuDevice),
NDef("z2", "Identity", {"y2"}, {{"T", DT_FLOAT}}, CpuDevice)},
{cpu_def, gpu_def});
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(2, node.attr().at("value").tensor().int_val(0));
}
if (node.name() == "y") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(1, node.attr().at("value").tensor().int_val(0));
}
if (node.name() == "y1") {
EXPECT_EQ("XTimesTwo", node.op());
} else if (node.name() == "y2") {
EXPECT_EQ("XTimesTwo", node.op());
}
}
}
TEST_F(ImplementationSelectorTest, SwapImplementation) {
using test::function::NDef;
auto cpu_def = test::function::XTimesTwo();
auto* func_attr = cpu_def.mutable_attr();
(*func_attr)["api_implements"].set_s("times_two");
(*func_attr)["api_preferred_device"].set_s("CPU");
auto gpu_def = test::function::XAddX();
auto* func2_attr = gpu_def.mutable_attr();
(*func2_attr)["api_implements"].set_s("times_two");
(*func2_attr)["api_preferred_device"].set_s("GPU");
ImplementationSelector optimizer;
GraphDef output;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, GpuDevice),
NDef("y1", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, GpuDevice),
NDef("z1", "Identity", {"y1"}, {{"T", DT_FLOAT}}, GpuDevice),
NDef("y2", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, CpuDevice),
NDef("z2", "Identity", {"y2"}, {{"T", DT_FLOAT}}, CpuDevice)},
{cpu_def, gpu_def});
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(output.node_size(), 5);
for (const NodeDef& node : output.node()) {
if (node.name() == "y1") {
EXPECT_EQ("XAddX", node.op());
} else if (node.name() == "y2") {
EXPECT_EQ("XTimesTwo", node.op());
}
}
}
TEST_F(ImplementationSelectorTest, SwapImplementationTpu) {
using test::function::NDef;
auto cpu_def = test::function::XTimesTwo();
auto* func_attr = cpu_def.mutable_attr();
(*func_attr)["api_implements"].set_s("times_two");
(*func_attr)["api_preferred_device"].set_s("CPU");
auto tpu_def = test::function::XAddX();
auto* func2_attr = tpu_def.mutable_attr();
(*func2_attr)["api_implements"].set_s("times_two");
(*func2_attr)["api_preferred_device"].set_s("TPU_REPLICATED_CORE");
ImplementationSelector optimizer;
GraphDef output;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, TpuDevice),
NDef("y1", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, TpuDevice),
NDef("z1", "Identity", {"y1"}, {{"T", DT_FLOAT}}, TpuDevice),
NDef("y2", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, CpuDevice),
NDef("z2", "Identity", {"y2"}, {{"T", DT_FLOAT}}, CpuDevice)},
{cpu_def, tpu_def});
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(output.node_size(), 5);
for (const NodeDef& node : output.node()) {
if (node.name() == "y1") {
EXPECT_EQ("XAddX", node.op());
} else if (node.name() == "y2") {
EXPECT_EQ("XTimesTwo", node.op());
}
}
}
TEST_F(ImplementationSelectorTest, SwapImplementationEval) {
using test::function::NDef;
auto cpu_def = test::function::XTimesTwo();
auto* func_attr = cpu_def.mutable_attr();
(*func_attr)["api_implements"].set_s("random_boost");
(*func_attr)["api_preferred_device"].set_s("CPU");
auto gpu_def = test::function::XTimesFour();
auto* func2_attr = gpu_def.mutable_attr();
(*func2_attr)["api_implements"].set_s("random_boost");
(*func2_attr)["api_preferred_device"].set_s("GPU");
ImplementationSelector optimizer;
GraphDef output;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, CpuDevice),
NDef("y", "XTimesFour", {"x"}, {{"T", DT_FLOAT}}, CpuDevice),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, CpuDevice)},
{cpu_def, gpu_def});
const Tensor input = test::AsScalar<float>(1.0f);
item.fetch = {"z"};
item.feed.emplace_back("x", input);
const auto four_times_boosted_tensor = EvaluateFetchNodes(item);
test::ExpectTensorEqual<float>(four_times_boosted_tensor[0],
test::AsScalar<float>(4.0f));
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
GrapplerItem optimized = item.WithGraph(std::move(output));
const auto twice_boosted_tensor = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(twice_boosted_tensor[0],
test::AsScalar<float>(2.0f));
}
TEST_F(ImplementationSelectorTest, SwapImplementationWithGradient) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionDef boost_1 = FDH::Create(
"Boost1", {"x:float"}, {"z:float", "s:float"}, {},
{{{"boost"}, "Add", {"x", "x"}, {{"T", DT_FLOAT}}},
FDH::Const("one", 1.0f)},
{{"z", "boost:z:0"}, {"s", "one:output:0"}});
auto* boost_1_attr = boost_1.mutable_attr();
(*boost_1_attr)["api_implements"].set_s("random_boost");
(*boost_1_attr)["api_preferred_device"].set_s("CPU");
(*boost_1_attr)["backward_function_name"].set_s("BoostCpuGradient");
FunctionDef boost_1_gradient = FDH::Create(
"Boost1Gradient", {"x:float", "s:float"}, {"dx:float"}, {},
{FDH::Const("two", 2.0f),
{{"grad"}, "Mul", {"x", "two:output:0"}, {{"T", DT_FLOAT}}}},
{{"dx", "grad:z:0"}});
auto* boost_1_grad_attr = boost_1_gradient.mutable_attr();
(*boost_1_grad_attr)["api_implements"].set_s("random_boost");
(*boost_1_grad_attr)["api_preferred_device"].set_s("CPU");
(*boost_1_grad_attr)["forward_function_name"].set_s("BoostCpu");
FunctionDef boost_2_func = FDH::Create(
"Boost2", {"x:float"}, {"z:float", "s1:float", "s2:float"}, {},
{FDH::Const("four", 4.0f),
{{"boost"}, "Mul", {"x", "four:output:0"}, {{"T", DT_FLOAT}}},
FDH::Const("one", 1.0f),
FDH::Const("two", 2.0f)},
{{"z", "boost:z:0"}, {"s1", "one:output:0"}, {"s2", "two:output:0"}});
auto* boost_2_attr = boost_2_func.mutable_attr();
(*boost_2_attr)["api_implements"].set_s("random_boost");
(*boost_2_attr)["api_preferred_device"].set_s("GPU");
(*boost_2_attr)["backward_function_name"].set_s("BoostGpuGradient");
FunctionDef boost_2_gradient = FDH::Create(
"Boost2Gradient", {"x:float", "s1:float", "s2:float"}, {"dx:float"}, {},
{FDH::Const("four", 4.0f),
{{"grad"}, "Mul", {"x", "four:output:0"}, {{"T", DT_FLOAT}}}},
{{"dx", "grad:z:0"}});
auto* boost_2_grad_attr = boost_2_gradient.mutable_attr();
(*boost_2_grad_attr)["api_implements"].set_s("random_boost");
(*boost_2_grad_attr)["api_preferred_device"].set_s("GPU");
(*boost_2_grad_attr)["forward_function_name"].set_s("BoostGpu");
const auto forward =
NDef("lstm/StatefulPartitionedCall", "StatefulPartitionedCall", {"input"},
{{"Tin", DataTypeSlice{DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT, DT_FLOAT, DT_FLOAT}},
{"f", FDH::FunctionRef("Boost2")}},
CpuDevice);
const auto backward =
NDef("gradient/lstm/StatefulPartitionedCall", "StatefulPartitionedCall",
{"input", "lstm/StatefulPartitionedCall:1",
"lstm/StatefulPartitionedCall:2"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("Boost2Gradient")}},
CpuDevice);
ImplementationSelector optimizer;
GraphDef output;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("input", "Placeholder", {}, {{"dtype", DT_FLOAT}}, CpuDevice),
forward, backward,
NDef("output", "Identity", {"lstm/StatefulPartitionedCall:0"},
{{"T", DT_FLOAT}}, CpuDevice)},
{boost_1, boost_1_gradient, boost_2_func, boost_2_gradient});
const Tensor input = test::AsScalar<float>(1.0f);
item.fetch = {"output"};
item.feed.emplace_back("input", input);
const auto four_times_boosted_tensor = EvaluateFetchNodes(item);
test::ExpectTensorEqual<float>(four_times_boosted_tensor[0],
test::AsScalar<float>(4.0f));
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
GrapplerItem optimized = item.WithGraph(std::move(output));
const auto twice_boosted_tensor = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(twice_boosted_tensor[0],
test::AsScalar<float>(2.0f));
}
}
}
} |
1,384 | cpp | tensorflow/tensorflow | generic_layout_optimizer_transposer_factory | tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer_factory.cc | tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer_factory_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_GENERIC_LAYOUT_OPTIMIZER_TRANSPOSER_FACTORY_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_GENERIC_LAYOUT_OPTIMIZER_TRANSPOSER_FACTORY_H_
#include <memory>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer.h"
namespace tensorflow {
namespace grappler {
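// Creates and caches the layout Transposer for each op category, so that all
// nodes of the same kind share a single transposer instance.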
class TransposerFactory {
public:
explicit TransposerFactory() {}
std::shared_ptr<Transposer> GetTransposer(const NodeDef& node);
protected:
template <typename T>
std::shared_ptr<Transposer> GetOrCreateIfNotFound(const string& key) {
auto& transposer = transposer_map_[key];
if (transposer == nullptr) {
transposer = std::make_shared<T>();
}
return transposer;
}
absl::flat_hash_map<string, std::shared_ptr<Transposer>> transposer_map_;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer_factory.h"
#include "tensorflow/core/grappler/op_types.h"
namespace tensorflow {
namespace grappler {
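// Dispatches on the node's op to the matching transposer, creating it on
// first use; returns nullptr for ops the layout optimizer does not handle.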
std::shared_ptr<Transposer> TransposerFactory::GetTransposer(
const NodeDef& node) {
if (IsDefaultLayoutSensitiveOp(node)) {
return GetOrCreateIfNotFound<DefaultLayoutSensitiveOpTransposer>(
"DefaultLayoutSensitiveOp");
}
if (IsAvgPoolGrad(node)) {
return GetOrCreateIfNotFound<AvgPoolGradTransposer>("AvgPoolGrad");
}
if (IsBiasAddV2(node)) {
return GetOrCreateIfNotFound<BiasAddTransposer>("BiasAdd");
}
if (IsBiasAddGrad(node)) {
return GetOrCreateIfNotFound<BiasAddGradTransposer>("BiasAddGrad");
}
if (IsConv2DBackpropFilter(node) ||
IsDepthwiseConv2dNativeBackpropFilter(node)) {
return GetOrCreateIfNotFound<Conv2DBackpropFilterTransposer>(
"Conv2DBackpropFilter");
}
if (IsConv2DBackpropInput(node) ||
IsDepthwiseConv2dNativeBackpropInput(node)) {
return GetOrCreateIfNotFound<Conv2DBackpropInputTransposer>(
"Conv2DBackpropInput");
}
if (IsConv3D(node)) {
return GetOrCreateIfNotFound<Conv3DTransposer>("Conv3D");
}
if (IsConv3DBackpropInputV2(node)) {
return GetOrCreateIfNotFound<Conv3DBackpropInputTransposer>(
"Conv3DBackpropInput");
}
if (IsConv3DBackpropFilterV2(node)) {
return GetOrCreateIfNotFound<Conv3DBackpropFilterTransposer>(
"Conv3DBackpropFilter");
}
if (IsFusedBatchNormEx(node)) {
return GetOrCreateIfNotFound<FusedBatchNormExTransposer>(
"FusedBatchNormEx");
}
if (IsFusedBatchNormGrad(node)) {
return GetOrCreateIfNotFound<FusedBatchNormGradTransposer>(
"FusedBatchNormGrad");
}
if (IsMaxPoolV2(node)) {
return GetOrCreateIfNotFound<MaxPoolV2Transposer>("MaxPoolV2");
}
if (IsMaxPoolGrad(node) || IsMaxPoolGradGradV1(node)) {
return GetOrCreateIfNotFound<MaxPoolGradTransposer>("MaxPoolGrad");
}
if (IsMaxPoolGradV2(node) || IsMaxPoolGradGradV2(node)) {
return GetOrCreateIfNotFound<MaxPoolGradV2Transposer>("MaxPoolGradV2");
}
if (IsMaxPool3D(node)) {
return GetOrCreateIfNotFound<MaxPool3DTransposer>("MaxPool3D");
}
if (IsDefaultLayoutAgnosticOp(node)) {
return GetOrCreateIfNotFound<DefaultLayoutAgnosticOpTransposer>(
"DefaultLayoutAgnosticOp");
}
if (IsAddN(node)) {
return GetOrCreateIfNotFound<AddNTransposer>("AddN");
}
if (IsBinaryOp(node)) {
return GetOrCreateIfNotFound<BinaryOpTransposer>("BinaryOp");
}
if (IsConcat(node)) {
return GetOrCreateIfNotFound<ConcatOpTransposer>("Concat");
}
if (IsFill(node)) {
return GetOrCreateIfNotFound<FillOpTransposer>("Fill");
}
if (IsIdentityN(node)) {
return GetOrCreateIfNotFound<IdentityNTransposer>("IdentityN");
}
if (IsMerge(node)) {
return GetOrCreateIfNotFound<MergeTransposer>("Merge");
}
if (IsMirrorPad(node) || IsMirrorPadGrad(node) || IsPad(node)) {
return GetOrCreateIfNotFound<PadTransposer>("Pad");
}
if (IsReduceOp(node)) {
return GetOrCreateIfNotFound<ReduceTransposer>("ReduceOp");
}
if (IsReverseV2(node)) {
return GetOrCreateIfNotFound<ReverseV2Transposer>("ReverseV2");
}
if (IsSelect(node)) {
return GetOrCreateIfNotFound<SelectTransposer>("Select");
}
if (IsShape(node)) {
return GetOrCreateIfNotFound<ShapeTransposer>("Shape");
}
if (IsShapeN(node)) {
return GetOrCreateIfNotFound<ShapeNTransposer>("ShapeN");
}
if (IsSlice(node)) {
return GetOrCreateIfNotFound<SliceTransposer>("Slice");
}
if (IsSplit(node)) {
return GetOrCreateIfNotFound<SplitTransposer>("Split");
}
if (IsSplitV(node)) {
return GetOrCreateIfNotFound<SplitVTransposer>("SplitV");
}
if (IsSqueeze(node)) {
return GetOrCreateIfNotFound<SqueezeTransposer>("Squeeze");
}
if (IsStridedSlice(node)) {
return GetOrCreateIfNotFound<StridedSliceTransposer>("StridedSlice");
}
if (IsSwitch(node)) {
return GetOrCreateIfNotFound<SwitchTransposer>("Switch");
}
if (IsTernaryOp(node)) {
return GetOrCreateIfNotFound<TernaryOpTransposer>("TernaryOp");
}
if (IsTile(node)) {
return GetOrCreateIfNotFound<TileTransposer>("Tile");
}
if (IsUnaryGrad(node)) {
return GetOrCreateIfNotFound<UnaryGradTransposer>("UnaryGrad");
}
return nullptr;
}
}
} | #include "tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer_factory.h"
#include <memory>
#include "absl/container/flat_hash_set.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
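// Asserts that all ops in `ops` resolve to one shared transposer instance,
// that repeated lookups return the same pointer, and that the instance is
// distinct from those produced for previously checked op groups.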
void CheckSameTransposerForOps(absl::Span<const string> ops,
TransposerFactory* factory,
absl::flat_hash_set<Transposer*>* transposers) {
absl::flat_hash_set<Transposer*> created_transposers;
for (int i = 0; i < ops.size(); i++) {
NodeDef node;
node.set_op(ops[i]);
std::shared_ptr<Transposer> transposer1 = factory->GetTransposer(node);
ASSERT_NE(transposer1, nullptr);
if (i == 0) {
EXPECT_TRUE(transposers->insert(transposer1.get()).second);
} else {
EXPECT_FALSE(transposers->insert(transposer1.get()).second);
}
std::shared_ptr<Transposer> transposer2 = factory->GetTransposer(node);
ASSERT_NE(transposer2, nullptr);
EXPECT_EQ(transposer1.get(), transposer2.get());
created_transposers.insert(transposer1.get());
}
if (!ops.empty()) {
EXPECT_EQ(created_transposers.size(), 1);
}
}
TEST(TransposerFactoryTest, SanityCheck) {
TransposerFactory factory;
absl::flat_hash_set<Transposer*> transposers;
CheckSameTransposerForOps(
{"Conv2D", "FusedBatchNorm", "DepthwiseConv2dNative"}, &factory,
&transposers);
CheckSameTransposerForOps({"AvgPoolGrad"}, &factory, &transposers);
CheckSameTransposerForOps({"BiasAddGrad"}, &factory, &transposers);
CheckSameTransposerForOps({"_FusedBatchNormEx"}, &factory, &transposers);
CheckSameTransposerForOps({"FusedBatchNormGrad", "FusedBatchNormGradV2"},
&factory, &transposers);
CheckSameTransposerForOps(
{"Conv2DBackpropFilter", "DepthwiseConv2dNativeBackpropFilter"}, &factory,
&transposers);
CheckSameTransposerForOps(
{"Conv2DBackpropInput", "DepthwiseConv2dNativeBackpropInput"}, &factory,
&transposers);
CheckSameTransposerForOps({"MaxPoolGrad", "MaxPoolGradGrad"}, &factory,
&transposers);
CheckSameTransposerForOps({"MaxPoolGradV2", "MaxPoolGradGradV2"}, &factory,
&transposers);
CheckSameTransposerForOps({"AddN"}, &factory, &transposers);
CheckSameTransposerForOps({"IdentityN"}, &factory, &transposers);
CheckSameTransposerForOps({"Merge", "RefMerge"}, &factory, &transposers);
CheckSameTransposerForOps({"Select"}, &factory, &transposers);
CheckSameTransposerForOps({"Switch", "RefSwitch"}, &factory, &transposers);
CheckSameTransposerForOps({"Betainc"}, &factory, &transposers);
CheckSameTransposerForOps({"TanhGrad"}, &factory, &transposers);
CheckSameTransposerForOps({"Squeeze"}, &factory, &transposers);
CheckSameTransposerForOps({"MaxPoolV2"}, &factory, &transposers);
CheckSameTransposerForOps({"RealDiv", "Atan2", "Complex"}, &factory,
&transposers);
CheckSameTransposerForOps({"Concat", "ConcatV2"}, &factory, &transposers);
CheckSameTransposerForOps({"Pad", "PadV2", "MirrorPad", "MirrorPadGrad"},
&factory, &transposers);
CheckSameTransposerForOps({"ReverseV2"}, &factory, &transposers);
CheckSameTransposerForOps({"Tile"}, &factory, &transposers);
CheckSameTransposerForOps({"Shape"}, &factory, &transposers);
CheckSameTransposerForOps({"ShapeN"}, &factory, &transposers);
CheckSameTransposerForOps({"Fill"}, &factory, &transposers);
CheckSameTransposerForOps({"Slice"}, &factory, &transposers);
CheckSameTransposerForOps({"Split"}, &factory, &transposers);
CheckSameTransposerForOps({"SplitV"}, &factory, &transposers);
CheckSameTransposerForOps({"StridedSlice"}, &factory, &transposers);
CheckSameTransposerForOps({"Sum", "Mean", "Prod", "Max", "Min", "All", "Any"},
&factory, &transposers);
NodeDef node_unknown;
node_unknown.set_op("UnknownOp");
std::shared_ptr<Transposer> transposer_unknown =
factory.GetTransposer(node_unknown);
  EXPECT_EQ(transposer_unknown, nullptr);
}
TEST(TransposerFactoryTest, ShouldUseAllOpTransposer) {
TransposerFactory factory;
std::vector<OpDef> op_defs;
OpRegistry::Global()->GetRegisteredOps(&op_defs);
NodeDef node;
AttrValue value;
value.set_type(DataType::DT_DOUBLE);
node.mutable_attr()->insert({"T", value});
for (const OpDef& op_def : op_defs) {
node.set_op(op_def.name());
std::shared_ptr<Transposer> transposer = factory.GetTransposer(node);
if (transposer != nullptr) {
EXPECT_TRUE(IsLayoutSensitiveOp(node) || IsLayoutAgnosticOp(node))
<< "Transposer for op \"" << node.op()
<< "\" is created but not used. Add it to IsLayourSensitiveOp or "
"IslayoutAgnosticOp.";
}
}
}
}
}
} |
1,385 | cpp | tensorflow/tensorflow | common_subgraph_elimination | tensorflow/core/grappler/optimizers/common_subgraph_elimination.cc | tensorflow/core/grappler/optimizers/common_subgraph_elimination_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_COMMON_SUBGRAPH_ELIMINATION_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_COMMON_SUBGRAPH_ELIMINATION_H_
#include <unordered_set>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/platform/hash.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace grappler {
class Cluster;
struct GrapplerItem;
class CommonSubgraphElimination : public GraphOptimizer {
public:
CommonSubgraphElimination() {}
explicit CommonSubgraphElimination(RewriterConfig::Toggle opt_level)
: opt_level_(opt_level) {}
~CommonSubgraphElimination() override {}
string name() const override { return "common_subgraph_elimination"; };
bool UsesFunctionLibrary() const override { return false; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override;
private:
friend class CommonSubgraphEliminationTest;
bool CanDedup(const NodeDef& node) const;
Status DedupComputations(GraphDef* optimized_graph);
RewriterConfig::Toggle opt_level_;
bool fetch_nodes_known_ = false;
std::unordered_set<string> nodes_to_preserve_;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/common_subgraph_elimination.h"
#include <set>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/grappler/graph_topology_view.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/canonicalizer.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/grappler/utils/traversal.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/hash.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace grappler {
class Cluster;
}
}
using tensorflow::strings::StrCat;
namespace tensorflow {
namespace grappler {
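// Hash-based registry of canonical nodes: nodes with the same op, device,
// inputs, and attributes map to a single representative.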
class UniqueNodes {
public:
NodeDef* FindOrAddRepresentative(NodeDef* node) {
uint64 sig = ComputeSignature(*node);
std::vector<NodeDef*>& candidates = rep_[sig];
for (auto& candidate : candidates) {
if ((candidate == node) || SameNode(*candidate, *node)) {
return candidate;
}
}
candidates.push_back(node);
return node;
}
void RemoveRepresentative(NodeDef* node) {
auto it = memoized_signatures_.find(node);
if (it == memoized_signatures_.end()) return;
std::vector<NodeDef*>& candidates = rep_[it->second];
for (int i = 0, end = candidates.size(); i < end; ++i) {
if (candidates[i] == node) {
std::swap(candidates[i], candidates[candidates.size() - 1]);
candidates.resize(candidates.size() - 1);
break;
}
}
memoized_signatures_.erase(node);
}
private:
uint64 ComputeSignature(const NodeDef& node);
bool SameNode(const NodeDef& node1, const NodeDef& node2) const;
absl::flat_hash_map<uint64, std::vector<NodeDef*>> rep_;
absl::flat_hash_map<const NodeDef*, uint64> memoized_signatures_;
};
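// Computes (and memoizes) an order-insensitive hash over the node's op,
// device, inputs, and attributes.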
uint64 UniqueNodes::ComputeSignature(const NodeDef& node) {
auto it = memoized_signatures_.find(&node);
if (it != memoized_signatures_.end()) return it->second;
uint64 h = Hash64(node.op());
h = Hash64Combine(Hash64(node.device()), h);
for (const auto& input : node.input()) {
const TensorId input_tensor = ParseTensorName(input);
uint64 input_hash = Hash64Combine(
Hash64(input_tensor.node().data(), input_tensor.node().size()),
std::hash<int>()(input_tensor.index()));
h = Hash64CombineUnordered(input_hash, h);
}
for (const auto& attr : node.attr()) {
uint64 attr_hash =
Hash64Combine(Hash64(attr.first), FastAttrValueHash(attr.second));
h = Hash64CombineUnordered(attr_hash, h);
}
memoized_signatures_.emplace(&node, h);
return h;
}
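// Exact equality check used to resolve hash collisions: op, device, ordered
// inputs, and attribute values must all match.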
bool UniqueNodes::SameNode(const NodeDef& node1, const NodeDef& node2) const {
if (node1.op() != node2.op()) {
return false;
}
if (node1.device() != node2.device()) {
return false;
}
if (node1.input_size() != node2.input_size()) {
return false;
}
if (node1.attr_size() != node2.attr_size()) {
return false;
}
auto it1 = node1.input().begin();
auto it2 = node2.input().begin();
for (; it1 != node1.input().end(); ++it1, ++it2) {
if (*it1 != *it2) return false;
}
for (const auto& attr1 : node1.attr()) {
auto it = node2.attr().find(attr1.first);
if (it == node2.attr().end()) return false;
if (!AreAttrValuesEqual(attr1.second, it->second,
true)) {
return false;
}
}
return true;
}
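// A node may be deduplicated unless it must be preserved, is an Enter/Exit,
// or is placed on a device whose name contains "SPU"; Assert and Print are
// explicitly allowed, everything else must be free of side effects.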
bool CommonSubgraphElimination::CanDedup(const NodeDef& node) const {
if (nodes_to_preserve_.find(node.name()) != nodes_to_preserve_.end()) {
return false;
}
if (IsEnter(node) || IsExit(node)) {
return false;
}
if (node.device().find("SPU") != string::npos) {
return false;
}
if (IsAssert(node) || IsPrint(node)) {
return true;
}
return IsFreeOfSideEffect(node);
}
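// Core CSE pass: canonicalizes the graph, marks nodes that feed in-place ops
// as non-dedupable, then repeatedly folds each duplicate node into its
// representative and rewires the fanouts until a fixed point is reached.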
Status CommonSubgraphElimination::DedupComputations(GraphDef* optimized_graph) {
CanonicalizeGraph(optimized_graph);
GraphTopologyView graph_view;
if (!graph_view.InitializeFromGraph(*optimized_graph).ok()) {
LOG(WARNING) << "Failed to initialize GraphTopologyView.";
return absl::OkStatus();
}
absl::flat_hash_set<const NodeDef*> feeds_inplace_op;
for (int i = 0; i < optimized_graph->node_size(); ++i) {
const NodeDef& root = optimized_graph->node(i);
if (feeds_inplace_op.find(&root) != feeds_inplace_op.end()) continue;
if (ModifiesInputsInPlace(root)) {
const auto is_continue_traversal = [&](const NodeDef* node) -> bool {
return node->op() == root.op() || !NeverForwardsInputs(*node);
};
DfsTraversal(graph_view, {&root}, TraversalDirection::kFollowInputs,
DfsPredicates::Advance(is_continue_traversal),
DfsCallbacks::PreOrder([&](const NodeDef* node) {
feeds_inplace_op.insert(node);
}));
}
}
std::vector<bool> can_dedup(optimized_graph->node_size());
for (int i = 0; i < optimized_graph->node_size(); ++i) {
const NodeDef& node = optimized_graph->node(i);
can_dedup[i] = (feeds_inplace_op.find(&node) == feeds_inplace_op.end()) &&
CanDedup(node);
}
bool stop = true;
std::set<int> duplicates;
UniqueNodes nodes;
NodeMap node_map(optimized_graph);
do {
stop = true;
for (int i = 0; i < optimized_graph->node_size(); ++i) {
if (!can_dedup[i] || duplicates.find(i) != duplicates.end()) {
continue;
}
NodeDef* node = optimized_graph->mutable_node(i);
NodeDef* rep = nodes.FindOrAddRepresentative(node);
if (rep == node) {
continue;
}
const auto fanouts = node_map.GetOutputs(node->name());
for (NodeDef* fanout : fanouts) {
bool updated_fanout = false;
for (int i = 0; i < fanout->input_size(); ++i) {
string* fanout_input = fanout->mutable_input(i);
const int position =
NodePositionIfSameNode(*fanout_input, node->name());
if (position < -1) {
continue;
} else {
if (!updated_fanout) {
nodes.RemoveRepresentative(fanout);
}
updated_fanout = true;
if (position > 0) {
*fanout_input = StrCat(rep->name(), ":", position);
} else if (position == 0) {
*fanout_input = rep->name();
} else {
*fanout_input = StrCat("^", rep->name());
}
}
}
if (updated_fanout) {
node_map.UpdateInput(fanout->name(), node->name(), rep->name());
CanonicalizeNode(fanout);
}
}
if (fetch_nodes_known_) {
node->Clear();
}
duplicates.insert(i);
stop = false;
}
} while (!stop);
if (fetch_nodes_known_ && !duplicates.empty()) {
EraseNodesFromGraph(duplicates, optimized_graph);
}
return absl::OkStatus();
}
Status CommonSubgraphElimination::Optimize(Cluster* ,
const GrapplerItem& item,
GraphDef* optimized_graph) {
nodes_to_preserve_ = item.NodesToPreserve();
fetch_nodes_known_ = !item.fetch.empty();
*optimized_graph = item.graph;
TF_RETURN_IF_ERROR(TopologicalSort(optimized_graph));
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
return DedupComputations(optimized_graph);
}
}
} | #include "tensorflow/core/grappler/optimizers/common_subgraph_elimination.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/optimizers/arithmetic_optimizer_test_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
void VerifyGraphsMatch(const GraphDef& original_graph,
const GraphDef& optimized_graph, int line) {
EXPECT_EQ(original_graph.node_size(), optimized_graph.node_size()) << line;
for (int i = 0; i < original_graph.node_size(); ++i) {
const NodeDef& original = original_graph.node(i);
const NodeDef& optimized = optimized_graph.node(i);
EXPECT_EQ(original.name(), optimized.name()) << line;
EXPECT_EQ(original.op(), optimized.op()) << line;
EXPECT_EQ(original.input_size(), optimized.input_size()) << line;
for (int j = 0; j < original.input_size(); ++j) {
EXPECT_EQ(original.input(j), optimized.input(j)) << line;
}
}
}
}
class CommonSubgraphEliminationTest : public ArithmeticOptimizerTest {};
TEST_F(CommonSubgraphEliminationTest, NoOp) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"CPU:0"});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
CommonSubgraphElimination optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
VerifyGraphsMatch(item.graph, output, __LINE__);
}
TEST_F(CommonSubgraphEliminationTest, OpDedupping) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output c1 = ops::Const(s.WithOpName("c1"), {3.14, 2.7}, {1, 2});
Output c2 = ops::Const(s.WithOpName("c2"), {3.14, 2.7}, {1, 2});
Output div = ops::Div(s.WithOpName("div"), c1, c2);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"div"};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
CommonSubgraphElimination optimizer;
GraphDef output;
OptimizeTwice(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 2);
const NodeDef* new_c1 = node_map.GetNode("c1");
ASSERT_NE(new_c1, nullptr);
const NodeDef* new_div = node_map.GetNode("div");
ASSERT_NE(new_div, nullptr);
ASSERT_EQ(new_div->input_size(), 2);
EXPECT_EQ(new_div->input(0), "c1");
EXPECT_EQ(new_div->input(1), "c1");
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<double>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(CommonSubgraphEliminationTest, OpDeduppingAssertAndCheckNumerics) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output p = ops::Placeholder(s, DT_BOOL, ops::Placeholder::Shape({}));
Output c = ops::Const(s.WithOpName("c"), {3.14, 2.7}, {1, 2});
auto check1 = ops::CheckNumerics(s.WithOpName("check1"), c, "foo");
auto check2 = ops::CheckNumerics(s.WithOpName("check2"), c, "foo");
auto assert1 = ops::Assert(s.WithOpName("assert1"), p, {c});
auto assert2 = ops::Assert(s.WithOpName("assert2"), p, {c});
Output div = ops::Div(s.WithOpName("div").WithControlDependencies(
{assert1.operation, assert2.operation}),
check1, check2);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"div"};
Tensor bool_t(DT_BOOL, TensorShape({}));
bool_t.scalar<bool>().setConstant(true);
auto tensors_expected =
EvaluateNodes(item.graph, item.fetch, {{"Placeholder", bool_t}});
ASSERT_EQ(tensors_expected.size(), 1);
CommonSubgraphElimination optimizer;
GraphDef output;
OptimizeTwice(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 6);
const NodeDef* new_div = node_map.GetNode("div");
ASSERT_NE(new_div, nullptr);
ASSERT_EQ(new_div->input_size(), 3);
EXPECT_EQ(new_div->input(0), "check1");
EXPECT_EQ(new_div->input(1), "check2");
EXPECT_EQ(new_div->input(2), "^assert1");
auto tensors = EvaluateNodes(output, item.fetch, {{"Placeholder", bool_t}});
EXPECT_EQ(tensors.size(), 1);
test::ExpectTensorNear<double>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(CommonSubgraphEliminationTest, OpDedupCommutative) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output c1 = ops::Const(s.WithOpName("c1"), {1.0f, 2.0f}, {1, 2});
Output c2 = ops::Const(s.WithOpName("c2"), {3.0f, 4.0f}, {1, 2});
Output mul1 = ops::Mul(s.WithOpName("mul1"), c1, c2);
Output mul2 = ops::Mul(s.WithOpName("mul2"), c2, c1);
Output div1 = ops::Div(s.WithOpName("div1"), mul1, mul2);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"div1"};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
CommonSubgraphElimination optimizer;
GraphDef output;
OptimizeTwice(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 4);
const NodeDef* new_c1 = node_map.GetNode("c1");
ASSERT_NE(new_c1, nullptr);
const NodeDef* new_c2 = node_map.GetNode("c2");
ASSERT_NE(new_c2, nullptr);
const NodeDef* new_mul1 = node_map.GetNode("mul1");
ASSERT_NE(new_mul1, nullptr);
ASSERT_EQ(new_mul1->input_size(), 2);
EXPECT_EQ(new_mul1->input(0), "c1");
EXPECT_EQ(new_mul1->input(1), "c2");
const NodeDef* new_div1 = node_map.GetNode("div1");
ASSERT_NE(new_div1, nullptr);
ASSERT_EQ(new_div1->input_size(), 2);
EXPECT_EQ(new_div1->input(0), "mul1");
EXPECT_EQ(new_div1->input(1), "mul1");
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
}
} |
1,386 | cpp | tensorflow/tensorflow | shape_optimizer | tensorflow/core/grappler/optimizers/shape_optimizer.cc | tensorflow/core/grappler/optimizers/shape_optimizer_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_SHAPE_OPTIMIZER_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_SHAPE_OPTIMIZER_H_
#include <unordered_set>
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/frame.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace grappler {
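// Grappler pass that simplifies shape-related subgraphs, e.g. turning
// Prod(Shape(x)) into Size(x) and Size(a)/Size(b) into a constant when the
// size ratio is statically known.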
class ShapeOptimizer : public GraphOptimizer {
public:
ShapeOptimizer() {}
explicit ShapeOptimizer(RewriterConfig::Toggle opt_level) {}
~ShapeOptimizer() override {}
string name() const override { return "shape_optimizer"; };
bool UsesFunctionLibrary() const override { return false; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/shape_optimizer.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/symbolic_shapes.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace grappler {
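// Aborts early unless the graph contains at least one of the two target
// patterns (a Shape feeding an integer Prod, or an integer Div of two Size
// ops); static shape inference is run lazily on first use.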
Status ShapeOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) {
bool can_optimize = false;
bool has_div = false;
bool has_size = false;
bool has_shape = false;
bool has_prod = false;
auto is_int = [](const NodeDef& node) -> bool {
return node.attr().at("T").type() == DT_INT32 ||
node.attr().at("T").type() == DT_INT64;
};
for (const NodeDef& node : item.graph.node()) {
if (IsShape(node)) {
has_shape = true;
} else if (IsProd(node) && is_int(node)) {
has_prod = true;
} else if (IsDiv(node) && is_int(node)) {
has_div = true;
} else if (IsSize(node)) {
has_size = true;
}
if ((has_shape && has_prod) || (has_div && has_size)) {
can_optimize = true;
break;
}
}
if (!can_optimize) {
return absl::AbortedError("Nothing to do.");
}
*optimized_graph = item.graph;
GraphProperties properties(item);
bool inferred_properties = false;
{
MutableGraphView graph(optimized_graph);
for (auto& node : *optimized_graph->mutable_node()) {
if (!IsShape(node)) {
continue;
}
for (MutableGraphView::InputPort fanout :
graph.GetFanout(MutableGraphView::OutputPort(&node, 0))) {
if (fanout.node->op() != "Prod") {
continue;
}
if (fanout.node->attr().count("keep_dims") != 0 &&
fanout.node->attr().at("keep_dims").b()) {
continue;
}
const MutableGraphView::OutputPort reduce_indices =
graph.GetRegularFanin(MutableGraphView::InputPort(fanout.node, 1));
if (!inferred_properties) {
TF_RETURN_IF_ERROR(
properties.InferStatically(false,
false,
false));
inferred_properties = true;
}
const auto& prop =
properties.GetOutputProperties(reduce_indices.node->name());
const int prop_size = prop.size();
if (prop_size <= reduce_indices.port_id) {
continue;
}
const TensorShapeProto& reduction_indices_shape =
prop[reduce_indices.port_id].shape();
if (NumCoefficients(reduction_indices_shape) == 1) {
const auto& input_props = properties.GetInputProperties(node.name());
if (input_props.size() != 1) {
continue;
}
NodeDef size_node(*fanout.node);
const DataType type = input_props[0].dtype();
size_node.set_op("Size");
size_node.set_input(0, node.input(0));
size_node.set_input(1, AsControlDependency(node));
size_node.mutable_attr()->erase("Tidx");
size_node.mutable_attr()->erase("keep_dims");
(*size_node.mutable_attr())["out_type"] = fanout.node->attr().at("T");
(*size_node.mutable_attr())["T"].set_type(type);
size_node.set_device(node.device());
Status s = IsKernelRegisteredForNode(size_node);
if (!s.ok()) {
continue;
}
fanout.node->Swap(&size_node);
}
}
}
}
{
MutableGraphView graph(optimized_graph);
for (auto& node : *optimized_graph->mutable_node()) {
if (node.op() == "Div") {
const MutableGraphView::OutputPort input1 =
graph.GetRegularFanin(MutableGraphView::InputPort(&node, 0));
const MutableGraphView::OutputPort input2 =
graph.GetRegularFanin(MutableGraphView::InputPort(&node, 1));
if (input1.node == nullptr || input2.node == nullptr) continue;
if (!IsSize(*input1.node) || !IsSize(*input2.node)) {
continue;
}
if (!inferred_properties) {
TF_RETURN_IF_ERROR(
properties.InferStatically(false,
false,
false));
inferred_properties = true;
}
const auto& prop1 = properties.GetInputProperties(input1.node->name());
const auto& prop2 = properties.GetInputProperties(input2.node->name());
if (prop1.size() != 1 || prop2.size() != 1) {
continue;
}
const TensorShapeProto& shape1 = prop1[0].shape();
const TensorShapeProto& shape2 = prop2[0].shape();
int64_t result = ComputeSizeRatio(shape1, shape2);
if (result >= 0) {
node.set_op("Const");
DataType dtype = node.attr().at("T").type();
node.mutable_attr()->erase("T");
(*node.mutable_attr())["dtype"].set_type(dtype);
TensorProto* t = (*node.mutable_attr())["value"].mutable_tensor();
t->set_dtype(dtype);
*t->mutable_tensor_shape() = TensorShapeProto();
if (dtype == DT_INT32) {
t->add_int_val(result);
} else {
t->add_int64_val(result);
}
node.set_input(0, AsControlDependency(node.input(0)));
node.set_input(1, AsControlDependency(node.input(1)));
}
}
}
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/shape_optimizer.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class ShapeOptimizerTest : public GrapplerTest {};
TEST_F(ShapeOptimizerTest, OptimizeShapeProduct) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/cpu:0");
Output a = ops::Const(s.WithOpName("a"), 3.14f, {32, 16});
Output c = ops::Shape(s.WithOpName("c"), a);
Output d = ops::Const(s.WithOpName("d"), 0, {1});
ops::ReduceProd::Attrs attrs;
Output e = ops::ReduceProd(s.WithOpName("e"), c, d, attrs.KeepDims(false));
Output f = ops::ReduceProd(s.WithOpName("f"), c, d, attrs.KeepDims(true));
GrapplerItem item;
item.fetch = {"e", "f"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
ShapeOptimizer optimizer;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "e") {
found++;
EXPECT_EQ("Size", node.op());
EXPECT_EQ("a", node.input(0));
} else if (node.name() == "f") {
found++;
EXPECT_EQ("Prod", node.op());
EXPECT_EQ("c", node.input(0));
}
}
EXPECT_EQ(2, found);
auto tensors_actual = EvaluateNodes(output, item.fetch);
EXPECT_NEAR(tensors_expected[0].scalar<int>()(),
tensors_actual[0].scalar<int>()(), 0);
EXPECT_NEAR(tensors_expected[1].scalar<int>()(),
tensors_actual[1].scalar<int>()(), 0);
}
TEST_F(ShapeOptimizerTest, OptimizeShapeProductMissingKernel) {
{
std::vector<std::unique_ptr<Device>> devices;
SessionOptions session_options;
session_options.config.mutable_gpu_options()
->set_per_process_gpu_memory_fraction(0.1);
session_options.env = Env::Default();
TF_CHECK_OK(DeviceFactory::GetFactory(DEVICE_GPU)
->AddDevices(session_options, "", &devices));
bool found_gpu = false;
for (const auto& d : devices) {
if (d->device_type() == DEVICE_GPU) {
found_gpu = true;
break;
}
}
if (!found_gpu) {
LOG(INFO) << "Skipping test that requires GPU.";
return;
}
}
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/cpu:0");
Output a = ops::Const(s.WithOpName("a"), string("Hello"), {32, 16});
Output c = ops::Shape(s.WithOpName("c"), a);
Output d = ops::Const(s.WithOpName("d"), 0, {1});
ops::ReduceProd::Attrs attrs;
Output e = ops::ReduceProd(s.WithDevice("/gpu:0").WithOpName("e"), c, d,
attrs.KeepDims(false));
GrapplerItem item;
item.fetch = {"e"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
ShapeOptimizer optimizer;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "e") {
found++;
EXPECT_EQ("Size", node.op());
EXPECT_EQ("a", node.input(0));
EXPECT_EQ("/cpu:0", node.device());
}
}
EXPECT_EQ(1, found);
auto tensors_actual = EvaluateNodes(output, item.fetch);
EXPECT_NEAR(tensors_expected[0].scalar<int>()(),
tensors_actual[0].scalar<int>()(), 0);
}
TEST_F(ShapeOptimizerTest, OptimizeShapeRatio) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 3.14f, {32, 32});
Output b = ops::Const(s.WithOpName("b"), 3.14f, {32, 16});
Output c = ops::Size(s.WithOpName("c"), a);
Output d = ops::Size(s.WithOpName("d"), b);
Output e = ops::Div(s.WithOpName("e"), c, d);
GrapplerItem item;
item.fetch = {"e"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
ShapeOptimizer optimizer;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "e") {
found++;
EXPECT_EQ("Const", node.op());
}
}
EXPECT_EQ(1, found);
auto tensors_actual = EvaluateNodes(output, item.fetch);
EXPECT_NEAR(tensors_expected[0].scalar<int>()(),
tensors_actual[0].scalar<int>()(), 0);
}
}
}
} |
1,387 | cpp | tensorflow/tensorflow | generic_layout_optimizer | tensorflow/core/grappler/optimizers/generic_layout_optimizer.cc | tensorflow/core/grappler/optimizers/generic_layout_optimizer_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_GENERIC_LAYOUT_OPTIMIZER_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_GENERIC_LAYOUT_OPTIMIZER_H_
#include <string>
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace grappler {
class GenericLayoutOptimizer : public GraphOptimizer {
public:
explicit GenericLayoutOptimizer(string enforced_layout = "")
: GenericLayoutOptimizer(RewriterConfig::DEFAULT,
RewriterConfig::NO_CONVERSION_ON_CPU,
enforced_layout) {}
explicit GenericLayoutOptimizer(RewriterConfig::Toggle opt_level,
string enforced_layout = "")
: GenericLayoutOptimizer(opt_level, RewriterConfig::NO_CONVERSION_ON_CPU,
enforced_layout) {}
explicit GenericLayoutOptimizer(RewriterConfig::Toggle opt_level,
RewriterConfig::CpuLayout layout_conversion,
string enforced_layout = "")
: opt_level_(opt_level),
cpu_layout_conversion_(layout_conversion),
enforced_layout_(enforced_layout) {}
~GenericLayoutOptimizer() override = default;
string name() const override { return "layout"; }
bool UsesFunctionLibrary() const override { return false; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* output) override;
private:
RewriterConfig::Toggle opt_level_;
RewriterConfig::CpuLayout cpu_layout_conversion_;
const string enforced_layout_;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/generic_layout_optimizer.h"
#include <utility>
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer.h"
#include "tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer_factory.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/tensor_float_32_utils.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kNHWC[] = "NHWC";
constexpr char kNCHW[] = "NCHW";
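// A layout swap is only considered worthwhile when at least half of the
// available GPUs have the faster architecture, and at least half of the GPU
// convolutions use a dtype that benefits from the swap.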
constexpr float kGPURatioThreshold = 0.5;
constexpr float kConvGPUExpectedDtypeThreshold = 0.5;
struct MutableNodeViewFormatter {
void operator()(std::string* out, utils::MutableNodeView* node_view) const {
absl::StrAppend(out, node_view->node()->name());
}
};
struct GpuStats {
int num_gpus;
int num_voltas;
int num_amperes;
};
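// Counts the GPUs in the cluster and classifies them by the "architecture"
// entry of the device environment: compute capability >= 7.0 counts as
// Volta, >= 8.0 additionally counts as Ampere.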
inline GpuStats GetNumGPUs(const Cluster& cluster) {
auto devices = cluster.GetDevices();
GpuStats gpu_stats{};
for (const auto& device : devices) {
if (device.second.type() != kGPU) {
continue;
}
gpu_stats.num_gpus++;
auto compute_capability_it =
device.second.environment().find("architecture");
if (compute_capability_it == device.second.environment().end()) {
continue;
}
double compute_capability = 0.0;
if (absl::SimpleAtod(compute_capability_it->second, &compute_capability)) {
if (compute_capability >= 7.0) gpu_stats.num_voltas++;
if (compute_capability >= 8.0) gpu_stats.num_amperes++;
}
}
return gpu_stats;
}
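// Returns true if the graph contains a Conv2D/Conv3D backprop (filter or
// input) node of type `data_type` placed on a device of type `device`.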
inline bool ConvBackpropExists(const TransposeContext& context,
absl::string_view device,
const DataType& data_type) {
for (const auto& node : context.graph_view->GetNodes()) {
const auto* node_def = node.node();
if (!IsConv2DBackpropFilter(*node_def) &&
!IsConv2DBackpropInput(*node_def) &&
!IsConv3DBackpropFilterV2(*node_def) &&
!IsConv3DBackpropInputV2(*node_def)) {
continue;
}
const string& device_name = GetDeviceName(*node_def);
string device_type;
string task;
if (!DeviceNameUtils::SplitDeviceName(device_name, &task, &device_type) ||
!absl::StrContains(absl::AsciiStrToLower(device_type),
absl::AsciiStrToLower(device))) {
continue;
}
const auto* t_attr = node.GetAttr("T");
if (t_attr == nullptr) {
continue;
}
if (t_attr->type() == data_type) {
return true;
}
}
return false;
}
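// Picks the (src, dst) data format pair for the conversion. The default
// direction is NHWC -> NCHW; it is reversed when NHWC is enforced, or when
// enough GPU convolutions prefer NHWC: fp16 on Volta+, bf16 on Ampere+, or
// fp32 on Ampere+ with TF32 enabled and no fp32 convolution backprop.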
inline std::pair<string, string> GetSrcAndDstDataFormats(
const TransposeContext& context, GpuStats gpu_stats) {
string src_format = kNHWC;
string dst_format = kNCHW;
const bool is_NHWC_enforced =
(!context.enforced_layout.empty() && context.enforced_layout == "NHWC");
const bool volta_ready =
(static_cast<float>(gpu_stats.num_voltas) /
static_cast<float>(gpu_stats.num_gpus)) >= kGPURatioThreshold;
const bool ampere_ready =
(static_cast<float>(gpu_stats.num_amperes) /
static_cast<float>(gpu_stats.num_gpus)) >= kGPURatioThreshold;
int num_conv_gpu = 0;
int num_conv_gpu_prefer_swap = 0;
bool fp32_backprop = ConvBackpropExists(context, kGPU, DT_FLOAT);
for (const auto& node : context.graph_view->GetNodes()) {
const auto* node_def = node.node();
if (!IsConv2D(*node_def) && !IsConv3D(*node_def)) {
continue;
}
const string& device_name = GetDeviceName(*node_def);
string device_type;
string task;
if (!DeviceNameUtils::SplitDeviceName(device_name, &task, &device_type) ||
!absl::StrContains(absl::AsciiStrToLower(device_type),
absl::AsciiStrToLower(kGPU))) {
continue;
}
num_conv_gpu++;
const auto* t_attr = node.GetAttr("T");
if (t_attr == nullptr) {
continue;
}
const DataType dtype = t_attr->type();
if ((volta_ready && dtype == DT_HALF) ||
(ampere_ready && dtype == DT_BFLOAT16) ||
(ampere_ready && dtype == DT_FLOAT &&
tsl::tensor_float_32_execution_enabled() && !fp32_backprop)) {
num_conv_gpu_prefer_swap++;
}
}
const bool should_swap =
num_conv_gpu > 0 &&
(static_cast<float>(num_conv_gpu_prefer_swap) /
static_cast<float>(num_conv_gpu)) >= kConvGPUExpectedDtypeThreshold;
if (is_NHWC_enforced || (context.enforced_layout.empty() && should_swap)) {
std::swap(src_format, dst_format);
}
VLOG(2) << "Layout conversion of " << src_format << " to " << dst_format
<< " will take place.";
return {src_format, dst_format};
}
Status ExpandLayoutSensitiveOp(TransposeContext* context,
TransposerFactory* transposer_factory) {
const int num_nodes = context->num_nodes;
for (int i = 0; i < num_nodes; ++i) {
auto* node_view = context->graph_view->GetNode(i);
auto* node_def = node_view->node();
if (IsLayoutSensitiveOp(*node_def)) {
std::shared_ptr<Transposer> transposer =
transposer_factory->GetTransposer(*node_def);
if (transposer == nullptr) {
return Status(
absl::StatusCode::kNotFound,
absl::StrCat(
"Layout sensitive operation should have a transposer. Node: ",
node_def->DebugString()));
}
TF_RETURN_IF_ERROR(transposer->TransposeNode(context, node_view));
}
}
return absl::OkStatus();
}
Status ExpandLayoutAgnosticOp(TransposeContext* context,
TransposerFactory* transposer_factory) {
const int num_nodes = context->num_nodes;
for (int i = 0; i < num_nodes; ++i) {
auto* node_view = context->graph_view->GetNode(i);
auto* node_def = node_view->node();
if (IsLayoutAgnosticOp(*node_def)) {
const auto& transposer = transposer_factory->GetTransposer(*node_def);
if (transposer == nullptr) {
return Status(
absl::StatusCode::kNotFound,
absl::StrCat(
"Layout agnostic operation should have a transposer. Node: ",
node_def->DebugString()));
}
TF_RETURN_IF_ERROR(transposer->TransposeNode(context, node_view));
}
}
return absl::OkStatus();
}
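// Two chained Transpose nodes with constant permutations cancel out when
// the permutations compose to the identity, i.e.
// fanout_perm[fanin_perm[i]] == i for every index i.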
inline bool IsCancellableConstPermTransposeNodePair(
const utils::MutableNodeView& fanout_transpose,
const utils::MutableNodeView& fanin_transpose) {
Tensor fanout_tensor;
if (!GetValueAttrFromConstInputNode(fanout_transpose, IsTranspose, 1,
&fanout_tensor)) {
return false;
}
Tensor fanin_tensor;
if (!GetValueAttrFromConstInputNode(fanin_transpose, IsTranspose, 1,
&fanin_tensor)) {
return false;
}
if (fanout_tensor.NumElements() != fanin_tensor.NumElements()) {
return false;
}
const auto& fanout_tensor_data = fanout_tensor.unaligned_flat<int32>();
const auto& fanin_tensor_data = fanin_tensor.unaligned_flat<int32>();
const int num_elements = fanout_tensor.NumElements();
for (int i = 0; i < num_elements; ++i) {
if (fanout_tensor_data(fanin_tensor_data(i)) != i) {
return false;
}
}
return true;
}
inline bool IsCancellableDataFormatNodePair(
const utils::MutableNodeView& fanout_transpose,
const utils::MutableNodeView& fanin_transpose) {
if (!IsDataFormatOp(fanout_transpose) || !IsDataFormatOp(fanin_transpose)) {
return false;
}
auto src_dst_match = [](const utils::MutableNodeView& src,
const utils::MutableNodeView& dst) {
const auto* src_format = src.GetAttr(kAttrSrcFormat);
if (src_format == nullptr) {
return false;
}
const auto* dst_format = dst.GetAttr(kAttrDstFormat);
if (dst_format == nullptr) {
return false;
}
return src_format->s() == dst_format->s();
};
return src_dst_match(fanin_transpose, fanout_transpose) &&
src_dst_match(fanout_transpose, fanin_transpose);
}
inline bool IsCancellableNodePair(
const utils::MutableNodeView& fanout_transpose,
const utils::MutableNodeView& fanin_transpose) {
return IsCancellableConstPermTransposeNodePair(fanout_transpose,
fanin_transpose) ||
IsCancellableDataFormatNodePair(fanout_transpose, fanin_transpose);
}
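// Removes mutually cancelling node pairs (Transpose/Transpose or matching
// DataFormat ops) that this optimizer itself inserted (only nodes with
// index >= original_num_nodes are inspected), forwarding the surviving
// fanin to all fanouts of the erased pair.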
Status EraseCancellableNodes(TransposeContext* context) {
const int original_num_nodes = context->num_nodes;
utils::MutableGraphView* graph_view = context->graph_view.get();
utils::Mutation* mutation = graph_view->GetMutationBuilder();
const int num_nodes = graph_view->NumNodes();
for (int i = original_num_nodes; i < num_nodes; ++i) {
auto* node = graph_view->GetNode(i);
if (node->NumRegularFanins() < 1) {
continue;
}
const auto& regular_fanin_0 = node->GetRegularFanin(0);
auto* fanin_node = regular_fanin_0.node_view();
if (fanin_node->node_index() < original_num_nodes) {
continue;
}
if (!IsCancellableNodePair(*node, *fanin_node)) {
continue;
}
const auto& fanin_to_forward = fanin_node->GetRegularFanin(0);
TensorId fanin_id_to_forward(fanin_to_forward.node_view()->GetName(),
fanin_to_forward.index());
for (const auto& regular_fanout : node->GetRegularFanout(0)) {
mutation->AddOrUpdateRegularFanin(regular_fanout.node_view(),
regular_fanout.index(),
fanin_id_to_forward);
}
mutation->RemoveNode(node);
if (node->NumRegularFanins() > 1) {
mutation->RemoveNode(node->GetRegularFanin(1).node_view());
}
mutation->RemoveNode(fanin_node);
if (fanin_node->NumRegularFanins() > 1) {
mutation->RemoveNode(fanin_node->GetRegularFanin(1).node_view());
}
}
return mutation->Apply();
}
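// Cancels Transpose -> Pad -> Transpose chains: when every Transpose
// reading the Pad uses the same permutation and cancels the Transpose
// feeding it, the paddings constant is permuted in place and all the
// surrounding Transpose nodes are rewritten to Identity.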
Status EraseCancellableNodesAroundPad(TransposeContext* context) {
utils::MutableGraphView* graph_view = context->graph_view.get();
utils::Mutation* mutation = graph_view->GetMutationBuilder();
absl::flat_hash_set<utils::MutableNodeView*> cancelled_transposes;
const int num_nodes = graph_view->NumNodes();
for (int i = 0; i < num_nodes; ++i) {
auto* transpose_after = graph_view->GetNode(i);
if (!IsTranspose(*transpose_after->node())) continue;
if (cancelled_transposes.contains(transpose_after)) continue;
const auto& transpose_after_fanin = transpose_after->GetRegularFanin(0);
auto* pad = transpose_after_fanin.node_view();
if (!IsPad(*pad->node())) continue;
const auto& pad_fanin_0 = pad->GetRegularFanin(0);
auto* transpose_before = pad_fanin_0.node_view();
if (!IsTranspose(*transpose_before->node())) continue;
if (transpose_before->NumRegularFanouts() != 1) continue;
if (!IsCancellableConstPermTransposeNodePair(*transpose_after,
*transpose_before))
continue;
Tensor paddings_t;
if (!GetValueAttrFromConstInputNode(*pad, IsPad, 1, &paddings_t)) continue;
const auto& pad_fanin_1 = pad->GetRegularFanin(1);
auto* paddings = pad_fanin_1.node_view();
if (paddings->NumRegularFanouts() != 1) continue;
Tensor permute_t;
if (!GetValueAttrFromConstInputNode(*transpose_after, IsTranspose, 1,
&permute_t))
continue;
std::vector<utils::MutableNodeView*> pad_fanout_transposes;
pad_fanout_transposes.emplace_back(transpose_after);
bool pad_has_unsupported_fanout = false;
for (auto& fanout : pad->GetRegularFanout(0)) {
auto* extra_transpose = fanout.node_view();
if (extra_transpose == transpose_after) continue;
Tensor extra_permute_t;
if (!GetValueAttrFromConstInputNode(*extra_transpose, IsTranspose, 1,
&extra_permute_t) ||
extra_permute_t.tensor_data() != permute_t.tensor_data()) {
pad_has_unsupported_fanout = true;
break;
}
pad_fanout_transposes.emplace_back(extra_transpose);
}
if (pad_has_unsupported_fanout) continue;
VLOG(0) << "Cancel Transpose nodes around Pad:"
<< " transpose_before=" << transpose_before->node()->name()
<< " pad=" << pad->node()->name() << " transpose_after="
<< absl::StrJoin(pad_fanout_transposes, ",",
MutableNodeViewFormatter());
auto permutation_s = absl::Span<int32>(permute_t.flat<int32>().data(),
permute_t.NumElements());
auto paddings_s = absl::Span<int32>(paddings_t.flat<int32>().data(),
paddings_t.NumElements());
TF_RETURN_IF_ERROR(
PermuteDouble(absl::StrCat("paddings in ", pad->GetName()),
permutation_s, &paddings_s));
AttrValue permuted_paddings_tensor;
paddings_t.AsProtoTensorContent(permuted_paddings_tensor.mutable_tensor());
mutation->AddOrUpdateNodeAttr(paddings, "value", permuted_paddings_tensor);
const auto transpose_to_identity =
[&cancelled_transposes,
&mutation](utils::MutableNodeView* transpose) -> void {
mutation->UpdateNodeOp(transpose, "Identity");
mutation->RemoveNodeAttr(transpose, "Tperm");
mutation->RemoveRegularFanin(transpose, 1);
cancelled_transposes.insert(transpose);
};
transpose_to_identity(transpose_before);
absl::c_for_each(pad_fanout_transposes, transpose_to_identity);
}
return mutation->Apply();
}
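// Cached _output_shapes attributes are stale after the layout rewrite, so
// strip them from every node except function arguments.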
Status EraseOutputShapeAttrs(TransposeContext* context) {
utils::MutableGraphView* graph_view = context->graph_view.get();
utils::Mutation* mutation = graph_view->GetMutationBuilder();
const int num_nodes = graph_view->NumNodes();
for (int i = 0; i < num_nodes; ++i) {
auto* node = graph_view->GetNode(i);
if (IsArg(*node->node())) {
continue;
}
mutation->RemoveNodeAttr(node, kAttrOutputShape);
TF_RETURN_IF_ERROR(mutation->Apply());
}
return absl::OkStatus();
}
}
Status GenericLayoutOptimizer::Optimize(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output) {
if (cluster == nullptr) {
LOG(WARNING)
<< "generic layout optimizer was called with cluster == nullptr";
return errors::Aborted("cluster == nullptr.");
}
if (!enforced_layout_.empty() && enforced_layout_ != "NHWC" &&
enforced_layout_ != "NCHW") {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("Invalid value for enforced_layout: ", enforced_layout_,
". Supported layouts: 'NHWC', 'NCHW'."));
}
const auto gpu_stats = GetNumGPUs(*cluster);
const bool is_aggressive = opt_level_ == RewriterConfig::AGGRESSIVE;
TransposeContext context;
context.enforced_layout = enforced_layout_;
if (gpu_stats.num_gpus > 0) {
TF_RETURN_IF_ERROR(TransposeContext::InitializeTransposeContext(
is_aggressive, item, cluster, &context));
const auto src_dst_formats = GetSrcAndDstDataFormats(context, gpu_stats);
context.AssignDeviceAndDataFormats(kGPU, src_dst_formats.first,
src_dst_formats.second);
} else {
TF_RETURN_IF_ERROR(TransposeContext::InitializeTransposeContext(
is_aggressive, item, cluster, &context));
switch (cpu_layout_conversion_) {
case RewriterConfig::NCHW_TO_NHWC:
context.AssignDeviceAndDataFormats(kCPU, kNCHW, kNHWC);
break;
case RewriterConfig::NHWC_TO_NCHW:
return errors::Aborted(
"Conversion from NHWC to NCHW is currently not available for "
"CPU.");
default:
*output = item.graph;
VLOG(2) << "No layout conversion will take place for CPU.";
return absl::OkStatus();
}
}
TransposerFactory transposer_factory;
TF_RETURN_IF_ERROR(ExpandLayoutSensitiveOp(&context, &transposer_factory));
if (context.graph.node_size() > context.num_nodes || is_aggressive) {
TF_RETURN_IF_ERROR(ExpandLayoutAgnosticOp(&context, &transposer_factory));
TF_RETURN_IF_ERROR(EraseCancellableNodes(&context));
TF_RETURN_IF_ERROR(EraseCancellableNodesAroundPad(&context));
    TF_RETURN_IF_ERROR(
        context.graph_view->SortTopologically(/*ignore_cycles=*/false, {}));
}
TF_RETURN_IF_ERROR(EraseOutputShapeAttrs(&context));
*output = context.graph;
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/generic_layout_optimizer.h"
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/clusters/single_machine.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/tensor_float_32_utils.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
using ::tensorflow::Scope;
using ::tensorflow::ops::Conv2D;
using ::tensorflow::ops::Conv3D;
using ::tensorflow::ops::Identity;
using ::tensorflow::ops::RandomUniform;
constexpr int kBatchSize = 32;
constexpr int kWidth = 10;
constexpr int kHeight = 10;
constexpr int kDepthIn = 8;
constexpr int kKernel = 3;
constexpr int kDepthOut = 16;
#if (GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
#define DIMS(n, h, w, c) \
{ n, h, w, c }
#define SRC_DATA_FORMAT "NHWC"
#define DST_DATA_FORMAT "NCHW"
#define DEVICE "GPU"
#define REWRITER_CONFIG \
RewriterConfig::DEFAULT, RewriterConfig::NO_CONVERSION_ON_CPU
#define PERMUTATION_SRC_TO_DST \
{ 0, 3, 1, 2 }
#define PERMUTATION_DST_TO_SRC \
{ 0, 2, 3, 1 }
#define DIMS_5D(n, d, h, w, c) \
{ n, d, h, w, c }
#define SRC_DATA_FORMAT_5D "NDHWC"
#define DST_DATA_FORMAT_5D "NCDHW"
#else
#define DIMS(n, h, w, c) \
{ n, c, h, w }
#define SRC_DATA_FORMAT "NCHW"
#define DST_DATA_FORMAT "NHWC"
#define DEVICE "CPU"
#define REWRITER_CONFIG RewriterConfig::DEFAULT, RewriterConfig::NCHW_TO_NHWC
#define PERMUTATION_SRC_TO_DST \
{ 0, 2, 3, 1 }
#define PERMUTATION_DST_TO_SRC \
{ 0, 3, 1, 2 }
#define DIMS_5D(n, d, h, w, c) \
{ n, c, d, h, w }
#define SRC_DATA_FORMAT_5D "NCDHW"
#define DST_DATA_FORMAT_5D "NDHWC"
#endif
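// The macros above parameterize the tests so the same assertions cover both
// build flavors: NHWC -> NCHW on GPU builds and NCHW -> NHWC on CPU-only
// builds.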
template <typename T = float>
Output SimpleConv2D(tensorflow::Scope* s, int input_size, int filter_size,
const string& padding, const string& device) {
int batch_size = 8;
int input_height = input_size;
int input_width = input_size;
int input_depth = 3;
int filter_count = 2;
int stride = 1;
TensorShape input_shape(
DIMS(batch_size, input_height, input_width, input_depth));
Tensor input_data(DataTypeToEnum<T>::value, input_shape);
test::FillIota<T>(&input_data, static_cast<T>(1));
Output input =
ops::Const(s->WithOpName("Input"), Input::Initializer(input_data));
TensorShape filter_shape(
{filter_size, filter_size, input_depth, filter_count});
Tensor filter_data(DataTypeToEnum<T>::value, filter_shape);
test::FillIota<T>(&filter_data, static_cast<T>(1));
Output filter =
ops::Const(s->WithOpName("Filter"), Input::Initializer(filter_data));
Output conv = ops::Conv2D(s->WithOpName("Conv2D").WithDevice(device), input,
filter, DIMS(1, stride, stride, 1), padding,
ops::Conv2D::Attrs().DataFormat(SRC_DATA_FORMAT));
return conv;
}
Output SimpleConv2DBackpropInput(tensorflow::Scope* s, int input_size,
int filter_size, const string& padding,
bool dilated, const int input_sizes_length) {
int batch_size = 128;
int input_height = input_size;
int input_width = input_size;
int input_depth = 3;
int filter_count = 2;
int stride = 1;
TensorShape input_sizes_shape({input_sizes_length});
Tensor input_data(DT_INT32, input_sizes_shape);
if (input_sizes_length == 4) {
test::FillValues<int>(
&input_data, DIMS(batch_size, input_height, input_width, input_depth));
} else {
test::FillValues<int>(&input_data, {input_height, input_width});
}
Output input_sizes =
ops::Const(s->WithOpName("InputSizes"), Input::Initializer(input_data));
TensorShape filter_shape(
{filter_size, filter_size, input_depth, filter_count});
Output filter =
ops::Variable(s->WithOpName("Filter"), filter_shape, DT_FLOAT);
int output_height = input_height;
int output_width = input_width;
TensorShape output_shape(
DIMS(batch_size, output_height, output_width, filter_count));
Tensor output_data(DT_FLOAT, output_shape);
test::FillIota<float>(&output_data, 1.0f);
Output output =
ops::Const(s->WithOpName("Output"), Input::Initializer(output_data));
Output conv_backprop_input;
Output input_sizes_i =
ops::Identity(s->WithOpName("InputSizesIdentity"), input_sizes);
ops::Conv2DBackpropInput::Attrs attrs;
attrs = attrs.DataFormat(SRC_DATA_FORMAT);
if (dilated) {
attrs = attrs.Dilations(DIMS(1, 2, 2, 1));
}
conv_backprop_input = ops::Conv2DBackpropInput(
s->WithOpName("Conv2DBackpropInput"), input_sizes_i, filter, output,
DIMS(1, stride, stride, 1), padding, attrs);
return conv_backprop_input;
}
template <typename T = float>
Output SimpleConv3D(tensorflow::Scope* s, int input_size, int filter_size,
const string& padding, const string& device) {
int batch_size = 8;
int input_height = input_size;
int input_width = input_size;
int input_depth = 4;
int input_channel = 3;
int filter_count = 6;
int stride = 1;
TensorShape input_shape(DIMS_5D(batch_size, input_depth, input_height,
input_width, input_channel));
Tensor input_data(DataTypeToEnum<T>::value, input_shape);
test::FillIota<T>(&input_data, static_cast<T>(1));
Output input =
ops::Const(s->WithOpName("Input"), Input::Initializer(input_data));
TensorShape filter_shape(
{filter_size, filter_size, filter_size, input_channel, filter_count});
Tensor filter_data(DataTypeToEnum<T>::value, filter_shape);
test::FillIota<T>(&filter_data, static_cast<T>(1));
Output filter =
ops::Const(s->WithOpName("Filter"), Input::Initializer(filter_data));
Output conv =
ops::Conv3D(s->WithOpName("Conv3D").WithDevice(device), input, filter,
DIMS_5D(1, stride, stride, stride, 1), padding,
ops::Conv3D::Attrs().DataFormat(SRC_DATA_FORMAT_5D));
return conv;
}
class GenericLayoutOptimizerTest : public GrapplerTest {
protected:
void SetUp() override {
bool gpu_available = GetNumAvailableGPUs() > 0;
if (gpu_available) {
      virtual_cluster_ = std::make_unique<SingleMachine>(
          /*timeout_s=*/10, /*num_cpu_cores=*/1, /*num_gpus=*/1);
} else {
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
cpu_device.set_frequency(1000);
cpu_device.set_num_cores(4);
cpu_device.set_bandwidth(32);
cpu_device.set_l1_cache_size(32 * 1024);
cpu_device.set_l2_cache_size(256 * 1024);
cpu_device.set_l3_cache_size(4 * 1024 * 1024);
cpu_device.set_memory_size(1024 * 1024);
#if (GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
DeviceProperties gpu_device;
gpu_device.set_type("GPU");
gpu_device.mutable_environment()->insert({"architecture", "6"});
      virtual_cluster_ = absl::WrapUnique(new VirtualCluster(
          {{"/CPU:0", cpu_device}, {"/GPU:1", gpu_device}}));
#else
virtual_cluster_ =
absl::WrapUnique(new VirtualCluster({{"/CPU:0", cpu_device}}));
#endif
}
TF_ASSERT_OK(virtual_cluster_->Provision());
}
void TearDown() override {
TF_ASSERT_OK(virtual_cluster_->Shutdown());
tsl::enable_tensor_float_32_execution(true);
}
std::unique_ptr<Cluster> virtual_cluster_;
};
void VerifyRegularFaninMatch(const utils::NodeView* node, int port,
absl::string_view fanin_name, int fanin_port) {
ASSERT_GE(node->NumRegularFanins(), port);
const auto& fanin = node->GetRegularFanin(port);
EXPECT_EQ(fanin.node_view()->GetName(), fanin_name);
EXPECT_EQ(fanin.index(), fanin_port);
}
void VerifyRegularFanoutMatch(const utils::NodeView* node, int port,
absl::string_view fanout_name, int fanout_port) {
bool found = false;
for (const auto& regular_fanout : node->GetRegularFanout(port)) {
if (regular_fanout.node_view()->GetName() == fanout_name &&
regular_fanout.index() == fanout_port) {
found = true;
}
}
EXPECT_TRUE(found);
}
void VerifyDataFormatAttributeMatch(const utils::NodeView* node,
absl::string_view attr_value) {
const auto* attr = node->GetAttr("data_format");
ASSERT_NE(attr, nullptr);
EXPECT_EQ(attr->s(), attr_value);
}
TEST_F(GenericLayoutOptimizerTest, OptimizeSimpleConv2DGraph) {
Scope scope = Scope::NewRootScope();
auto conv2d = SimpleConv2D(&scope, 4, 2, "VALID", "");
auto identity = Identity(scope.WithOpName("Output"), conv2d);
GrapplerItem item;
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
Status status;
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* conv2d_node = graph_view.GetNode("Conv2D");
ASSERT_NE(conv2d_node, nullptr);
ASSERT_EQ(conv2d_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(conv2d_node, 1, "Filter", 0);
VerifyDataFormatAttributeMatch(conv2d_node, SRC_DATA_FORMAT);
auto* output_node = graph_view.GetNode("Output");
ASSERT_NE(output_node, nullptr);
ASSERT_EQ(output_node->NumRegularFanins(), 1);
}
TEST_F(GenericLayoutOptimizerTest, PreserveFetch) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto conv = SimpleConv2D(&s, 4, 2, "VALID", "");
auto i = ops::Identity(s.WithOpName("i"), conv);
GrapplerItem item;
item.fetch.push_back("Conv2D");
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
Status status;
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* conv_node = graph_view.GetNode("Conv2D");
ASSERT_NE(conv_node, nullptr);
VerifyDataFormatAttributeMatch(conv_node, SRC_DATA_FORMAT);
}
TEST_F(GenericLayoutOptimizerTest, EmptyDevice) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto conv = SimpleConv2D(&s, 4, 2, "VALID", "");
Output fetch = ops::Identity(s.WithOpName("Fetch"), {conv});
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
Status status;
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* conv_node = graph_view.GetNode("Conv2D");
ASSERT_NE(conv_node, nullptr);
VerifyDataFormatAttributeMatch(conv_node, SRC_DATA_FORMAT);
}
TEST_F(GenericLayoutOptimizerTest, GPUDevice) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
tsl::enable_tensor_float_32_execution(false);
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto conv =
SimpleConv2D(&s, 4, 2, "VALID", "/job:w/replica:0/task:0/device:GPU:0");
Output fetch = ops::Identity(s.WithOpName("Fetch"), {conv});
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GenericLayoutOptimizer optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
Status status;
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* conv_node = graph_view.GetNode("Conv2D");
ASSERT_NE(conv_node, nullptr);
VerifyDataFormatAttributeMatch(conv_node, "NCHW");
}
TEST_F(GenericLayoutOptimizerTest, CPUDevice) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto conv = SimpleConv2D(&s, 4, 2, "VALID", "/CPU:0");
Output fetch = ops::Identity(s.WithOpName("Fetch"), {conv});
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
Status status;
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* conv_node = graph_view.GetNode("Conv2D");
ASSERT_NE(conv_node, nullptr);
#if (GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
VerifyDataFormatAttributeMatch(conv_node, "NHWC");
#else
VerifyDataFormatAttributeMatch(conv_node, DST_DATA_FORMAT);
#endif
}
TEST_F(GenericLayoutOptimizerTest, NoOptimizeIntegerConvolution) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto conv = SimpleConv2D<int32>(&s, 4, 2, "VALID", "");
Output fetch = ops::Identity(s.WithOpName("Fetch"), {conv});
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
Status status;
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* conv_node = graph_view.GetNode("Conv2D");
ASSERT_NE(conv_node, nullptr);
VerifyDataFormatAttributeMatch(conv_node, SRC_DATA_FORMAT);
}
TEST_F(GenericLayoutOptimizerTest, Connectivity) {
Scope scope = Scope::NewRootScope();
auto conv = SimpleConv2D(&scope, 4, 2, "VALID",
absl::StrCat("/device:", DEVICE, ":0"));
auto i1 = ops::Identity(scope.WithOpName("i1"), conv);
auto i2 = ops::Identity(scope.WithOpName("i2"), i1);
auto i3 = ops::Identity(scope.WithOpName("i3"), i2);
GrapplerItem item;
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
Status status;
utils::GraphView graph_view_original(&item.graph, &status);
const int i1_index = graph_view_original.GetNode("i1")->node_index();
const int i2_index = graph_view_original.GetNode("i2")->node_index();
item.graph.mutable_node()->SwapElements(i1_index, i2_index);
GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* node_i2_output = graph_view.GetNode("i2");
ASSERT_NE(node_i2_output, nullptr);
ASSERT_EQ(node_i2_output->NumRegularFanins(), 1);
VerifyRegularFaninMatch(node_i2_output, 0, "i1", 0);
}
TEST_F(GenericLayoutOptimizerTest, Conv2DBackpropInputNonConstInputSizes) {
for (const int input_sizes_length : {2, 4}) {
Scope s = Scope::NewRootScope();
    auto conv = SimpleConv2DBackpropInput(&s, 7, 2, "SAME", /*dilated=*/false,
                                          input_sizes_length);
Output fetch = ops::Identity(s.WithOpName("Fetch"), {conv});
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
Status status;
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* conv2d_backprop_node = graph_view.GetNode("Conv2DBackpropInput");
ASSERT_NE(conv2d_backprop_node, nullptr);
ASSERT_EQ(conv2d_backprop_node->NumRegularFanins(), 3);
VerifyRegularFaninMatch(conv2d_backprop_node, 0, "InputSizesIdentity", 0);
}
}
TEST_F(GenericLayoutOptimizerTest, Conv2DDataFormatVecPermuteCollapse) {
tsl::enable_tensor_float_32_execution(false);
Scope scope =
Scope::NewRootScope().WithDevice(absl::StrCat("/device:", DEVICE, ":0"));
auto conv = SimpleConv2D(&scope, 4, 2, "VALID",
absl::StrCat("/device:", DEVICE, ":0"));
auto shape = ops::Shape(scope.WithOpName("shape"), conv);
auto value = ops::Const(scope.WithOpName("value"), 0, {});
auto fill = ops::Fill(scope.WithOpName("fill"), shape, value);
auto i = ops::Identity(scope.WithOpName("i"), fill);
GrapplerItem item;
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
Status status;
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* conv2d_node = graph_view.GetNode("Conv2D");
ASSERT_NE(conv2d_node, nullptr);
ASSERT_EQ(conv2d_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(
conv2d_node, 0,
absl::StrCat("Conv2D-0-Transpose", SRC_DATA_FORMAT, "To", DST_DATA_FORMAT,
"-LayoutOptimizer"),
0);
auto* shape_node = graph_view.GetNode("shape");
ASSERT_NE(shape_node, nullptr);
ASSERT_EQ(shape_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(shape_node, 0, conv2d_node->GetName(), 0);
auto* fill_node = graph_view.GetNode("fill");
ASSERT_NE(fill_node, nullptr);
ASSERT_EQ(fill_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(fill_node, 0, shape_node->GetName(), 0);
VerifyRegularFanoutMatch(
fill_node, 0,
absl::StrCat("fill-0-0-Transpose", DST_DATA_FORMAT, "To", SRC_DATA_FORMAT,
"-LayoutOptimizer"),
0);
auto* graph_output = graph_view.GetNode("i");
ASSERT_NE(graph_output, nullptr);
ASSERT_EQ(graph_output->NumRegularFanins(), 1);
VerifyRegularFaninMatch(
graph_output, 0,
absl::StrCat("fill-0-0-Transpose", DST_DATA_FORMAT, "To", SRC_DATA_FORMAT,
"-LayoutOptimizer"),
0);
}
TEST_F(GenericLayoutOptimizerTest, DoNotPruneNonAddedCancellableTransposes) {
GrapplerItem item;
{
Scope scope = Scope::NewRootScope().WithDevice(
absl::StrCat("/device:", DEVICE, ":0"));
auto input = ops::RandomUniform(scope.WithOpName("input"),
DIMS(kBatchSize, kHeight, kWidth, kDepthIn),
DT_FLOAT);
auto input_in_transpose =
ops::Transpose(scope.WithOpName("input_in_transpose"), input,
ops::Const(scope, PERMUTATION_SRC_TO_DST, {4}));
auto input_out_transpose = ops::Transpose(
scope.WithOpName("input_out_transpose"), input_in_transpose,
ops::Const(scope, PERMUTATION_DST_TO_SRC, {4}));
Tensor bias_data(DT_FLOAT, TensorShape({kDepthIn}));
test::FillIota<float>(&bias_data, 1.0f);
auto bias_add = ops::BiasAdd(
scope.WithOpName("bias_add"), input_out_transpose, bias_data,
ops::BiasAdd::Attrs().DataFormat(SRC_DATA_FORMAT));
auto output_in_transpose =
ops::Transpose(scope.WithOpName("output_in_transpose"), bias_add,
ops::Const(scope, PERMUTATION_SRC_TO_DST, {4}));
auto output_out_transpose = ops::Transpose(
scope.WithOpName("output_out_transpose"), output_in_transpose,
ops::Const(scope, PERMUTATION_DST_TO_SRC, {4}));
auto output =
ops::Identity(scope.WithOpName("output"), output_out_transpose);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
}
GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
Status status;
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* input_node = graph_view.GetNode("input");
ASSERT_NE(input_node, nullptr);
auto* input_in_transpose_node = graph_view.GetNode("input_in_transpose");
ASSERT_NE(input_in_transpose_node, nullptr);
ASSERT_EQ(input_in_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_in_transpose_node, 0, input_node->GetName(), 0);
auto* input_out_transpose_node = graph_view.GetNode("input_out_transpose");
ASSERT_NE(input_out_transpose_node, nullptr);
ASSERT_EQ(input_out_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_out_transpose_node, 0,
input_in_transpose_node->GetName(), 0);
auto* bias_add_in_transpose_node = graph_view.GetNode(
absl::StrCat("bias_add-0-Transpose", SRC_DATA_FORMAT, "To",
DST_DATA_FORMAT, "-LayoutOptimizer"));
ASSERT_NE(bias_add_in_transpose_node, nullptr);
ASSERT_EQ(bias_add_in_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(bias_add_in_transpose_node, 0,
input_out_transpose_node->GetName(), 0);
auto* bias_add_node = graph_view.GetNode("bias_add");
ASSERT_NE(bias_add_node, nullptr);
ASSERT_EQ(bias_add_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(bias_add_node, 0,
bias_add_in_transpose_node->GetName(), 0);
auto* bias_add_out_transpose_node = graph_view.GetNode(
absl::StrCat("bias_add-0-0-Transpose", DST_DATA_FORMAT, "To",
SRC_DATA_FORMAT, "-LayoutOptimizer"));
ASSERT_NE(bias_add_out_transpose_node, nullptr);
ASSERT_EQ(bias_add_out_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(bias_add_out_transpose_node, 0,
bias_add_node->GetName(), 0);
auto* output_in_transpose_node = graph_view.GetNode("output_in_transpose");
ASSERT_NE(output_in_transpose_node, nullptr);
ASSERT_EQ(output_in_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(output_in_transpose_node, 0,
bias_add_out_transpose_node->GetName(), 0);
auto* output_out_transpose_node = graph_view.GetNode("output_out_transpose");
ASSERT_NE(output_out_transpose_node, nullptr);
ASSERT_EQ(output_out_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(output_out_transpose_node, 0,
output_in_transpose_node->GetName(), 0);
auto* output_node = graph_view.GetNode("output");
ASSERT_NE(output_node, nullptr);
ASSERT_EQ(output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(output_node, 0, output_out_transpose_node->GetName(),
0);
}
TEST_F(GenericLayoutOptimizerTest, CancelTransposeAroundPad) {
using test::function::NDef;
  GenericLayoutOptimizer optimizer(
      RewriterConfig::AGGRESSIVE,
      /*layout_conversion=*/RewriterConfig::NCHW_TO_NHWC);
const Tensor kPermuteNhwcToNchw = test::AsTensor<int32>({0, 3, 1, 2});
const Tensor kPermuteNchwToNhwc = test::AsTensor<int32>({0, 2, 3, 1});
const Tensor kPad = test::AsTensor<int32>({1, 2, 3, 4, 5, 6, 7, 8}, {4, 2});
GrapplerItem item;
item.graph = test::function::GDef({
NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}),
NDef("paddings", "Const", {}, {{"dtype", DT_INT32}, {"value", kPad}}),
NDef("perm_nhwc_to_nchw", "Const", {},
{{"dtype", DT_INT32}, {"value", kPermuteNhwcToNchw}}),
NDef("perm_nchw_to_nhwc", "Const", {},
{{"dtype", DT_INT32}, {"value", kPermuteNchwToNhwc}}),
NDef("transpose_0", "Transpose", {"x", "perm_nhwc_to_nchw"},
{{"T", DT_FLOAT}, {"Tperm", DT_INT32}}),
NDef("pad", "Pad", {"transpose_0", "paddings"},
{{"T", DT_FLOAT}, {"Tpaddings", DT_INT32}}),
NDef("transpose_1", "Transpose", {"pad", "perm_nchw_to_nhwc"},
{{"T", DT_FLOAT}, {"Tperm", DT_INT32}}),
NDef("transpose_2", "Transpose", {"pad", "perm_nchw_to_nhwc"},
{{"T", DT_FLOAT}, {"Tperm", DT_INT32}}),
});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
const Tensor kPermutedPaddings =
test::AsTensor<int32>({1, 2, 5, 6, 7, 8, 3, 4}, {4, 2});
GraphDef expected = test::function::GDef({
NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}),
NDef("paddings", "Const", {},
{{"dtype", DT_INT32}, {"value", kPermutedPaddings}}),
NDef("perm_nhwc_to_nchw", "Const", {},
{{"dtype", DT_INT32}, {"value", kPermuteNhwcToNchw}}),
NDef("perm_nchw_to_nhwc", "Const", {},
{{"dtype", DT_INT32}, {"value", kPermuteNchwToNhwc}}),
NDef("transpose_0", "Identity", {"x"}, {{"T", DT_FLOAT}}),
NDef("pad", "Pad", {"transpose_0", "paddings"},
{{"T", DT_FLOAT}, {"Tpaddings", DT_INT32}}),
NDef("transpose_1", "Identity", {"pad"}, {{"T", DT_FLOAT}}),
NDef("transpose_2", "Identity", {"pad"}, {{"T", DT_FLOAT}}),
});
CompareGraphs(expected, output);
Tensor x = GenerateRandomTensor<DT_FLOAT>({2, 6, 6, 8});
item.fetch = {"transpose_1", "transpose_2"};
item.feed.emplace_back("x", x);
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors.size(), 2);
ASSERT_EQ(tensors_expected.size(), 2);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
test::ExpectTensorEqual<float>(tensors_expected[1], tensors[1]);
}
TEST_F(GenericLayoutOptimizerTest, PreserveInputShapes) {
using test::function::NDef;
GenericLayoutOptimizer optimizer(RewriterConfig::AGGRESSIVE);
AttrValue output_shapes;
auto* shape = output_shapes.mutable_list()->add_shape();
shape->add_dim()->set_size(-1);
GrapplerItem item;
item.graph = test::function::GDef({NDef(
"x", "_Arg", {},
{{"T", DT_FLOAT}, {"index", 0}, {"_output_shapes", output_shapes}})});
item.feed.emplace_back("x", Tensor(DT_FLOAT));
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
Status status;
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* arg = graph_view.GetNode("x");
ASSERT_NE(arg, nullptr);
EXPECT_TRUE(arg->HasAttr("_output_shapes"));
EXPECT_EQ(arg->GetAttr("_output_shapes")->DebugString(),
output_shapes.DebugString());
}
TEST_F(GenericLayoutOptimizerTest, OptimizeSimpleConv3DGraph_CPU) {
Scope scope = Scope::NewRootScope();
auto conv3d = SimpleConv3D(&scope, 32, 1, "VALID", "/CPU:0");
auto identity = Identity(scope.WithOpName("Output"), conv3d);
GrapplerItem item;
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
Status status;
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* conv3d_node = graph_view.GetNode("Conv3D");
ASSERT_NE(conv3d_node, nullptr);
ASSERT_EQ(conv3d_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(conv3d_node, 1, "Filter", 0);
auto* output_node = graph_view.GetNode("Output");
ASSERT_NE(output_node, nullptr);
ASSERT_EQ(output_node->NumRegularFanins(), 1);
#if (GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
VerifyDataFormatAttributeMatch(conv3d_node, SRC_DATA_FORMAT_5D);
#else
auto* input_transpose_node = graph_view.GetNode(
absl::StrCat("Conv3D-0-Transpose", SRC_DATA_FORMAT_5D, "To",
DST_DATA_FORMAT_5D, "-LayoutOptimizer"));
ASSERT_NE(input_transpose_node, nullptr);
ASSERT_EQ(input_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node, 0, "Input", 0);
VerifyRegularFaninMatch(conv3d_node, 0, input_transpose_node->GetName(), 0);
VerifyDataFormatAttributeMatch(conv3d_node, DST_DATA_FORMAT_5D);
auto* output_transpose_node = graph_view.GetNode(
absl::StrCat("Conv3D-0-0-Transpose", DST_DATA_FORMAT_5D, "To",
SRC_DATA_FORMAT_5D, "-LayoutOptimizer"));
ASSERT_NE(output_transpose_node, nullptr);
ASSERT_EQ(output_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(output_transpose_node, 0, conv3d_node->GetName(), 0);
VerifyRegularFaninMatch(output_node, 0, output_transpose_node->GetName(), 0);
#endif
}
}
} |
1,388 | cpp | tensorflow/tensorflow | function_optimizer | tensorflow/core/grappler/optimizers/function_optimizer.cc | tensorflow/core/grappler/optimizers/function_optimizer_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_FUNCTION_OPTIMIZER_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_FUNCTION_OPTIMIZER_H_
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace grappler {
class FunctionOptimizer : public GraphOptimizer {
public:
explicit FunctionOptimizer(RewriterConfig::Toggle opt_level,
bool lower_control_flow)
: opt_level_(opt_level), lower_control_flow_(lower_control_flow) {}
~FunctionOptimizer() override = default;
string name() const override { return "function_optimizer"; }
bool UsesFunctionLibrary() const override { return true; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override;
private:
friend class FunctionOptimizerTest;
Status RunFunctionOptimizerPass(const GrapplerItem& item,
GraphDef* optimized_graph) const;
RewriterConfig::Toggle opt_level_;
bool lower_control_flow_;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/function_optimizer.h"
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/substitute.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/lower_case_op.h"
#include "tensorflow/core/common_runtime/lower_functional_ops.h"
#include "tensorflow/core/common_runtime/lower_if_op.h"
#include "tensorflow/core/common_runtime/lower_while_op.h"
#include "tensorflow/core/common_runtime/placer.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/grappler/graph_view.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/functions.h"
#include "tensorflow/core/lib/gtl/map_util.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr const char* const kFuncAttr = FunctionLibraryDefinition::kFuncAttr;
constexpr const char* const kNoSpecializeAttr = "_nospecialize";
constexpr const char* const kGrapplerSpecializedFuncAttr =
"_GrapplerSpecializedFunc";
bool IsDirectFunctionCall(const FunctionDef& func, const NodeDef& func_node) {
return func_node.op() == func.signature().name();
}
bool IsIndirectFunctionCall(const FunctionDef& func, const NodeDef& func_node) {
if (!IsPartitionedCall(func_node) && !IsStatefulPartitionedCall(func_node)) {
return false;
}
auto* func_attr = AttrSlice(func_node).Find(kFuncAttr);
return func_attr != nullptr && func_attr->has_func() &&
func_attr->func().name() == func.signature().name();
}
AttrSlice FunctionInstantiationAttributes(const FunctionDef& func,
const NodeDef& func_node) {
if (IsDirectFunctionCall(func, func_node)) {
return AttrSlice(func_node);
} else if (IsIndirectFunctionCall(func, func_node)) {
auto* func_attr = AttrSlice(func_node).Find(kFuncAttr);
return AttrSlice(&func_attr->func().attr());
} else {
LOG(WARNING) << "Can't resolve function instantiation attributes: "
<< SummarizeNodeDef(func_node);
return AttrSlice();
}
}
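// A minimal Device implementation that carries only a name and a device
// type parsed from the full device string; Sync() is a no-op.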
class FakeDevice : public Device {
public:
FakeDevice(Env* env, const string& device) : Device(env, attr(device)) {}
explicit FakeDevice(const string& device) : FakeDevice(nullptr, device) {}
Status Sync() override { return absl::OkStatus(); }
private:
static DeviceAttributes attr(const string& device) {
DeviceNameUtils::ParsedName parsed_name;
bool parsed = DeviceNameUtils::ParseFullName(device, &parsed_name);
DCHECK(parsed) << "Failed to parse full device name: " << device;
DeviceAttributes attr;
attr.set_name(device);
attr.set_device_type(parsed_name.type);
return attr;
}
};
bool MarkedNoSpecialize(const FunctionDef& fdef) {
const auto attr = AttrSlice(&fdef.attr());
bool nospecialize = false;
return TryGetNodeAttr(attr, kNoSpecializeAttr, &nospecialize) && nospecialize;
}
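// Uniquely identifies a specialization of a function: call sites that agree
// on type and body attributes, constant inputs, active outputs, and
// fetch-set membership can share a single specialized FunctionDef.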
struct FunctionSpecializationSignature {
using InputPort = int;
using OutputPort = int;
string func_name;
bool is_in_fetch_set;
absl::flat_hash_set<OutputPort> active_outputs;
absl::flat_hash_map<string, DataType> type_parameters;
absl::flat_hash_map<string, AttrValue> body_parameters;
absl::flat_hash_map<InputPort, string> const_inputs;
bool operator==(const FunctionSpecializationSignature& other) const {
bool equals = func_name == other.func_name &&
is_in_fetch_set == other.is_in_fetch_set &&
active_outputs == other.active_outputs &&
type_parameters == other.type_parameters &&
const_inputs == other.const_inputs;
if (!equals) return false;
if (body_parameters.size() != other.body_parameters.size()) return false;
for (const auto& lhs : body_parameters) {
auto it = other.body_parameters.find(lhs.first);
if (it == other.body_parameters.end()) return false;
      if (!AreAttrValuesEqual(lhs.second, (*it).second,
                              /*allow_false_negatives=*/true)) {
return false;
}
}
return true;
}
template <typename H>
friend H AbslHashValue(H h, const FunctionSpecializationSignature& s) {
H base = H::combine(std::move(h), s.func_name, s.is_in_fetch_set);
std::vector<uint64> hashes;
hashes.reserve(s.active_outputs.size()
+ s.type_parameters.size() * 2
+ s.body_parameters.size() * 2
+ s.const_inputs.size() * 2);
absl::c_transform(s.active_outputs, std::back_inserter(hashes),
hash<OutputPort>());
using TypeParam = std::pair<const string, DataType>;
absl::c_for_each(s.type_parameters, [&hashes](const TypeParam& type_param) {
AttrValue attr_value;
attr_value.set_type(type_param.second);
hashes.push_back(Hash64(type_param.first));
hashes.push_back(AttrValueHash(attr_value));
});
using BodyParam = std::pair<const string, AttrValue>;
absl::c_for_each(s.body_parameters, [&hashes](const BodyParam& body_param) {
hashes.push_back(Hash64(body_param.first));
hashes.push_back(FastAttrValueHash(body_param.second));
});
using ConstInput = std::pair<const InputPort, string>;
absl::c_for_each(s.const_inputs, [&hashes](const ConstInput& const_input) {
hashes.push_back(hash<InputPort>()(const_input.first));
hashes.push_back(Hash64(const_input.second));
});
absl::c_sort(hashes);
return H::combine_contiguous(std::move(base), hashes.data(), hashes.size());
}
};
struct FunctionSpecialization {
string specialized_func_name;
bool is_in_fetch_set;
absl::flat_hash_set<string> const_inputs;
absl::flat_hash_set<string> control_deps;
absl::flat_hash_set<int> active_outputs;
std::vector<std::pair<int, int>> output_mapping;
};
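// State shared across the whole optimization pass: the function library,
// the "truly const" nodes (constants that are not fed), the specializations
// created so far, and the output tensor remapping to apply to the optimized
// graph.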
class FunctionOptimizerContext {
public:
explicit FunctionOptimizerContext(const GrapplerItem& item,
RewriterConfig::Toggle opt_level,
const GraphDef& graph)
: item_(&item),
opt_level_(opt_level),
function_library_(OpRegistry::Global(), graph.library()),
truly_const_nodes_(InferTrulyConstNodes(item, graph)),
graph_view_(&graph) {}
const GrapplerItem& item() const { return *item_; }
int graph_version() const { return item_->graph.versions().producer(); }
RewriterConfig::Toggle opt_level() const { return opt_level_; }
const FunctionLibraryDefinition& function_library() const {
return function_library_;
}
FunctionLibraryDefinition& function_library() { return function_library_; }
const absl::flat_hash_map<SafeTensorId, SafeTensorId, SafeTensorId::Hasher>&
tensor_mapping() const {
return tensor_mapping_;
}
const GraphView& graph_view() const { return graph_view_; }
bool IsFeedNode(const string& node_name) const {
return absl::c_any_of(
item_->feed, [&](const std::pair<std::string, Tensor>& feed) {
return ParseTensorName(feed.first).node() == node_name;
});
}
bool IsFetchNode(const string& node_name) const {
return absl::c_any_of(item_->fetch, [&](const string& fetch) {
return ParseTensorName(fetch).node() == node_name;
});
}
bool IsTrulyConst(const string& name) const {
return TrulyConstNode(name) != nullptr;
}
const NodeDef* TrulyConstNode(const string& name) const {
return gtl::FindWithDefault(truly_const_nodes_, name, nullptr);
}
const FunctionSpecialization* FindFunctionSpecialization(
const FunctionSpecializationSignature& sig) const {
return gtl::FindOrNull(specialized_functions_, sig);
}
void AddSpecializedFunction(const FunctionSpecializationSignature& sig,
const FunctionSpecialization& specialized_func) {
specialized_functions_.emplace(sig, specialized_func);
}
void AddTensorMapping(const SafeTensorId& from, const SafeTensorId& to) {
DCHECK(from.index() != Graph::kControlSlot)
<< "Tensor mapping must be from regular tensor";
DCHECK(to.index() != Graph::kControlSlot)
<< "Tensor mapping must be to regular tensor";
auto inserted = tensor_mapping_.insert({from, to});
DCHECK(inserted.second)
<< "Failed to insert duplicated tensor mapping: "
<< "from=" << from.ToString() << " to=" << to.ToString();
}
void AddTensorMapping(const string& func_node,
const FunctionSpecialization& specialized_func) {
for (const auto& pair : specialized_func.output_mapping) {
int from_idx = pair.first;
int to_idx = pair.second;
if (from_idx != to_idx) {
SafeTensorId from_tensor(func_node, from_idx);
SafeTensorId to_tensor(func_node, to_idx);
AddTensorMapping(from_tensor, to_tensor);
}
}
}
private:
static absl::flat_hash_map<string, const NodeDef*> InferTrulyConstNodes(
const GrapplerItem& item, const GraphDef& graph) {
absl::flat_hash_set<absl::string_view> feed_nodes;
for (const auto& feed : item.feed) {
feed_nodes.insert(feed.first);
}
absl::flat_hash_map<string, const NodeDef*> const_nodes;
for (const NodeDef& node : graph.node()) {
if (IsConstant(node) && !feed_nodes.contains(node.name())) {
const_nodes[node.name()] = &node;
}
}
return const_nodes;
}
const GrapplerItem* item_;
RewriterConfig::Toggle opt_level_;
FunctionLibraryDefinition function_library_;
absl::flat_hash_map<string, const NodeDef*> truly_const_nodes_;
absl::flat_hash_map<FunctionSpecializationSignature,
const FunctionSpecialization>
specialized_functions_;
absl::flat_hash_map<SafeTensorId, SafeTensorId, SafeTensorId::Hasher>
tensor_mapping_;
GraphView graph_view_;
FunctionOptimizerContext(const FunctionOptimizerContext&) = delete;
void operator=(const FunctionOptimizerContext&) = delete;
};
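// Resolves the FunctionDef invoked by a node. Indirect calls
// (PartitionedCall/StatefulPartitionedCall) carry the function in their "f"
// attribute; direct calls use the node op itself as the function name.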
const FunctionDef* FindFunctionCall(const FunctionOptimizerContext& ctx,
const NodeDef& node) {
if (IsPartitionedCall(node) || IsStatefulPartitionedCall(node)) {
const AttrValue* func_attr = AttrSlice(node).Find("f");
return (func_attr != nullptr && func_attr->has_func())
? ctx.function_library().Find(func_attr->func().name())
: nullptr;
}
return ctx.function_library().Find(node.op());
}
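// Collects the output ports of `node` that are actually consumed, either by a
// regular fanout edge or by appearing in the item's fetch set.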
absl::flat_hash_set<int> GetActiveOutputs(const NodeDef& node,
const FunctionOptimizerContext& ctx,
int size_hint = 0) {
absl::flat_hash_set<int> active_outputs;
active_outputs.reserve(static_cast<size_t>(size_hint));
const auto node_fanout_edges =
ctx.graph_view().GetFanoutEdges(node, false);
for (const GraphView::Edge& edge : node_fanout_edges) {
active_outputs.insert(edge.src.port_id);
}
for (const string& fetch : ctx.item().fetch) {
TensorId fetch_tensor = ParseTensorName(fetch);
if (fetch_tensor.node() == node.name()) {
active_outputs.insert(fetch_tensor.index());
}
}
return active_outputs;
}
bool HasTrulyConstInputs(const NodeDef& node,
const FunctionOptimizerContext& ctx) {
const auto is_truly_const = [&ctx](const string& input) {
return ctx.IsTrulyConst(NodeName(input));
};
return absl::c_any_of(node.input(), is_truly_const);
}
bool HasUnusedOutputs(const NodeDef& func_node, const FunctionDef& func,
const FunctionOptimizerContext& ctx) {
int num_outputs = func.signature().output_arg_size();
const absl::flat_hash_set<int> active_outputs =
GetActiveOutputs(func_node, ctx, num_outputs);
int active_outputs_size = active_outputs.size();
return active_outputs_size != num_outputs;
}
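// Keeps only the functions reachable from the optimized graph; the change in
// library size is logged at VLOG(3).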
FunctionDefLibrary PruneFunctionLibrary(const FunctionLibraryDefinition& flib,
const GraphDef& optimized_graph) {
FunctionLibraryDefinition pruned_flib =
flib.ReachableDefinitions(optimized_graph);
int pruned_functions = static_cast<int>(pruned_flib.num_functions()) -
static_cast<int>(flib.num_functions());
VLOG(3) << "Pruned function library: " << pruned_flib.num_functions()
<< " functions (" << pruned_functions << ")";
return pruned_flib.ToProto();
}
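// Replaces truly-const call inputs with Const nodes inside the function body,
// recording which inputs were removed and which control dependencies of those
// Const nodes must be re-attached to the specialized call site.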
Status PushDownConstInputs(const NodeDef& func_node,
const FunctionOptimizerContext& ctx,
GrapplerFunctionItem* item,
absl::flat_hash_set<string>* const_inputs,
absl::flat_hash_set<string>* control_deps) {
const auto record_control_deps = [&](const NodeDef* const_input) {
for (int i = const_input->input_size() - 1; i >= 0; --i) {
const string& input = const_input->input(i);
if (IsControlInput(input))
control_deps->insert(input);
else
break;
}
};
for (int i = func_node.input_size() - 1; i >= 0; --i) {
const string& input = func_node.input(i);
if (IsControlInput(input)) continue;
const string node_name = NodeName(input);
if (ctx.IsTrulyConst(node_name)) {
VLOG(3) << "Push const into function body: input=" << input;
const auto* const_input = CHECK_NOTNULL(ctx.TrulyConstNode(node_name));
const_inputs->insert(input);
record_control_deps(const_input);
TF_RETURN_IF_ERROR(ReplaceInputWithConst(*const_input, i, item));
}
}
return absl::OkStatus();
}
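// Drops the pushed-down const inputs from the specialized call node, but
// forwards their control dependencies so execution order is preserved.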
void RemovePushedDownConstInputs(const FunctionSpecialization& specialization,
NodeDef* specialized_func_node) {
if (specialization.const_inputs.empty()) return;
std::vector<string> keep_inputs;
const auto& inputs = specialized_func_node->input();
absl::c_copy_if(inputs, std::back_inserter(keep_inputs),
[&](const string& input) {
return !specialization.const_inputs.contains(input);
});
specialized_func_node->clear_input();
for (const auto& keep : keep_inputs) specialized_func_node->add_input(keep);
if (!specialization.control_deps.empty()) {
absl::flat_hash_set<string> existing_control_deps;
for (const string& input : keep_inputs) {
existing_control_deps.insert(AsControlDependency(NodeName(input)));
}
for (const string& ctrl : specialization.control_deps) {
if (!existing_control_deps.contains(ctrl)) {
VLOG(3) << "Forward control dependency: input=" << ctrl;
specialized_func_node->add_input(ctrl);
}
}
}
}
void RemovePushedDownConstInputTypes(
const FunctionSpecialization& specialization, const NodeDef& func_node,
NodeDef* specialized_func_node) {
if (specialization.const_inputs.empty()) return;
const AttrValue* tin = AttrSlice(func_node).Find("Tin");
if (tin == nullptr || !tin->has_list()) return;
auto* attr = specialized_func_node->mutable_attr();
(*attr)["Tin"].mutable_list()->clear_type();
for (int i = 0; i < func_node.input_size(); ++i) {
const string& input = func_node.input(i);
if (IsControlInput(input)) break;
if (!specialization.const_inputs.contains(input)) {
DataType dt = tin->list().type(i);
(*attr)["Tin"].mutable_list()->add_type(dt);
}
}
}
void RemoveUnusedOutputsTypes(const FunctionSpecialization& specialization,
const NodeDef& func_node,
NodeDef* specialized_func_node) {
const AttrValue* tout = AttrSlice(func_node).Find("Tout");
if (tout == nullptr || !tout->has_list()) return;
int specialization_active_outputs_size = specialization.active_outputs.size();
if (specialization_active_outputs_size == tout->list().type_size()) return;
auto* attr = specialized_func_node->mutable_attr();
(*attr)["Tout"].mutable_list()->clear_type();
for (int i = 0; i < tout->list().type_size(); ++i) {
if (specialization.active_outputs.contains(i)) {
DataType dt = tout->list().type(i);
(*attr)["Tout"].mutable_list()->add_type(dt);
}
}
}
Status UpdateSpecializedFunctionCallSite(const FunctionDef& func,
const NodeDef& func_node,
const string& specialized_func_name,
NodeDef* specialized_func_node) {
if (IsDirectFunctionCall(func, func_node)) {
specialized_func_node->set_op(specialized_func_name);
} else if (IsIndirectFunctionCall(func, func_node)) {
auto* attr = specialized_func_node->mutable_attr();
(*attr)[kFuncAttr].mutable_func()->set_name(specialized_func_name);
} else {
return absl::InvalidArgumentError("Unknown function call site");
}
return absl::OkStatus();
}
Status UpdateSpecializedFunctionNode(
const FunctionDef& func, const NodeDef& func_node,
const FunctionSpecialization& specialization,
NodeDef* specialized_func_node) {
bool is_indirect_call = IsIndirectFunctionCall(func, func_node);
TF_RETURN_IF_ERROR(UpdateSpecializedFunctionCallSite(
func, func_node, specialization.specialized_func_name,
specialized_func_node));
RemovePushedDownConstInputs(specialization, specialized_func_node);
if (is_indirect_call) {
RemovePushedDownConstInputTypes(specialization, func_node,
specialized_func_node);
}
if (is_indirect_call && !specialization.is_in_fetch_set) {
RemoveUnusedOutputsTypes(specialization, func_node, specialized_func_node);
}
specialized_func_node->mutable_attr()->erase("_gradient_op_type");
return absl::OkStatus();
}
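// Builds the deduplication signature for a call site: function name, fetch-set
// membership, active outputs, type/body instantiation parameters, and the
// truly-const inputs.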
Status InitializeFunctionSpecializationSignature(
const NodeDef& func_node, const FunctionDef& func,
const AttrSlice& func_instantiation_attr,
const FunctionOptimizerContext& ctx, FunctionSpecializationSignature* sig) {
DCHECK(sig->const_inputs.empty());
DCHECK(sig->active_outputs.empty());
sig->func_name = func.signature().name();
sig->is_in_fetch_set = ctx.IsFetchNode(func_node.name());
sig->active_outputs = GetActiveOutputs(func_node, ctx);
TF_RETURN_IF_ERROR(InstantiationTypeParameters(func, func_instantiation_attr,
&sig->type_parameters));
TF_RETURN_IF_ERROR(InstantiationBodyParameters(func, func_instantiation_attr,
&sig->body_parameters));
for (int i = 0; i < func_node.input_size(); ++i) {
const string& input = func_node.input(i);
if (IsControlInput(input)) break;
if (ctx.IsTrulyConst(input)) {
sig->const_inputs.emplace(i, input);
}
}
return absl::OkStatus();
}
string SpecializedFunctionName(const FunctionOptimizerContext& ctx,
const FunctionDef& func,
const NodeDef& func_node) {
return absl::Substitute(
"$0_specialized_for_$1_at_$2", func.signature().name(),
absl::StrReplaceAll(func_node.name(), {{"/", "_"}}), ctx.item().id);
}
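// Specializes `func` for this call site. An existing specialization with an
// identical signature is reused; otherwise const inputs are pushed down,
// unused outputs are pruned (unless the node is fetched), and the new function
// is added to the library.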
Status SpecializeFunction(const NodeDef& func_node, const FunctionDef& func,
FunctionOptimizerContext* ctx,
GraphDef* optimized_graph) {
VLOG(2) << "Specialize function call: " << SummarizeNodeDef(func_node);
const AttrSlice func_instantiation_attr =
FunctionInstantiationAttributes(func, func_node);
FunctionSpecializationSignature signature;
TF_RETURN_IF_ERROR(InitializeFunctionSpecializationSignature(
func_node, func, func_instantiation_attr, *ctx, &signature));
const FunctionSpecialization* already_specialized =
ctx->FindFunctionSpecialization(signature);
if (already_specialized) {
VLOG(2) << "Function was already specialized in identical context: "
"specialized_name="
<< already_specialized->specialized_func_name;
NodeDef* specialized_func_node = optimized_graph->add_node();
*specialized_func_node = func_node;
TF_RETURN_IF_ERROR(UpdateSpecializedFunctionNode(
func, func_node, *already_specialized, specialized_func_node));
ctx->AddTensorMapping(specialized_func_node->name(), *already_specialized);
return absl::OkStatus();
}
const auto& flib = ctx->function_library();
GrapplerFunctionItem item;
TF_RETURN_IF_ERROR(MakeGrapplerFunctionItem(
func, func_instantiation_attr, flib, ctx->graph_version(), &item));
absl::flat_hash_set<string> const_inputs;
absl::flat_hash_set<string> control_deps;
TF_RETURN_IF_ERROR(PushDownConstInputs(func_node, *ctx, &item, &const_inputs,
&control_deps));
std::vector<std::pair<int, int>> output_mapping;
if (!signature.is_in_fetch_set) {
int num_func_outputs = item.output_size();
absl::flat_hash_set<int> remove;
for (int i = 0; i < num_func_outputs; ++i) {
if (!signature.active_outputs.count(i)) remove.insert(i);
}
TF_RETURN_IF_ERROR(RemoveFunctionOutputs(remove, &item, &output_mapping));
}
FunctionDef specialized_func;
TF_RETURN_IF_ERROR(MakeFunctionDef(item, flib, &specialized_func));
const string specialized_func_name =
SpecializedFunctionName(*ctx, func, func_node);
if (flib.Contains(specialized_func_name)) {
return absl::InternalError("Created duplicate funct | #include "tensorflow/core/grappler/optimizers/function_optimizer.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/flatset.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kDevice[] = "/job:localhost/replica:0/task:0/device:CPU:0";
}
class FunctionOptimizerTest : public GrapplerTest {};
TEST_F(FunctionOptimizerTest, InlineFunction_SimpleFunction) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("y", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, kDevice),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, kDevice)},
{
test::function::XTimesTwo(),
});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
const string arg0 = "Func/y/input/_0";
const string ret0 = "Func/y/output/_1";
const Tensor kTwo = test::AsScalar<int64_t>(2);
GraphDef expected = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}),
NDef(arg0, "Identity", {"x"}, {{"T", DT_FLOAT}}),
NDef("y/two", "Const", {}, {{"dtype", DT_INT64}, {"value", kTwo}}),
NDef("y/scale", "Cast", {"y/two"},
{{"DstT", DT_FLOAT}, {"SrcT", DT_INT64}}),
NDef("y/y", "Mul", {arg0, "y/scale"}, {{"T", DT_FLOAT}}),
NDef(ret0, "Identity", {"y/y"}, {{"T", DT_FLOAT}}),
NDef("z", "Identity", {ret0}, {{"T", DT_FLOAT}})},
{});
for (NodeDef& node : *expected.mutable_node()) node.set_device(kDevice);
CompareGraphs(expected, output);
Tensor pi = test::AsScalar<float>(3.14f);
item.fetch = {"z"};
item.feed.emplace_back("x", pi);
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
TEST_F(FunctionOptimizerTest, InlineFunction_FixedTypeFunction) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
const Tensor kTwo = test::AsScalar<float>(2.0f);
FunctionDef x_times_two = FunctionDefHelper::Define(
"XTimesTwo",
{"x: float"},
{"y: float"},
{},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_FLOAT}}},
{{"enter"},
"Enter",
{"x"},
{{"T", DT_FLOAT}, {"frame_name", "frame"}}},
{{"y"}, "Mul", {"x", "two"}, {{"T", DT_FLOAT}}},
});
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("y", "XTimesTwo", {"x"}, {}, kDevice),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, kDevice)},
{
x_times_two,
});
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.op(), "XTimesTwo");
}
EXPECT_EQ(output.library().function_size(), 0);
Tensor pi = test::AsScalar<float>(3.14f);
item.fetch = {"z"};
item.feed.emplace_back("x", pi);
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
TEST_F(FunctionOptimizerTest, InlineFunction_FunctionWithOutputMapping) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef func = FunctionDefHelper::Create(
"Exp_func",
{"in: float"},
{"out: float"},
{},
{{{"Linear_func"}, "Identity", {"in"}, {{"T", DT_FLOAT}}},
{{"Exp"}, "Exp", {"Linear_func:output:0"}, {{"T", DT_FLOAT}}}},
{{"out", "Exp:y:0"}});
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("y", "Exp_func", {"x"}, {}, kDevice),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, kDevice)},
{
func,
});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.op(), "Exp_func");
}
EXPECT_EQ(output.library().function_size(), 0);
Tensor pi = test::AsScalar<float>(3.14f);
item.fetch = {"z"};
item.feed.emplace_back("x", pi);
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
TEST_F(FunctionOptimizerTest, InlineFunction_FunctionWithInputForwarding) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef func = FunctionDefHelper::Create(
"ForwardInputs",
{"in0: float", "in1: float", "arg2: float", "arg3: int32", "arg4: float"},
{"out0: float", "arg2: float", "arg3: int32"},
{},
{},
{{"out0", "in0"}, {"arg2", "arg2"}, {"arg3", "arg3"}});
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("x0", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("x1", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("x2", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("x3", "Placeholder", {}, {{"dtype", DT_INT32}}, kDevice),
NDef("x4", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("y", "ForwardInputs", {"x0", "x1", "x2", "x3", "x4"}, {}, kDevice),
NDef("z0", "Identity", {"y:0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("z1", "Identity", {"y:1"}, {{"T", DT_FLOAT}}, kDevice),
NDef("z2", "Identity", {"y:2"}, {{"T", DT_INT32}}, kDevice)},
{
func,
});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.op(), "ForwardInputs");
}
EXPECT_EQ(output.library().function_size(), 0);
item.fetch = {"z0", "z1", "z2"};
item.feed.emplace_back("x0", test::AsScalar<float>(3.14f));
item.feed.emplace_back("x1", test::AsScalar<float>(2.7f));
item.feed.emplace_back("x2", test::AsScalar<float>(1.0f));
item.feed.emplace_back("x4", test::AsScalar<float>(-1.0f));
item.feed.emplace_back("x3", test::AsScalar<int>(1234));
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
test::ExpectTensorEqual<float>(tensors_expected[1], tensors[1]);
test::ExpectTensorEqual<int>(tensors_expected[2], tensors[2]);
}
TEST_F(FunctionOptimizerTest, InlineFunction_FunctionWithoutInput) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
const Tensor kTwo = test::AsScalar<int64_t>(2);
FunctionDef func = FunctionDefHelper::Define(
"GenerateTwo",
{},
{"o: T"},
{"T: {float, double}"},
{{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"o"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}}});
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("y", "GenerateTwo", {}, {{"T", DT_FLOAT}}, kDevice),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, kDevice)},
{
func,
});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.op(), "GenerateTwo");
}
EXPECT_EQ(output.library().function_size(), 0);
item.fetch = {"z"};
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
TEST_F(FunctionOptimizerTest, InlineFunction_FunctionWithNestedFunctionCall) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"output"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "output:z:0"}});
FunctionDef square_func = FunctionDefHelper::Create(
"MySquare", {"x:T"}, {"z:T"}, {"T: {float, double}"},
{{{"output"}, "MyMul", {"x", "x"}, {{"T", "$T"}}}},
{{"z", "output:z:0"}});
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("square", "MySquare", {"a"}, {{"T", DT_FLOAT}}, kDevice),
NDef("outputs", "Identity", {"square:0"}, {{"T", DT_FLOAT}}, kDevice)},
{mul_func, square_func});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.op(), "MySquare");
EXPECT_NE(node.op(), "MyMul");
}
EXPECT_EQ(output.library().function_size(), 0);
item.fetch = {"outputs"};
item.feed.emplace_back("a", test::AsScalar<float>(2.0f));
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
TEST_F(FunctionOptimizerTest, InlineSymbolicGradient_TestFunc) {
FunctionOptimizer optimizer(RewriterConfig::ON, true);
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
FunctionDef func = FunctionDefHelper::Define(
"TestFunc", {"x:float", "y:float"}, {"l:float"}, {},
{
{{"z"}, "Add", {"x", "y"}, {{"T", DT_FLOAT}}},
FunctionDefHelper::Const("zero", 0),
FunctionDefHelper::Const("one", 1),
{{"r"}, "Rank", {"z"}, {{"T", DT_FLOAT}}},
{{"indices"}, "Range", {"zero", "r", "one"}},
{{"l"}, "Sum", {"z", "indices"}, {{"T", DT_FLOAT}}},
});
auto x = ops::Const(scope, 1.0f);
auto y = ops::Const(scope, 2.0f);
auto dl = ops::Const(scope, 3.0f);
NameAttrList fn;
fn.set_name("TestFunc");
(*fn.mutable_attr())["T"].set_type(DT_FLOAT);
auto g0 = ops::SymbolicGradient(scope, std::initializer_list<Input>{x, y, dl},
{DT_FLOAT, DT_FLOAT}, fn);
auto out1 = ops::Identity(scope.WithOpName("out1"), g0.output[0]);
auto out2 = ops::Identity(scope.WithOpName("out2"), g0.output[1]);
GrapplerItem item;
TF_EXPECT_OK(scope.ToGraphDef(&item.graph));
*item.graph.mutable_library()->add_function() = func;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.op(), "SymbolicGradient");
}
EXPECT_EQ(output.library().function_size(), 0);
std::vector<Tensor> expected =
EvaluateNodes(item.graph, {"out1", "out2"}, {});
std::vector<Tensor> optimized = EvaluateNodes(output, {"out1", "out2"}, {});
test::ExpectTensorEqual<float>(expected[0], optimized[0]);
test::ExpectTensorEqual<float>(expected[1], optimized[1]);
}
TEST_F(FunctionOptimizerTest, InlineSymbolicGradient_IdentityFunc) {
FunctionOptimizer optimizer(RewriterConfig::ON, true);
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
FunctionDef func = FunctionDefHelper::Create(
"Identity_func",
{"in: float"},
{"out: float"},
{},
{{{"Identity"}, "Identity", {"in"}, {{"T", DT_FLOAT}}}},
{{"out", "Identity:output:0"}});
auto x = ops::Const(scope, 1.0f, {3, 5, 7});
auto z = ops::Const(scope, 3.0f, {3, 5, 7});
NameAttrList fn;
fn.set_name("Identity_func");
auto g0 = ops::SymbolicGradient(scope, std::initializer_list<Input>{x, z},
{DT_FLOAT}, fn);
auto out = ops::Identity(scope.WithOpName("out"), g0.output[0]);
GrapplerItem item;
TF_EXPECT_OK(scope.ToGraphDef(&item.graph));
*item.graph.mutable_library()->add_function() = func;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.op(), "SymbolicGradient");
}
EXPECT_EQ(output.library().function_size(), 0);
std::vector<Tensor> expected = EvaluateNodes(item.graph, {"out"}, {});
std::vector<Tensor> optimized = EvaluateNodes(output, {"out"}, {});
test::ExpectTensorEqual<float>(expected[0], optimized[0]);
}
TEST_F(FunctionOptimizerTest, InlineSymbolicGradientNoInlineFunc) {
FunctionOptimizer optimizer(RewriterConfig::ON, true);
FunctionDef func = FunctionDefHelper::Define(
"TestFunc", {"x:float", "y:float"}, {"l:float"}, {},
{
{{"z"}, "Add", {"x", "y"}, {{"T", DT_FLOAT}}},
FunctionDefHelper::Const("zero", 0),
FunctionDefHelper::Const("one", 1),
{{"r"}, "Rank", {"z"}, {{"T", DT_FLOAT}}},
{{"indices"}, "Range", {"zero", "r", "one"}},
{{"l"}, "Sum", {"z", "indices"}, {{"T", DT_FLOAT}}},
});
(*func.mutable_attr())["_noinline"].set_b(true);
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
auto x = ops::Const(scope, 1.0f);
auto y = ops::Const(scope, 2.0f);
auto dl = ops::Const(scope, 3.0f);
NameAttrList fn;
fn.set_name("TestFunc");
(*fn.mutable_attr())["T"].set_type(DT_FLOAT);
auto g0 = ops::SymbolicGradient(scope, std::initializer_list<Input>{x, y, dl},
{DT_FLOAT, DT_FLOAT}, fn);
auto out1 = ops::Identity(scope.WithOpName("out1"), g0.output[0]);
auto out2 = ops::Identity(scope.WithOpName("out2"), g0.output[1]);
GrapplerItem item;
TF_EXPECT_OK(scope.ToGraphDef(&item.graph));
*item.graph.mutable_library()->add_function() = func;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
CompareGraphs(item.graph, output);
}
TEST_F(FunctionOptimizerTest, InlineIndirectFunctionSimpleFunction) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::AGGRESSIVE, true);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "mul:z:0"}});
GrapplerItem item;
item.fetch = {"d"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("c", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
kDevice),
NDef("d", "Identity", {"c"}, {{"T", DT_FLOAT}}, kDevice)},
{mul_func});
Tensor pi = test::AsScalar<float>(3.14f);
item.feed.emplace_back("a", pi);
item.feed.emplace_back("b", pi);
const string input_x = "Func/c/input/_0";
const string input_y = "Func/c/input/_1";
const string output_z = "Func/c/output/_2";
{
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef(input_x, "Identity", {"a"}, {{"T", DT_FLOAT}}, kDevice),
NDef(input_y, "Identity", {"b"}, {{"T", DT_FLOAT}}, kDevice),
NDef("c/mul", "Mul", {input_x, input_y}, {{"T", DT_FLOAT}}, kDevice),
NDef(output_z, "Identity", {"c/mul"}, {{"T", DT_FLOAT}}),
NDef("d", "Identity", {output_z}, {{"T", DT_FLOAT}}, kDevice)},
{mul_func});
CompareGraphs(expected, optimized_graph);
GrapplerItem optimized = item.WithGraph(std::move(optimized_graph));
auto tensors_expected = EvaluateFetchNodes(item);
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors_expected.size(), 1);
ASSERT_EQ(tensors.size(), tensors_expected.size());
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
{
GraphDef optimized_graph;
TF_EXPECT_OK(item.AddDevice(kDevice));
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef(input_x, "Identity", {"a"}, {{"T", DT_FLOAT}}, kDevice),
NDef(input_y, "Identity", {"b"}, {{"T", DT_FLOAT}}, kDevice),
NDef("c/mul", "Mul", {input_x, input_y}, {{"T", DT_FLOAT}}, kDevice),
NDef(output_z, "Identity", {"c/mul"}, {{"T", DT_FLOAT}}, kDevice),
NDef("d", "Identity", {output_z}, {{"T", DT_FLOAT}}, kDevice)},
{mul_func});
CompareGraphs(expected, optimized_graph);
GrapplerItem optimized = item.WithGraph(std::move(optimized_graph));
auto tensors_expected = EvaluateFetchNodes(item);
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors_expected.size(), 1);
ASSERT_EQ(tensors.size(), tensors_expected.size());
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
}
TEST_F(FunctionOptimizerTest, InlineIndirectFunctionWithControlDependencies) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::ON, true);
const Tensor kOne = test::AsScalar<float>(1.0);
const Tensor kTwo = test::AsScalar<float>(2.0);
const TensorShape scalar = TensorShape({});
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T", "v: resource"}, {"z:T"}, {"T: {float, double}"},
{{{"one"}, "Const", {}, {{"value", kOne}, {"dtype", DT_FLOAT}}},
{{"add"},
"AssignAddVariableOp",
{"v", "one:output:0"},
{{"dtype", DT_FLOAT}}},
{{"mul"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "mul:z:0"}},
{{"size_effects", "add"}});
GrapplerItem item;
TF_EXPECT_OK(item.AddDevice(kDevice));
item.fetch = {"out_1", "out_2"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("v", "VarHandleOp", {}, {{"dtype", DT_FLOAT}, {"shape", scalar}}),
NDef("init_v", "AssignVariableOp", {"v", "a"}, {{"dtype", DT_FLOAT}},
kDevice),
NDef("f1", "PartitionedCall", {"a", "b", "v", "^init_v"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
kDevice),
NDef("f2", "PartitionedCall", {"f1", "f1", "v", "^f1"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
kDevice),
NDef("out_1", "Identity", {"f2"}, {{"T", DT_FLOAT}}, kDevice),
NDef("out_2", "ReadVariableOp", {"v", "^f1", "^f2"},
{{"dtype", DT_FLOAT}}, kDevice)},
{mul_func});
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("v", "VarHandleOp", {}, {{"dtype", DT_FLOAT}, {"shape", scalar}},
kDevice),
NDef("init_v", "AssignVariableOp", {"v", "a"}, {{"dtype", DT_FLOAT}},
kDevice),
NDef("Func/f1/input_control_node/_0", "NoOp", {"^init_v"}, {}, kDevice),
NDef("Func/f1/input/_1", "Identity",
{"a", "^Func/f1/input_control_node/_0"}, {{"T", DT_FLOAT}},
kDevice),
NDef("Func/f1/input/_2", "Identity",
{"b", "^Func/f1/input_control_node/_0"}, {{"T", DT_FLOAT}},
kDevice),
NDef("Func/f1/input/_3", "Identity",
{"v", "^Func/f1/input_control_node/_0"}, {{"T", DT_RESOURCE}},
kDevice),
NDef("f1/one", "Const", {"^Func/f1/input_control_node/_0"},
{{"dtype", DT_FLOAT}, {"value", kOne}}, kDevice),
NDef("f1/mul", "Mul", {"Func/f1/input/_1", "Func/f1/input/_2"},
{{"T", DT_FLOAT}}, kDevice),
NDef("f1/add", "AssignAddVariableOp", {"Func/f1/input/_3", "f1/one"},
{{"dtype", DT_FLOAT}}, kDevice),
NDef("Func/f1/output/_4", "Identity", {"f1/mul"}, {{"T", DT_FLOAT}},
kDevice),
NDef("Func/f1/output_control_node/_5", "NoOp", {"^f1/add"}, {}, kDevice),
NDef("Func/f2/input_control_node/_6", "NoOp",
{"^Func/f1/output_control_node/_5"}, {}, kDevice),
NDef("Func/f2/input/_7", "Identity",
{"Func/f1/output/_4", "^Func/f2/input_control_node/_6"},
{{"T", DT_FLOAT}}, kDevice),
NDef("Func/f2/input/_8", "Identity",
{"Func/f1/output/_4", "^Func/f2/input_control_node/_6"},
{{"T", DT_FLOAT}}, kDevice),
NDef("Func/f2/input/_9", "Identity",
{"v", "^Func/f2/input_control_node/_6"}, {{"T", DT_RESOURCE}},
kDevice),
NDef("f2/one", "Const", {"^Func/f2/input_control_node/_6"},
{{"dtype", DT_FLOAT}, {"value", kOne}}, kDevice),
NDef("f2/add", "AssignAddVariableOp", {"Func/f2/input/_9", "f2/one"},
{{"dtype", DT_FLOAT}}, kDevice),
NDef("f2/mul", "Mul", {"Func/f2/input/_7", "Func/f2/input/_8"},
{{"T", DT_FLOAT}}, kDevice),
NDef("Func/f2/output/_10", "Identity", {"f2/mul"}, {{"T", DT_FLOAT}},
kDevice),
NDef("Func/f2/output_control_node/_11", "NoOp", {"^f2/add"}, {},
kDevice),
NDef("out_1", "Identity", {"Func/f2/output/_10"}, {{"T", DT_FLOAT}},
kDevice),
NDef("out_2", "ReadVariableOp",
{"v", "^Func/f1/output_control_node/_5",
"^Func/f2/output_control_node/_11"},
{{"dtype", DT_FLOAT}}, kDevice)},
{mul_func});
CompareGraphs(expected, optimized_graph);
item.feed.emplace_back("a", kOne);
item.feed.emplace_back("b", kTwo);
auto tensors_expected = EvaluateFetchNodes(item);
ASSERT_EQ(tensors_expected.size(), 2);
EXPECT_EQ(tensors_expected[0].flat<float>()(0), 4.0);
EXPECT_EQ(tensors_expected[1].flat<float>()(0), 3.0);
GrapplerItem optimized = item.WithGraph(std::move(optimized_graph));
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors.size(), 2);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
test::ExpectTensorEqual<float>(tensors_expected[1], tensors[1]);
}
TEST_F(FunctionOptimizerTest, InlineIndirectFunctionWithDevicePlacement) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::AGGRESSIVE, true);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "mul:z:0"}});
(*mul_func.mutable_node_def())[0].set_device("/device:CPU:1");
const string cpu0 = "/job:work/replica:1/task:1/device:CPU:0";
const string cpu1 = "/job:work/replica:1/task:1/device:CPU:1";
GrapplerItem item;
item.fetch = {"d"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu0),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu1),
NDef("c", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
cpu0),
NDef("d", "Identity", {"c"}, {{"T", DT_FLOAT}}, cpu0)},
{mul_func});
ASSERT_TRUE(item.InferDevicesFromGraph().ok());
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
const string input_x = "Func/c/input/_0";
const string input_y = "Func/c/input/_1";
const string output_z = "Func/c/output/_2";
GraphDef expected = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu0),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu1),
NDef(input_x, "Identity", {"a"}, {{"T", DT_FLOAT}}, cpu0),
NDef(input_y, "Identity", {"b"}, {{"T", DT_FLOAT}}, cpu1),
NDef("c/mul", "Mul", {input_x, input_y}, {{"T", DT_FLOAT}}, cpu1),
NDef(output_z, "Identity", {"c/mul"}, {{"T", DT_FLOAT}}, cpu1),
NDef("d", "Identity", {output_z}, {{"T", DT_FLOAT}}, cpu0)},
{mul_func});
CompareGraphs(expected, optimized_graph);
}
TEST_F(FunctionOptimizerTest,
InlineMultipleIndirectFunctionWithDevicePlacement) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::AGGRESSIVE, true);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "mul:z:0"}});
(*mul_func.mutable_node_def())[0].set_device("/device:CPU:1");
const string cpu0 = "/job:work/replica:1/task:1/device:CPU:0";
const string cpu1 = "/job:work/replica:1/task:1/device:CPU:1";
GrapplerItem item;
item.fetch = {"e"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu0),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu1),
NDef("c", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
cpu0),
NDef("d", "PartitionedCall", {"a", "c"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
cpu0),
NDef("e", "Identity", {"d"}, {{"T", DT_FLOAT}}, cpu0)},
{mul_func});
ASSERT_TRUE(item.InferDevicesFromGraph().ok());
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
const string input_c_x = "Func/c/input/_0";
const string input_c_y = "Func/c/input/_1";
const string output_c_z = "Func/c/output/_2";
const string input_d_x = "Func/d/input/_3";
const string input_d_y = "Func/d/input/_4";
const string output_d_z = "Func/d/output/_5";
GraphDef expected = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu0),
NDef("b", "Placeholder", {}, |
1,389 | cpp | tensorflow/tensorflow | tfg_optimizer_hook | tensorflow/core/grappler/optimizers/tfg_optimizer_hook.cc | tensorflow/core/grappler/optimizers/tfg_optimizer_hook_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_TFG_OPTIMIZER_HOOK_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_TFG_OPTIMIZER_HOOK_H_
#include <functional>
#include <string>
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
namespace mlir {
class PassManager;
namespace tfg {
using TFGPassPipelineBuilder = std::function<void(PassManager& pm)>;
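// A Grappler optimizer that round-trips the GraphDef through TFG MLIR and
// runs the given pass pipeline on it. Usage sketch (the pass-creation function
// below is hypothetical; any pipeline-building callback works):
//
//   mlir::tfg::TFGGrapplerOptimizer optimizer([](mlir::PassManager& mgr) {
//     mgr.addNestedPass<mlir::tfg::GraphOp>(CreateMyTfgPass());  // hypothetical
//   });
//   tensorflow::GraphDef optimized;
//   TF_RETURN_IF_ERROR(optimizer.Optimize(/*cluster=*/nullptr, item, &optimized));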
class TFGGrapplerOptimizer : public tensorflow::grappler::GraphOptimizer {
public:
explicit TFGGrapplerOptimizer(TFGPassPipelineBuilder builder,
unsigned num_tfg_threads = 0);
~TFGGrapplerOptimizer() override;
std::string name() const override;
bool UsesFunctionLibrary() const override { return true; }
tensorflow::Status Optimize(tensorflow::grappler::Cluster* cluster,
const tensorflow::grappler::GrapplerItem& item,
tensorflow::GraphDef* optimized_graph) override;
private:
class Impl;
std::unique_ptr<Impl> impl_;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/tfg_optimizer_hook.h"
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "llvm/Support/ThreadPool.h"
#include "llvm/Support/Threading.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/importexport/graphdef_export.h"
#include "tensorflow/core/ir/importexport/graphdef_import.h"
#include "tensorflow/core/ir/tf_op_registry.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/dump_graph.h"
using tensorflow::Status;
using tensorflow::errors::InvalidArgument;
namespace mlir {
namespace tfg {
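// Owns the MLIRContext and the PassManager running the TFG pipeline. MLIR
// threading is disabled by default; a dedicated LLVM thread pool is attached
// only when num_tfg_threads is nonzero.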
class TFGGrapplerOptimizer::Impl {
public:
explicit Impl(TFGPassPipelineBuilder builder, unsigned num_tfg_threads)
: ctx_(MLIRContext::Threading::DISABLED), mgr_(&ctx_) {
DialectRegistry registry;
registry.addExtension(+[](MLIRContext* ctx, TFGraphDialect* dialect) {
dialect->addInterfaces<TensorFlowOpRegistryInterface>();
});
ctx_.appendDialectRegistry(registry);
builder(mgr_);
if (num_tfg_threads) {
llvm::ThreadPoolStrategy strategy;
strategy.ThreadsRequested = num_tfg_threads;
threadpool_ = std::make_unique<llvm::DefaultThreadPool>(strategy);
ctx_.setThreadPool(*threadpool_);
}
}
LogicalResult RunPipeline(ModuleOp module) { return mgr_.run(module); }
MLIRContext* GetContext() { return &ctx_; }
std::string GetPipelineString() {
std::string pipeline;
llvm::raw_string_ostream os(pipeline);
mgr_.printAsTextualPipeline(os);
return os.str();
}
private:
std::unique_ptr<llvm::DefaultThreadPool> threadpool_;
MLIRContext ctx_;
PassManager mgr_;
};
TFGGrapplerOptimizer::TFGGrapplerOptimizer(TFGPassPipelineBuilder builder,
unsigned num_tfg_threads)
: impl_(std::make_unique<Impl>(std::move(builder), num_tfg_threads)) {}
TFGGrapplerOptimizer::~TFGGrapplerOptimizer() = default;
std::string TFGGrapplerOptimizer::name() const {
return absl::StrCat("tfg_optimizer{", impl_->GetPipelineString(), "}");
}
Status TFGGrapplerOptimizer::Optimize(
tensorflow::grappler::Cluster* cluster,
const tensorflow::grappler::GrapplerItem& item,
tensorflow::GraphDef* optimized_graph) {
if (VLOG_IS_ON(4)) {
tensorflow::DumpGraphDefToFile(
absl::StrCat("tfg_before_graph_", item.id, "_",
std::hash<std::string>()(name())),
item.graph);
}
VLOG(5) << "TFG Before Graph: \n" << item.graph.DebugString();
tensorflow::GraphDebugInfo debug_info;
tensorflow::metrics::ScopedCounter<2> metrics(
tensorflow::metrics::GetGraphOptimizationCounter(),
{"TfgOptimizer", "convert_graphdef_to_tfg"});
auto error_or_module =
ImportGraphDef(impl_->GetContext(), debug_info, item.graph);
if (!error_or_module.ok()) {
auto status = error_or_module.status();
tensorflow::errors::AppendToMessage(
&status, "when importing GraphDef to MLIR module in GrapplerHook");
LOG(ERROR) << name() << " failed: " << status.ToString();
return absl::AbortedError(status.message());
}
metrics.ReportAndStop();
ModuleOp module = (*error_or_module).get();
if (failed(impl_->RunPipeline(module))) {
return absl::InvalidArgumentError("MLIR Graph Optimizer failed: ");
}
tensorflow::GraphDef graphdef;
metrics.Reset({"TfgOptimizer", "convert_tfg_to_graphdef"});
TF_RETURN_WITH_CONTEXT_IF_ERROR(
ConvertToGraphDef(module, &graphdef),
"when exporting MLIR module to GraphDef in GrapplerHook");
(void)graphdef.mutable_library();
metrics.ReportAndStop();
*optimized_graph = std::move(graphdef);
if (VLOG_IS_ON(4)) {
tensorflow::DumpGraphDefToFile(
absl::StrCat("tfg_after_graph_", item.id, "_",
std::hash<std::string>()(name())),
*optimized_graph);
}
if (VLOG_IS_ON(5)) {
VLOG(5) << "TFG After Graph: \n"
<< optimized_graph->DebugString() << "\nMLIR module: \n";
module.dump();
}
return absl::OkStatus();
}
}
}
|
#include "tensorflow/core/grappler/optimizers/tfg_optimizer_hook.h"
#include <utility>
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/grappler/optimizers/meta_optimizer.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/ir/tf_op_wrapper.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tfg {
namespace {
class TestPass : public PassWrapper<TestPass, OperationPass<GraphOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestPass);
StringRef getArgument() const override { return "grappler-hook-test-pass"; }
void runOnOperation() override {
GraphOp graph = getOperation();
for (TFOp op : graph.getOps()) op.setName(op.name() + "_visited");
}
};
class AlwaysFailPass
: public PassWrapper<AlwaysFailPass, OperationPass<GraphOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(AlwaysFailPass);
StringRef getArgument() const override { return "grappler-hook-fail-pass"; }
void runOnOperation() override { signalPassFailure(); }
};
}
}
}
namespace tensorflow {
namespace grappler {
namespace {
TEST(TFGOptimizerTest, TestCustomPipeline) {
Scope s = Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Const(s.WithOpName("b"), 1.0f, {10, 10});
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
EXPECT_EQ("a", item.graph.node(0).name());
EXPECT_EQ("b", item.graph.node(1).name());
mlir::tfg::TFGGrapplerOptimizer optimizer([](mlir::PassManager &mgr) {
mgr.addNestedPass<mlir::tfg::GraphOp>(
std::make_unique<mlir::tfg::TestPass>());
});
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_ASSERT_OK(status);
EXPECT_EQ("a_visited", output.node(0).name());
EXPECT_EQ("b_visited", output.node(1).name());
}
TEST(TFGOptimizerTest, TestCustomPipelineName) {
mlir::tfg::TFGGrapplerOptimizer optimizer([](mlir::PassManager &mgr) {
mgr.addNestedPass<mlir::tfg::GraphOp>(
std::make_unique<mlir::tfg::TestPass>());
});
EXPECT_EQ(optimizer.name(),
"tfg_optimizer{any(tfg.graph(grappler-hook-test-pass))}");
}
TEST(TFGOptimizerTest, TestImportErrorReturnsAborted) {
Scope s = Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
AttrValue attr;
attr.set_i(0);
item.graph.mutable_node(0)->mutable_attr()->insert({"", std::move(attr)});
mlir::tfg::TFGGrapplerOptimizer optimizer([](mlir::PassManager &mgr) {});
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
EXPECT_FALSE(status.ok());
EXPECT_TRUE(errors::IsAborted(status));
}
TEST(TFGOptimizerTest, TestPassErrorIsFatal) {
Scope s = Scope::NewRootScope();
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
mlir::tfg::TFGGrapplerOptimizer optimizer([](mlir::PassManager &mgr) {
mgr.addNestedPass<mlir::tfg::GraphOp>(
std::make_unique<mlir::tfg::AlwaysFailPass>());
});
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
EXPECT_FALSE(status.ok());
EXPECT_FALSE(errors::IsAborted(status));
EXPECT_TRUE(errors::IsInvalidArgument(status));
}
TEST(TFGOptimizerTest, TestImportErrorMetaOptimizerIsNotFatal) {
Scope s = Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
AttrValue attr;
attr.set_i(0);
item.graph.mutable_node(0)->mutable_attr()->insert({"", std::move(attr)});
std::vector<std::unique_ptr<GraphOptimizer>> optimizers;
optimizers.push_back(std::make_unique<mlir::tfg::TFGGrapplerOptimizer>(
[](mlir::PassManager &mgr) {}));
GraphDef output;
Status status =
RunMetaOptimizer(std::move(item), {}, nullptr, nullptr, &output);
TF_EXPECT_OK(status);
}
}
}
}
|
1390 | cpp | tensorflow/tensorflow | generic_layout_optimizer_transposer | tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer.cc | tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer_test.cc |
#ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_GENERIC_LAYOUT_OPTIMIZER_TRANSPOSER_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_GENERIC_LAYOUT_OPTIMIZER_TRANSPOSER_H_
#include <memory>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/frame.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace grappler {
constexpr char kAttrSrcFormat[] = "src_format";
constexpr char kAttrDstFormat[] = "dst_format";
constexpr char kAttrOutputShape[] = "_output_shapes";
constexpr char kGPU[] = "GPU";
constexpr char kCPU[] = "CPU";
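// Shared state for one layout-optimization run: the mutable graph view,
// inferred shapes, frame info, and the source/destination data formats with
// their permutation vectors. Typical setup (illustrative):
//
//   TransposeContext context;
//   TF_RETURN_IF_ERROR(
//       TransposeContext::InitializeTransposeContext(item, cluster, &context));
//   context.AssignDeviceAndDataFormats(kGPU, "NHWC", "NCHW");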
struct TransposeContext {
static Status InitializeTransposeContext(bool assume_valid_feeds,
const GrapplerItem& item,
const Cluster* cluster,
TransposeContext* context);
static Status InitializeTransposeContext(const GrapplerItem& item,
const Cluster* cluster,
TransposeContext* context) {
return InitializeTransposeContext(false, item, cluster, context);
}
void AssignDeviceAndDataFormats(absl::string_view target_device,
absl::string_view src_format,
absl::string_view dst_format);
FrameView frames;
GraphDef graph;
int num_nodes;
absl::flat_hash_set<string> nodes_to_preserve;
std::unique_ptr<GraphProperties> graph_properties;
std::unique_ptr<utils::MutableGraphView> graph_view;
string target_device;
string src_format;
string dst_format;
absl::flat_hash_map<char, int> src_dim_indices;
absl::flat_hash_map<char, int> dst_dim_indices;
std::vector<int> src_to_dst;
std::vector<int> dst_to_src;
string enforced_layout;
};
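// Base class for per-op layout rewriters. Implementations decide whether a
// node should be processed and splice Transpose/DataFormat conversion nodes
// onto its fanin and fanout edges via UpdateFaninEdgesWithOp and
// UpdateFanoutEdgesWithOp.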
class Transposer {
public:
explicit Transposer() {}
Transposer(const Transposer&) = delete;
Transposer& operator=(const Transposer&) = delete;
virtual ~Transposer() {}
bool ShouldProcess(const TransposeContext& context,
const utils::MutableNodeView& node) const;
virtual Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) = 0;
Status CreateConstPermNode(TransposeContext* context,
absl::string_view node_name,
absl::string_view device,
absl::Span<const int> permutation,
absl::string_view control_node_name,
utils::MutationNewNode* added_node);
Status CreateTransposeNode(
TransposeContext* context, absl::string_view name_format,
const DataType& data_type, absl::string_view device,
TensorShapeProto fanin_shape, absl::Span<const int> permutation,
absl::string_view control_node_name, utils::MutationNewNode* added_node,
string* transpose_node_name);
Status UpdateFaninEdgesWithOp(TransposeContext* context,
absl::Span<const int> dst_ports,
utils::MutableNodeView* dst_node,
absl::string_view op);
Status UpdateFanoutEdgesWithOp(TransposeContext* context,
absl::Span<const int> src_ports,
utils::MutableNodeView* src_node,
absl::string_view op);
Status CreateDataFormatNode(TransposeContext* context,
absl::string_view node_name, absl::string_view op,
absl::string_view device,
const DataType& data_type, bool is_fanin_on_host,
bool is_src_format_to_dst_format,
utils::MutationNewNode* added_node);
protected:
int GetFanoutPortRank(const utils::MutableNodeView& node, int port) const;
bool IsFanoutPortRankN(const utils::MutableNodeView& node, int port,
int n) const;
bool IsFanoutPortsRankN(const utils::MutableNodeView& node,
absl::Span<const int> ports, int n) const;
int GetFaninPortRank(const utils::MutableNodeView& node, int port) const;
bool IsFaninPortRankN(const utils::MutableNodeView& node, int port,
int n) const;
bool IsFaninPortDimsNIfConst(const utils::MutableNodeView& node, int port,
absl::Span<const int> dims) const;
bool IsFaninPortsDimsNIfConst(const utils::MutableNodeView& node,
absl::Span<const int> ports,
absl::Span<const int> dims) const;
bool CanProcessNode(const TransposeContext& context,
const utils::MutableNodeView& node) const;
Status UpdateEdge(TransposeContext* context, absl::string_view name_format,
absl::string_view op, const AttrValue* input_shape,
bool is_in_frame, bool is_src_format_to_dst_format,
const int src_port, const int dst_port,
utils::MutableNodeView* src_node,
utils::MutableNodeView* dst_node);
string GetFaninNameFormat(absl::string_view node_name, int port,
absl::string_view src_format,
absl::string_view dst_format);
string GetFanoutNameFormat(absl::string_view node_name, int port, int index,
absl::string_view src_format,
absl::string_view dst_format);
string LayoutOptimizerNode(absl::string_view node_name);
string GetReshapeNodeNameFormat(absl::string_view node_name, int index,
absl::string_view src_format,
absl::string_view dst_format);
string GetShapeConstNodeNameFormat(absl::string_view node_name, int index);
};
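// Transposers for ops that carry an explicit data_format attribute; UpdateNode
// rewrites that attribute in addition to the surrounding edges.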
class LayoutSensitiveOpTransposer : public Transposer {
public:
explicit LayoutSensitiveOpTransposer() : Transposer() {}
Status UpdateNode(TransposeContext* context, utils::MutableNodeView* node);
};
class DefaultLayoutSensitiveOpTransposer : public LayoutSensitiveOpTransposer {
public:
explicit DefaultLayoutSensitiveOpTransposer()
: LayoutSensitiveOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class BiasAddTransposer : public LayoutSensitiveOpTransposer {
public:
explicit BiasAddTransposer() : LayoutSensitiveOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class AvgPoolGradTransposer : public LayoutSensitiveOpTransposer {
public:
explicit AvgPoolGradTransposer() : LayoutSensitiveOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class BiasAddGradTransposer : public LayoutSensitiveOpTransposer {
public:
explicit BiasAddGradTransposer() : LayoutSensitiveOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class Conv2DBackpropFilterTransposer : public LayoutSensitiveOpTransposer {
public:
explicit Conv2DBackpropFilterTransposer() : LayoutSensitiveOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class Conv2DBackpropInputTransposer : public LayoutSensitiveOpTransposer {
public:
explicit Conv2DBackpropInputTransposer() : LayoutSensitiveOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class Conv3DTransposer : public LayoutSensitiveOpTransposer {
public:
explicit Conv3DTransposer() : LayoutSensitiveOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class Conv3DBackpropFilterTransposer : public LayoutSensitiveOpTransposer {
public:
explicit Conv3DBackpropFilterTransposer() : LayoutSensitiveOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class Conv3DBackpropInputTransposer : public LayoutSensitiveOpTransposer {
public:
explicit Conv3DBackpropInputTransposer() : LayoutSensitiveOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class FusedBatchNormExTransposer : public LayoutSensitiveOpTransposer {
public:
explicit FusedBatchNormExTransposer() : LayoutSensitiveOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class FusedBatchNormGradTransposer : public LayoutSensitiveOpTransposer {
public:
explicit FusedBatchNormGradTransposer() : LayoutSensitiveOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
private:
bool IsTraining(const utils::MutableNodeView& node) const;
};
class MaxPoolV2Transposer : public LayoutSensitiveOpTransposer {
public:
explicit MaxPoolV2Transposer() : LayoutSensitiveOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class MaxPool3DTransposer : public LayoutSensitiveOpTransposer {
public:
explicit MaxPool3DTransposer() : LayoutSensitiveOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class MaxPoolGradTransposer : public LayoutSensitiveOpTransposer {
public:
explicit MaxPoolGradTransposer() : LayoutSensitiveOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class MaxPoolGradV2Transposer : public LayoutSensitiveOpTransposer {
public:
explicit MaxPoolGradV2Transposer() : LayoutSensitiveOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
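// Transposers for ops that are indifferent to data format. They are rewritten
// only when they appear downstream of a dst-to-src transform
// (IsAfterDstToSrcTransform), so that redundant transpose pairs can be elided
// later.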
class LayoutAgnosticOpTransposer : public Transposer {
public:
explicit LayoutAgnosticOpTransposer() : Transposer() {}
protected:
bool IsAfterDstToSrcTransform(const TransposeContext& context,
const utils::MutableNodeView& node) const;
std::vector<int> GetVariadicNDFaninPorts(const TransposeContext& context,
const utils::MutableNodeView& node,
int rank) const;
};
class DefaultLayoutAgnosticOpTransposer : public LayoutAgnosticOpTransposer {
public:
explicit DefaultLayoutAgnosticOpTransposer() : LayoutAgnosticOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class AddNTransposer : public LayoutAgnosticOpTransposer {
public:
explicit AddNTransposer() : LayoutAgnosticOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class BinaryOpTransposer : public LayoutAgnosticOpTransposer {
public:
explicit BinaryOpTransposer() : LayoutAgnosticOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
private:
bool IsNDOperateWithMD(const utils::MutableNodeView& node, int n, int m);
bool IsFaninShapeSupported(const utils::MutableNodeView& node, int rank);
std::vector<int> GetNDDataFaninPorts(const utils::MutableNodeView& node,
int rank);
Status AddNodeShapeConst(utils::Mutation* mutation,
absl::string_view node_name,
absl::string_view node_device, bool node_in_frame,
int num_channels, absl::string_view depended_node,
int rank);
Status AddNodeReshape(utils::Mutation* mutation, absl::string_view node_name,
absl::string_view node_device,
absl::string_view input_name,
absl::string_view shape_const_node_name,
const DataType& data_type);
Status MaybeReshapeVectorFanin(TransposeContext* context,
utils::MutableNodeView* node, int rank);
};
class ConcatOpTransposer : public LayoutAgnosticOpTransposer {
public:
explicit ConcatOpTransposer() : LayoutAgnosticOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class FillOpTransposer : public LayoutAgnosticOpTransposer {
public:
explicit FillOpTransposer() : LayoutAgnosticOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class IdentityNTransposer : public LayoutAgnosticOpTransposer {
public:
explicit IdentityNTransposer() : LayoutAgnosticOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class MergeTransposer : public LayoutAgnosticOpTransposer {
public:
explicit MergeTransposer() : LayoutAgnosticOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
private:
bool IsEveryFaninAfterDstToSrcTransform(
const TransposeContext& context,
const utils::MutableNodeView& node) const;
};
class PadTransposer : public LayoutAgnosticOpTransposer {
public:
explicit PadTransposer() : LayoutAgnosticOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class ReduceTransposer : public LayoutAgnosticOpTransposer {
public:
explicit ReduceTransposer() : LayoutAgnosticOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
private:
bool KeepDims(const utils::MutableNodeView& node);
bool IsAlongAxis(const Tensor& tensor, absl::Span<const int> axis, int rank);
bool IsReduceAxisSupported(const TransposeContext& context,
const utils::MutableNodeView& node, int rank);
};
class ReverseV2Transposer : public LayoutAgnosticOpTransposer {
public:
explicit ReverseV2Transposer() : LayoutAgnosticOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class SelectTransposer : public LayoutAgnosticOpTransposer {
public:
explicit SelectTransposer() : LayoutAgnosticOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
protected:
bool IsFaninScalarVector4D(const utils::MutableNodeView& fanin, int port);
std::vector<int> GetFaninPorts(const utils::MutableNodeView& fanin, int port);
};
class ShapeTransposer : public LayoutAgnosticOpTransposer {
public:
explicit ShapeTransposer() : LayoutAgnosticOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class ShapeNTransposer : public LayoutAgnosticOpTransposer {
public:
explicit ShapeNTransposer() : LayoutAgnosticOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class SliceTransposer : public LayoutAgnosticOpTransposer {
public:
explicit SliceTransposer() : LayoutAgnosticOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class SplitTransposer : public LayoutAgnosticOpTransposer {
public:
explicit SplitTransposer() : LayoutAgnosticOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class SplitVTransposer : public LayoutAgnosticOpTransposer {
public:
explicit SplitVTransposer() : LayoutAgnosticOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class SqueezeTransposer : public LayoutAgnosticOpTransposer {
public:
explicit SqueezeTransposer() : LayoutAgnosticOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
private:
bool IsInputConvertible(const TransposeContext& context,
const utils::MutableNodeView& node) const;
bool IsAlongAxis(const AttrValue& attr, absl::Span<const int> axis,
int rank) const;
bool IsDimsSupported(const TransposeContext& context,
const utils::MutableNodeView& node) const;
Status UpdateSqueezeDims(TransposeContext* context,
utils::MutableNodeView* node);
};
class StridedSliceTransposer : public LayoutAgnosticOpTransposer {
public:
explicit StridedSliceTransposer() : LayoutAgnosticOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
private:
bool IsMaskZero(const utils::MutableNodeView& node, absl::string_view mask);
bool HasOnlyBeginEndMask(const utils::MutableNodeView& node);
Status PermuteMask(TransposeContext* context, utils::MutableNodeView* node,
absl::string_view mask);
};
class SwitchTransposer : public LayoutAgnosticOpTransposer {
public:
explicit SwitchTransposer() : LayoutAgnosticOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class TernaryOpTransposer : public LayoutAgnosticOpTransposer {
public:
explicit TernaryOpTransposer() : LayoutAgnosticOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class TileTransposer : public LayoutAgnosticOpTransposer {
public:
explicit TileTransposer() : LayoutAgnosticOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
class UnaryGradTransposer : public LayoutAgnosticOpTransposer {
public:
explicit UnaryGradTransposer() : LayoutAgnosticOpTransposer() {}
Status TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) override;
};
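// Permutes `values` in place according to `permutation`; values->size() must
// match permutation.size(). For example, the NHWC->NCHW permutation
// {0, 3, 1, 2} applied to strides {1, 2, 4, 1} yields {1, 1, 2, 4}.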
template <typename T>
Status PermuteSingle(absl::string_view location,
absl::Span<const int> permutation, T* values) {
DCHECK(values != nullptr);
int permutation_size = permutation.size();
if (values->size() != permutation_size) {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("Size of values ", values->size(),
" does not match size of permutation ",
permutation_size, " @ ", location));
}
typedef typename T::value_type V;
std::vector<V> elements(values->begin(), values->end());
int index = 0;
for (V& element : *values) {
element = elements[permutation[index++]];
}
return absl::OkStatus();
}
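// Like PermuteSingle, but treats `values` as a flattened list of (low, high)
// pairs, one pair per dimension (e.g. the explicit_paddings attribute), so
// values->size() must be 2 * permutation.size(). With permutation
// {0, 3, 1, 2}, paddings {0,0, 1,2, 3,4, 0,0} become {0,0, 0,0, 1,2, 3,4}.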
template <typename T>
Status PermuteDouble(absl::string_view location,
absl::Span<const int> permutation, T* values) {
DCHECK(values != nullptr);
int permutation_size = permutation.size();
if (values->size() != permutation_size * 2) {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("Size of values ", values->size(),
" does not match twice the size of permutation ",
permutation_size, " @ ", location));
}
typedef typename T::value_type V;
std::vector<V> elements(values->begin(), values->end());
for (int i = 0; i < values->size(); i = i + 2) {
const int permutation_index = permutation[i / 2];
(*values)[i] = elements[permutation_index * 2];
(*values)[i + 1] = elements[permutation_index * 2 + 1];
}
return absl::OkStatus();
}
string GetDeviceName(const NodeDef& node);
bool IsDefaultLayoutSensitiveOp(const NodeDef& node);
bool IsLayoutSensitiveOp(const NodeDef& node);
bool IsDefaultLayoutAgnosticOp(const NodeDef& node);
bool IsLayoutAgnosticOp(const NodeDef& node);
bool IsTernaryOp(const NodeDef& node);
bool IsUnaryGrad(const NodeDef& node);
bool IsMaxPoolV2(const NodeDef& node);
bool IsMaxPool3D(const NodeDef& node);
bool IsMaxPoolGradV2(const NodeDef& node);
bool IsMaxPoolGradGradV1(const NodeDef& node);
bool IsMaxPoolGradGradV2(const NodeDef& node);
bool IsBinaryOp(const NodeDef& node);
bool IsReduceOp(const NodeDef& node);
std::vector<int> GetDataFaninPorts(const utils::MutableNodeView& node);
std::vector<int> GetDataFanoutPorts(const utils::MutableNodeView& node);
bool GetValueAttrFromConstInputNode(
const utils::MutableNodeView& node,
const std::function<bool(const NodeDef&)>& predicate, int index,
Tensor* tensor);
bool IsDataFormatOp(const utils::MutableNodeView& node);
absl::flat_hash_map<char, int> GetDimensionIndices(
absl::string_view data_format);
std::vector<int> GetPermutation(
const absl::flat_hash_map<char, int>& src_dim_indices,
absl::string_view dst_format);
}
}
#endif
#include "tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer.h"
#include <algorithm>
#include <numeric>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/ascii.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/memory_types.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/frame.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/protobuf/device_properties.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kOptimizedSuffix[] = "LayoutOptimizer";
constexpr char kAttrKSize[] = "ksize";
constexpr char kAttrStrides[] = "strides";
constexpr char kAttrDilations[] = "dilations";
constexpr char kAttrExplicitPaddings[] = "explicit_paddings";
constexpr char kAttrDataFormat[] = "data_format";
constexpr char kAttrIsTraining[] = "is_training";
constexpr char kAttrValue[] = "value";
constexpr char kAttrN[] = "N";
constexpr char kAttrT[] = "T";
constexpr char kAttrNumSplit[] = "num_split";
constexpr char kAttrNumOuts[] = "num_outs";
constexpr char kAttrKeepDims[] = "keep_dims";
constexpr char kAttrSqueezeDims[] = "squeeze_dims";
constexpr char kOpTranspose[] = "Transpose";
constexpr char kOpDataFormatVecPermute[] = "DataFormatVecPermute";
constexpr char kOpDataFormatDimMap[] = "DataFormatDimMap";
constexpr char kOpConst[] = "Const";
constexpr char kReshape[] = "Reshape";
constexpr char kReshapeConst[] = "ReshapeConst";
constexpr int kRank = 4;
constexpr int kUnknownRank = -1;
constexpr int kInvalidRank = -2;
inline bool AttrDataFormatMatch(const utils::MutableNodeView& node,
absl::string_view src_data_format,
bool* missing) {
const auto* attr = node.GetAttr(kAttrDataFormat);
if (attr != nullptr) {
return attr->s() == src_data_format;
}
*missing = true;
return false;
}
inline bool AttrDataFormatMatch(const utils::MutableNodeView& node,
absl::string_view src_data_format) {
bool missing = false;
return AttrDataFormatMatch(node, src_data_format, &missing);
}
bool IsNonFloatingConv2D(const utils::MutableNodeView& node) {
if (IsConv2D(*node.node()) || IsConv2DBackpropInput(*node.node())) {
const auto* attr = node.GetAttr(kAttrT);
if (attr != nullptr) {
return !kDataTypeIsFloating.Contains(attr->type());
}
}
return false;
}
bool IsNonFloatingConv3D(const utils::MutableNodeView& node) {
if (IsConv3D(*node.node())) {
const auto* attr = node.GetAttr(kAttrT);
if (attr != nullptr) {
return !kDataTypeIsFloating.Contains(attr->type());
}
}
return false;
}
bool IsComparisonOp(const NodeDef& node) {
bool is_compare = IsApproximateEqual(node) || IsEqual(node) ||
IsGreater(node) || IsGreaterEqual(node) || IsLess(node) ||
IsLessEqual(node) || IsNotEqual(node);
return is_compare;
}
std::vector<int> GetRegularFaninPorts(const utils::MutableNodeView& node) {
const int num_regular_fanins = node.NumRegularFanins();
std::vector<int> values(num_regular_fanins);
std::iota(values.begin(), values.end(), 0);
return values;
}
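// Concat takes the concat axis as input 0 followed by N data tensors, while
// ConcatV2 takes the N data tensors first and the axis last; hence the data
// fanin ports start at 1 for "Concat" and at 0 otherwise.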
std::vector<int> GetConcatDataFaninPorts(const utils::MutableNodeView& node) {
const auto* n_attr = node.GetAttr(kAttrN);
const int n = n_attr != nullptr ? n_attr->i() : 0;
const int start = (node.GetOp() == "Concat") ? 1 : 0;
const int end = start + n;
std::vector<int> values(end - start);
std::iota(values.begin(), values.end(), start);
return values;
}
struct ComparatorByNodeNameAndIndex {
bool operator()(const utils::MutableFaninView& node1,
const utils::MutableFaninView& node2) const {
auto* node1_view = node1.node_view();
auto* node2_view = node2.node_view();
auto name_compare = node1_view->GetName().compare(node2_view->GetName());
if (name_compare == 0) {
return node1.index() < node2.index();
}
return name_compare < 0;
}
};
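// Returns true if `node` produces its output_port-th output in host memory,
// and conservatively also when no kernel is registered for the node's device
// type; such outputs are left alone so that no device-side transpose is
// wrapped around them.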
bool IsHostMemory(const NodeDef& node, int output_port) {
if (node.attr().contains("_xla_input") && node.attr().at("_xla_input").b())
return false;
DeviceNameUtils::ParsedName parsed_name;
if (DeviceNameUtils::ParseFullName(node.device(), &parsed_name)) {
DeviceType device_type(parsed_name.type);
Status s = FindKernelDef(device_type, node, nullptr, nullptr);
if (s.ok()) {
tensorflow::MemoryTypeVector in_mtypes;
tensorflow::MemoryTypeVector out_mtypes;
s = tensorflow::MemoryTypesForNode(OpRegistry::Global(), device_type,
node, &in_mtypes, &out_mtypes);
if (s.ok()) {
if (out_mtypes[output_port] == HOST_MEMORY) {
return true;
}
}
} else {
return true;
}
}
return false;
}
std::vector<int> GetDimensionIndicesFromLabel(
const absl::flat_hash_map<char, int>& dim_indices,
absl::Span<const char> labels) {
std::vector<int> indices;
indices.reserve(labels.size());
for (const auto& label : labels) {
indices.push_back(dim_indices.at(label));
}
return indices;
}
class ScopedDataFormatUpgrader {
public:
ScopedDataFormatUpgrader(TransposeCo
#include "tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/math_ops_internal.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/nn_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/clusters/single_machine.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using ::tensorflow::test::ExpectTensorEqual;
constexpr int kBatchSize = 32;
constexpr int kWidth = 10;
constexpr int kHeight = 10;
constexpr int kDepthIn = 8;
constexpr int kKernel = 2;
constexpr int kStride1 = 2;
constexpr int kStride2 = 4;
constexpr int kOutWidth = 5;
constexpr int kOutHeight = 5;
constexpr int kDepthOut = 16;
constexpr int kDilation = 2;
constexpr int kPaddingTop = 1;
constexpr int kPaddingBottom = 2;
constexpr int kPaddingLeft = 3;
constexpr int kPaddingRight = 4;
constexpr char kSrcFormat[] = "NHWC";
constexpr char kDstFormat[] = "NCHW";
constexpr char kGPU[] = "GPU";
constexpr char kAttrOutputShapes[] = "_output_shapes";
constexpr char kAttrDataFormat[] = "data_format";
constexpr char kOpTranspose[] = "Transpose";
class TransposerImpl : public Transposer {
public:
explicit TransposerImpl() : Transposer() {}
Status TransposeNode(TransposeContext*, utils::MutableNodeView*) override {
return absl::OkStatus();
}
};
void VerifyRegularFaninMatch(const utils::MutableNodeView* node, int port,
absl::string_view fanin_name, int fanin_port) {
ASSERT_GT(node->NumRegularFanins(), port);
const auto& fanin = node->GetRegularFanin(port);
EXPECT_EQ(fanin.node_view()->GetName(), fanin_name);
EXPECT_EQ(fanin.index(), fanin_port);
}
void VerifyShapeAttributeMatch(const utils::MutableNodeView* node,
absl::string_view attr_value) {
const auto* attr = node->GetAttr(kAttrOutputShapes);
ASSERT_NE(attr, nullptr);
EXPECT_EQ(attr->shape().DebugString(), attr_value);
}
void VerifyShapeAttributeMatch(const utils::MutableNodeView* node,
int shape_index, absl::string_view attr_value) {
const auto* attr = node->GetAttr(kAttrOutputShapes);
ASSERT_NE(attr, nullptr);
ASSERT_GT(attr->list().shape_size(), shape_index);
EXPECT_EQ(attr->list().shape(shape_index).DebugString(), attr_value);
}
void VerifyDataFormatAttributeMatch(const utils::MutableNodeView* node,
absl::string_view attr_value) {
const auto* attr = node->GetAttr(kAttrDataFormat);
ASSERT_NE(attr, nullptr);
EXPECT_EQ(attr->s(), attr_value);
}
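// The graph helpers below build an NHWC Conv2D pinned to GPU so the
// transposers under test have a layout-sensitive anchor to rewrite to NCHW.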
Output SimpleConv2D(const Scope* scope, const DataType& data_type = DT_FLOAT) {
auto input =
ops::RandomUniform(scope->WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, data_type);
auto filter =
ops::RandomUniform(scope->WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, data_type);
auto conv2d = ops::Conv2D(
scope->WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, kStride1, kStride2, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
return conv2d;
}
Status CreateSimpleConv2DGraph(GraphDef* graph,
const DataType& data_type = DT_FLOAT) {
Scope scope = Scope::NewRootScope();
auto conv2d = SimpleConv2D(&scope, data_type);
auto output = ops::Identity(scope.WithOpName("output"), conv2d);
return scope.ToGraphDef(graph);
}
Status CreateSimpleFusedBatchNorm(GraphDef* graph,
const DataType& data_type = DT_FLOAT) {
Scope scope = Scope::NewRootScope();
auto x =
ops::RandomUniform(scope.WithOpName("x"),
{kBatchSize, kHeight, kWidth, kDepthIn}, data_type);
auto scale =
ops::RandomUniform(scope.WithOpName("scale"), {kDepthIn}, DT_FLOAT);
auto offset =
ops::RandomUniform(scope.WithOpName("offset"), {kDepthIn}, DT_FLOAT);
auto mean =
ops::RandomUniform(scope.WithOpName("mean"), {kDepthIn}, DT_FLOAT);
auto var = ops::RandomUniform(scope.WithOpName("var"), {kDepthIn}, DT_FLOAT);
auto batch_norm = ops::FusedBatchNormV2(
scope.WithOpName("bn").WithDevice("/device:GPU:0"), x, scale, offset,
mean, var, ops::FusedBatchNormV2::IsTraining(false).Epsilon(0.1f));
auto output_y = ops::Identity(scope.WithOpName("output_y"), batch_norm.y);
auto output_mean =
ops::Identity(scope.WithOpName("output_mean"), batch_norm.batch_mean);
auto output_variance = ops::Identity(scope.WithOpName("output_variance"),
batch_norm.batch_variance);
return scope.ToGraphDef(graph);
}
Status CreateSimpleMaxPoolGrad(GraphDef* graph, bool use_grad_grad) {
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("orig_input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto output_data = ops::RandomUniform(
scope.WithOpName("orig_output"),
{kBatchSize, kOutHeight, kOutWidth, kDepthIn}, DT_FLOAT);
auto output_grad =
ops::RandomUniform(scope.WithOpName("grad"),
{kBatchSize, use_grad_grad ? kHeight : kOutHeight,
use_grad_grad ? kWidth : kOutWidth, kDepthIn},
DT_FLOAT);
Output maxpool_grad;
if (use_grad_grad) {
maxpool_grad = ops::MaxPoolGradGrad(
scope.WithOpName("maxpool_grad").WithDevice("/device:GPU:0"), input,
output_data, output_grad, {1, kKernel, kKernel, 1},
{1, kStride1, kStride1, 1}, "VALID");
} else {
maxpool_grad = ops::internal::MaxPoolGrad(
scope.WithOpName("maxpool_grad").WithDevice("/device:GPU:0"), input,
output_data, output_grad, {1, kKernel, kKernel, 1},
{1, kStride1, kStride1, 1}, "VALID");
}
auto output = ops::Identity(scope.WithOpName("output"), maxpool_grad);
return scope.ToGraphDef(graph);
}
Status CreateSimpleBiasAddGrad(GraphDef* graph, const Input& shape) {
Scope scope = Scope::NewRootScope();
auto input = ops::RandomUniform(scope.WithOpName("input"), shape, DT_FLOAT);
auto bag =
ops::BiasAddGrad(scope.WithOpName("bag").WithDevice("/device:GPU:0"),
input, ops::BiasAddGrad::DataFormat(kSrcFormat));
auto output = ops::Identity(scope.WithOpName("output"), bag);
return scope.ToGraphDef(graph);
}
Status CreateSimpleConv2DBackpropFilter(GraphDef* graph,
const DataType& data_type = DT_FLOAT,
absl::string_view padding = "SAME") {
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, data_type);
auto out_backprop =
ops::RandomUniform(scope.WithOpName("out_backprop"),
{kBatchSize, kHeight, kWidth, kDepthOut}, data_type);
if (padding == "EXPLICIT") {
auto conv2d_backprop_filter = ops::Conv2DBackpropFilter(
scope.WithOpName("conv2d_backprop_filter").WithDevice("/device:GPU:0"),
input, {kHeight, kWidth, kDepthIn, kDepthOut}, out_backprop,
{1, 2, 4, 1}, padding,
ops::Conv2DBackpropFilter::Attrs()
.Dilations({1, kDilation, kDilation, 1})
.ExplicitPaddings({0, 0, kPaddingTop, kPaddingBottom, kPaddingLeft,
kPaddingRight, 0, 0})
.DataFormat(kSrcFormat));
auto output =
ops::Identity(scope.WithOpName("output"), conv2d_backprop_filter);
} else {
auto conv2d_backprop_filter = ops::Conv2DBackpropFilter(
scope.WithOpName("conv2d_backprop_filter").WithDevice("/device:GPU:0"),
input, {kHeight, kWidth, kDepthIn, kDepthOut}, out_backprop,
{1, 2, 4, 1}, padding,
ops::Conv2DBackpropFilter::DataFormat(kSrcFormat));
auto output =
ops::Identity(scope.WithOpName("output"), conv2d_backprop_filter);
}
return scope.ToGraphDef(graph);
}
Status CreateSimpleConv2DBackpropInput(GraphDef* graph,
const DataType& data_type = DT_FLOAT) {
Scope scope = Scope::NewRootScope();
auto input_sizes = ops::Const(scope.WithOpName("input_sizes"),
{kBatchSize, kHeight, kWidth, kDepthIn});
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, data_type);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, data_type);
auto out_backprop =
ops::RandomUniform(scope.WithOpName("out_backprop"),
{kBatchSize, kHeight, kWidth, kDepthOut}, data_type);
auto conv2d_backprop_input = ops::Conv2DBackpropInput(
scope.WithOpName("conv2d_backprop_input").WithDevice("/device:GPU:0"),
input_sizes, filter, out_backprop, {1, kStride1, kStride1, 1}, "VALID");
auto output =
ops::Identity(scope.WithOpName("output"), conv2d_backprop_input);
return scope.ToGraphDef(graph);
}
Status CreateSimpleFusedBatchNormGrad(GraphDef* graph, bool is_training,
const DataType& data_type = DT_FLOAT) {
Scope scope = Scope::NewRootScope();
auto y_backprop =
ops::RandomUniform(scope.WithOpName("y_backprop"),
{kBatchSize, kHeight, kWidth, kDepthIn}, data_type);
auto x =
ops::RandomUniform(scope.WithOpName("x"),
{kBatchSize, kHeight, kWidth, kDepthIn}, data_type);
auto scale =
ops::RandomUniform(scope.WithOpName("scale"), {kDepthIn}, DT_FLOAT);
auto reserve_space_1 = ops::RandomUniform(scope.WithOpName("reserve_space_1"),
{kDepthIn}, DT_FLOAT);
auto reserve_space_2 = ops::RandomUniform(scope.WithOpName("reserve_space_2"),
{kDepthIn}, DT_FLOAT);
auto fused_batch_norm_grad = ops::FusedBatchNormGradV2(
scope.WithOpName("fused_batch_norm_grad").WithDevice("/device:GPU:0"),
y_backprop, x, scale, reserve_space_1, reserve_space_2,
ops::FusedBatchNormGradV2::DataFormat(kSrcFormat)
.IsTraining(is_training)
.Epsilon(0.1f));
auto x_backprop = ops::Identity(scope.WithOpName("x_backprop"),
fused_batch_norm_grad.x_backprop);
auto scale_backprop = ops::Identity(scope.WithOpName("scale_backprop"),
fused_batch_norm_grad.scale_backprop);
auto offset_backprop = ops::Identity(scope.WithOpName("offset_backprop"),
fused_batch_norm_grad.offset_backprop);
auto reserve_space_3 = ops::Identity(scope.WithOpName("reserve_space_3"),
fused_batch_norm_grad.reserve_space_3);
auto reserve_space_4 = ops::Identity(scope.WithOpName("reserve_space_4"),
fused_batch_norm_grad.reserve_space_4);
return scope.ToGraphDef(graph);
}
Status CreateSimpleAddN(GraphDef* graph) {
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
Output a = ops::RandomUniform(scope.WithOpName("a"),
{kBatchSize, 5, 3, kDepthOut}, DT_FLOAT);
Output b = ops::RandomUniform(scope.WithOpName("b"),
{kBatchSize, 5, 3, kDepthOut}, DT_FLOAT);
Output c = ops::RandomUniform(scope.WithOpName("c"),
{kBatchSize, 5, 3, kDepthOut}, DT_FLOAT);
auto add_n = ops::AddN(scope.WithOpName("add_n").WithDevice("/device:GPU:0"),
{a, b, c, conv2d});
auto output = ops::Identity(scope.WithOpName("output"), add_n);
return scope.ToGraphDef(graph);
}
Status CreateSimpleIdentityN(GraphDef* graph) {
Scope scope = Scope::NewRootScope();
auto conv2d_1_input =
ops::RandomUniform(scope.WithOpName("conv2d_1_input"),
{kBatchSize, kDepthIn, kHeight, kWidth}, DT_FLOAT);
auto conv2d_1_filter =
ops::RandomUniform(scope.WithOpName("conv2d_1_filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d_1 =
ops::Conv2D(scope.WithOpName("conv2d_1").WithDevice("/device:GPU:0"),
conv2d_1_input, conv2d_1_filter, {1, 1, 2, 4}, "SAME",
ops::Conv2D::DataFormat(kDstFormat));
auto conv2d_2_input =
ops::RandomUniform(scope.WithOpName("conv2d_2_input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto conv2d_2_filter =
ops::RandomUniform(scope.WithOpName("conv2d_2_filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d_2 =
ops::Conv2D(scope.WithOpName("conv2d_2").WithDevice("/device:GPU:0"),
conv2d_2_input, conv2d_2_filter, {1, 2, 4, 1}, "SAME",
ops::Conv2D::DataFormat(kSrcFormat));
Output a = ops::RandomUniform(
scope.WithOpName("a"), {kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
Output b = ops::RandomUniform(scope.WithOpName("b"), {kBatchSize, kDepthIn},
DT_FLOAT);
auto identity_n =
ops::IdentityN(scope.WithOpName("identity_n").WithDevice("/device:GPU:0"),
{conv2d_1, conv2d_2, a, b});
auto conv2d_1_output =
ops::Identity(scope.WithOpName("conv2d_1_output"), identity_n.output[0]);
auto conv2d_2_output =
ops::Identity(scope.WithOpName("conv2d_2_output"), identity_n.output[1]);
auto a_output =
ops::Identity(scope.WithOpName("a_output"), identity_n.output[2]);
auto b_output =
ops::Identity(scope.WithOpName("b_output"), identity_n.output[3]);
return scope.ToGraphDef(graph);
}
class TransposerTest : public ::testing::Test {
protected:
void SetUp() override {
bool gpu_available = GetNumAvailableGPUs() > 0;
if (gpu_available) {
virtual_cluster_ =
std::make_unique<SingleMachine>(/*timeout_s=*/10, /*num_cpu_cores=*/1, /*num_gpus=*/1);
} else {
DeviceProperties gpu_device;
gpu_device.set_type(kGPU);
gpu_device.mutable_environment()->insert({"architecture", "6"});
virtual_cluster_ =
absl::WrapUnique(new VirtualCluster({{"/GPU:1", gpu_device}}));
}
TF_ASSERT_OK(virtual_cluster_->Provision());
}
void TearDown() override { TF_ASSERT_OK(virtual_cluster_->Shutdown()); }
template <typename T>
void ReduceTransposerKeepDims() {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto axis = ops::Const<T>(scope.WithOpName("axis"), {0, 1, 2}, {3});
auto attrs = ops::Sum::Attrs().KeepDims(true);
auto sum_op = ops::Sum(scope.WithOpName("sum").WithDevice("/device:GPU:0"),
conv2d, axis, attrs);
auto z = ops::Identity(scope.WithOpName("z"), sum_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
ReduceTransposer reducer_transposer;
auto* sum = context.graph_view->GetNode("sum");
ASSERT_NE(sum, nullptr);
TF_ASSERT_OK(reducer_transposer.TransposeNode(&context, sum));
auto* input_transpose_node = context.graph_view->GetNode(
"sum-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node, nullptr);
auto* updated_sum_node = context.graph_view->GetNode("sum");
ASSERT_NE(updated_sum_node, nullptr);
ASSERT_EQ(updated_sum_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(updated_sum_node, 0,
input_transpose_node->GetName(), 0);
auto* axis_node = context.graph_view->GetNode(
"sum-1-DataFormatDimMapNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(axis_node, nullptr);
ASSERT_EQ(axis_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(axis_node, 0, "axis", 0);
auto* output_transpose_node = context.graph_view->GetNode(
"sum-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, output_transpose_node->GetName(),
0);
}
template <typename T>
void ReduceTransposerValidAxisNode() {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto axis = ops::Const<T>(scope.WithOpName("axis"), {0, 1, 2}, {3});
auto sum_op = ops::Max(scope.WithOpName("max").WithDevice("/device:GPU:0"),
conv2d, axis);
auto z = ops::Identity(scope.WithOpName("z"), sum_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
ReduceTransposer reducer_transposer;
auto* max = context.graph_view->GetNode("max");
ASSERT_NE(max, nullptr);
TF_ASSERT_OK(reducer_transposer.TransposeNode(&context, max));
auto* input_transpose_node = context.graph_view->GetNode(
"max-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node, nullptr);
auto* updated_max_node = context.graph_view->GetNode("max");
ASSERT_NE(updated_max_node, nullptr);
ASSERT_EQ(updated_max_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(updated_max_node, 0,
input_transpose_node->GetName(), 0);
auto* axis_node = context.graph_view->GetNode(
"max-1-DataFormatDimMapNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(axis_node, nullptr);
ASSERT_EQ(axis_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(axis_node, 0, "axis", 0);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, updated_max_node->GetName(), 0);
}
std::unique_ptr<Cluster> virtual_cluster_;
};
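// The nodes inserted by the transposers appear to follow the naming scheme
// "<node>-<fanin port>[-<fanout port>]-<op><src>To<dst>-LayoutOptimizer";
// the tests below rely on it to look up the generated Transpose and
// PermConst nodes.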
TEST_F(TransposerTest, CreateConstPermNode) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
TransposeContext context;
TF_ASSERT_OK(CreateSimpleConv2DGraph(&item.graph));
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
TransposerImpl transposer;
constexpr char kNodeName[] = "const_perm_node";
constexpr char kDevice[] = "/device:GPU:0";
utils::MutationNewNode added_node;
EXPECT_FALSE(context.graph_view->HasNode(kNodeName));
TF_ASSERT_OK(transposer.CreateConstPermNode(&context, kNodeName, kDevice,
{0, 3, 1, 2}, "", &added_node));
TF_ASSERT_OK(context.graph_view->GetMutationBuilder()->Apply());
utils::MutableNodeView* const_perm_node =
context.graph_view->GetNode(kNodeName);
EXPECT_EQ(const_perm_node->GetName(), kNodeName);
EXPECT_EQ(const_perm_node->GetDevice(), kDevice);
const auto* value_attr = const_perm_node->GetAttr("value");
ASSERT_NE(value_attr, nullptr);
Tensor tensor;
ASSERT_TRUE(tensor.FromProto(value_attr->tensor()));
Tensor expected(DT_INT32, {4});
::tensorflow::test::FillValues<int32>(&expected, {0, 3, 1, 2});
ExpectTensorEqual<int32>(tensor, expected);
}
TensorShapeProto MakeTensorShapeFromDimensions(absl::Span<const int> dims) {
TensorShapeProto shape_proto = TensorShapeProto();
for (const int dim : dims) {
TensorShapeProto_Dim dim_proto = TensorShapeProto_Dim();
dim_proto.set_size(dim);
*shape_proto.add_dim() = std::move(dim_proto);
}
return shape_proto;
}
TEST_F(TransposerTest, CreateTransposeNode) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
TransposeContext context;
TF_ASSERT_OK(CreateSimpleConv2DGraph(&item.graph));
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
TransposerImpl transposer;
constexpr char kNodeNameFormat[] =
"transpose_node-0-$0-NWCHToNCWH-LayoutOptimizer";
constexpr char kDevice[] = "/device:GPU:0";
TensorShapeProto input_shape = MakeTensorShapeFromDimensions({1, 2, 3, 4});
TensorShapeProto expected_shape = MakeTensorShapeFromDimensions({1, 4, 2, 3});
utils::MutationNewNode added_node;
string transpose_node_name;
TF_ASSERT_OK(transposer.CreateTransposeNode(
&context, kNodeNameFormat, DT_DOUBLE, kDevice, input_shape, {0, 3, 1, 2},
"", &added_node, &transpose_node_name));
EXPECT_EQ(transpose_node_name,
"transpose_node-0-Transpose-NWCHToNCWH-LayoutOptimizer");
utils::Mutation* mutation = context.graph_view->GetMutationBuilder();
Status status;
mutation->AddNode({}, &status);
TF_ASSERT_OK(status);
TF_ASSERT_OK(context.graph_view->GetMutationBuilder()->Apply());
auto* transpose_node = context.graph_view->GetNode(transpose_node_name);
ASSERT_NE(transpose_node, nullptr);
EXPECT_EQ(transpose_node->GetDevice(), kDevice);
const auto* output_shapes_attr = transpose_node->GetAttr("_output_shapes");
EXPECT_EQ(output_shapes_attr->list().shape(0).DebugString(),
expected_shape.DebugString());
}
TEST_F(TransposerTest, UpdateNode) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
TransposeContext context;
TF_ASSERT_OK(CreateSimpleConv2DGraph(&item.graph));
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer transposer;
auto* conv2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(conv2d, nullptr);
TF_ASSERT_OK(transposer.UpdateNode(&context, conv2d));
TF_ASSERT_OK(context.graph_view->GetMutationBuilder()->Apply());
auto* updated_conv2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(updated_conv2d, nullptr);
VerifyDataFormatAttributeMatch(updated_conv2d, kDstFormat);
}
AttrValue_ListValue MakeAttrValueListValueFromVector(
absl::Span<const int> vec) {
AttrValue_ListValue list_proto = AttrValue_ListValue();
for (const int i : vec) {
list_proto.add_i(i);
}
return list_proto;
}
TEST_F(TransposerTest, UpdateStrides) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
TransposeContext context;
TF_ASSERT_OK(CreateSimpleConv2DGraph(&item.graph));
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, "ABCD", "ACBD");
AttrValue_ListValue expected_original_strides =
MakeAttrValueListValueFromVector({1, 2, 4, 1});
AttrValue_ListValue expected_updated_strides =
MakeAttrValueListValueFromVector({1, 4, 2, 1});
auto* conv2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(conv2d, nullptr);
const auto& strides_attr = conv2d->GetAttr("strides");
ASSERT_NE(strides_attr, nullptr);
EXPECT_EQ(strides_attr->list().DebugString(),
expected_original_strides.DebugString());
AttrValue data_format_attr;
data_format_attr.set_s("ABCD");
context.graph_view->GetMutationBuilder()->AddOrUpdateNodeAttr(
conv2d, "data_format", data_format_attr);
TF_ASSERT_OK(context.graph_view->GetMutationBuilder()->Apply());
DefaultLayoutSensitiveOpTransposer transposer;
TF_ASSERT_OK(transposer.UpdateNode(&context, conv2d));
TF_ASSERT_OK(context.graph_view->GetMutationBuilder()->Apply());
auto* updated_conv2d = context.graph_view->GetNode("conv2d");
const auto& updated_strides_attr = updated_conv2d->GetAttr("strides");
ASSERT_NE(updated_strides_attr, nullptr);
EXPECT_EQ(updated_strides_attr->list().DebugString(),
expected_updated_strides.DebugString());
}
TEST_F(TransposerTest, UpdateFaninEdgesTranspose) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
TransposeContext context;
TF_ASSERT_OK(CreateSimpleFusedBatchNormGrad(&item.graph, true));
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
FusedBatchNormGradTransposer transposer;
auto* fbng = context.graph_view->GetNode("fused_batch_norm_grad");
ASSERT_NE(fbng, nullptr);
const auto& fbng_output_shapes_attr = fbng->GetAttr("_output_shapes");
ASSERT_NE(fbng_output_shapes_attr, nullptr);
const TensorShapeProto& expected_shape = fbng_output_shapes_attr->shape();
TF_ASSERT_OK(
transposer.UpdateFaninEdgesWithOp(&context, {0, 1}, fbng, kOpTranspose));
TF_ASSERT_OK(context.graph_view->GetMutationBuilder()->Apply());
auto* transpose_node1 = context.graph_view->GetNode(
"fused_batch_norm_grad-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(transpose_node1, nullptr);
VerifyShapeAttributeMatch(transpose_node1, expected_shape.DebugString());
auto* transpose_node2 = context.graph_view->GetNode(
"fused_batch_norm_grad-1-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(transpose_node2, nullptr);
VerifyShapeAttributeMatch(transpose_node2, expected_shape.DebugString());
auto* const_node1 = context.graph_view->GetNode(
"fused_batch_norm_grad-0-PermConstNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(const_node1, nullptr);
auto* const_node2 = context.graph_view->GetNode(
"fused_batch_norm_grad-1-PermConstNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(const_node2, nullptr);
auto* y_backprop = context.graph_view->GetNode("y_backprop");
ASSERT_NE(y_backprop, nullptr);
ASSERT_EQ(transpose_node1->NumRegularFanins(), 2);
VerifyRegularFaninMatch(transpose_node1, 0, y_backprop->GetName(), 0);
VerifyRegularFaninMatch(transpose_node1, 1, const_node1->GetName(), 0);
auto* x = context.graph_view->GetNode("x");
ASSERT_NE(x, nullptr);
ASSERT_EQ(transpose_node2->NumRegularFanins(), 2);
VerifyRegularFaninMatch(transpose_node2, 0, x->GetName(), 0);
VerifyRegularFaninMatch(transpose_node2, 1, const_node2->GetName(), 0);
auto* updated_fbng = context.graph_view->GetNode("fused_batch_norm_grad");
ASSERT_NE(updated_fbng, nullptr);
ASSERT_EQ(updated_fbng->NumRegularFanins(), 5);
VerifyRegularFaninMatch(updated_fbng, 0, transpose_node1->GetName(), 0);
VerifyRegularFaninMatch(updated_fbng, 1, transpose_node2->GetName(), 0);
}
TEST_F(TransposerTest, UpdateFanoutEdgesTranspose) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
TransposeContext context;
TF_ASSERT_OK(CreateSimpleConv2DGraph(&item.graph));
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
TransposerImpl transposer;
TensorShapeProto expected_original_shape =
MakeTensorShapeFromDimensions({32, 5, 3, 16});
TensorShapeProto expected_updated_shape =
MakeTensorShapeFromDimensions({32, 16, 5, 3});
auto* conv2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(conv2d, nullptr);
VerifyShapeAttributeMatch(conv2d, 0, expected_original_shape.DebugString());
TF_ASSERT_OK(
transposer.UpdateFanoutEdgesWithOp(&context, {0}, conv2d, kOpTranspose));
TF_ASSERT_OK(context.graph_view->GetMutationBuilder()->Apply());
auto* updated_conv2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(updated_conv2d, nullptr);
VerifyShapeAttributeMatch(updated_conv2d, 0,
expected_updated_shape.DebugString());
auto* transpose_node = context.graph_view->GetNode(
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(transpose_node, nullptr);
VerifyShapeAttributeMatch(transpose_node, 0,
expected_original_shape.DebugString());
auto* const_node = context.graph_view->GetNode(
"conv2d-0-0-PermConstNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(const_node, nullptr);
ASSERT_EQ(transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMat
#ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_SCOPED_ALLOCATOR_OPTIMIZER_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_SCOPED_ALLOCATOR_OPTIMIZER_H_
#include <atomic>
#include <unordered_set>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
class Graph;
namespace grappler {
class GraphProperties;
class NodeMap;
class ScopedAllocatorOptimizer;
class ScopedAllocatorOptimizer : public GraphOptimizer {
public:
ScopedAllocatorOptimizer(RewriterConfig::Toggle opt_level,
const ScopedAllocatorOptions& opts);
~ScopedAllocatorOptimizer() override;
string name() const override { return "scoped_allocator_optimizer"; }
bool UsesFunctionLibrary() const override { return true; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override;
typedef absl::flat_hash_map<string, std::vector<NodeDef*>> DevOpOccurrences;
typedef absl::flat_hash_map<string, DevOpOccurrences> GraphOpOccurrences;
typedef absl::flat_hash_set<string> OpNameSet;
Status ProcessGraphDef(GraphDef* graph,
const GraphProperties& graph_properties);
void FindOpOccurrences(GraphDef* graph, const OpNameSet& op_names,
GraphOpOccurrences* occs);
int NewScopedAllocatorId(int num_fields);
Status NewIdentityId(int* id);
NodeMap* node_map() { return node_map_.get(); }
const absl::flat_hash_set<string>& repeated_outputs() {
return repeated_outputs_;
}
static void ExtendNodeAttr(StringPiece name, const std::vector<int32>& values,
NodeDef* node_def);
class Rewriter {
public:
virtual ~Rewriter() {}
virtual Status Rewrite(ScopedAllocatorOptimizer* paopti,
int64_t invocation_count, GraphDef* graph,
const string& op_name,
const std::vector<NodeDef*>& nodes,
bool* applied) = 0;
void SetGraphProperties(const GraphProperties& graph_properties) {
graph_properties_ = &graph_properties;
CHECK(graph_properties_);
}
protected:
const GraphProperties* graph_properties_;
};
private:
Rewriter* GetRewriter(const string& op_name);
Status OrderNodeSet(std::vector<NodeDef*>* nodes) const;
RewriterConfig::Toggle opt_level_;
std::unordered_set<string> nodes_to_preserve_;
OpNameSet op_name_set_;
absl::flat_hash_map<string, Rewriter*> rewriters_;
std::vector<Rewriter*> to_delete_;
int next_sa_id_ = 1;
int next_identity_id_ = 1;
std::unique_ptr<NodeMap> node_map_;
absl::flat_hash_set<string> repeated_outputs_;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/scoped_allocator_optimizer.h"
#include "tensorflow/core/common_runtime/scoped_allocator.h"
#include "tensorflow/core/common_runtime/scoped_allocator_mgr.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils/frame.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#define LOG_WARNING_AND_RETURN_IF_ERROR(...) \
do { \
const ::tensorflow::Status _status = (__VA_ARGS__); \
if (TF_PREDICT_FALSE(!_status.ok())) { \
LOG(WARNING) << "error: " << _status; \
return _status; \
} \
} while (0)
namespace tensorflow {
namespace grappler {
namespace {
const char kScopedAllocatorAttrName[] = "_scoped_allocator";
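// Returns true if node_name names an instance of op_name once the scope
// prefix (up to the last '/') and any trailing "_<digits>" instance suffix
// are stripped, e.g. HasOpName("a/b/conv2d_12", "conv2d") is true.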
bool HasOpName(const string& node_name, const string& op_name) {
size_t begin = node_name.rfind('/');
if (begin == string::npos) {
begin = 0;
} else {
++begin;
}
size_t end = node_name.rfind('_');
if (end != string::npos) {
size_t p = end + 1;
while (p < node_name.size()) {
if (!isdigit(node_name[p])) {
end = node_name.size();
break;
}
++p;
}
} else {
end = node_name.size();
}
return node_name.substr(begin, end - begin) == op_name;
}
Status GetOutputDataType(
const std::vector<OpInfo::TensorProperties>& output_props, int output_index,
DataType* dtype) {
int output_props_size = output_props.size();
if (output_index >= output_props_size) {
return errors::Internal("Invalid output index ", output_index,
" size of output_props ", output_props.size());
}
*dtype = output_props[output_index].dtype();
return absl::OkStatus();
}
Status CheckTypesAndGetShapes(const GraphProperties& graph_properties,
const std::vector<NodeDef*>& ops, DataType* type,
std::vector<TensorShape>* shapes) {
VLOG(1) << "CheckTypesAndGetShapes";
*type = DT_INVALID;
for (NodeDef* n : ops) {
AttrSlice n_attrs = AttrSlice(*n);
DataType dtype;
LOG_WARNING_AND_RETURN_IF_ERROR(GetNodeAttr(n_attrs, "T", &dtype));
VLOG(2) << "op " << n->name() << " has type " << dtype << " shapes.size() "
<< shapes->size();
if (!graph_properties.HasOutputProperties(n->name())) {
LOG(ERROR) << "Node " << n->DebugString() << " lacks output shape.";
return errors::Aborted("Node ", n->name(), " lacks output shape.");
}
const std::vector<OpInfo::TensorProperties>& prop_list =
graph_properties.GetOutputProperties(n->name());
if (prop_list.size() != 1) {
return errors::Aborted("Node ", n->name(),
" does not have exactly one output as expected "
"by ScopedAllocatorOptimizer");
}
const OpInfo::TensorProperties& props = prop_list[0];
if (shapes->empty()) {
*type = props.dtype();
} else if (*type != props.dtype()) {
return errors::Aborted("Group ops don't all have same type");
}
if (*type != dtype) {
return errors::Internal(
"Type mismatch: type in op attr = ", DataTypeString(dtype),
", type in output props = ", DataTypeString(*type));
}
if (!TensorShape::IsValid(props.shape()) || props.shape().unknown_rank()) {
return errors::Aborted("Complete shape not known for ", n->name());
}
VLOG(2) << "Adding shape " << props.shape().DebugString();
shapes->push_back(TensorShape(props.shape()));
}
return absl::OkStatus();
}
struct InputDesc {
NodeDef* from_node_def;
int output_slot;
NodeDef* to_node_def;
InputDesc(NodeDef* f, int os, NodeDef* t)
: from_node_def(f), output_slot(os), to_node_def(t) {}
};
void RemoveNode(NodeDef* nd, GraphDef* graph, NodeMap* node_map) {
node_map->RemoveNode(nd->name());
protobuf::RepeatedPtrField<NodeDef>* nodes = graph->mutable_node();
for (int i = 0; i < nodes->size(); ++i) {
if (nd->name() == (*nodes)[i].name()) {
nodes->SwapElements(i, nodes->size() - 1);
nodes->RemoveLast();
return;
}
}
LOG(FATAL) << "Failed to find node " << nd->name() << " in graph";
}
Status RemoveEdge(const string& input_edge_name, const string& from_node_name,
NodeDef* to_node, NodeMap* node_map) {
protobuf::RepeatedPtrField<string>* inputs = to_node->mutable_input();
int edge_index = -1;
for (edge_index = 0; edge_index < inputs->size(); ++edge_index) {
VLOG(2) << " consider edge " << (*inputs)[edge_index];
if ((*inputs)[edge_index] == input_edge_name) {
break;
}
}
if (edge_index >= inputs->size()) {
return errors::Internal("Could not find input name ", input_edge_name,
" at node ", to_node->name());
}
if (node_map) {
node_map->RemoveOutput(from_node_name, to_node->name());
}
inputs->DeleteSubrange(edge_index, 1);
return absl::OkStatus();
}
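// Interposes an Identity when the producer is a Const, an Exit, or an output
// consumed more than once within the group: such tensors cannot be produced
// directly into the ScopedAllocator's backing buffer, so the Identity
// supplies a copy that can.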
Status MaybeRewriteInput(ScopedAllocatorOptimizer* sa_opti,
int64_t invocation_count, GraphDef* graph,
NodeMap* node_map, const DataType& dtype,
NodeDef* input, const string& edge_name,
int output_index, NodeDef* op, NodeDef** new_input,
int* new_output_index, bool* rewrite) {
*rewrite = IsConstant(*input) || IsExit(*input) ||
(sa_opti->repeated_outputs().find(edge_name) !=
sa_opti->repeated_outputs().end());
if (!(*rewrite)) {
*new_input = input;
*new_output_index = output_index;
return absl::OkStatus();
}
int unique_id;
LOG_WARNING_AND_RETURN_IF_ERROR(sa_opti->NewIdentityId(&unique_id));
string identity_name = strings::StrCat("scoped_allocator_identity_",
unique_id, "_", invocation_count);
NodeDefBuilder identity_builder(identity_name, "Identity");
identity_builder.Device(op->device());
identity_builder.Attr("T", dtype);
identity_builder.Input(
NodeDefBuilder::NodeOut(input->name(), output_index, dtype));
NodeDef* identity = graph->add_node();
LOG_WARNING_AND_RETURN_IF_ERROR(identity_builder.Finalize(identity));
node_map->AddNode(identity_name, identity);
node_map->AddOutput(input->name(), identity_name);
node_map->UpdateInput(op->name(), input->name(), identity_name);
*op->mutable_input(0) = identity_name;
*new_input = identity;
*new_output_index = 0;
VLOG(1) << "Rewrite input " << edge_name << " op " << op->name()
<< " old output index " << output_index << " with identity "
<< identity_name << " new output index 0";
return absl::OkStatus();
}
Status GetInputs(ScopedAllocatorOptimizer* sa_opti, int64_t invocation_count,
GraphDef* graph, const GraphProperties& graph_properties,
NodeMap* node_map, const std::vector<NodeDef*>& ops,
DataType dtype, std::vector<InputDesc>* inputs) {
VLOG(1) << "Getinputs";
for (NodeDef* n : ops) {
NodeDef* inode = nullptr;
int output_index = 0;
DataType inode_dtype = DT_INVALID;
VLOG(2) << "for node " << n->name();
for (const auto& input_name : n->input()) {
if (!IsControlInput(input_name)) {
if (inode) {
return errors::Internal("Found more than one input for node ",
n->name());
}
ParseNodeName(input_name, &output_index);
inode = node_map->GetNode(input_name);
if (inode == nullptr) {
return errors::Internal("Did not find node ", input_name);
}
VLOG(2) << "inode " << inode->DebugString() << " output_index "
<< output_index;
bool rewrite;
LOG_WARNING_AND_RETURN_IF_ERROR(MaybeRewriteInput(
sa_opti, invocation_count, graph, node_map, dtype, inode,
input_name, output_index, n, &inode, &output_index, &rewrite));
if (rewrite) {
inode_dtype = dtype;
}
VLOG(2) << "inode after rewrite " << inode->DebugString()
<< " output_index " << output_index;
}
}
if (inode == nullptr) {
return errors::Internal("Did not find node");
}
if (inode_dtype == DT_INVALID) {
if (!graph_properties.HasOutputProperties(inode->name())) {
return errors::Internal("Input node ", inode->name(),
" does not have output properties");
}
const auto& inode_output_props =
graph_properties.GetOutputProperties(inode->name());
LOG_WARNING_AND_RETURN_IF_ERROR(
GetOutputDataType(inode_output_props, output_index, &inode_dtype));
}
if (inode_dtype != dtype) {
return errors::Aborted("ScopedAllocatorOptimizer expected input type ",
dtype, " but found ", inode_dtype);
}
inputs->emplace_back(inode, output_index, n);
}
return absl::OkStatus();
}
Status GetDataInputs(GraphDef* graph, NodeMap* node_map, NodeDef* op,
std::vector<InputDesc>* inputs) {
VLOG(2) << "GetDataInputs for node " << op->name();
NodeDef* inode = nullptr;
int output_index = 0;
for (const auto& input_name : op->input()) {
if (IsControlInput(input_name)) {
continue;
}
ParseNodeName(input_name, &output_index);
inode = nullptr;
inode = node_map->GetNode(input_name);
if (inode == nullptr) {
return errors::Internal("Did not find node ", input_name);
}
VLOG(2) << "inode " << inode->DebugString() << " output_index "
<< output_index;
inputs->emplace_back(inode, output_index, op);
}
return absl::OkStatus();
}
void DumpGraphToVLOG(const GraphDef& graph, int log_level) {
if (VLOG_IS_ON(log_level)) {
for (const auto& line : str_util::Split(graph.DebugString(), "\n\r")) {
VLOG(log_level) << line;
}
}
}
}
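// The "_scoped_allocator" attribute accumulates flat (output_slot, scope_id)
// pairs; ExtendNodeAttr appends to an existing list rather than replacing it,
// which lets a single node feed more than one scoped allocator.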
void ScopedAllocatorOptimizer::ExtendNodeAttr(StringPiece name,
const std::vector<int32>& values,
NodeDef* node_def) {
if (HasNodeAttr(*node_def, name)) {
VLOG(2) << "extending";
AttrValue* existing = &(*node_def->mutable_attr())[string(name)];
for (int32_t i : values) {
existing->mutable_list()->add_i(i);
}
} else {
VLOG(2) << "setting new attr value";
AddNodeAttr(name, values, node_def);
}
}
class UnaryElementwiseRewriter : public ScopedAllocatorOptimizer::Rewriter {
public:
~UnaryElementwiseRewriter() override {}
Status CheckUsesAllocatorAttributes(const std::vector<InputDesc>& inputs) {
for (const InputDesc& nd : inputs) {
if (IsConstant(*nd.from_node_def)) {
return errors::Aborted(
"Abandoning ScopedAllocatorOptimizer because input ",
nd.from_node_def->name(),
" is a Const op which does not use AllocatorAttributes");
}
}
return absl::OkStatus();
}
Status CheckExistingScopedAllocator(const std::vector<InputDesc>& inputs) {
for (const InputDesc& nd : inputs) {
VLOG(2) << "get attrs for " << nd.from_node_def->name();
AttrSlice n_attrs = AttrSlice(*nd.from_node_def);
std::vector<int32> scope_ids;
Status ss = GetNodeAttr(n_attrs, kScopedAllocatorAttrName, &scope_ids);
if (ss.ok() && scope_ids[0] == nd.output_slot) {
LOG(INFO) << "Abandoning ScopedAllocatorOptimizer because input "
<< nd.from_node_def->name() << " output " << scope_ids[0]
<< " is already assigned to scope_id " << scope_ids[1];
return errors::Aborted(
"Abandoning ScopedAllocatorOptimizer because input ",
nd.from_node_def->name(), " output ", scope_ids[0], " is already ",
"assigned to scope_id ", scope_ids[1]);
}
}
return absl::OkStatus();
}
Status CheckInternalDataDependency(const std::set<string>& op_set,
const std::vector<InputDesc>& inputs) {
for (const InputDesc& nd : inputs) {
if (op_set.find(nd.from_node_def->name()) != op_set.end()) {
if (nd.output_slot != tensorflow::Graph::kControlSlot) {
return errors::Aborted("Data edge exists between ",
nd.from_node_def->name(),
" and another "
"node in the set");
}
}
}
return absl::OkStatus();
}
void ClearInternalControlInputs(const std::set<string>& op_set,
const std::vector<NodeDef*>& ops,
NodeMap* node_map) {
for (NodeDef* n : ops) {
for (const auto& input_name : n->input()) {
if (IsControlInput(input_name)) {
int position = 0;
string input_node_name = ParseNodeName(input_name, &position);
CHECK_EQ(position, -1);
if (op_set.find(input_node_name) != op_set.end()) {
VLOG(1) << "Remove control output from " << input_node_name
<< " via edge " << input_name << " to " << n->name();
TF_CHECK_OK(RemoveEdge(input_name, input_node_name, n, node_map));
}
}
}
}
}
Status AnalyzeInputs(ScopedAllocatorOptimizer* sa_opti,
int64_t invocation_count, GraphDef* graph,
NodeMap* node_map, const std::vector<NodeDef*>& ops,
const std::set<string>& op_instance_names,
string* device_name, DataType* dtype,
std::vector<TensorShape>* input_shapes,
std::vector<InputDesc>* inputs, TensorShape* sa_shape) {
CHECK(graph_properties_);
LOG_WARNING_AND_RETURN_IF_ERROR(
CheckTypesAndGetShapes(*graph_properties_, ops, dtype, input_shapes));
LOG_WARNING_AND_RETURN_IF_ERROR(
GetInputs(sa_opti, invocation_count, graph, *graph_properties_,
sa_opti->node_map(), ops, *dtype, inputs));
LOG_WARNING_AND_RETURN_IF_ERROR(CheckUsesAllocatorAttributes(*inputs));
LOG_WARNING_AND_RETURN_IF_ERROR(CheckExistingScopedAllocator(*inputs));
LOG_WARNING_AND_RETURN_IF_ERROR(
CheckInternalDataDependency(op_instance_names, *inputs));
ClearInternalControlInputs(op_instance_names, ops, node_map);
*device_name = ops[0]->device();
CHECK(!device_name->empty());
CHECK(!input_shapes->empty());
CHECK_EQ(0, Allocator::kAllocatorAlignment % DataTypeSize(*dtype))
<< "ScopedAllocatorOptimizer only applies to types that evenly "
<< "divide kAllocatorAlignment";
std::vector<ScopedAllocator::Field> sa_fields;
int64_t num_bytes = ScopedAllocatorMgr::PopulateFields(
/*scope_id=*/0, *input_shapes, *dtype, &sa_fields);
int64_t num_elts = num_bytes / DataTypeSize(*dtype);
VLOG(2) << "num_bytes " << num_bytes << " num_elts=" << num_elts;
*sa_shape = TensorShape({num_elts});
return absl::OkStatus();
}
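// Breadth-first traversal of the fanout of `source_nodes`, stopping at nodes
// that modify frame info (Enter/Exit/NextIteration) so the walk stays within
// the current frame.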
Status TransitiveFanoutWithinFrame(
GraphDef* graph, NodeMap* node_map,
const std::vector<const NodeDef*>& source_nodes,
absl::flat_hash_set<const NodeDef*>* fanout) {
std::deque<const NodeDef*> queue(source_nodes.begin(), source_nodes.end());
absl::flat_hash_set<const NodeDef*> visited;
while (!queue.empty()) {
const NodeDef* node = queue.front();
queue.pop_front();
if (!visited.insert(node).second) {
continue;
}
fanout->insert(node);
for (const NodeDef* output : node_map->GetOutputs(node->name())) {
if (!ModifiesFrameInfo(*output)) {
queue.push_back(output);
}
VLOG(2) << "TransitiveFanout parent: " << node->name()
<< " child: " << output->name() << " of type " << output->op();
}
}
return absl::OkStatus();
}
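// Creates the _ScopedAllocator node and gives each input producer a control
// edge from it, so the allocator runs before any producer writes its slice.
// One extra control edge *into* the allocator, taken from an upstream node
// outside the computed fanout (to avoid creating a cycle), delays allocation
// until the surrounding graph is underway.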
Status ConstructScopedAllocatorNode(
ScopedAllocatorOptimizer* sa_opti, GraphDef* graph, NodeMap* node_map,
const std::vector<NodeDef*>& ops, const string& device_name,
DataType dtype, int sa_id, const string& sa_name,
const std::vector<TensorShape>& input_shapes,
const std::vector<InputDesc>& inputs, const TensorShape& sa_shape) {
VLOG(2) << "ConstructScopedAllocatorNode " << sa_name;
NodeDefBuilder sa_builder(sa_name, "_ScopedAllocator");
sa_builder.Device(device_name);
sa_builder.Attr("sa_name", sa_name);
sa_builder.Attr("T", dtype);
sa_builder.Attr("id", sa_id);
sa_builder.Attr("shapes", input_shapes);
sa_builder.Attr("shape", sa_shape);
sa_builder.Attr("expected_call_count", static_cast<int64_t>(ops.size()));
NodeDef* sa_node = graph->add_node();
LOG_WARNING_AND_RETURN_IF_ERROR(sa_builder.Finalize(sa_node));
node_map->AddNode(sa_name, sa_node);
std::vector<const NodeDef*> fanout_sources;
fanout_sources.reserve(inputs.size());
for (const auto& input : inputs) {
fanout_sources.push_back(input.from_node_def);
}
absl::flat_hash_set<const NodeDef*> fanout;
TF_RETURN_IF_ERROR(
TransitiveFanoutWithinFrame(graph, node_map, fanout_sources, &fanout));
for (int i = 0, end = inputs.size(); i < end; ++i) {
auto& nd = inputs[i];
if (IsArg(*nd.from_node_def)) {
return errors::Aborted(
"ScopedAllocatorOptimizer does not work well when the op inputs "
"are _Arg ops; skipping this optimizer for this function");
}
VLOG(2) << "To input " << i << ": " << nd.from_node_def->name()
<< " add control input "
<< "^" << sa_name;
nd.from_node_def->add_input(strings::StrCat("^", sa_name));
ScopedAllocatorOptimizer::ExtendNodeAttr(kScopedAllocatorAttrName,
{nd.output_slot, sa_id + 1 + i},
nd.from_node_def);
node_map->AddOutput(sa_name, nd.from_node_def->name());
}
bool added_delay_edge = false;
for (auto& nd : inputs) {
std::vector<InputDesc> inputs_to_first;
LOG_WARNING_AND_RETURN_IF_ERROR(GetDataInputs(
graph, sa_opti->node_map(), nd.from_node_def, &inputs_to_first));
for (int i = 0, end = inputs_to_first.size(); i < end; ++i) {
if (fanout.find(inputs_to_first[i].from_node_def) != fanout.end()) {
VLOG(2) << "Found node " << inputs_to_first[i].from_node_def->name()
<< " in the fanout of " << sa_name;
continue;
}
sa_node->add_input(
strings::StrCat("^", inputs_to_first[i].from_node_def->name()));
node_map->AddOutput(inputs_to_first[i].from_node_def->name(), sa_name);
added_delay_edge = true;
VLOG(2) << "Adding control dependency from "
<< inputs_to_first[i].from_node_def->name() << " to "
<< sa_node->name();
break;
}
if (added_delay_edge) {
break;
}
}
if (!added_delay_edge) {
LOG(WARNING) << "Found no node from which a control edge can be added to "
"scoped allocator node. If you run into issues with "
"graphs that contain control flow, turn off the "
"ScopedAllocatorOptimizer and file a bug.";
}
return absl::OkStatus();
}
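  // Creates the _ScopedAllocatorConcat node that gathers all data inputs of
  // the old ops into the backing tensor. Control inputs arriving from outside
  // the op set are remembered in sac_ctl_inputs for rewiring onto the concat;
  // a data edge between two ops inside the set aborts the rewrite.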
Status BuildSAConcatNode(GraphDef* graph, NodeMap* node_map,
const std::vector<NodeDef*>& ops,
const std::set<string>& op_instance_names,
const string& device_name, DataType dtype, int sa_id,
const string& sa_name, const string& sac_name,
const TensorShape& sa_shape,
std::vector<NodeDefBuilder::NodeOut>* sac_inputs) {
VLOG(2) << "BuildSAConcatNode " << sac_name;
absl::flat_hash_map<string, string> sac_ctl_inputs;
for (int i = 0, end = ops.size(); i < end; ++i) {
NodeDef* old_op = ops[i];
for (const string& old_op_input : old_op->input()) {
int position = 0;
string input_name = ParseNodeName(old_op_input, &position);
if (position == -1) {
if (op_instance_names.find(old_op_input) == op_instance_names.end()) {
sac_ctl_inputs.emplace(old_op_input, input_name);
}
} else {
if (op_instance_names.find(old_op_input) != op_instance_names.end()) {
LOG(ERROR) << "Data edge between " << old_op_input << " and "
<< old_op->name() << " cannot build ScopedAllocator.";
return errors::Aborted("Data edge between ", old_op_input, " and ",
old_op->name(),
" cannot build ScopedAllocator.");
}
sac_inputs->push_back(
NodeDefBuilder::NodeOut(old_op_input, 0, dtype));
}
VLOG(3) << "from op " << i << ": " << old_op->name()
<< " sac_inputs append " << old_op_input;
}
}
NodeDefBuilder sac_builder(sac_name, "_ScopedAllocatorConcat");
VLOG(2) << "New sac_name " << sac_name << " shape "
<< sa_shape.DebugString();
sac_builder.Device(device_name);
sac_builder.Attr("sa_name", sa_name);
sac_builder.Attr("id", sa_id);
sac_builder.Attr("T", dtype);
sac_builder.Attr("shape", sa_shape);
sac_builder.Attr("N", static_cast<int>(sac_inputs->size()));
sac_builder.Input(NodeDefBuilder::NodeOut(sa_name, 0, dtype));
sac_builder.Input(*sac_inputs);
  NodeDef* sac_node = graph->add_node(); | #include "tensorflow/core/grappler/optimizers/scoped_allocator_optimizer.h"
#include <unordered_set>
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace grappler {
namespace {
class ScopedAllocatorOptimizerTest : public ::testing::Test {
public:
std::unique_ptr<Session> CreateSession(const GraphDef& graph,
const ConfigProto& config) {
SessionOptions options;
options.config = config;
(*options.config.mutable_device_count())["CPU"] = 2;
Session* session = NewSession(options);
TF_CHECK_OK(session->Create(graph));
return std::unique_ptr<Session>(session);
}
std::vector<Tensor> EvaluateNodes(const GraphDef& graph,
const std::vector<string>& fetch) {
SessionOptions options;
std::unique_ptr<Session> session(NewSession(options));
TF_CHECK_OK(session->Create(graph));
RunOptions run_options;
std::vector<Tensor> output_tensors;
TF_CHECK_OK(
session->Run(run_options, {}, fetch, fetch, &output_tensors, nullptr));
TF_CHECK_OK(session->Close());
return output_tensors;
}
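  // Builds a graph with two independent Abs ops fed by Add nodes; when
  // `forward` is true, Identity nodes are interposed so the Abs inputs are
  // forwarded tensors rather than direct Add outputs.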
void BuildAbsGraph(GraphDef* graph_def, bool forward) {
Scope s = Scope::NewRootScope();
s = s.WithDevice("/job:localhost/replica:0/task:0/device:CPU:0");
Output a =
ops::Const<float>(s.WithOpName("a"), {1.0, 0.0, 0.0, -1.0}, {2, 2});
Output b =
ops::Const<float>(s.WithOpName("b"), {1.0, -2.0, 3.0, 4.0}, {2, 2});
Output c =
ops::Const<float>(s.WithOpName("c"), {-5.0, -2.0, 0.0, -2.0}, {2, 2});
Output s1 = ops::Add(s.WithOpName("s1"), a, b);
Output s2 = ops::Add(s.WithOpName("s2"), b, c);
Output int1, int2;
if (forward) {
int1 = ops::Identity(s.WithOpName("i1"), s1);
int2 = ops::Identity(s.WithOpName("i2"), s2);
} else {
int1 = s1;
int2 = s2;
}
Output a1 = ops::Abs(s.WithOpName("a1"), int1);
Output a2 = ops::Abs(s.WithOpName("a2"), int2);
Output r1 = ops::Reshape(s.WithOpName("r1"), a1, {1, 4});
Output r2 = ops::Reshape(s.WithOpName("r2"), a2, {4, 1});
TF_CHECK_OK(s.ToGraphDef(graph_def));
}
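  // Builds three Abs ops whose inputs depend on one another (a3 consumes
  // s1 = b + c); used to check that the _ScopedAllocator's delay control edge
  // is taken from a node outside the fanout of the inputs.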
void BuildAbsGraphWithInputDependencies(GraphDef* graph_def) {
Scope s = Scope::NewRootScope();
s = s.WithDevice("/job:localhost/replica:0/task:0/device:CPU:0");
Output a = ops::Placeholder(s.WithOpName("a"), DT_FLOAT,
ops::Placeholder::Shape({2, 2}));
Output b = ops::Placeholder(s.WithOpName("b"), DT_FLOAT,
ops::Placeholder::Shape({2, 2}));
Output c = ops::Placeholder(s.WithOpName("c"), DT_FLOAT,
ops::Placeholder::Shape({2, 2}));
Output s1 = ops::Add(s.WithOpName("s1"), b, c);
Output a1 = ops::Abs(s.WithOpName("a1"), a);
Output a2 = ops::Abs(s.WithOpName("a2"), b);
Output a3 = ops::Abs(s.WithOpName("a3"), s1);
Output r1 = ops::Reshape(s.WithOpName("r1"), a1, {1, 4});
Output r2 = ops::Reshape(s.WithOpName("r2"), a2, {4, 1});
Output r3 = ops::Reshape(s.WithOpName("r3"), a3, {4, 1});
TF_CHECK_OK(s.ToGraphDef(graph_def));
}
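  // Builds two Abs ops that each have an incoming control edge (from ctl1 and
  // ctl2) and an outgoing control edge (to ctl3 and ctl4), to verify that
  // control edges are rewired onto the concat and split nodes.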
void BuildAbsGraphWithInputAndOutputControlEdges(GraphDef* graph_def) {
Scope s = Scope::NewRootScope();
s = s.WithDevice("/job:localhost/replica:0/task:0/device:CPU:0");
Output a = ops::Placeholder(s.WithOpName("a"), DT_FLOAT,
ops::Placeholder::Shape({2, 2}));
Output b = ops::Placeholder(s.WithOpName("b"), DT_FLOAT,
ops::Placeholder::Shape({2, 2}));
Output ctl1 = ops::Placeholder(s.WithOpName("ctl1"), DT_FLOAT,
ops::Placeholder::Shape({2, 2}));
Output ctl2 = ops::Placeholder(s.WithOpName("ctl2"), DT_FLOAT,
ops::Placeholder::Shape({2, 2}));
Output a1 = ops::Abs(s.WithOpName("a1").WithControlDependencies({ctl1}), a);
Output a2 = ops::Abs(s.WithOpName("a2").WithControlDependencies({ctl2}), b);
Output o1 = ops::Reshape(s.WithOpName("o1"), a1, {1, 4});
Output o2 = ops::Reshape(s.WithOpName("o2"), a2, {4, 1});
Output ctl3 =
ops::Const<float>(s.WithOpName("ctl3").WithControlDependencies({a1}),
{0.0, 0.0, 0.0, 0.0}, {2, 2});
Output ctl4 =
ops::Const<float>(s.WithOpName("ctl4").WithControlDependencies({a2}),
{0.0, 0.0, 0.0, 0.0}, {2, 2});
TF_CHECK_OK(s.ToGraphDef(graph_def));
}
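  // Builds Abs ops in both the root scope and a "sub" scope so the rewrite is
  // exercised on ops spread across name scopes.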
void BuildGraphWithMultipleScopes(GraphDef* graph_def) {
Scope root_scope = Scope::NewRootScope();
root_scope =
root_scope.WithDevice("/job:localhost/replica:0/task:0/device:CPU:0");
Output a = ops::Const<float>(root_scope.WithOpName("a"),
{1.0, 0.0, 0.0, -1.0}, {2, 2});
Output b = ops::Const<float>(root_scope.WithOpName("b"),
{1.0, -2.0, 3.0, 4.0}, {2, 2});
Output c = ops::Const<float>(root_scope.WithOpName("c"),
{-5.0, -2.0, 0.0, -2.0}, {2, 2});
Output s1 = ops::Add(root_scope.WithOpName("s1"), a, b);
Output s2 = ops::Add(root_scope.WithOpName("s2"), b, c);
Output a1 = ops::Abs(root_scope.WithOpName("a1"), s1);
Output a2 = ops::Abs(root_scope.WithOpName("a2"), s2);
Output r1 = ops::Reshape(root_scope.WithOpName("r1"), a1, {1, 4});
Output r2 = ops::Reshape(root_scope.WithOpName("r2"), a2, {4, 1});
Scope sub_scope = root_scope.NewSubScope("sub");
Output s3 = ops::Add(sub_scope.WithOpName("s3"), a, b);
Output a3 = ops::Abs(sub_scope.WithOpName("a3"), s3);
Output a4 = ops::Abs(sub_scope.WithOpName("a4"), s2);
Output r3 = ops::Reshape(sub_scope.WithOpName("r3"), a3, {1, 4});
Output r4 = ops::Reshape(sub_scope.WithOpName("r4"), a4, {4, 1});
TF_CHECK_OK(root_scope.ToGraphDef(graph_def));
}
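  // Builds two Abs ops fed directly by Const nodes; the optimizer must not
  // hand a Const output to the scoped allocator, so it is expected to insert
  // Identity copies (see the ConstInput test).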
void BuildConstGraph(GraphDef* graph_def, bool forward) {
Scope s = Scope::NewRootScope();
s = s.WithDevice("/job:localhost/replica:0/task:0/device:CPU:0");
Output c1 =
ops::Const<float>(s.WithOpName("c1"), {1.0, 0.0, 0.0, -1.0}, {2, 2});
Output c2 =
ops::Const<float>(s.WithOpName("c2"), {1.0, -2.0, 3.0, 4.0}, {2, 2});
Output a1 = ops::Abs(s.WithOpName("a1"), c1);
Output a2 = ops::Abs(s.WithOpName("a2"), c2);
Output r1 = ops::Reshape(s.WithOpName("r1"), a1, {1, 4});
Output r2 = ops::Reshape(s.WithOpName("r2"), a2, {4, 1});
TF_CHECK_OK(s.ToGraphDef(graph_def));
}
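  // Gives every Add and Abs node a known 2x2 _output_shapes attr; the
  // optimizer needs static shapes to size the backing tensor.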
void SetShapes(GraphDef* graph_def) {
TensorShapeProto shape_proto;
shape_proto.add_dim()->set_size(2);
shape_proto.add_dim()->set_size(2);
for (NodeDef& n : *graph_def->mutable_node()) {
if (n.op() == "Add" || n.op() == "Abs") {
AddNodeAttr("_output_shapes", {shape_proto}, &n);
}
}
}
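  // Runs the graph in a session configured so that only the scoped_allocator
  // rewriter (enabled for Abs) is active and other optimizations are off.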
void ExecuteGraph(const GraphDef& graph_def,
const std::vector<string>& output_names,
std::vector<Tensor>* outputs) {
ConfigProto config;
GraphOptions* gopt = config.mutable_graph_options();
OptimizerOptions* opts = gopt->mutable_optimizer_options();
opts->set_do_common_subexpression_elimination(false);
opts->set_do_constant_folding(false);
opts->set_do_function_inlining(false);
opts->set_opt_level(OptimizerOptions::L0);
RewriterConfig* rwcfg = gopt->mutable_rewrite_options();
rwcfg->clear_optimizers();
(*rwcfg->add_optimizers()) = "scoped_allocator";
rwcfg->mutable_scoped_allocator_opts()->add_enable_op("Abs");
std::unique_ptr<Session> session(CreateSession(graph_def, config));
std::vector<std::pair<string, Tensor>> inputs;
std::vector<string> target_nodes = {};
Status s = session->Run(inputs, output_names, target_nodes, outputs);
TF_ASSERT_OK(s);
ASSERT_EQ(outputs->size(), output_names.size());
}
void ValidateValues(const std::vector<Tensor>& outputs,
const std::vector<std::vector<float>>& expected) {
for (int i = 0; i < expected.size(); ++i) {
EXPECT_EQ(expected[i].size(), outputs[i].NumElements());
for (int j = 0; j < expected[i].size(); ++j) {
EXPECT_EQ(expected[i][j], outputs[i].flat<float>()(j));
}
}
}
void GetNode(NodeMap* node_map, const string& node_name, NodeDef** node_def) {
*node_def = node_map->GetNode(node_name);
ASSERT_TRUE(*node_def);
}
NodeDef* ValidateSAControlInput(GraphDef* graph, NodeMap* node_map,
const string& node_name) {
NodeDef* node = nullptr;
GetNode(node_map, node_name, &node);
int num_control_inputs = 0;
string control_input_name;
for (const auto& input : node->input()) {
if (IsControlInput(input)) {
++num_control_inputs;
control_input_name = input;
}
}
EXPECT_EQ(num_control_inputs, 1);
NodeDef* control_input_node = nullptr;
GetNode(node_map, control_input_name, &control_input_node);
EXPECT_EQ(control_input_node->op(), "_ScopedAllocator");
return control_input_node;
}
int NumControlInputs(NodeMap* node_map, const string& node_name) {
NodeDef* node = nullptr;
GetNode(node_map, node_name, &node);
int num_control_inputs = 0;
for (const auto& input : node->input()) {
if (IsControlInput(input)) {
++num_control_inputs;
}
}
return num_control_inputs;
}
};
#ifndef ENABLE_MKL
TEST_F(ScopedAllocatorOptimizerTest, UnaryRewriteOnly) {
GrapplerItem item;
BuildAbsGraph(&item.graph, false);
SetShapes(&item.graph);
ScopedAllocatorOptions opts;
opts.add_enable_op("Abs");
ScopedAllocatorOptimizer sao(RewriterConfig::ON, opts);
ScopedAllocatorOptimizer::OpNameSet ons;
ons.insert("Abs");
GraphDef optimized_graph;
  TF_ASSERT_OK(sao.Optimize(/*cluster=*/nullptr, item, &optimized_graph));
NodeMap node_map(&optimized_graph);
NodeDef* nd = nullptr;
GetNode(&node_map, "scoped_allocator_1_1", &nd);
{
auto& nd_set = node_map.GetOutputs(nd->name());
ASSERT_EQ(3, nd_set.size());
std::unordered_set<string> expected = {"scoped_allocator_concat_1_1", "s1",
"s2"};
for (auto it : nd_set) {
ASSERT_NE(expected.find(it->name()), expected.end())
<< "Failed to find " << it->name();
}
}
{
auto& nd_set = node_map.GetOutputs("scoped_allocator_concat_1_1");
ASSERT_EQ(1, nd_set.size());
for (auto it : nd_set) {
ASSERT_EQ("scoped_allocator_1_1_Abs", it->name());
}
}
{
auto& nd_set = node_map.GetOutputs("scoped_allocator_1_1_Abs");
ASSERT_EQ(1, nd_set.size());
for (auto it : nd_set) {
ASSERT_EQ("scoped_allocator_split_1_1", it->name());
}
}
{
auto& nd_set = node_map.GetOutputs("scoped_allocator_split_1_1");
ASSERT_EQ(2, nd_set.size());
std::unordered_set<string> name_set;
for (auto it : nd_set) {
name_set.insert(it->name());
}
ASSERT_TRUE(name_set.find("r1") != name_set.end());
ASSERT_TRUE(name_set.find("r2") != name_set.end());
}
}
TEST_F(ScopedAllocatorOptimizerTest, UnaryExecute) {
GraphDef graph_def;
BuildAbsGraph(&graph_def, false);
SetShapes(&graph_def);
std::vector<Tensor> outputs;
ExecuteGraph(graph_def,
{"r1:0", "r2:0"}, &outputs);
ValidateValues(outputs, {{2, 2, 3, 3}, {4, 4, 3, 2}});
}
TEST_F(ScopedAllocatorOptimizerTest, MultipleScopes) {
GraphDef graph_def;
BuildGraphWithMultipleScopes(&graph_def);
SetShapes(&graph_def);
std::vector<Tensor> outputs;
ExecuteGraph(graph_def,
{"r1:0", "r2:0", "sub/r3:0", "sub/r4:0"},
&outputs);
ValidateValues(
outputs,
{{2, 2, 3, 3}, {4, 4, 3, 2}, {2, 2, 3, 3}, {4, 4, 3, 2}});
}
TEST_F(ScopedAllocatorOptimizerTest, Extend) {
NodeDef nd;
ScopedAllocatorOptimizer::ExtendNodeAttr("_scoped_allocator", {0, 2}, &nd);
ScopedAllocatorOptimizer::ExtendNodeAttr("_scoped_allocator", {6, 7}, &nd);
ScopedAllocatorOptimizer::ExtendNodeAttr("_scoped_allocator", {2, 3}, &nd);
VLOG(0) << "nd: " << nd.DebugString();
std::vector<int> scoped_allocator_attrs;
AttrSlice slice(nd);
Status sa_status =
GetNodeAttr(slice, "_scoped_allocator", &scoped_allocator_attrs);
for (int i : scoped_allocator_attrs) {
VLOG(0) << "extracted: " << i;
}
NodeDef nd2;
AddNodeAttr("_scoped_allocator", {0, 2}, &nd2);
AddNodeAttr("_scoped_allocator", {6, 7}, &nd2);
AddNodeAttr("_scoped_allocator", {2, 3}, &nd2);
VLOG(0) << "nd2: " << nd2.DebugString();
}
TEST_F(ScopedAllocatorOptimizerTest, ForwardInputToOutput) {
GraphDef graph_def;
BuildAbsGraph(&graph_def, true);
SetShapes(&graph_def);
std::vector<Tensor> outputs;
ExecuteGraph(graph_def, {"r1:0", "r2:0"}, &outputs);
ValidateValues(outputs, {{2, 2, 3, 3}, {4, 4, 3, 2}});
}
TEST_F(ScopedAllocatorOptimizerTest, InputDependencies) {
GrapplerItem item;
BuildAbsGraphWithInputDependencies(&item.graph);
SetShapes(&item.graph);
ScopedAllocatorOptions opts;
opts.add_enable_op("Abs");
ScopedAllocatorOptimizer sao(RewriterConfig::ON, opts);
ScopedAllocatorOptimizer::OpNameSet ons;
ons.insert("Add");
GraphDef optimized_graph;
TF_ASSERT_OK(sao.Optimize(nullptr, item, &optimized_graph));
NodeMap node_map(&optimized_graph);
NodeDef* scoped_allocator_node =
ValidateSAControlInput(&optimized_graph, &node_map, "a");
VLOG(1) << scoped_allocator_node->DebugString();
EXPECT_TRUE(ValidateSAControlInput(&optimized_graph, &node_map, "b"));
EXPECT_TRUE(ValidateSAControlInput(&optimized_graph, &node_map, "s1"));
EXPECT_EQ(scoped_allocator_node->input_size(), 1);
EXPECT_EQ(scoped_allocator_node->input(0), "^c");
}
TEST_F(ScopedAllocatorOptimizerTest, ControlEdgeRewire) {
GrapplerItem item;
BuildAbsGraphWithInputAndOutputControlEdges(&item.graph);
SetShapes(&item.graph);
LOG(INFO) << item.graph.DebugString();
ScopedAllocatorOptions opts;
opts.add_enable_op("Abs");
ScopedAllocatorOptimizer sao(RewriterConfig::ON, opts);
ScopedAllocatorOptimizer::OpNameSet ons;
ons.insert("Const");
GraphDef optimized_graph;
TF_ASSERT_OK(sao.Optimize(nullptr, item, &optimized_graph));
TF_ASSERT_OK(TopologicalSort(&optimized_graph));
NodeMap node_map(&optimized_graph);
LOG(INFO) << optimized_graph.DebugString();
NodeDef* ctl1 = nullptr;
GetNode(&node_map, "ctl1", &ctl1);
const auto& ctl1_outputs = node_map.GetOutputs("ctl1");
EXPECT_EQ(ctl1_outputs.size(), 1);
NodeDef* sa_concat = *ctl1_outputs.begin();
EXPECT_EQ(sa_concat->op(), "_ScopedAllocatorConcat");
NodeDef* ctl2 = nullptr;
GetNode(&node_map, "ctl2", &ctl2);
const auto& ctl2_outputs = node_map.GetOutputs("ctl2");
EXPECT_EQ(ctl2_outputs.size(), 1);
EXPECT_EQ(*ctl2_outputs.begin(), sa_concat);
EXPECT_EQ(NumControlInputs(&node_map, sa_concat->name()), 2);
const auto& sa_concat_outputs = node_map.GetOutputs(sa_concat->name());
EXPECT_EQ(sa_concat_outputs.size(), 1);
NodeDef* fused_abs = *sa_concat_outputs.begin();
EXPECT_EQ(NumControlInputs(&node_map, fused_abs->name()), 0);
const auto& fused_abs_outputs = node_map.GetOutputs(fused_abs->name());
EXPECT_EQ(fused_abs_outputs.size(), 1);
NodeDef* sa_split = *fused_abs_outputs.begin();
EXPECT_EQ(NumControlOutputs(*sa_split, node_map), 2);
EXPECT_EQ(NumControlInputs(&node_map, "ctl3"), 1);
EXPECT_EQ(NumControlInputs(&node_map, "ctl4"), 1);
}
TEST_F(ScopedAllocatorOptimizerTest, ConstInput) {
GrapplerItem item;
BuildConstGraph(&item.graph, false);
SetShapes(&item.graph);
ScopedAllocatorOptions opts;
opts.add_enable_op("Abs");
ScopedAllocatorOptimizer sao(RewriterConfig::ON, opts);
ScopedAllocatorOptimizer::OpNameSet ons;
ons.insert("Abs");
GraphDef optimized_graph;
  TF_ASSERT_OK(sao.Optimize(/*cluster=*/nullptr, item, &optimized_graph));
const NodeDef* sa_node = nullptr;
for (const NodeDef& node : optimized_graph.node()) {
if (node.op() == "_ScopedAllocator") {
sa_node = &node;
break;
}
}
ASSERT_NE(sa_node, nullptr);
int num_identity_ops = 0;
NodeMap node_map(&optimized_graph);
for (NodeDef* sa_output : node_map.GetOutputs(sa_node->name())) {
EXPECT_FALSE(IsConstant(*sa_output));
if (IsIdentity(*sa_output)) {
++num_identity_ops;
}
}
EXPECT_EQ(num_identity_ops, 2);
}
#endif  // ENABLE_MKL
}
}
} |
1392 | cpp | tensorflow/tensorflow | auto_mixed_precision | tensorflow/core/grappler/optimizers/auto_mixed_precision.cc | tensorflow/core/grappler/optimizers/auto_mixed_precision_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_AUTO_MIXED_PRECISION_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_AUTO_MIXED_PRECISION_H_
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace grappler {
enum class AutoMixedPrecisionMode { CUDA, BF16, CPU, FP16_CPU };
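// A Grappler pass that converts eligible fp32 ops to fp16/bf16 and inserts
// Casts at the type boundaries. Normally it runs inside Grappler's
// meta-optimizer; a minimal sketch of direct use, mirroring the unit tests
// further below:
//   AutoMixedPrecision optimizer(AutoMixedPrecisionMode::CUDA);
//   GraphDef output;
//   TF_RETURN_IF_ERROR(optimizer.Optimize(cluster, item, &output));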
class AutoMixedPrecision : public GraphOptimizer {
public:
explicit AutoMixedPrecision(
AutoMixedPrecisionMode mode = AutoMixedPrecisionMode::CUDA)
: mode_(mode) {}
~AutoMixedPrecision() override {}
string name() const override {
switch (mode_) {
case AutoMixedPrecisionMode::CUDA:
return "auto_mixed_precision";
case AutoMixedPrecisionMode::BF16:
return "auto_mixed_precision_onednn_bfloat16";
case AutoMixedPrecisionMode::CPU:
return "auto_mixed_precision_cpu";
case AutoMixedPrecisionMode::FP16_CPU:
return "auto_mixed_precision_onednn_float16";
default:
LOG(FATAL) << "Invalid value for AutoMixedPrecisionMode: "
<< static_cast<int>(mode_);
}
};
bool UsesFunctionLibrary() const override { return false; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* output) override;
private:
const AutoMixedPrecisionMode mode_;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/auto_mixed_precision.h"
#include <fstream>
#include <memory>
#include <string>
#include <unordered_map>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/costs/virtual_placer.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/auto_mixed_precision_lists.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/util/env_var.h"
#include "tensorflow/core/util/util.h"
namespace tensorflow {
namespace grappler {
namespace {
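// Returns true if TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_SIMULATE_GPU is set,
// which lets the CUDA-mode rewrite be tested without a physical GPU.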
bool ShouldSimulateGpu() {
bool is_enabled = [] {
bool ret = false;
string var;
TF_CHECK_OK(ReadStringFromEnvVar(
"TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_SIMULATE_GPU", "", &var));
TF_CHECK_OK(
ReadBoolFromEnvVar("TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_SIMULATE_GPU",
false, &ret));
return ret;
}();
return is_enabled;
}
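// fp16 is only expected to be worthwhile on GPUs with Tensor Cores, i.e.
// compute capability >= 7.0 (Volta).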
#if GOOGLE_CUDA
const std::pair<int, int> kMinGPUArch = {7, 0};
#else
const std::pair<int, int> kMinGPUArch = {0, 0};
#endif
const char kSuffix[] = "AutoMixedPrecision";
const char kCastToFp16[] = "CastToFp16";
const char kCastToBf16[] = "CastToBf16";
const char kCastToFp32[] = "CastToFp32";
#if GOOGLE_CUDA
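// Parses the "architecture" entry of the device properties ("major.minor")
// into a compute-capability pair, returning {0, 0} on any parse failure.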
std::pair<int, int> GetDeviceGPUArch(
const DeviceProperties& device_properties) {
if (device_properties.type() != "GPU") return {0, 0};
string arch_str = device_properties.environment().at("architecture");
std::vector<string> split_arch_str = str_util::Split(arch_str, '.');
if (split_arch_str.empty()) {
return {0, 0};
}
int major, minor;
if (!strings::safe_strto32(split_arch_str[0], &major)) {
return {0, 0};
}
if (split_arch_str.size() > 1) {
if (strings::safe_strto32(split_arch_str[1], &minor)) {
return {major, minor};
} else {
return {0, 0};
}
} else {
return {major, 0};
}
}
#endif
bool HasFastFP16Support(const DeviceProperties& props) {
#if GOOGLE_CUDA
return GetDeviceGPUArch(props) >= kMinGPUArch;
#elif TENSORFLOW_USE_ROCM
absl::flat_hash_set<std::string> FP16SupportedDevices = {{"gfx906"},
{"gfx908"}};
std::string gcnArchName = props.environment().at("architecture");
std::vector<std::string> gpu_arch = absl::StrSplit(gcnArchName, ":");
return !gpu_arch.empty() && FP16SupportedDevices.contains(gpu_arch[0]);
#endif
return ShouldSimulateGpu();
}
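// Identifies one type attribute of a node: a named "type" attr, a single
// element of a "list(type)" attr (attr_name plus type_index), or a fixed
// DataType hard-coded in the OpDef for arguments that are not polymorphic.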
struct TypeAttrId {
static constexpr int kSingleType = -1;
explicit TypeAttrId(const string& _attr_name, int _type_index = kSingleType)
: attr_name(_attr_name),
type_index(_type_index),
fixed_type(DT_INVALID) {}
explicit TypeAttrId(DataType _fixed_type)
: attr_name(), type_index(kSingleType), fixed_type(_fixed_type) {}
bool operator==(const TypeAttrId& other) const {
return attr_name == other.attr_name && type_index == other.type_index &&
fixed_type == other.fixed_type;
}
bool operator<(const TypeAttrId& other) const {
return std::make_tuple(attr_name, type_index, fixed_type) <
std::make_tuple(other.attr_name, other.type_index, other.fixed_type);
}
template <typename H>
friend H AbslHashValue(H h, const TypeAttrId& ta) {
return H::combine(std::move(h), ta.attr_name, ta.type_index, ta.fixed_type);
}
string DebugString() const {
if (!attr_name.empty()) {
if (type_index == kSingleType) {
return attr_name;
} else {
return strings::StrCat(attr_name, "[", type_index, "]");
}
} else {
return tensorflow::DataTypeString(fixed_type);
}
}
string attr_name;
int type_index;
DataType fixed_type;
};
DataType GetDataType(const NodeDef& node, const TypeAttrId& type_attr) {
if (type_attr.attr_name.empty()) {
return type_attr.fixed_type;
}
if (!node.attr().count(type_attr.attr_name)) {
return DT_INVALID;
}
const AttrValue& attr_value = node.attr().at(type_attr.attr_name);
if (type_attr.type_index == TypeAttrId::kSingleType) {
return attr_value.type();
} else {
if (type_attr.type_index < 0 ||
type_attr.type_index >= attr_value.list().type_size()) {
return DT_INVALID;
}
return attr_value.list().type(type_attr.type_index);
}
}
bool SetDataType(NodeDef* node, const TypeAttrId& type_attr, DataType type) {
if (type_attr.attr_name.empty() || !node->attr().count(type_attr.attr_name)) {
return false;
}
AttrValue& attr_value = node->mutable_attr()->at(type_attr.attr_name);
if (type_attr.type_index == TypeAttrId::kSingleType) {
attr_value.set_type(type);
} else {
if (type_attr.type_index < 0 ||
type_attr.type_index >= attr_value.list().type_size()) {
return false;
}
attr_value.mutable_list()->set_type(type_attr.type_index, type);
}
return true;
}
std::vector<std::pair<int, int>> ArgDefIndexes(const NodeDef& node, int arg_idx,
const OpDef::ArgDef& arg_def) {
std::vector<std::pair<int, int>> argdef_inds;
if (!arg_def.type_list_attr().empty()) {
int num_types = node.attr().at(arg_def.type_list_attr()).list().type_size();
for (int type_idx = 0; type_idx < num_types; ++type_idx) {
argdef_inds.push_back({arg_idx, type_idx});
}
} else {
int num_repeat = 1;
if (node.attr().count(arg_def.number_attr())) {
num_repeat = node.attr().at(arg_def.number_attr()).i();
}
argdef_inds.insert(argdef_inds.end(), num_repeat, {arg_idx, -1});
}
return argdef_inds;
}
std::vector<std::pair<int, int>> InputPortArgDefIndexes(const NodeDef& node,
const OpDef& op_def) {
std::vector<std::pair<int, int>> argdef_inds;
argdef_inds.reserve(op_def.input_arg_size());
for (int arg_idx = 0; arg_idx < op_def.input_arg_size(); ++arg_idx) {
const OpDef::ArgDef& arg_def = op_def.input_arg(arg_idx);
auto arg_results = ArgDefIndexes(node, arg_idx, arg_def);
argdef_inds.insert(argdef_inds.end(), arg_results.begin(),
arg_results.end());
}
return argdef_inds;
}
std::vector<std::pair<int, int>> OutputPortArgDefIndexes(const NodeDef& node,
const OpDef& op_def) {
std::vector<std::pair<int, int>> argdef_inds;
argdef_inds.reserve(op_def.output_arg_size());
for (int arg_idx = 0; arg_idx < op_def.output_arg_size(); ++arg_idx) {
const OpDef::ArgDef& arg_def = op_def.output_arg(arg_idx);
auto arg_results = ArgDefIndexes(node, arg_idx, arg_def);
argdef_inds.insert(argdef_inds.end(), arg_results.begin(),
arg_results.end());
}
return argdef_inds;
}
TypeAttrId GetTypeAttrId(const OpDef::ArgDef& arg_def, int arg_type_index) {
if (!arg_def.type_list_attr().empty()) {
return TypeAttrId(arg_def.type_list_attr(), arg_type_index);
} else if (!arg_def.type_attr().empty()) {
return TypeAttrId(arg_def.type_attr());
} else {
return TypeAttrId(arg_def.type());
}
}
std::vector<int> NonControlInputs(const NodeDef& node) {
std::vector<int> pos;
for (int i = 0; i < node.input_size(); i++) {
if (!IsControlInput(node.input(i))) {
pos.push_back(i);
}
}
return pos;
}
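// Maps each node to its type attributes, and each type attribute to the
// input and output ports whose tensors carry that type.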
class NodeTypeAttrMap {
public:
NodeTypeAttrMap() {}
explicit NodeTypeAttrMap(const GraphDef& graph) { TF_CHECK_OK(Init(graph)); }
Status Init(const GraphDef& graph) {
if (graph_ != nullptr) {
return errors::InvalidArgument("NodeTypeAttrMap is already initialized.");
}
graph_ = &graph;
function_library_.reset(
new FunctionLibraryDefinition(OpRegistry::Global(), graph.library()));
for (const NodeDef& node : graph.node()) {
TF_RETURN_IF_ERROR(AddNode(node));
}
return absl::OkStatus();
}
bool is_initialized() const { return graph_ != nullptr; }
absl::flat_hash_set<TypeAttrId> GetTypeAttrs(const NodeDef& node) const {
DCHECK(is_initialized()) << "NodeTypeAttrMap is not initialized";
absl::flat_hash_set<TypeAttrId> type_attrs;
const auto iter = type2io_.find(&node);
CHECK(iter != type2io_.end());
for (const auto& key_value : iter->second) {
type_attrs.insert(key_value.first);
}
return type_attrs;
}
const absl::flat_hash_set<int>& GetInputPorts(
const NodeDef& node, const TypeAttrId& type_attr) const {
DCHECK(is_initialized()) << "NodeTypeAttrMap is not initialized";
return type2io_.at(&node).at(type_attr).first;
}
const absl::flat_hash_set<int>& GetOutputPorts(
const NodeDef& node, const TypeAttrId& type_attr) const {
DCHECK(is_initialized()) << "NodeTypeAttrMap is not initialized";
return type2io_.at(&node).at(type_attr).second;
}
TypeAttrId GetInputTypeAttr(const NodeDef& node, int port) const {
DCHECK(is_initialized()) << "NodeTypeAttrMap is not initialized";
const auto iter = io2type_.find(&node);
DCHECK(iter != io2type_.end())
<< "Node " << node.name() << " doesn't exist in a graph";
auto type_vec = io2type_.at(&node).first;
CHECK_GE(port, 0);
CHECK_LT(port, type_vec.size());
return type_vec[port];
}
TypeAttrId GetOutputTypeAttr(const NodeDef& node, int port) const {
DCHECK(is_initialized()) << "NodeTypeAttrMap is not initialized";
auto type_vec = io2type_.at(&node).second;
CHECK_GE(port, 0);
CHECK_LT(port, type_vec.size());
return type_vec[port];
}
private:
Status AddNode(const NodeDef& node) {
const OpDef* op_def_ptr = nullptr;
TF_RETURN_IF_ERROR(function_library_->LookUpOpDef(node.op(), &op_def_ptr));
const OpDef& op_def = *op_def_ptr;
auto& type2io_entry = type2io_[&node];
auto& io2type_entry = io2type_[&node];
auto input_arg_inds = InputPortArgDefIndexes(node, op_def);
if (NonControlInputs(node).size() != input_arg_inds.size()) {
return errors::InvalidArgument(
"Expected ", node.op(), " node ", node.name(), " to have ",
input_arg_inds.size(), " non-control input(s), but got ",
node.input_size());
}
io2type_entry.first.reserve(input_arg_inds.size());
for (int i = 0; i < static_cast<int>(input_arg_inds.size()); ++i) {
const auto& arg_inds = input_arg_inds[i];
const OpDef::ArgDef& arg_def = op_def.input_arg(arg_inds.first);
TypeAttrId type_attr = GetTypeAttrId(arg_def, arg_inds.second);
if (!type_attr.attr_name.empty() &&
!node.attr().count(type_attr.attr_name)) {
return errors::InvalidArgument("Type attribute ", type_attr.attr_name,
" is not present in node ", node.name());
}
type2io_entry[type_attr].first.insert(i);
io2type_entry.first.push_back(type_attr);
}
auto output_arg_inds = OutputPortArgDefIndexes(node, op_def);
io2type_entry.second.reserve(output_arg_inds.size());
for (int i = 0; i < static_cast<int>(output_arg_inds.size()); ++i) {
const auto& arg_inds = output_arg_inds[i];
const OpDef::ArgDef& arg_def = op_def.output_arg(arg_inds.first);
TypeAttrId type_attr = GetTypeAttrId(arg_def, arg_inds.second);
if (!type_attr.attr_name.empty() &&
!node.attr().count(type_attr.attr_name)) {
return errors::InvalidArgument("Type attribute ", type_attr.attr_name,
" is not present in node ", node.name());
}
type2io_entry[type_attr].second.insert(i);
io2type_entry.second.push_back(type_attr);
}
for (const auto& attr : node.attr()) {
const string& attr_name = attr.first;
if (!attr_name.empty() && attr_name[0] == '_') continue;
const AttrValue& attr_value = attr.second;
const OpDef::AttrDef* attr_def = FindAttr(attr_name, op_def);
if (!attr_def) {
return errors::InvalidArgument("AttrDef not found for attribute ",
attr_name, " of node ", node.name());
}
if (attr_def->type() == "type") {
type2io_entry[TypeAttrId(attr_name)];
} else if (attr_def->type() == "list(type)") {
for (int i = 0; i < attr_value.list().type_size(); ++i) {
type2io_entry[TypeAttrId(attr_name, i)];
}
}
}
return absl::OkStatus();
}
const GraphDef* graph_ = nullptr;
std::unique_ptr<FunctionLibraryDefinition> function_library_;
typedef absl::flat_hash_set<int> IntSet;
typedef absl::flat_hash_map<TypeAttrId, std::pair<IntSet, IntSet>> Type2IOMap;
absl::flat_hash_map<const NodeDef*, Type2IOMap> type2io_;
typedef std::vector<TypeAttrId> TypeAttrIdVec;
absl::flat_hash_map<const NodeDef*, std::pair<TypeAttrIdVec, TypeAttrIdVec>>
io2type_;
};
struct NodeTypeId {
NodeTypeId(const NodeDef* _node, const TypeAttrId& _type_attr)
: node(_node), type_attr(_type_attr) {}
const NodeDef* node;
TypeAttrId type_attr;
bool operator==(const NodeTypeId& other) const {
return node == other.node && type_attr == other.type_attr;
}
template <typename H>
friend H AbslHashValue(H h, const NodeTypeId& nt) {
return H::combine(std::move(h), nt.node, nt.type_attr);
}
};
struct NodeTypeIdEdge {
NodeTypeIdEdge(const NodeTypeId& _src, const NodeTypeId& _dst)
: src(_src), dst(_dst) {}
NodeTypeId src;
NodeTypeId dst;
};
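// A view of the graph where each vertex is a (node, type attribute) pair and
// edges connect pairs whose tensors feed one another. This lets the pass
// trace how a chosen data type would propagate through the graph.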
class GraphTypeTopologyView {
public:
GraphTypeTopologyView() = default;
explicit GraphTypeTopologyView(bool skip_invalid_edges)
: skip_invalid_edges_(skip_invalid_edges) {}
Status InitializeFromGraph(const GraphDef& graph,
const NodeTypeAttrMap& node_type_map);
Status AddEphemeralEdges(absl::Span<const NodeTypeIdEdge> ephemeral_edges);
bool is_initialized() const { return graph_ != nullptr; }
int num_nodes() const { return num_nodes_; }
const GraphDef* graph() const { return graph_; }
bool HasNode(absl::string_view node_name, const TypeAttrId& type_attr) const;
const NodeTypeId* GetNode(absl::string_view node_name,
const TypeAttrId& type_attr) const;
const NodeTypeId* GetNode(int node_idx) const;
const absl::optional<int> GetNodeIndex(absl::string_view node_name,
const TypeAttrId& type_attr) const;
const absl::optional<int> GetNodeIndex(const NodeTypeId& node) const;
const absl::InlinedVector<int, 4>& GetFanin(int node_idx) const;
const absl::InlinedVector<int, 2>& GetFanout(int node_idx) const;
private:
struct NodeTypeKey : public std::pair<absl::string_view, TypeAttrId> {
typedef std::pair<absl::string_view, TypeAttrId> Base;
using Base::pair;
template <typename H>
friend H AbslHashValue(H h, const NodeTypeKey& nt) {
return H::combine(std::move(h), nt.first, nt.second);
}
};
bool skip_invalid_edges_ = false;
const GraphDef* graph_ = nullptr;
int num_nodes_ = 0;
std::vector<NodeTypeId> node_type_attrs_;
absl::flat_hash_map<absl::string_view, int> node_name_to_index_;
absl::flat_hash_map<NodeTypeKey, int> node_type_name_to_index_;
std::vector<absl::InlinedVector<int, 4>> fanins_;
std::vector<absl::InlinedVector<int, 2>> fanouts_;
absl::InlinedVector<int, 4> empty_fanin_;
absl::InlinedVector<int, 2> empty_fanout_;
};
template <typename T>
inline void SortAndRemoveDuplicates(T* v) {
std::sort(v->begin(), v->end());
v->erase(std::unique(v->begin(), v->end()), v->end());
}
Status GraphTypeTopologyView::InitializeFromGraph(
const GraphDef& graph, const NodeTypeAttrMap& node_type_map) {
if (graph_ != nullptr) {
return errors::InvalidArgument(
"GraphTypeTopologyView is already initialized.");
}
graph_ = &graph;
int num_nodedefs = graph.node_size();
node_name_to_index_.rehash(num_nodedefs);
node_type_attrs_.reserve(num_nodedefs);
node_type_name_to_index_.rehash(num_nodedefs);
for (int node_idx = 0; node_idx < num_nodedefs; ++node_idx) {
const NodeDef& node = graph.node(node_idx);
node_name_to_index_.emplace(node.name(), node_idx);
for (const TypeAttrId& type_attr : node_type_map.GetTypeAttrs(node)) {
int node_type_idx = node_type_attrs_.size();
node_type_name_to_index_.emplace(NodeTypeKey(node.name(), type_attr),
node_type_idx);
node_type_attrs_.emplace_back(&node, type_attr);
}
}
num_nodes_ = node_type_attrs_.size();
fanins_.resize(num_nodes_);
fanouts_.resize(num_nodes_);
for (int node_type_idx = 0; node_type_idx < num_nodes_; ++node_type_idx) {
const NodeTypeId& node_type = node_type_attrs_.at(node_type_idx);
auto input_ports =
node_type_map.GetInputPorts(*node_type.node, node_type.type_attr);
fanins_[node_type_idx].reserve(input_ports.size());
for (int port : input_ports) {
const string& input = node_type.node->input(port);
TensorId tensor = ParseTensorName(input);
const auto it = node_name_to_index_.find(tensor.node());
const bool valid_input = it != node_name_to_index_.end();
if (!valid_input) {
const string error_message = absl::StrCat(
"Non-existent input ", input, " in node ", node_type.node->name());
if (skip_invalid_edges_) {
VLOG(3) << "Skip error: " << error_message;
} else {
return errors::InvalidArgument(error_message);
}
}
if (valid_input) {
const int input_idx = it->second;
const NodeDef& input_node = graph_->node(input_idx);
TypeAttrId input_type_attr =
node_type_map.GetOutputTypeAttr(input_node, tensor.index());
const auto it2 = node_type_name_to_index_.find(
NodeTypeKey(input_node.name(), input_type_attr));
if (it2 == node_type_name_to_index_.end()) {
if (!skip_invalid_edges_) {
return errors::InvalidArgument("Did not find type attr ",
input_type_attr.DebugString(),
" in node ", input_node.name());
}
continue;
}
int input_node_type_idx = it2->second;
fanins_[node_type_idx].push_back(input_node_type_idx);
fanouts_[input_node_type_idx].push_back(node_type_idx);
}
}
SortAndRemoveDuplicates(&fanins_[node_type_idx]);
}
for (int node_type_idx = 0; node_type_idx < num_nodes_; ++node_type_idx) {
SortAndRemoveDuplicates(&fanouts_[node_type_idx]);
}
return absl::OkStatus();
}
Status GraphTypeTopologyView::AddEphemeralEdges(
absl::Span<const NodeTypeIdEdge> ephemeral_edges) {
for (const NodeTypeIdEdge& edge : ephemeral_edges) {
const auto src = node_name_to_index_.find(edge.src.node->name());
const bool valid_src = src != node_name_to_index_.end();
if (!valid_src) {
const string error_message =
absl::StrCat("Non-existent src node: ", edge.src.node->name());
if (skip_invalid_edges_) {
VLOG(0) << "Skip error: " << error_message;
} else {
return errors::InvalidArgument(error_message);
}
}
const auto dst = node_name_to_index_.find(edge.dst.node->name());
const bool valid_dst = dst != node_name_to_index_.end();
if (!valid_dst) {
const string error_message =
absl::StrCat("Non-existent dst node: ", edge.dst.node->name());
if (skip_invalid_edges_) {
VLOG(0) << "Skip error: " << error_message;
} else {
return errors::InvalidArgument(error_message);
}
}
if (valid_dst && valid_src) {
int src_node_type_idx = node_type_name_to_index_.at(
NodeTypeKey(edge.src.node->name(), edge.src.type_attr));
int dst_node_type_idx = node_type_name_to_index_.at(
NodeTypeKey(edge.dst.node->name(), edge.dst.type_attr));
fanins_[dst_node_type_idx].push_back(src_node_type_idx);
fanouts_[src_node_type_idx].push_back(dst_node_type_idx);
}
}
for (int node_type_idx = 0; node_type_idx < num_nodes_; ++node_type_idx) {
SortAndRemoveDuplicates(&fanins_[node_type_idx]);
SortAndRemoveDuplicates(&fanouts_[node_type_idx]);
}
return absl::OkStatus();
}
bool GraphTypeTopologyView::HasNode(absl::string_view node_name,
const TypeAttrId& type_attr) const {
DCHECK(is_initialized()) << "GraphTypeTopologyView is not initialized";
NodeTypeKey key(node_name, type_attr);
const auto it = node_type_name_to_index_.find(key);
return it != node_type_name_to_index_.end();
}
const NodeTypeId* GraphTypeTopologyView::GetNode(
absl::string_view node_name, const TypeAttrId& type_attr) const {
DCHECK(is_initialized()) << "GraphTypeTopologyView is not initialized";
NodeTypeKey key(node_name, type_attr);
const auto it = node_type_name_to_index_.find(key);
return it == node_type_name_to_index_.end()
? nullptr
: &node_type_attrs_.at(it->second);
}
const NodeTypeId* GraphTypeTopologyView::GetNode(int node_idx) const {
DCHECK(is_initialized()) << "GraphTypeTopologyView is not initialized";
DCHECK(node_idx >= 0 && node_idx < num_nodes_) << "node_idx is out of range";
return &node_type_attrs_.at(node_idx);
}
const absl::optional<int> GraphTypeTopologyView::GetNodeIndex(
absl::string_view node_name, const TypeAttrId& type_attr) const {
DCHECK(is_initialized()) << "GraphTypeTopologyView is not initialized";
NodeTypeKey key(node_name, type_attr);
const auto it = node_type_name_to_index_.find(key);
DCHECK(it != node_type_name_to_index_.end())
<< "Node doesn't exist in a graph";
return it == node_type_name_to_index_.end() ? absl::nullopt
: absl::make_optional(it->second);
}
const absl::optional<int> GraphTypeTopologyView::GetNodeIndex(
const NodeTypeId& node) const {
return GetNodeIndex(node.node->name(), node.type_attr);
}
const absl::InlinedVector<int, 4>& GraphTypeTopologyView::GetFanin(
int node_idx) const {
DCHECK(is_initialized()) << "GraphTypeTopologyView is not initialized";
const bool is_valid_node_idx = node_idx >= 0 && node_idx < num_nodes_;
DCHECK(is_valid_node_idx) << "node_idx is out of range";
return is_valid_node_idx ? fanins_[node_idx] : empty_fanin_;
}
const absl::InlinedVector<int, 2>& GraphTypeTopologyView::GetFanout(
int node_idx) const {
DCHECK(is_initialized()) << "GraphTypeTopologyView is not initialized";
const bool is_valid_node_idx = node_idx >= 0 && node_idx < num_nodes_;
DCHECK(is_valid_node_idx) << "node_idx is out of range";
return is_valid_node_idx ? fanouts_[node_idx] : empty_fanout_;
}
enum class TypeTraversalDirection {
kFollowInputs,
kFollowOutputs,
kFollowInputsAndOutputs,
};
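// Pre-order and post-order (and back-edge) callbacks invoked while running a
// depth-first traversal over a GraphTypeTopologyView.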
struct DfsTypeCallbacks {
DfsTypeCallbacks() = default;
DfsTypeCallbacks(std::function<void(int)> pre, std::function<void(int)> post, | #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM || INTEL_MKL
#include "tensorflow/core/grappler/optimizers/auto_mixed_precision.h"
#include <utility>
#include <vector>
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/list_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/clusters/single_machine.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/graph_view.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/util/util.h"
namespace tensorflow {
namespace grappler {
namespace {
using ::testing::ContainsRegex;
using ::testing::SizeIs;
template <DataType DTYPE>
Tensor GenerateIdentityMatrix(int64_t height, int64_t width) {
typedef typename EnumToDataType<DTYPE>::Type T;
Tensor tensor(DTYPE, TensorShape{height, width});
for (int64_t i = 0; i < height; ++i) {
for (int64_t j = 0; j < width; ++j) {
tensor.matrix<T>()(i, j) = i == j;
}
}
return tensor;
}
template <DataType DTYPE>
Tensor GenerateRandomTensorInRange(const TensorShape& shape, double minval,
double maxval) {
typedef typename EnumToDataType<DTYPE>::Type T;
Tensor tensor(DTYPE, shape);
for (auto i = 0; i < tensor.NumElements(); i++)
tensor.flat<T>()(i) =
(random::New64() % 65536 / 65536.0) * (maxval - minval) + minval;
return tensor;
}
void VerifyGraphsEquivalent(const GraphDef& original_graph,
const GraphDef& optimized_graph,
const string& func) {
EXPECT_EQ(original_graph.node_size(), optimized_graph.node_size()) << func;
GraphView optimized_view(&optimized_graph);
for (int i = 0; i < original_graph.node_size(); ++i) {
const NodeDef& original = original_graph.node(i);
const NodeDef& optimized = *optimized_view.GetNode(original.name());
EXPECT_EQ(original.name(), optimized.name()) << func;
EXPECT_EQ(original.op(), optimized.op()) << func;
EXPECT_EQ(original.input_size(), optimized.input_size()) << func;
if (original.input_size() == optimized.input_size()) {
for (int j = 0; j < original.input_size(); ++j) {
EXPECT_EQ(original.input(j), optimized.input(j)) << func;
}
}
}
}
const std::pair<int, int> kMinGPUArch = {7, 0};
class AutoMixedPrecisionTest : public GrapplerTest {
protected:
void SetMode(AutoMixedPrecisionMode mode) { mode_ = mode; }
void SetUp() override {
if (mode_ == AutoMixedPrecisionMode::CUDA) {
int num_gpus = GetNumAvailableGPUs();
gpu_available_ = (num_gpus > 0);
#if GOOGLE_CUDA
gpu_available_ =
gpu_available_ && (num_gpus == GetNumAvailableGPUs(kMinGPUArch));
#else
gpu_available_ = false;
#endif
if (gpu_available_) {
        virtual_cluster_.reset(new SingleMachine(/*timeout_s=*/10, 1, 1));
} else {
DeviceProperties device_properties;
device_properties.set_type("GPU");
#if GOOGLE_CUDA
device_properties.mutable_environment()->insert({"architecture", "7"});
device_properties.mutable_environment()->insert({"cuda", "9010"});
#else
device_properties.mutable_environment()->insert(
{"architecture", "gfx906"});
#endif
virtual_cluster_.reset(
new VirtualCluster({{"/GPU:1", device_properties}}));
}
} else if (mode_ == AutoMixedPrecisionMode::FP16_CPU) {
DeviceProperties device_properties;
device_properties.set_type("CPU");
      virtual_cluster_.reset(new SingleMachine(/*timeout_s=*/10, 1, 0));
bool is_fp16_enabled_on_cpu = false;
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
is_fp16_enabled_on_cpu = IsAMXDataTypeSupportedByOneDNNOnThisCPU(DT_HALF);
#endif
if (!IsMKLEnabled() || !is_fp16_enabled_on_cpu) {
GTEST_SKIP() << "This device doesn't support FP16";
}
}
TF_CHECK_OK(virtual_cluster_->Provision());
}
void TearDown() override { TF_CHECK_OK(virtual_cluster_->Shutdown()); }
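  // Adds a node whose type attributes are filled with DT_FLOAT defaults
  // appropriate to the op (T, dtype, Ta/Tb, SrcT/DstT, elem_type, ...).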
NodeDef* AddSimpleNode(const string& name, const string& op,
const std::vector<string>& inputs,
GraphDef* graph) const {
std::vector<std::pair<string, AttrValue>> attributes;
if (op == "AddN" || op == "ShapeN") {
AttrValue num_inputs;
num_inputs.set_i(inputs.size());
attributes.emplace_back("N", num_inputs);
}
if (op == "ShapeN") {
AttrValue out_type;
out_type.set_type(DT_INT32);
attributes.emplace_back("out_type", out_type);
}
AttrValue type;
type.set_type(DT_FLOAT);
if (op == "Const" || op == "Placeholder" || op == "VariableV2" ||
op == "VarHandleOp" || op == "ReadVariableOp") {
attributes.emplace_back("dtype", type);
} else if (op == "SparseMatMul") {
attributes.emplace_back("Ta", type);
attributes.emplace_back("Tb", type);
} else if (op == "IdentityN") {
AttrValue type_list;
for (int i = 0; i < static_cast<int>(inputs.size()); ++i) {
type_list.mutable_list()->add_type(DT_FLOAT);
}
attributes.emplace_back("T", type_list);
} else if (op == "StackV2" || op == "StackPopV2") {
attributes.emplace_back("elem_type", type);
} else if (op == "Cast") {
attributes.emplace_back("SrcT", type);
attributes.emplace_back("DstT", type);
} else {
attributes.emplace_back("T", type);
}
return AddNode(name, op, inputs, attributes, graph);
}
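  // Sandwiches a unary op between two MatMuls (which the rewrite always
  // converts to fp16), asserts that the unary op is converted too, and
  // compares fp32 vs. mixed-precision results for inputs drawn from
  // [input_min, input_max] under the given tolerances.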
void TestSimpleUnaryInferOp(
double input_min, double input_max, double atol, double rtol,
const std::function<Output(const tensorflow::Scope&, Output)>&
test_op_factory) {
int size = 128;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output eye = ops::Const(s.WithOpName("eye"),
GenerateIdentityMatrix<DT_FLOAT>(size, size));
Output input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, eye);
Output infer1 = test_op_factory(s.WithOpName("infer1"), allow1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, eye);
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), allow2);
GrapplerItem item;
item.fetch = {"fetch1"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto input_tensor = GenerateRandomTensorInRange<DT_FLOAT>(
TensorShape({size, size}), input_min, input_max);
std::vector<std::pair<string, Tensor>> feed = {{"input", input_tensor}};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(),
DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch, feed);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], atol, rtol);
}
}
std::unique_ptr<Cluster> virtual_cluster_;
bool gpu_available_;
AutoMixedPrecisionMode mode_;
};
class AutoMixedPrecisionParamTest
: public AutoMixedPrecisionTest,
public ::testing::WithParamInterface<AutoMixedPrecisionMode> {
protected:
void SetUp() override {
mode_ = GetParam();
AutoMixedPrecisionTest::SetMode(mode_);
AutoMixedPrecisionTest::SetUp();
}
AutoMixedPrecisionMode mode_;
};
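// Node names in these tests encode the expected behavior of each op under the
// rewrite: "allow" ops (e.g. MatMul) should always become fp16, "deny" ops
// (e.g. Exp, SparseMatMul) must stay fp32, "infer" ops become fp16 only when
// fed by fp16 tensors, and "clr" (clear-list) ops follow their neighbors.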
TEST_P(AutoMixedPrecisionParamTest, NoOp) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.234f, {32});
Output deny1 = ops::Exp(s.WithOpName("deny1"), input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), deny1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), clr1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), infer1);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr2);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
VerifyGraphsEquivalent(item.graph, output, __FUNCTION__);
GraphView output_view(&output);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
TEST_P(AutoMixedPrecisionParamTest, AlreadyFp16) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f, {32, 32});
Output cst1 = ops::Cast(s.WithOpName("cst1"), input, DT_HALF);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), cst1, cst1);
Output clr1 = ops::Relu(s.WithOpName("clr1"), allow1);
Output cst2 = ops::Cast(s.WithOpName("cst2"), clr1, DT_FLOAT);
Output clr2 = ops::Relu(s.WithOpName("clr2"), cst2);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr2);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
VerifyGraphsEquivalent(item.graph, output, __FUNCTION__);
GraphView output_view(&output);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("cst1")->attr().at("DstT").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("cst2")->attr().at("SrcT").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("cst2")->attr().at("DstT").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
TEST_P(AutoMixedPrecisionParamTest, Simple) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output deny1 = ops::Exp(s.WithOpName("deny1"), input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), deny1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), clr1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), infer1);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), clr2, clr2);
Output clr3 = ops::Relu(s.WithOpName("clr3"), allow1);
Output infer2 = ops::Log(s.WithOpName("infer2"), clr3);
Output clr4 = ops::Relu(s.WithOpName("clr4"), infer2);
Output deny2 = ops::SparseMatMul(s.WithOpName("deny2"), clr4, clr4);
Output clr5 = ops::Relu(s.WithOpName("clr5"), deny2);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr5);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr3")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr4")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny2")->attr().at("Ta").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny2")->attr().at("Tb").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr5")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-4);
}
}
TEST_P(AutoMixedPrecisionParamTest, NoInferOp) {
setenv("TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_LEVEL", "TREAT_INFER_AS_DENY",
         1 /*overwrite*/);
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output deny1 = ops::Exp(s.WithOpName("deny1"), input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), deny1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), clr1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), infer1);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), clr2, clr2);
Output clr3 = ops::Relu(s.WithOpName("clr3"), allow1);
Output infer2 = ops::Log(s.WithOpName("infer2"), clr3);
Output clr4 = ops::Relu(s.WithOpName("clr4"), infer2);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), clr4, clr4);
Output infer3 = ops::Log(s.WithOpName("infer3"), allow2);
Output fetch = ops::Identity(s.WithOpName("fetch"), infer3);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 4);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr3")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr4")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer3")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-4);
}
unsetenv("TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_LEVEL");
}
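// Note: the TREAT_INFER_AS_DENY rewrite level exercised above makes
// infer-list ops behave like deny-list ops, so a float16 region cannot grow
// through them. Each allow-list cluster then needs its own Cast pair on entry
// and exit, which is why this variant expects four inserted nodes where the
// default level expects two.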
TEST_P(AutoMixedPrecisionParamTest, BidirectionalClearChain) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output clr1 = ops::Relu(s.WithOpName("clr1"), input);
Output clr2 = ops::Relu(s.WithOpName("clr2"), input);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), clr1, clr1);
auto clr3 = ops::ShapeN(s.WithOpName("clr3"), {clr1, clr2});
Output clr4 = ops::Relu(s.WithOpName("clr4"), clr2);
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), allow1);
Output fetch2 = ops::Identity(s.WithOpName("fetch2"), clr4);
GrapplerItem item;
item.fetch = {"fetch1", "fetch2"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 3);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr3")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr4")->attr().at("T").type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
TEST_P(AutoMixedPrecisionParamTest, PreserveFetches) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), allow1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), clr1);
Output deny1 = ops::Exp(s.WithOpName("deny1"), infer1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), deny1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), clr2, clr2);
Output clr3 = ops::Relu(s.WithOpName("clr3"), allow2);
Output deny2 = ops::Exp(s.WithOpName("deny2"), clr3);
Output clr4 = ops::Relu(s.WithOpName("clr4"), deny2);
GrapplerItem item;
item.fetch = {"allow1", "clr2", "clr3"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr3")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr4")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-3);
}
}
TEST_P(AutoMixedPrecisionParamTest, PreserveCPUNodes) {
if (mode_ == AutoMixedPrecisionMode::FP16_CPU) {
GTEST_SKIP() << "This test is not required on CPU";
}
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output clr1 = ops::Relu(s.WithOpName("clr1"), input);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), clr1, clr1);
Output infer1 = ops::Tanh(s.WithOpName("infer1"), allow1);
Output allow2 =
ops::MatMul(s.WithOpName("allow2").WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0"),
infer1, infer1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), allow2);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr2);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
TEST_P(AutoMixedPrecisionParamTest, PreserveIdentityAfterVariable) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output var1 = ops::Variable(s.WithOpName("var1"), {32, 32}, DT_FLOAT);
Output clr1 = ops::Identity(s.WithOpName("clr1"), var1);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, clr1);
Output input2 = ops::Const(s.WithOpName("input2"), 1.f / 32, {32, 32});
Output clr2 = ops::Identity(s.WithOpName("clr2"), input2);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), input, clr2);
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), allow1);
Output fetch2 = ops::Identity(s.WithOpName("fetch2"), allow2);
GrapplerItem item;
item.fetch = {"fetch1", "fetch2"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto var1_tensor =
GenerateConstantTensor<DT_FLOAT>(TensorShape({32, 32}), 3.141593f);
std::vector<std::pair<string, Tensor>> feed = {{"var1", var1_tensor}};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 5);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("var1")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("input2")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch, feed);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-3);
}
}
TEST_P(AutoMixedPrecisionParamTest, FusedBatchNorm) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {8, 56, 56, 16});
Output weight = ops::Const(s.WithOpName("weight"), 2.f, {3, 3, 16, 16});
Output scale = ops::Const(s.WithOpName("scale"), 3.f, {16});
Output offset = ops::Const(s.WithOpName("offset"), 4.f, {16});
Output mean = ops::Const(s.WithOpName("mean"), 5.f, {0});
Output variance = ops::Const(s.WithOpName("variance"), 6.f, {0});
Output allow1 =
ops::Conv2D(s.WithOpName("allow1"), input, weight, {1, 1, 1, 1}, "SAME",
ops::Conv2D::DataFormat("NHWC"));
auto fbn1_op =
ops::FusedBatchNorm(s.WithOpName("fbn1"), allow1, scale, offset, mean,
variance, ops::FusedBatchNorm::DataFormat("NHWC"));
Output fbn1 = fbn1_op.y;
Output fbn1_rs1 = fbn1_op.reserve_space_1;
Output fbn1_rs2 = fbn1_op.reserve_space_2;
Output bng1 = ops::FusedBatchNormGrad(
s.WithOpName("bng1"), fbn1, allow1, scale, fbn1_rs1,
fbn1_rs2, ops::FusedBatchNormGrad::DataFormat("NHWC"))
.x_backprop;
Output infer1 = ops::Add(s.WithOpName("infer1"), fbn1, bng1);
Output allow2 =
ops::Conv2D(s.WithOpName("allow2"), infer1, weight, {1, 1, 1, 1}, "SAME",
ops::Conv2D::DataFormat("NHWC"));
Output fetch = ops::Identity(s.WithOpName("fetch"), allow2);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 3);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("fbn1")->op(), "FusedBatchNormV2");
EXPECT_EQ(output_view.GetNode("fbn1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("fbn1")->attr().at("U").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("bng1")->op(), "FusedBatchNormGradV2");
EXPECT_EQ(output_view.GetNode("bng1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("bng1")->attr().at("U").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 1e-2);
}
}
TEST_P(AutoMixedPrecisionParamTest, RepeatedAndListTypeAttrs) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, input);
auto clr1_op = ops::IdentityN(s.WithOpName("clr1"), {allow1, allow1, allow1});
Output infer1 =
ops::AddN(s.WithOpName("infer1"),
{clr1_op.output[0], clr1_op.output[1], clr1_op.output[2]});
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, infer1);
Output fetch = ops::Identity(s.WithOpName("fetch"), allow2);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
for (auto type : output_view.GetNode("clr1")->attr().at("T").list().type()) {
EXPECT_EQ(type, DT_HALF);
}
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
TEST_P(AutoMixedPrecisionParamTest, ExistingCast) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), true, {32, 32});
Output cst1 = ops::Cast(s.WithOpName("cst1"), input, DT_FLOAT);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), cst1, cst1);
Output fetch = ops::Identity(s.WithOpName("fetch"), allow1);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 1);
EXPECT_EQ(output_view.GetNode("cst1")->attr().at("SrcT").type(), DT_BOOL);
EXPECT_EQ(output_view.GetNode("cst1")->attr().at("DstT").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
TEST_P(AutoMixedPrecisionParamTest, RecurrentEdgeColorMismatch) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output deny1 = ops::Exp(s.WithOpName("deny1"), input);
Output ent1 =
ops::internal::Enter(s.WithOpName("ent1"), deny1, "loop1").output;
Output mrg1 = ops::Merge(s.WithOpName("mrg1"), {ent1, ent1}).output;
Output con1 = ops::Const(s.WithOpName("con1"), false, {});
Output lpc1 = ops::LoopCond(s.WithOpName("lpc1"), con1).output;
auto swt1 = ops::Switch(s.WithOpName("swt1"), mrg1, l |
1,393 | cpp | tensorflow/tensorflow | loop_optimizer | tensorflow/core/grappler/optimizers/loop_optimizer.cc | tensorflow/core/grappler/optimizers/loop_optimizer_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_LOOP_OPTIMIZER_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_LOOP_OPTIMIZER_H_
#include <unordered_set>
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/frame.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace grappler {
constexpr char kLoopOptimizer[] = "LoopOptimizer";
class LoopOptimizer : public GraphOptimizer {
public:
LoopOptimizer();
explicit LoopOptimizer(RewriterConfig::Toggle opt_level,
DeviceBase* cpu_device);
~LoopOptimizer() override {}
  string name() const override { return "loop_optimizer"; }
bool UsesFunctionLibrary() const override { return false; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override;
private:
friend class LoopOptimizerTest;
struct LoopOptimizerOptions {
bool enable_loop_invariant_node_motion = false;
bool enable_stack_push_removal = true;
bool enable_dead_branch_removal = true;
static LoopOptimizerOptions Default(RewriterConfig::Toggle opt_level) {
LoopOptimizerOptions options;
return options;
}
};
Status RemoveDeadBranches(const std::unordered_set<string>& nodes_to_preserve,
NodeMap& node_map,
const absl::flat_hash_set<string>& feed_nodes,
GraphDef* optimized_graph);
RewriterConfig::Toggle opt_level_;
DeviceBase* cpu_device_;
LoopOptimizerOptions options_;
std::unique_ptr<ResourceMgr> resource_mgr_;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/loop_optimizer.h"
#include <algorithm>
#include <deque>
#include <limits>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/grappler/graph_topology_view.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/constant_folding.h"
#include "tensorflow/core/grappler/optimizers/evaluation_utils.h"
#include "tensorflow/core/grappler/utils/frame.h"
#include "tensorflow/core/grappler/utils/traversal.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/tensor_coding.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/saved_tensor_slice_util.h"
using tensorflow::strings::StrCat;
namespace tensorflow {
namespace grappler {
namespace {
using TensorVector = gtl::InlinedVector<TensorValue, 4>;
class LoopInvariantNodeMotionOptimizer {
public:
explicit LoopInvariantNodeMotionOptimizer(GraphDef* optimized_graph)
: optimized_graph_(optimized_graph) {}
virtual ~LoopInvariantNodeMotionOptimizer() = default;
Status Optimize();
private:
Status FindInvariantNodes(NodeDef* node);
Status RevertInvariantNodes();
Status MoveInvariantNodes(const int frame_id);
Status HandleInvariantNode(NodeDef* node, const int num_outputs,
const int frame_id);
Status HandleConst(NodeDef* node, const int num_outputs, const int frame_id);
Status HandleInvariantEnter(NodeDef* node, const int num_outputs);
GraphDef* optimized_graph_;
std::unique_ptr<NodeMap> node_map_;
std::map<NodeDef*, int> invariant_nodes_;
std::set<int> empty_set_;
std::vector<std::set<int>> frame_children_;
std::vector<int> frame_parent_;
std::map<int, const NodeDef*> loop_cond_;
std::map<int, std::vector<NodeDef*>> invariant_enters_;
int new_enter_id_;
};
Status LoopInvariantNodeMotionOptimizer::HandleInvariantEnter(
NodeDef* node, const int num_outputs) {
auto consumers = node_map_->GetOutputs(node->name());
std::vector<string> enter_control_inputs;
string enter_input;
for (auto& input : node->input()) {
if (IsControlInput(input)) {
enter_control_inputs.push_back(input);
} else {
enter_input = input;
}
}
for (auto* consumer : consumers) {
if (invariant_nodes_.count(consumer)) {
for (int i = 0; i < consumer->input_size(); ++i) {
if (NodeName(consumer->input(i)) == node->name()) {
consumer->set_input(i, enter_input);
node_map_->AddOutput(NodeName(enter_input), consumer->name());
node_map_->RemoveOutput(node->name(), consumer->name());
}
}
for (auto& control_input : enter_control_inputs) {
consumer->add_input(control_input);
node_map_->AddOutput(NodeName(control_input), consumer->name());
}
}
}
return absl::OkStatus();
}
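// A constant Enter is bypassed rather than physically moved: invariant
// consumers are rewired straight to the Enter's data input, and the Enter's
// control inputs are copied onto them so any ordering constraints survive.
// Sketch of the rewrite (names hypothetical):
//
//   before:  x --> Enter(is_constant=true) --> invariant_consumer
//   after:   x --> invariant_consumer   (the Enter stays for variant users)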
Status LoopInvariantNodeMotionOptimizer::HandleConst(NodeDef* node,
const int num_outputs,
const int frame_id) {
NodeDef* const_node = nullptr;
if (num_outputs == 0) {
const_node = node;
node_map_->RemoveInputs(node->name());
node->clear_input();
} else {
const string const_node_name =
AddPrefixToNodeName(node->name(), kLoopOptimizer);
const_node = node_map_->GetNode(const_node_name);
if (const_node == nullptr) {
const_node = optimized_graph_->add_node();
const_node->set_name(const_node_name);
const_node->set_op("Const");
const_node->set_device(node->device());
*const_node->mutable_attr() = node->attr();
node_map_->AddNode(const_node->name(), const_node);
}
auto consumers = node_map_->GetOutputs(node->name());
for (auto* consumer : consumers) {
if (invariant_nodes_.count(consumer)) {
for (int i = 0; i < consumer->input_size(); ++i) {
if (NodeName(consumer->input(i)) == node->name()) {
if (IsControlInput(consumer->input(i))) {
*consumer->mutable_input(i) = AsControlDependency(*const_node);
} else {
*consumer->mutable_input(i) = const_node->name();
}
node_map_->AddOutput(const_node->name(), consumer->name());
node_map_->RemoveOutput(node->name(), consumer->name());
}
}
}
}
}
if (frame_parent_[frame_id] != -1) {
int parent_id = frame_parent_[frame_id];
auto loop_cond_it = loop_cond_.find(parent_id);
if (loop_cond_it == loop_cond_.end()) {
return errors::InvalidArgument("Frame ", frame_id,
" doesn't have a LoopCond node");
}
auto& loop_cond_name = loop_cond_it->second->name();
NodeDef* switch_node = nullptr;
for (auto* node : node_map_->GetOutputs(loop_cond_name)) {
if (node->op() == "Switch") {
switch_node = node;
break;
}
}
if (!switch_node) {
return errors::InvalidArgument("LoopCond node of Frame ", frame_id,
" doesn't connect to any Switch node");
}
string switch_output = StrCat(switch_node->name(), ":1");
const string ctrl_dep = ConstantFolding::AddControlDependency(
switch_output, optimized_graph_, node_map_.get());
const_node->add_input(ctrl_dep);
node_map_->AddOutput(NodeName(ctrl_dep), const_node->name());
}
return absl::OkStatus();
}
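// Constants are hoisted by copying. If the Const still has consumers outside
// the invariant set (num_outputs > 0), a prefixed copy is created outside the
// loop body and only invariant consumers are redirected to it; otherwise the
// node itself is simply stripped of its control inputs. For a nested frame,
// the hoisted constant also gains a control dependency on the parent loop's
// Switch:1 output so it cannot run before the enclosing loop takes an
// iteration. Sketch (names hypothetical):
//
//   before (inner frame):  Const --> invariant_consumer
//   after:   LoopOptimizer/Const (^Switch:1) --> invariant_consumer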
Status LoopInvariantNodeMotionOptimizer::HandleInvariantNode(
NodeDef* node, const int num_outputs, const int frame_id) {
for (int i = 0; i < node->input_size(); ++i) {
if (IsControlInput(node->input(i))) {
node->mutable_input()->SwapElements(i, node->input_size() - 1);
node->mutable_input()->RemoveLast();
}
}
if (num_outputs == 0) {
return absl::OkStatus();
}
DataTypeVector input_types;
DataTypeVector output_types;
OpRegistryInterface* op_registry = OpRegistry::Global();
const OpRegistrationData* op_reg_data = nullptr;
TF_RETURN_IF_ERROR(op_registry->LookUp(node->op(), &op_reg_data));
TF_RETURN_IF_ERROR(InOutTypesForNode(*node, op_reg_data->op_def, &input_types,
&output_types));
auto consumers = node_map_->GetOutputs(node->name());
string fname = invariant_enters_[frame_id][0]->attr().at("frame_name").s();
int piterations =
invariant_enters_[frame_id][0]->attr().at("parallel_iterations").i();
for (auto* consumer : consumers) {
if (!invariant_nodes_.count(consumer)) {
for (int i = 0; i < consumer->input_size(); ++i) {
int port;
string node_name = ParseNodeName(consumer->input(i), &port);
if (node_name != node->name()) {
continue;
}
if (port < 0) {
return errors::InvalidArgument(
"Invariant node should not have control outputs "
"to variant node");
}
DataType output_type = output_types[port];
NodeDef* new_enter = optimized_graph_->add_node();
new_enter->set_op("Enter");
new_enter->set_device(node->device());
new_enter->set_name(AddPrefixToNodeName(
StrCat(fname, "_enter_", new_enter_id_++), kLoopOptimizer));
AttrValue data_type;
data_type.set_type(output_type);
new_enter->mutable_attr()->insert({"T", data_type});
AttrValue frame_name;
frame_name.set_s(fname);
new_enter->mutable_attr()->insert({"frame_name", frame_name});
AttrValue is_const;
is_const.set_b(true);
new_enter->mutable_attr()->insert({"is_constant", is_const});
AttrValue parallel_iterations;
parallel_iterations.set_i(piterations);
new_enter->mutable_attr()->insert(
{"parallel_iterations", parallel_iterations});
new_enter->add_input(consumer->input(i));
*consumer->mutable_input(i) = new_enter->name();
node_map_->AddNode(new_enter->name(), new_enter);
node_map_->AddOutput(node->name(), new_enter->name());
node_map_->AddOutput(new_enter->name(), consumer->name());
}
}
}
return absl::OkStatus();
}
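// When a hoisted op still feeds variant nodes inside the loop, each such data
// edge is re-routed through a fresh constant Enter so the value legally
// re-enters the frame:
//
//   before:  inv_op --> variant_consumer                  (inside frame f)
//   after:   inv_op --> Enter(is_constant=true) --> variant_consumer
//
// The new Enter inherits frame_name and parallel_iterations from the frame's
// existing constant Enters, and its T attr comes from the producer's inferred
// output type for that port. A control edge from an invariant node to a
// variant node is rejected above because it cannot be expressed this way.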
Status LoopInvariantNodeMotionOptimizer::MoveInvariantNodes(
const int frame_id) {
for (auto iter = invariant_nodes_.begin(); iter != invariant_nodes_.end();
++iter) {
auto* invariant_node = iter->first;
const int num_outputs = iter->second;
if (IsEnter(*invariant_node)) {
TF_RETURN_IF_ERROR(HandleInvariantEnter(invariant_node, num_outputs));
} else if (IsConstant(*invariant_node)) {
TF_RETURN_IF_ERROR(HandleConst(invariant_node, num_outputs, frame_id));
} else {
TF_RETURN_IF_ERROR(
HandleInvariantNode(invariant_node, num_outputs, frame_id));
}
}
return absl::OkStatus();
}
Status LoopInvariantNodeMotionOptimizer::RevertInvariantNodes() {
std::deque<const NodeDef*> reverted_nodes;
for (auto iter = invariant_nodes_.begin(); iter != invariant_nodes_.end();) {
bool erased = false;
const auto* node = iter->first;
if (!IsConstant(*node) && !IsEnter(*node) && iter->second > 0) {
auto& consumers = node_map_->GetOutputs(node->name());
for (auto* consumer : consumers) {
if (!invariant_nodes_.count(consumer)) {
for (const auto& input : consumer->input()) {
if (IsControlInput(input) && NodeName(input) == node->name()) {
reverted_nodes.push_back(node);
invariant_nodes_.erase(iter++);
erased = true;
break;
}
}
if (erased) break;
}
}
}
if (!erased) ++iter;
}
while (!reverted_nodes.empty()) {
const auto* node = reverted_nodes.front();
reverted_nodes.pop_front();
std::set<NodeDef*> producers;
for (const auto& input : node->input()) {
auto* producer = node_map_->GetNode(input);
auto iter = invariant_nodes_.find(producer);
if (iter != invariant_nodes_.end()) {
if (IsControlInput(input) && !IsConstant(*producer) &&
!IsEnter(*producer)) {
reverted_nodes.push_back(producer);
invariant_nodes_.erase(iter);
} else {
producers.insert(producer);
}
}
}
for (auto* producer : producers) {
auto iter = invariant_nodes_.find(producer);
if (iter != invariant_nodes_.end()) {
++iter->second;
}
}
for (auto* consumer : node_map_->GetOutputs(node->name())) {
auto iter = invariant_nodes_.find(consumer);
if (iter != invariant_nodes_.end()) {
reverted_nodes.push_back(consumer);
invariant_nodes_.erase(iter);
}
}
}
return absl::OkStatus();
}
Status LoopInvariantNodeMotionOptimizer::FindInvariantNodes(
NodeDef* start_node) {
std::vector<NodeDef*> stack;
stack.reserve(32);
stack.push_back(start_node);
while (!stack.empty()) {
NodeDef* node = stack.back();
stack.pop_back();
auto consumers = node_map_->GetOutputs(node->name());
invariant_nodes_.emplace(node, consumers.size());
for (auto* consumer : consumers) {
if (invariant_nodes_.count(consumer) || ModifiesFrameInfo(*consumer)) {
continue;
}
bool is_invariant = true;
for (const auto& input : consumer->input()) {
if (!IsControlInput(input)) {
const string name = NodeName(input);
auto* producer = node_map_->GetNode(name);
if (!invariant_nodes_.count(producer)) {
if (IsConstant(*producer)) {
invariant_nodes_.insert(
std::make_pair(producer, node_map_->GetOutputs(name).size()));
} else {
is_invariant = false;
break;
}
}
}
}
if (is_invariant) {
std::set<NodeDef*> producers;
for (const auto& input : consumer->input()) {
auto* producer = node_map_->GetNode(input);
producers.insert(producer);
}
for (auto* producer : producers) {
auto iter = invariant_nodes_.find(producer);
if (iter != invariant_nodes_.end()) {
--iter->second;
}
}
stack.push_back(consumer);
}
}
}
return absl::OkStatus();
}
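// The value stored in invariant_nodes_ counts how many of a node's consumers
// are not (yet) known to be invariant; it is decremented each time a consumer
// is proven invariant. A nonzero count after the walk means the node still
// has variant fanout: such nodes either get re-entry Enters in
// HandleInvariantNode, or are reverted by RevertInvariantNodes first if they
// reach a variant consumer through a control edge.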
Status LoopInvariantNodeMotionOptimizer::Optimize() {
node_map_.reset(new NodeMap(optimized_graph_));
FrameView frame_view;
TF_RETURN_IF_ERROR(frame_view.InferFromGraph(*optimized_graph_));
frame_parent_.resize(frame_view.num_frames(), -1);
frame_children_.resize(frame_view.num_frames());
std::deque<int> worklist;
for (const NodeDef& node : optimized_graph_->node()) {
const std::vector<int>& frame_ids = frame_view.Frames(node);
if (frame_ids.size() >= 3) {
for (unsigned int i = 1; i < frame_ids.size() - 1; ++i) {
frame_parent_[frame_ids[i]] = frame_ids[i - 1];
frame_children_[frame_ids[i]].insert(frame_ids[i + 1]);
}
}
if (frame_ids.size() >= 2) {
frame_children_[frame_ids[0]].insert(frame_ids[1]);
frame_parent_[frame_ids.back()] = frame_ids[frame_ids.size() - 2];
}
if (!frame_ids.empty()) {
frame_children_[frame_ids.back()] = empty_set_;
if (node.op() == "LoopCond") {
if (loop_cond_.count(frame_ids.back())) {
return errors::InvalidArgument(
"Loop ", frame_ids.back(),
" has more than one LoopCond node: ", node.name(), " and ",
loop_cond_[frame_ids.back()]->name());
}
loop_cond_[frame_ids.back()] = &node;
}
if (IsEnter(node) && node.attr().at("is_constant").b()) {
invariant_enters_[frame_ids.back()].push_back(
const_cast<NodeDef*>(&node));
}
}
}
for (size_t i = 0; i < frame_children_.size(); i++) {
if (frame_children_[i].empty()) {
worklist.push_back(i);
}
}
while (!worklist.empty()) {
int frame_id = worklist.front();
new_enter_id_ = 0;
worklist.pop_front();
if (frame_parent_[frame_id] != -1) {
int parent_id = frame_parent_[frame_id];
frame_children_[parent_id].erase(frame_id);
if (frame_children_[parent_id].empty()) {
worklist.push_back(parent_id);
}
}
if (invariant_enters_[frame_id].empty()) {
continue;
}
invariant_nodes_.clear();
for (auto* enter : invariant_enters_[frame_id]) {
TF_RETURN_IF_ERROR(FindInvariantNodes(enter));
}
TF_RETURN_IF_ERROR(RevertInvariantNodes());
TF_RETURN_IF_ERROR(MoveInvariantNodes(frame_id));
}
return absl::OkStatus();
}
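// Frames are processed innermost-first: leaf frames seed the worklist and a
// parent frame is enqueued only once all of its children are done. A node
// hoisted out of an inner loop therefore lands in the parent frame in time to
// be considered again, and can keep migrating outward one frame at a time.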
std::vector<int> GetStackPushNodesToConvert(
const GraphTopologyView& graph_view,
const std::unordered_set<string>& nodes_to_preserve, int stack_node_idx) {
VLOG(1) << "Stack node: " << graph_view.graph()->node(stack_node_idx).name();
const std::unordered_set<string> op_types_to_traverse(
{"Stack", "StackV2", "Enter", "RefEnter", "Switch", "RefSwitch",
"_SwitchN", "Identity", "RefIdentity"});
const auto is_op_to_traverse = [&](const NodeDef* node) -> bool {
return op_types_to_traverse.find(node->op()) != op_types_to_traverse.end();
};
std::vector<int> nodes_to_convert;
std::vector<int> fanouts;
DfsTraversal(graph_view, {graph_view.GetNode(stack_node_idx)},
TraversalDirection::kFollowOutputs,
DfsPredicates::Advance(is_op_to_traverse),
DfsCallbacks::PreOrder([&](const NodeDef* node) {
const absl::optional<int> idx = graph_view.GetNodeIndex(*node);
fanouts.push_back(idx.value());
}));
for (int fanout_idx : fanouts) {
const NodeDef& fanout_node = graph_view.graph()->node(fanout_idx);
VLOG(1) << "Fanout " << fanout_idx << " : " << fanout_node.name();
if (IsStackPushOp(fanout_node)) {
if (graph_view.HasNode(fanout_node.input(0))) {
const NodeDef* stack_node = graph_view.GetNode(fanout_node.input(0));
while (stack_node->op() != "Stack" && stack_node->op() != "StackV2" &&
stack_node->input_size() > 0 &&
graph_view.HasNode(stack_node->input(0))) {
stack_node = graph_view.GetNode(stack_node->input(0));
}
if (nodes_to_preserve.find(stack_node->name()) ==
nodes_to_preserve.end()) {
nodes_to_convert.push_back(fanout_idx);
}
} else {
nodes_to_convert.push_back(fanout_idx);
}
} else if (IsStackOp(fanout_node) || IsStackCloseOp(fanout_node) ||
op_types_to_traverse.find(fanout_node.op()) !=
op_types_to_traverse.end()) {
continue;
} else if (!IsStackPopOp(fanout_node) ||
(!graph_view.GetFanout(fanout_idx).empty() ||
nodes_to_preserve.find(fanout_node.name()) !=
nodes_to_preserve.end())) {
nodes_to_convert.clear();
break;
}
}
return nodes_to_convert;
}
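// The traversal follows the stack handle's fanout through handle-forwarding
// ops only (Stack/Enter/Switch/Identity and their Ref/_SwitchN variants).
// Pushes become conversion candidates, but only if the stack they ultimately
// target (found by walking back through the forwarding ops) is not preserved.
// Any pop whose result is consumed or preserved, and any other non-forwarding
// consumer of the handle, clears the candidate list: the pushed values are
// then observable and the pushes must stay.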
Status RemoveStackOps(const std::unordered_set<string>& nodes_to_preserve,
GraphDef* optimized_graph) {
NodeMap node_map(optimized_graph);
GraphTopologyView graph_view;
TF_RETURN_IF_ERROR(graph_view.InitializeFromGraph(*optimized_graph));
for (int node_idx = 0; node_idx < optimized_graph->node_size(); ++node_idx) {
if (IsStackOp(optimized_graph->node(node_idx))) {
for (int push_node_idx : GetStackPushNodesToConvert(
graph_view, nodes_to_preserve, node_idx)) {
NodeDef* push_node = optimized_graph->mutable_node(push_node_idx);
VLOG(1) << "Converting " << push_node_idx << " : "
<< push_node->DebugString();
if (push_node->attr().count("swap_memory") != 0) {
push_node->mutable_attr()->erase("swap_memory");
}
push_node->set_op("Identity");
push_node->mutable_input()->SwapElements(0, 1);
const string ctrl_dep = ConstantFolding::AddControlDependency(
push_node->input(1), optimized_graph, &node_map);
push_node->set_input(1, ctrl_dep);
VLOG(1) << "After converting: " << push_node->DebugString();
}
}
}
return absl::OkStatus();
}
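// The conversion itself rewrites
//
//   push = StackPushV2(stack_handle, value)
//
// into
//
//   push = Identity(value, ^stack_handle)
//
// The two inputs are swapped and the (now second) handle input is demoted to
// a control dependency, so consumers of the push's output still see the
// pushed value while the stack write itself disappears.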
bool IsSimpleBinaryOperator(const NodeDef& node) {
return (IsLess(node) || IsLessEqual(node) || IsGreater(node) ||
IsGreaterEqual(node) || IsEqual(node));
}
Status EvaluateBoolOpForConstantOperands(const NodeDef& op_node,
const NodeDef& constant_operand_0,
const NodeDef& constant_operand_1,
DeviceBase* cpu_device,
ResourceMgr* resource_mgr,
bool* value) {
VLOG(4) << "Evaluate bool op: op_node=" << op_node.name()
<< " input0=" << constant_operand_0.name()
<< " input1=" << constant_operand_1.name();
TensorVector inputs;
const TensorProto& raw_val_0 = constant_operand_0.attr().at("value").tensor();
Tensor value_0(raw_val_0.dtype(), raw_val_0.tensor_shape());
CHECK(value_0.FromProto(raw_val_0));
inputs.emplace_back(&value_0);
const TensorProto& raw_val_1 = constant_operand_1.attr().at("value").tensor();
Tensor value_1(raw_val_1.dtype(), raw_val_1.tensor_shape());
CHECK(value_1.FromProto(raw_val_1));
inputs.emplace_back(&value_1);
TensorVector outputs;
TF_RETURN_IF_ERROR(
EvaluateNode(op_node, inputs, cpu_device, resource_mgr, &outputs));
if (outputs.size() != 1 || outputs[0].tensor == nullptr) {
return Status(absl::StatusCode::kInvalidArgument, "Expected one output.");
}
*value = outputs[0].tensor->scalar<bool>()();
delete outputs[0].tensor;
return absl::OkStatus();
}
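// Minimal illustration (hypothetical operands): for a Less node whose inputs
// are Const scalars 0 and 10, this materializes both tensors from their
// protos, runs the Less kernel on the host device, and yields
// *value = (0 < 10) = true. The single output tensor is deleted once the
// scalar has been read out.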
bool IsReallyConstant(const NodeDef& node,
const absl::flat_hash_set<string>& feed_nodes) {
if (!IsConstant(node)) {
return false;
}
return feed_nodes.find(node.name()) == feed_nodes.end();
}
Status CheckForDeadFanout(const MutableGraphView& view,
const NodeDef& switch_node, const NodeMap& node_map,
const absl::flat_hash_set<string>& feed_nodes,
DeviceBase* cpu_device, ResourceMgr* resource_mgr,
bool* has_dead_fanout, int* dead_fanout) {
*has_dead_fanout = false;
GraphView::InputPort switch_loopcond_port(&switch_node, 1);
const NodeDef* switch_predicate =
view.GetRegularFanin(switch_loopcond_port).node;
if (IsReallyConstant(*switch_predicate, feed_nodes)) {
VLOG(3) << "Found switch node with constant predicate:"
<< " switch_node=" << switch_node.name()
<< " switch_predicate=" << switch_predicate->name();
Tensor selector;
CHECK(selector.FromProto(switch_predicate->attr().at("value").tensor()));
*has_dead_fanout = true;
*dead_fanout = selector.scalar<bool>()() ? 0 : 1;
return absl::OkStatus();
}
GraphView::InputPort switch_input_port(&switch_node, 0);
const NodeDef* switch_input = view.GetRegularFanin(switch_input_port).node;
if (!IsMerge(*switch_input) || !IsLoopCond(*switch_predicate)) {
return absl::OkStatus();
}
VLOG(4) << "Try to find a zero iteration while loop:"
<< " switch_node=" << switch_node.name();
NodeDef* switch_ctrl_node = view.GetRegularFanin({switch_predicate, 0}).node;
if (!switch_ctrl_node || !IsSimpleBinaryOperator(*switch_ctrl_node)) {
return absl::OkStatus();
}
NodeDef* merge_node = nullptr;
NodeDef* constant_ctrl_input = nullptr;
int constant_index = 0;
  for (int i = 0; i < switch_ctrl_node->input_size(); ++i) {
const string& input = switch_ctrl_node->input(i);
if (IsControlInput(input)) continue;
NodeDef* node = view.GetNode(switch_ctrl_node->input(i));
if (IsMerge(*node)) {
merge_node = node;
}
if (IsReallyConstant(*node, feed_nodes)) {
constant_ctrl_input = node;
constant_index = i;
}
}
if (merge_node == nullptr || constant_ctrl_input == nullptr) {
return absl::OkStatus();
}
NodeDef* enter_node = nullptr;
NodeDef* constant_init_node = nullptr;
for (const auto& input : merge_node->input()) {
NodeDef* node = node_map.GetNode(input);
if (IsEnter(*node)) {
enter_node = node;
}
if (IsReallyConstant(*node, feed_nodes)) {
constant_init_node = node;
}
}
if (enter_node != nullptr) {
if (constant_init_node != nullptr) return absl::OkStatus();
for (const auto& input : enter_node->input()) {
NodeDef* node = node_map.GetNode(input);
if (IsReallyConstant(*node, feed_nodes)) {
constant_init_node = node;
}
}
}
if (constant_init_node == nullptr) {
return absl::OkStatus();
}
VLOG(4) << "Check if loop will be 0 iterations:"
<< "\n| switch_node : " << switch_node.name()
<< "\n| switch_ctrl_node : " << switch_ctrl_node->name()
<< "\n| merge_node : " << merge_node->name()
<< "\n| constant_ctrl_input: " << constant_ctrl_input->name()
<< "\n| enter_node : "
<< (enter_node ? enter_node->name() : "<n/a>")
<< "\n| constant_init_node : " << constant_init_node->name();
NodeDef* operand_0 =
constant_index ? constant_init_node : constant_ctrl_input;
NodeDef* operand_1 =
constant_index ? constant_ctrl_input : constant_init_node;
bool constant_switch_value;
TF_RETURN_IF_ERROR(EvaluateBoolOpForConstantOperands(
*switch_ctrl_node, *operand_0, *operand_1, cpu_device, resource_mgr,
&constant_switch_value));
if (constant_switch_value == false) {
VLOG(3) << "Remove 0 iteration while loop:"
<< " switch_node=" << switch_node.name();
*has_dead_fanout = true;
*dead_fanout = 1;
} else {
VLOG(4) << "Was not able to prove that loop has 0 iterations.";
}
return absl::OkStatus();
}
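// Two patterns yield a provably dead Switch output (port 0 carries the false
// branch, port 1 the true branch):
//
//  1. The predicate is a non-fed Const: dead_fanout is 0 when the constant is
//     true and 1 when it is false.
//  2. A zero-trip while loop of the shape (sketch, names hypothetical)
//
//       Const(init) --> Enter --> Merge --> Cmp(Merge, Const(limit))
//                                             --> LoopCond --> Switch
//
//     where Cmp is one of the simple binary comparisons above: if evaluating
//     Cmp on the two constants (in the operand order recorded by
//     constant_index) gives false, the loop body never runs and the Switch's
//     true output (port 1) is dead.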
}
LoopOptimizer::LoopOptimizer()
: opt_level_(RewriterConfig::ON),
cpu_device_(nullptr),
options_(LoopOptimizerOptions::Default(RewriterConfig::ON)) {}
LoopOptimizer::LoopOptimizer(RewriterConfig::Toggle opt_level,
DeviceBase* cpu_device)
: opt_level_(opt_level),
cpu_device_(cpu_device),
options_(LoopOptimizerOptions::Default(RewriterConfig::ON)) {
resource_mgr_.reset(new ResourceMgr());
}
Status LoopOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) {
if (!options_.enable_loop_invariant_node_motion &&
!options_.enable_stack_push_removal &&
!options_.enable_dead_branch_removal) {
return errors::Aborted("Nothing to do.");
}
*optimized_graph = item.graph;
if (options_.enable_loop_invariant_node_motion) {
LoopInvariantNodeMotionOptimizer linm_optimizer(optimized_graph);
TF_RETURN_IF_ERROR(linm_optimizer.Optimize());
}
if (options_.enable_stack_push_removal) {
TF_RETURN_IF_ERROR(RemoveStackOps(item.NodesToPreserve(), optimized_graph));
}
if (options_.enable_dead_branch_removal) {
NodeMap node_map(optimized_graph);
absl::flat_hash_set<string> feed_nodes;
for (const auto& feed : item.feed) {
feed_nodes.insert(NodeName(feed.first));
}
TF_RETURN_IF_ERROR(RemoveDeadBranches(item.NodesToPreserve(), node_map,
feed_nodes, optimized_graph));
}
return absl::OkStatus();
}
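// Standalone usage sketch (not taken from the original sources; `item` is
// assumed to be a populated GrapplerItem, and a null cluster is acceptable,
// as in the unit tests below):
//
//   LoopOptimizer optimizer(RewriterConfig::ON, /*cpu_device=*/nullptr);
//   GraphDef optimized;
//   TF_RETURN_IF_ERROR(
//       optimizer.Optimize(/*cluster=*/nullptr, item, &optimized));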
static Status update_identity_node_type(NodeDef* sw_node) {
if (sw_node->has_experimental_type() &&
(sw_node->experimental_type().type_id() == TFT_PRODUCT)) {
FullTypeDef old_t = sw_node->experimental_type();
if (old_t.args_size() != 2) {
return errors::Internal(
"When converting Switch or Merge node '", sw_node->name(),
"' to Identity, full type of original node describes ",
old_t.args_size(), " outputs, not 2.\n", old_t.DebugString());
}
FullTypeDef new_t;
new_t.set_type_id(TFT_PRODUCT);
*(new_t.add_args()) = old_t.args()[0];
*(sw_node->mutable_experimental_type()) = new_t;
}
return absl::OkStatus | #include "tensorflow/core/grappler/optimizers/loop_optimizer.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
class LoopOptimizerTest : public GrapplerTest {
protected:
void AddEnterNode(const string& name, const string& frame,
const bool is_constant, const int piterations,
const std::vector<string>& inputs, GraphDef* graph) const {
std::vector<std::pair<string, AttrValue>> attributes;
AttrValue type;
type.set_type(DT_FLOAT);
attributes.emplace_back("T", type);
AttrValue frame_name;
frame_name.set_s(frame);
attributes.emplace_back("frame_name", frame_name);
AttrValue is_const;
is_const.set_b(is_constant);
attributes.emplace_back("is_constant", is_const);
AttrValue parallel_iterations;
parallel_iterations.set_i(piterations);
attributes.emplace_back("parallel_iterations", parallel_iterations);
AddNode(name, "Enter", inputs, attributes, graph);
}
void AddSimpleNode(const string& name, const string& op,
const std::vector<string>& inputs, GraphDef* graph) const {
std::vector<std::pair<string, AttrValue>> attributes;
AttrValue type;
type.set_type(DT_FLOAT);
attributes.emplace_back("T", type);
AddNode(name, op, inputs, attributes, graph);
}
void EnableOnlyLoopInvariantNodeMotion(LoopOptimizer* optimizer) {
DisableAllStages(optimizer);
optimizer->options_.enable_loop_invariant_node_motion = true;
}
void EnableOnlyStackPushRemoval(LoopOptimizer* optimizer) {
DisableAllStages(optimizer);
optimizer->options_.enable_stack_push_removal = true;
}
private:
void DisableAllStages(LoopOptimizer* optimizer) {
LoopOptimizer::LoopOptimizerOptions options;
options.enable_loop_invariant_node_motion = false;
options.enable_stack_push_removal = false;
optimizer->options_ = options;
}
};
TEST_F(LoopOptimizerTest, Basic) {
GraphDef graph;
AddSimpleNode("In", "Identity", {}, &graph);
AddEnterNode("InvariantEnter", "while/while_context", true, 1, {"In"},
&graph);
AddSimpleNode("InvariantAdd", "Add", {"InvariantEnter", "InvariantEnter"},
&graph);
AddSimpleNode("VariantAdd", "Add", {"InvariantAdd", "Identity"}, &graph);
AddEnterNode("VariantEnter", "while/while_context", false, 1, {"In"}, &graph);
AddSimpleNode("Merge", "Merge", {"VariantEnter", "NextIteration"}, &graph);
AddSimpleNode("Less/y", "Const", {"^Identity"}, &graph);
AddSimpleNode("Less", "Less", {"VariantAdd", "Less/y"}, &graph);
AddSimpleNode("LoopCond", "LoopCond", {"Less"}, &graph);
AddSimpleNode("Switch", "Switch", {"Merge", "LoopCond"}, &graph);
AddSimpleNode("Identity", "Identity", {"Switch:1"}, &graph);
AddSimpleNode("NextIteration", "NextIteration", {"VariantAdd"}, &graph);
AddSimpleNode("Exit", "Exit", {"Switch"}, &graph);
AddSimpleNode("Out", "Identity", {"Exit"}, &graph);
GrapplerItem item;
item.graph = graph;
LoopOptimizer optimizer;
EnableOnlyLoopInvariantNodeMotion(&optimizer);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
{
Status status;
utils::GraphView view(&graph, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 1);
const auto* invariant_add_node = view.GetNode("InvariantAdd");
ASSERT_NE(invariant_add_node, nullptr);
const auto* invariant_add_node_def = invariant_add_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_node_def).size(), 1);
EXPECT_EQ(frames.Frames(*invariant_add_node_def).back(), 0);
const auto* variant_add_node = view.GetNode("VariantAdd");
ASSERT_NE(variant_add_node, nullptr);
const auto* variant_add_node_def = variant_add_node->node();
ASSERT_EQ(frames.Frames(*variant_add_node_def).size(), 1);
EXPECT_EQ(frames.Frames(*variant_add_node_def).back(), 0);
}
{
Status status;
utils::GraphView view(&output, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 1);
const auto* invariant_add_node = view.GetNode("InvariantAdd");
ASSERT_NE(invariant_add_node, nullptr);
const auto* invariant_add_node_def = invariant_add_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_node_def).size(), 0);
const auto* variant_add_node = view.GetNode("VariantAdd");
ASSERT_NE(variant_add_node, nullptr);
const auto* variant_add_node_def = variant_add_node->node();
ASSERT_EQ(frames.Frames(*variant_add_node_def).size(), 1);
EXPECT_EQ(frames.Frames(*variant_add_node_def).back(), 0);
}
}
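// A minimal sketch (not part of the original suite) that exercises only the
// stack push removal stage; the graph and node names here are hypothetical.
// The push targets a stack that is never popped, so the StackPushV2 should be
// rewritten into an Identity of the pushed value with a control dependency on
// the stack handle.
TEST_F(LoopOptimizerTest, StackPushRemovalSketch) {
  GraphDef graph;
  AddSimpleNode("In", "Identity", {}, &graph);
  AddSimpleNode("Stack", "StackV2", {}, &graph);
  AddSimpleNode("Push", "StackPushV2", {"Stack", "In"}, &graph);
  AddSimpleNode("Out", "Identity", {"In"}, &graph);
  GrapplerItem item;
  item.graph = graph;
  item.fetch = {"Out"};
  LoopOptimizer optimizer;
  EnableOnlyStackPushRemoval(&optimizer);
  GraphDef output;
  TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
  for (const NodeDef& node : output.node()) {
    if (node.name() == "Push") {
      // The stack write is gone; the value is forwarded, and the handle
      // survives only as a control dependency.
      EXPECT_EQ(node.op(), "Identity");
      ASSERT_EQ(node.input_size(), 2);
      EXPECT_EQ(node.input(0), "In");
      EXPECT_EQ(node.input(1), "^Stack");
    }
  }
}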
TEST_F(LoopOptimizerTest, Const) {
GraphDef graph;
AddSimpleNode("In", "Identity", {}, &graph);
AddEnterNode("InvariantEnter", "while/while_context", true, 1, {"In"},
&graph);
AddSimpleNode("Const", "Const", {"^Identity"}, &graph);
AddSimpleNode("InvariantAdd", "Add", {"InvariantEnter", "Const"}, &graph);
AddSimpleNode("VariantAdd", "Add", {"InvariantAdd", "Identity"}, &graph);
AddEnterNode("VariantEnter", "while/while_context", false, 1, {"In"}, &graph);
AddSimpleNode("Merge", "Merge", {"VariantEnter", "NextIteration"}, &graph);
AddSimpleNode("Less/y", "Const", {"^Identity"}, &graph);
AddSimpleNode("Less", "Less", {"VariantAdd", "Less/y"}, &graph);
AddSimpleNode("LoopCond", "LoopCond", {"Less"}, &graph);
AddSimpleNode("Switch", "Switch", {"Merge", "LoopCond"}, &graph);
AddSimpleNode("Identity", "Identity", {"Switch:1"}, &graph);
AddSimpleNode("NextIteration", "NextIteration", {"VariantAdd"}, &graph);
AddSimpleNode("Exit", "Exit", {"Switch"}, &graph);
AddSimpleNode("Out", "Identity", {"Exit"}, &graph);
GrapplerItem item;
item.graph = graph;
LoopOptimizer optimizer;
EnableOnlyLoopInvariantNodeMotion(&optimizer);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
{
Status status;
utils::GraphView view(&graph, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 1);
const auto* invariant_add_node = view.GetNode("InvariantAdd");
ASSERT_NE(invariant_add_node, nullptr);
const auto* invariant_add_node_def = invariant_add_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_node_def).size(), 1);
EXPECT_EQ(frames.Frames(*invariant_add_node_def).back(), 0);
const auto* const_node = view.GetNode("Const");
ASSERT_NE(const_node, nullptr);
const auto* const_node_node_def = const_node->node();
ASSERT_EQ(frames.Frames(*const_node_node_def).size(), 1);
EXPECT_EQ(frames.Frames(*const_node_node_def).back(), 0);
}
{
Status status;
utils::GraphView view(&output, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 1);
const auto* invariant_add_node = view.GetNode("InvariantAdd");
ASSERT_NE(invariant_add_node, nullptr);
const auto* invariant_add_node_def = invariant_add_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_node_def).size(), 0);
const auto* const_node = view.GetNode("Const");
ASSERT_NE(const_node, nullptr);
const auto* const_node_node_def = const_node->node();
ASSERT_EQ(frames.Frames(*const_node_node_def).size(), 0);
}
}
TEST_F(LoopOptimizerTest, ControlOutput) {
GraphDef graph;
AddSimpleNode("In", "Identity", {}, &graph);
AddEnterNode("InvariantEnter", "while/while_context", true, 1, {"In"},
&graph);
AddSimpleNode("InvariantAdd", "Add", {"InvariantEnter", "InvariantEnter"},
&graph);
AddSimpleNode("VariantAdd", "Add", {"InvariantAdd", "Identity"}, &graph);
AddEnterNode("VariantEnter", "while/while_context", false, 1, {"In"}, &graph);
AddSimpleNode("Merge", "Merge", {"VariantEnter", "NextIteration"}, &graph);
AddSimpleNode("Less/y", "Const", {"^Identity"}, &graph);
AddSimpleNode("Less", "Less", {"VariantAdd", "Less/y", "^InvariantAdd"},
&graph);
AddSimpleNode("LoopCond", "LoopCond", {"Less"}, &graph);
AddSimpleNode("Switch", "Switch", {"Merge", "LoopCond"}, &graph);
AddSimpleNode("Identity", "Identity", {"Switch:1"}, &graph);
AddSimpleNode("NextIteration", "NextIteration", {"VariantAdd"}, &graph);
AddSimpleNode("Exit", "Exit", {"Switch"}, &graph);
AddSimpleNode("Out", "Identity", {"Exit"}, &graph);
GrapplerItem item;
item.graph = graph;
LoopOptimizer optimizer;
EnableOnlyLoopInvariantNodeMotion(&optimizer);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
{
Status status;
utils::GraphView view(&graph, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 1);
const auto* invariant_add_node = view.GetNode("InvariantAdd");
ASSERT_NE(invariant_add_node, nullptr);
const auto* invariant_add_node_def = invariant_add_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_node_def).size(), 1);
EXPECT_EQ(frames.Frames(*invariant_add_node_def).back(), 0);
}
{
Status status;
utils::GraphView view(&output, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 1);
const auto* invariant_add_node = view.GetNode("InvariantAdd");
ASSERT_NE(invariant_add_node, nullptr);
const auto* invariant_add_node_def = invariant_add_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_node_def).size(), 1);
EXPECT_EQ(frames.Frames(*invariant_add_node_def).back(), 0);
}
}
TEST_F(LoopOptimizerTest, NestedLoop1) {
GraphDef graph;
AddSimpleNode("In", "Identity", {}, &graph);
AddEnterNode("InvariantEnter", "while/while_context", true, 1, {"In"},
&graph);
AddSimpleNode("InvariantAdd", "Add", {"InvariantEnter", "InvariantEnter"},
&graph);
AddSimpleNode("VariantAdd", "Add", {"InvariantAdd", "Identity"}, &graph);
AddEnterNode("VariantEnter", "while/while_context", false, 1, {"In"}, &graph);
AddSimpleNode("Merge", "Merge", {"VariantEnter", "NextIteration"}, &graph);
AddSimpleNode("Less/y", "Const", {"^Identity"}, &graph);
AddSimpleNode("Less", "Less", {"Exit2", "Less/y"}, &graph);
AddSimpleNode("LoopCond", "LoopCond", {"Less"}, &graph);
AddSimpleNode("Switch", "Switch", {"Merge", "LoopCond"}, &graph);
AddSimpleNode("Identity", "Identity", {"Switch:1"}, &graph);
AddSimpleNode("NextIteration", "NextIteration", {"Exit2"}, &graph);
AddSimpleNode("Exit", "Exit", {"Switch"}, &graph);
AddSimpleNode("Out", "Identity", {"Exit"}, &graph);
AddEnterNode("InvariantEnter2", "while/while/while_context", true, 1,
{"VariantAdd"}, &graph);
AddSimpleNode("InvariantAdd2", "Add", {"InvariantEnter2", "InvariantEnter2"},
&graph);
AddSimpleNode("VariantAdd2", "Add", {"InvariantAdd2", "Identity2"}, &graph);
AddEnterNode("VariantEnter2", "while/while/while_context", false, 1,
{"VariantEnter"}, &graph);
AddSimpleNode("Merge2", "Merge", {"VariantEnter2", "NextIteration2"}, &graph);
AddSimpleNode("Less2/y", "Const", {"^Identity2"}, &graph);
AddSimpleNode("Less2", "Less", {"VariantAdd2", "Less2/y"}, &graph);
AddSimpleNode("LoopCond2", "LoopCond", {"Less2"}, &graph);
AddSimpleNode("Switch2", "Switch", {"Merge2", "LoopCond2"}, &graph);
AddSimpleNode("Identity2", "Identity", {"Switch2:1"}, &graph);
AddSimpleNode("NextIteration2", "NextIteration", {"VariantAdd2"}, &graph);
AddSimpleNode("Exit2", "Exit", {"Switch2"}, &graph);
GrapplerItem item;
item.graph = graph;
LoopOptimizer optimizer;
EnableOnlyLoopInvariantNodeMotion(&optimizer);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
{
Status status;
utils::GraphView view(&graph, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 2);
const auto* invariant_add_2_node = view.GetNode("InvariantAdd2");
ASSERT_NE(invariant_add_2_node, nullptr);
const auto* invariant_add_2_node_def = invariant_add_2_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_2_node_def).size(), 2);
EXPECT_EQ(frames.Frames(*invariant_add_2_node_def).back(), 1);
const auto* variant_add_2_node = view.GetNode("VariantAdd2");
ASSERT_NE(variant_add_2_node, nullptr);
const auto* variant_add_2_node_def = variant_add_2_node->node();
ASSERT_EQ(frames.Frames(*variant_add_2_node_def).size(), 2);
EXPECT_EQ(frames.Frames(*variant_add_2_node_def).back(), 1);
const auto* invariant_add_node = view.GetNode("InvariantAdd");
ASSERT_NE(invariant_add_node, nullptr);
const auto* invariant_add_node_def = invariant_add_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_node_def).size(), 1);
EXPECT_EQ(frames.Frames(*invariant_add_node_def).back(), 0);
}
{
Status status;
utils::GraphView view(&output, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 2);
const auto* invariant_add_2_node = view.GetNode("InvariantAdd2");
ASSERT_NE(invariant_add_2_node, nullptr);
const auto* invariant_add_2_node_def = invariant_add_2_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_2_node_def).size(), 1);
EXPECT_EQ(frames.Frames(*invariant_add_2_node_def).back(), 0);
const auto* variant_add_2_node = view.GetNode("VariantAdd2");
ASSERT_NE(variant_add_2_node, nullptr);
const auto* variant_add_2_node_def = variant_add_2_node->node();
ASSERT_EQ(frames.Frames(*variant_add_2_node_def).size(), 2);
EXPECT_EQ(frames.Frames(*variant_add_2_node_def).back(), 1);
const auto* invariant_add_node = view.GetNode("InvariantAdd");
ASSERT_NE(invariant_add_node, nullptr);
const auto* invariant_add_node_def = invariant_add_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_node_def).size(), 0);
}
}
TEST_F(LoopOptimizerTest, NestedLoop2) {
GraphDef graph;
AddSimpleNode("In", "Identity", {}, &graph);
AddEnterNode("InvariantEnter", "while/while_context", true, 1, {"In"},
&graph);
AddSimpleNode("InvariantAdd", "Add", {"InvariantEnter", "InvariantEnter"},
&graph);
AddSimpleNode("VariantAdd", "Add", {"InvariantAdd", "Identity"}, &graph);
AddEnterNode("VariantEnter", "while/while_context", false, 1, {"In"}, &graph);
AddSimpleNode("Merge", "Merge", {"VariantEnter", "NextIteration"}, &graph);
AddSimpleNode("Less/y", "Const", {"^Identity"}, &graph);
AddSimpleNode("Less", "Less", {"Exit2", "Less/y"}, &graph);
AddSimpleNode("LoopCond", "LoopCond", {"Less"}, &graph);
AddSimpleNode("Switch", "Switch", {"Merge", "LoopCond"}, &graph);
AddSimpleNode("Identity", "Identity", {"Switch:1"}, &graph);
AddSimpleNode("NextIteration", "NextIteration", {"Exit2"}, &graph);
AddSimpleNode("Exit", "Exit", {"Switch"}, &graph);
AddSimpleNode("Out", "Identity", {"Exit"}, &graph);
AddEnterNode("InvariantEnter2", "while/while/while_context", true, 1,
{"InvariantAdd"}, &graph);
AddSimpleNode("InvariantAdd2", "Add", {"InvariantEnter2", "InvariantEnter2"},
&graph);
AddSimpleNode("VariantAdd2", "Add", {"InvariantAdd2", "Identity2"}, &graph);
AddEnterNode("VariantEnter2", "while/while/while_context", false, 1,
{"VariantEnter"}, &graph);
AddSimpleNode("Merge2", "Merge", {"VariantEnter2", "NextIteration2"}, &graph);
AddSimpleNode("Less2/y", "Const", {"^Identity2"}, &graph);
AddSimpleNode("Less2", "Less", {"VariantAdd2", "Less2/y"}, &graph);
AddSimpleNode("LoopCond2", "LoopCond", {"Less2"}, &graph);
AddSimpleNode("Switch2", "Switch", {"Merge2", "LoopCond2"}, &graph);
AddSimpleNode("Identity2", "Identity", {"Switch2:1"}, &graph);
AddSimpleNode("NextIteration2", "NextIteration", {"VariantAdd2"}, &graph);
AddSimpleNode("Exit2", "Exit", {"Switch2"}, &graph);
GrapplerItem item;
item.graph = graph;
LoopOptimizer optimizer;
EnableOnlyLoopInvariantNodeMotion(&optimizer);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
{
Status status;
utils::GraphView view(&graph, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 2);
const auto* invariant_add_2_node = view.GetNode("InvariantAdd2");
ASSERT_NE(invariant_add_2_node, nullptr);
const auto* invariant_add_2_node_def = invariant_add_2_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_2_node_def).size(), 2);
EXPECT_EQ(frames.Frames(*invariant_add_2_node_def).back(), 1);
const auto* variant_add_2_node = view.GetNode("VariantAdd2");
ASSERT_NE(variant_add_2_node, nullptr);
const auto* variant_add_2_node_def = variant_add_2_node->node();
ASSERT_EQ(frames.Frames(*variant_add_2_node_def).size(), 2);
EXPECT_EQ(frames.Frames(*variant_add_2_node_def).back(), 1);
}
{
Status status;
utils::GraphView view(&output, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 2);
const auto* invariant_add_2_node = view.GetNode("InvariantAdd2");
ASSERT_NE(invariant_add_2_node, nullptr);
const auto* invariant_add_2_node_def = invariant_add_2_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_2_node_def).size(), 0);
const auto* variant_add_2_node = view.GetNode("VariantAdd2");
ASSERT_NE(variant_add_2_node, nullptr);
const auto* variant_add_2_node_def = variant_add_2_node->node();
ASSERT_EQ(frames.Frames(*variant_add_2_node_def).size(), 2);
EXPECT_EQ(frames.Frames(*variant_add_2_node_def).back(), 1);
}
}
TEST_F(LoopOptimizerTest, NestedLoopConst1) {
GraphDef graph;
AddSimpleNode("In", "Identity", {}, &graph);
AddEnterNode("InvariantEnter", "while/while_context", true, 1, {"In"},
&graph);
AddSimpleNode("InvariantAdd", "Add", {"InvariantEnter", "InvariantEnter"},
&graph);
AddSimpleNode("VariantAdd", "Add", {"InvariantAdd", "Identity"}, &graph);
AddEnterNode("VariantEnter", "while/while_context", false, 1, {"In"}, &graph);
AddSimpleNode("Merge", "Merge", {"VariantEnter", "NextIteration"}, &graph);
AddSimpleNode("Less/y", "Const", {"^Identity"}, &graph);
AddSimpleNode("Less", "Less", {"Exit2", "Less/y"}, &graph);
AddSimpleNode("LoopCond", "LoopCond", {"Less"}, &graph);
AddSimpleNode("Switch", "Switch", {"Merge", "LoopCond"}, &graph);
AddSimpleNode("Identity", "Identity", {"Switch:1"}, &graph);
AddSimpleNode("NextIteration", "NextIteration", {"Exit2"}, &graph);
AddSimpleNode("Exit", "Exit", {"Switch"}, &graph);
AddSimpleNode("Out", "Identity", {"Exit"}, &graph);
AddEnterNode("InvariantEnter2", "while/while/while_context", true, 1,
{"VariantAdd"}, &graph);
AddSimpleNode("Const2", "Const", {"^Identity2"}, &graph);
AddSimpleNode("InvariantAdd2", "Add", {"InvariantEnter2", "Const2"}, &graph);
AddSimpleNode("VariantAdd2", "Add", {"InvariantAdd2", "Identity2"}, &graph);
AddEnterNode("VariantEnter2", "while/while/while_context", false, 1,
{"VariantEnter"}, &graph);
AddSimpleNode("Merge2", "Merge", {"VariantEnter2", "NextIteration2"}, &graph);
AddSimpleNode("Less2/y", "Const", {"^Identity2"}, &graph);
AddSimpleNode("Less2", "Less", {"VariantAdd2", "Less2/y"}, &graph);
AddSimpleNode("LoopCond2", "LoopCond", {"Less2"}, &graph);
AddSimpleNode("Switch2", "Switch", {"Merge2", "LoopCond2"}, &graph);
AddSimpleNode("Identity2", "Identity", {"Switch2:1"}, &graph);
AddSimpleNode("NextIteration2", "NextIteration", {"VariantAdd2"}, &graph);
AddSimpleNode("Exit2", "Exit", {"Switch2"}, &graph);
GrapplerItem item;
item.graph = graph;
LoopOptimizer optimizer;
EnableOnlyLoopInvariantNodeMotion(&optimizer);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
{
Status status;
utils::GraphView view(&graph, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 2);
const auto* invariant_add_2_node = view.GetNode("InvariantAdd2");
ASSERT_NE(invariant_add_2_node, nullptr);
const auto* invariant_add_2_node_def = invariant_add_2_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_2_node_def).size(), 2);
EXPECT_EQ(frames.Frames(*invariant_add_2_node_def).back(), 1);
const auto* const_2_node = view.GetNode("Const2");
ASSERT_NE(const_2_node, nullptr);
const auto* const_2_node_def = const_2_node->node();
ASSERT_EQ(frames.Frames(*const_2_node_def).size(), 2);
EXPECT_EQ(frames.Frames(*const_2_node_def).back(), 1);
}
{
Status status;
utils::GraphView view(&output, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 2);
const auto* invariant_add_2_node = view.GetNode("InvariantAdd2");
ASSERT_NE(invariant_add_2_node, nullptr);
const auto* invariant_add_2_node_def = invariant_add_2_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_2_node_def).size(), 1);
EXPECT_EQ(frames.Frames(*invariant_add_2_node_def).back(), 0);
const auto* const_2_node = view.GetNode("Const2");
ASSERT_NE(const_2_node, nullptr);
const auto* const_2_node_def = const_2_node->node();
ASSERT_EQ(frames.Frames(*const_2_node_def).size(), 1);
EXPECT_EQ(frames.Frames(*const_2_node_def).back(), 0);
}
}
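// NestedLoopConst2 differs from NestedLoopConst1 only in that
// "InvariantEnter2" consumes "InvariantAdd" instead of "VariantAdd", which
// makes the hoisted nodes invariant to the outer loop as well; the
// assertions below expect "InvariantAdd2" and "Const2" to end up outside
// both frames (frame set size 0).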
TEST_F(LoopOptimizerTest, NestedLoopConst2) {
GraphDef graph;
AddSimpleNode("In", "Identity", {}, &graph);
AddEnterNode("InvariantEnter", "while/while_context", true, 1, {"In"},
&graph);
AddSimpleNode("InvariantAdd", "Add", {"InvariantEnter", "InvariantEnter"},
&graph);
AddSimpleNode("VariantAdd", "Add", {"InvariantAdd", "Identity"}, &graph);
AddEnterNode("VariantEnter", "while/while_context", false, 1, {"In"}, &graph);
AddSimpleNode("Merge", "Merge", {"VariantEnter", "NextIteration"}, &graph);
AddSimpleNode("Less/y", "Const", {"^Identity"}, &graph);
AddSimpleNode("Less", "Less", {"Exit2", "Less/y"}, &graph);
AddSimpleNode("LoopCond", "LoopCond", {"Less"}, &graph);
AddSimpleNode("Switch", "Switch", {"Merge", "LoopCond"}, &graph);
AddSimpleNode("Identity", "Identity", {"Switch:1"}, &graph);
AddSimpleNode("NextIteration", "NextIteration", {"Exit2"}, &graph);
AddSimpleNode("Exit", "Exit", {"Switch"}, &graph);
AddSimpleNode("Out", "Identity", {"Exit"}, &graph);
AddEnterNode("InvariantEnter2", "while/while/while_context", true, 1,
{"InvariantAdd"}, &graph);
AddSimpleNode("Const2", "Const", {"^Identity2"}, &graph);
AddSimpleNode("InvariantAdd2", "Add", {"InvariantEnter2", "Const2"}, &graph);
AddSimpleNode("VariantAdd2", "Add", {"InvariantAdd2", "Identity2"}, &graph);
AddEnterNode("VariantEnter2", "while/while/while_context", false, 1,
{"VariantEnter"}, &graph);
AddSimpleNode("Merge2", "Merge", {"VariantEnter2", "NextIteration2"}, &graph);
AddSimpleNode("Less2/y", "Const", {"^Identity2"}, &graph);
AddSimpleNode("Less2", "Less", {"VariantAdd2", "Less2/y"}, &graph);
AddSimpleNode("LoopCond2", "LoopCond", {"Less2"}, &graph);
AddSimpleNode("Switch2", "Switch", {"Merge2", "LoopCond2"}, &graph);
AddSimpleNode("Identity2", "Identity", {"Switch2:1"}, &graph);
AddSimpleNode("NextIteration2", "NextIteration", {"VariantAdd2"}, &graph);
AddSimpleNode("Exit2", "Exit", {"Switch2"}, &graph);
GrapplerItem item;
item.graph = graph;
LoopOptimizer optimizer;
EnableOnlyLoopInvariantNodeMotion(&optimizer);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
{
Status status;
utils::GraphView view(&graph, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 2);
const auto* invariant_add_2_node = view.GetNode("InvariantAdd2");
ASSERT_NE(invariant_add_2_node, nullptr);
const auto* invariant_add_2_node_def = invariant_add_2_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_2_node_def).size(), 2);
EXPECT_EQ(frames.Frames(*invariant_add_2_node_def).back(), 1);
const auto* const_2_node = view.GetNode("Const2");
ASSERT_NE(const_2_node, nullptr);
const auto* const_2_node_def = const_2_node->node();
ASSERT_EQ(frames.Frames(*const_2_node_def).size(), 2);
EXPECT_EQ(frames.Frames(*const_2_node_def).back(), 1);
}
{
Status status;
utils::GraphView view(&output, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 2);
const auto* invariant_add_2_node = view.GetNode("InvariantAdd2");
ASSERT_NE(invariant_add_2_node, nullptr);
const auto* invariant_add_2_node_def = invariant_add_2_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_2_node_def).size(), 0);
const auto* const_2_node = view.GetNode("Const2");
ASSERT_NE(const_2_node, nullptr);
const auto* const_2_node_def = const_2_node->node();
ASSERT_EQ(frames.Frames(*const_2_node_def).size(), 0);
}
}
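// Asserts that two graphs are identical node by node. This relies on the
// optimizer preserving node order, so it is only used to verify that a pass
// left the graph untouched.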
void VerifyGraphsEqual(const GraphDef& original_graph,
const GraphDef& optimized_graph, const string& func) {
EXPECT_EQ(original_graph.node_size(), optimized_graph.node_size()) << func;
for (int i = 0; i < original_graph.node_size(); ++i) {
const NodeDef& original = original_graph.node(i);
const NodeDef& optimized = optimized_graph.node(i);
EXPECT_EQ(optimized.name(), original.name()) << func;
EXPECT_EQ(optimized.op(), original.op()) << func;
ASSERT_EQ(optimized.input_size(), original.input_size()) << func;
for (int j = 0; j < original.input_size(); ++j) {
EXPECT_EQ(optimized.input(j), original.input(j)) << func;
}
}
}
TEST_F(LoopOptimizerTest, NoOp) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"CPU:0"});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
LoopOptimizer optimizer;
EnableOnlyStackPushRemoval(&optimizer);
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
VerifyGraphsEqual(item.graph, output, __FUNCTION__);
}
TEST_F(LoopOptimizerTest, RemovePushNoOp) {
GrapplerItem item;
GraphDef& graph = item.graph;
AddSimpleNode("c", "Const", {}, &graph);
AddSimpleNode("stack1", "StackV2", {}, &graph);
AddSimpleNode("push1", "StackPushV2", {"stack1", "c"}, &graph);
AddSimpleNode("pop1", "StackPopV2", {"stack1"}, &graph);
AddSimpleNode("id1", "Identity", {"pop1"}, &graph);
AddSimpleNode("stack2", "StackV2", {}, &graph);
AddEnterNode("enter2_c", "frame_name", false, 1, {"c"}, &graph);
AddEnterNode("enter2_stack2", "frame_name", false, 1, {"stack2"}, &graph);
AddSimpleNode("push2", "StackPushV2", {"enter2_stack2", "enter2_c"}, &graph);
AddSimpleNode("pop2", "StackPopV2", {"enter2_stack2"}, &graph);
AddSimpleNode("id2", "Identity", {"pop2"}, &graph);
AddSimpleNode("stack3", "StackV2", {}, &graph);
AddSimpleNode("push3", "StackPushV2", {"stack3", "c"}, &graph);
AddSimpleNode("stop", "StopGradient", {"stack3"}, &graph);
LoopOptimizer optimizer;
EnableOnlyStackPushRemoval(&optimizer);
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
VerifyGraphsEqual(item.graph, output, __FUNCTION__);
}
TEST_F(LoopOptimizerTest, RemovePushNoPopButStackLives) {
GrapplerItem item;
GraphDef& graph = item.graph;
AddSimpleNode("c", "Const", {}, &graph);
AddSimpleNode("stack1", "StackV2", {}, &graph);
AddSimpleNode("push1", "StackPushV2", {"stack1", "c"}, &graph);
AddSimpleNode("stack2", "StackV2", {}, &graph);
AddEnterNode("enter2_c", "frame_name", false, 1, {"c"}, &graph);
AddEnterNode("enter2_stack2", "frame_name", false, 1, {"stack2"}, &graph);
AddSimpleNode("push2", "StackPushV2", {"enter2_stack2", "enter2_c"}, &graph);
item.keep_ops.push_back("stack1");
item.keep_ops.push_back("stack2");
LoopOptimizer optimizer;
EnableOnlyStackPushRemoval(&optimizer);
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
VerifyGraphsEqual(item.graph, output, __FUNCTION__);
}
TEST_F(LoopOptimizerTest, RemovePushWithoutMatchingPop) {
GrapplerItem item;
GraphDef& graph = item.graph;
AddSimpleNode("c", "Const", {}, &graph);
AddSimpleNode("stack1", "StackV2", {}, &graph);
AddSimpleNode("push1", "StackPushV2", {"stack1", "c"}, &graph);
AddSimpleNode("stack2", "StackV2", {}, &graph);
AddEnterNode("enter_c", "frame_name", false, 1, {"c"}, &graph);
AddEnterNode("enter_stack2", "frame_name", false, 1, {"stack2"}, &graph);
AddSimpleNode("push2", "StackPushV2", {"enter_stack2", "enter_c"}, &graph);
AddSimpleNode("stack3", "StackV2", {}, &graph);
AddSimpleNode("push3", "StackPushV2", {"stack3", "c"}, &graph);
AddSimpleNode("pop3", "StackPopV2", {"stack3"}, &graph);
AddSimpleNode("stack4", "StackV2", {}, &graph);
AddSimpleNode("push4", "StackPushV2", {"stack4", "c"}, &graph);
AddSimpleNode("pop4", "StackPopV2", {"stack4"}, &graph);
item.fetch.push_back("pop4");
LoopOptimizer optimizer;
EnableOnlyStackPushRemoval(&optimizer);
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(output.node_size(), 13);
for (int i = 0; i < output.node_size(); ++i) {
const NodeDef& node = output.node(i);
if (node.name() == "push1") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "c");
EXPECT_EQ(node.input(1), "^stack1");
} else if (node.name() == "push2") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "enter_c");
EXPECT_EQ(node.input(1), "^enter_stack2");
} else if (node.name() == "push3") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "c");
EXPECT_EQ(node.input(1), "^stack3");
} else {
const NodeDef& orig_node = item.graph.node(i);
EXPECT_EQ(node.ShortDebugString(), orig_node.ShortDebugString());
}
}
}
TEST_F(LoopOptimizerTest, RemoveDeadBranchesConstantCondition) {
Scope scope = Scope::NewRootScope();
Output v_in = ops::Const<f |
1,394 | cpp | tensorflow/tensorflow | static_schedule | tensorflow/core/grappler/optimizers/static_schedule.cc | tensorflow/core/grappler/optimizers/static_schedule_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_STATIC_SCHEDULE_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_STATIC_SCHEDULE_H_
#include <unordered_map>
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/costs/cost_estimator.h"
#include "tensorflow/core/grappler/grappler_item.h"
namespace tensorflow {
namespace grappler {
Status EstimateEarliestExecutionTimes(
const GrapplerItem& item, const Cluster* cluster,
std::unordered_map<const NodeDef*, Costs::NanoSeconds>* completion_times);
Status EstimateRequiredTimes(
const GrapplerItem& item, const Cluster* cluster,
const std::unordered_map<const NodeDef*, Costs::NanoSeconds>&
execution_times,
std::unordered_map<const NodeDef*, Costs::NanoSeconds>* required_times);
}
}
#endif
#include "tensorflow/core/grappler/optimizers/static_schedule.h"
#include <deque>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/costs/op_level_cost_estimator.h"
#include "tensorflow/core/grappler/costs/virtual_placer.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/strcat.h"
namespace tensorflow {
namespace grappler {
static Costs::NanoSeconds PredictExecutionTime(
const GraphProperties& properties, const OpLevelCostEstimator& estimator,
const VirtualPlacer& placer, const NodeDef& node) {
OpContext op_context;
op_context.op_info.set_op(node.op());
*op_context.op_info.mutable_attr() = node.attr();
std::vector<OpInfo::TensorProperties> inputs =
properties.GetInputProperties(node.name());
for (auto& input : inputs) {
op_context.op_info.add_inputs()->Swap(&input);
}
std::vector<OpInfo::TensorProperties> outputs =
properties.GetOutputProperties(node.name());
for (auto& output : outputs) {
op_context.op_info.add_outputs()->Swap(&output);
}
DeviceProperties device = placer.get_device(node);
op_context.op_info.mutable_device()->Swap(&device);
Costs::NanoSeconds estimate =
estimator.PredictCosts(op_context).execution_time;
return std::max(estimate, Costs::NanoSeconds(1));
}
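// Computes the earliest completion time for every node with a Kahn-style
// topological sweep: nodes without inputs become ready at time 0, and a
// node's completion time is its ready time plus the cost predicted above.
// Merge nodes are scheduled after a single pending input, mirroring
// TensorFlow's Merge semantics. Sketch: for a chain a -> b -> c with unit
// costs, the computed completion times would be 1, 2 and 3.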
Status EstimateEarliestExecutionTimes(
const GrapplerItem& item, const Cluster* cluster,
std::unordered_map<const NodeDef*, Costs::NanoSeconds>* completion_times) {
std::unordered_map<string, const NodeDef*> name_map;
std::unordered_map<const NodeDef*, int> pending_inputs;
std::deque<const NodeDef*> ready_nodes;
for (const NodeDef& node : item.graph.node()) {
name_map[node.name()] = &node;
if (node.input_size() == 0) {
ready_nodes.push_back(&node);
(*completion_times)[&node] = 0;
} else if (IsMerge(node)) {
pending_inputs[&node] = 1;
} else {
pending_inputs[&node] = node.input_size();
}
}
std::unordered_map<const NodeDef*, std::vector<const NodeDef*>> fanouts;
for (const NodeDef& node : item.graph.node()) {
for (const string& input : node.input()) {
string node_name = NodeName(input);
auto it = name_map.find(node_name);
if (it == name_map.end()) {
return errors::InvalidArgument(
strings::StrCat("Unknown input node ", input));
}
const NodeDef* fanin = it->second;
fanouts[fanin].push_back(&node);
}
}
name_map.clear();
GraphProperties properties(item);
TF_RETURN_IF_ERROR(properties.InferStatically(
/*assume_valid_feeds=*/true, false, false));
OpLevelCostEstimator estimator;
VirtualPlacer placer(cluster->GetDevices());
while (!ready_nodes.empty()) {
const NodeDef* node = ready_nodes.front();
ready_nodes.pop_front();
Costs::NanoSeconds execution_time =
PredictExecutionTime(properties, estimator, placer, *node);
Costs::NanoSeconds completion_time =
execution_time + (*completion_times)[node];
(*completion_times)[node] = completion_time;
for (const NodeDef* fanout : fanouts[node]) {
int pending = pending_inputs[fanout];
if (pending == 0) {
continue;
} else if (pending == 1) {
ready_nodes.push_back(fanout);
}
pending_inputs[fanout]--;
Costs::NanoSeconds ready_time =
std::max(completion_time, (*completion_times)[fanout]);
(*completion_times)[fanout] = ready_time;
}
}
return absl::OkStatus();
}
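// Reverse counterpart of the sweep above: starting from nodes with no
// fanouts (seeded from `execution_times`), it walks the graph backwards and
// sets each fanin's required time to
//   min over consumers of (required_time(consumer) - predicted_cost(consumer)),
// i.e. the latest moment a producer may complete without delaying anyone.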
Status EstimateRequiredTimes(
const GrapplerItem& item, const Cluster* cluster,
const std::unordered_map<const NodeDef*, Costs::NanoSeconds>&
execution_times,
std::unordered_map<const NodeDef*, Costs::NanoSeconds>* required_times) {
std::unordered_map<string, const NodeDef*> name_map;
for (const NodeDef& node : item.graph.node()) {
name_map[node.name()] = &node;
(*required_times)[&node] = Costs::NanoSeconds::max();
}
std::unordered_map<const NodeDef*, int> pending_fanouts;
for (const NodeDef& node : item.graph.node()) {
for (const string& input : node.input()) {
string node_name = NodeName(input);
auto it = name_map.find(node_name);
if (it == name_map.end()) {
return errors::InvalidArgument(
strings::StrCat("Unknown input node ", input));
}
const NodeDef* fanin = it->second;
pending_fanouts[fanin] += 1;
}
}
std::deque<const NodeDef*> ready_nodes;
for (const NodeDef& node : item.graph.node()) {
if (pending_fanouts[&node] == 0) {
auto it = execution_times.find(&node);
if (it != execution_times.end()) {
(*required_times)[&node] = it->second;
}
ready_nodes.push_back(&node);
}
}
GraphProperties properties(item);
TF_RETURN_IF_ERROR(properties.InferStatically(
/*assume_valid_feeds=*/true, false, false));
OpLevelCostEstimator estimator;
VirtualPlacer placer(cluster->GetDevices());
while (!ready_nodes.empty()) {
const NodeDef* node = ready_nodes.front();
ready_nodes.pop_front();
Costs::NanoSeconds execution_time =
PredictExecutionTime(properties, estimator, placer, *node);
Costs::NanoSeconds required_time = (*required_times)[node] - execution_time;
for (const string& fanin_name : node->input()) {
const NodeDef* fanin = name_map[NodeName(fanin_name)];
(*required_times)[fanin] =
std::min((*required_times)[fanin], required_time);
int pending = pending_fanouts[fanin];
if (pending == 0) {
continue;
} else if (pending == 1) {
ready_nodes.push_back(fanin);
}
pending_fanouts[fanin]--;
}
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/static_schedule.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class StaticScheduleTest : public ::testing::Test {
public:
std::unique_ptr<VirtualCluster> CreateVirtualCluster() const {
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
cpu_device.set_frequency(1000);
cpu_device.set_num_cores(4);
cpu_device.set_bandwidth(32);
cpu_device.set_l1_cache_size(32 * 1024);
cpu_device.set_l2_cache_size(256 * 1024);
cpu_device.set_l3_cache_size(4 * 1024 * 1024);
std::unordered_map<string, DeviceProperties> devices;
devices["/job:localhost/replica:0/task:0/cpu:0"] = cpu_device;
return std::unique_ptr<VirtualCluster>(new VirtualCluster(devices));
}
};
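// The helpers below order (node, time) pairs by completion time using a
// std::map keyed on the time. Entries with identical times would collapse
// into a single map slot, so distinct times are implicitly assumed.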
std::vector<Costs::NanoSeconds> GetOrderedTimes(
const std::unordered_map<const NodeDef*, Costs::NanoSeconds>&
completion_times) {
std::map<Costs::NanoSeconds, std::string> ordered_completion_times;
for (const auto& node_def_time : completion_times) {
ordered_completion_times[node_def_time.second] =
node_def_time.first->name();
}
std::vector<Costs::NanoSeconds> ordered_times;
for (const auto& time_node_name : ordered_completion_times) {
ordered_times.push_back(time_node_name.first);
}
return ordered_times;
}
std::vector<std::string> GetOrderedNodeNames(
const std::unordered_map<const NodeDef*, Costs::NanoSeconds>&
completion_times) {
std::map<Costs::NanoSeconds, std::string> ordered_completion_times;
for (const auto& node_def_time : completion_times) {
ordered_completion_times[node_def_time.second] =
node_def_time.first->name();
}
std::vector<std::string> ordered_node_names;
for (const auto& time_node_name : ordered_completion_times) {
ordered_node_names.push_back(time_node_name.second);
}
return ordered_node_names;
}
TEST_F(StaticScheduleTest, BasicGraph) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"CPU:0"});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
std::unique_ptr<VirtualCluster> cluster(CreateVirtualCluster());
std::unordered_map<const NodeDef*, Costs::NanoSeconds> completion_times;
Status status =
EstimateEarliestExecutionTimes(item, cluster.get(), &completion_times);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), completion_times.size());
std::vector<Costs::NanoSeconds> ordered_times =
GetOrderedTimes(completion_times);
for (int i = 0; i < ordered_times.size(); ++i) {
if (i > 0) {
EXPECT_GT(ordered_times[i], ordered_times[i - 1]);
}
}
EXPECT_EQ(ordered_times[0], Costs::NanoSeconds(1));
std::vector<std::string> ordered_node_names =
GetOrderedNodeNames(completion_times);
EXPECT_EQ(ordered_node_names,
(std::vector<std::string>{"Const/Const", "x", "Sign", "Sign_1",
"Sign_2", "Sign_3", "y"}));
}
TEST_F(StaticScheduleTest, BasicGraphWithCtrlDependencies) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::AddN(s.WithOpName("b"), {a});
Output c = ops::Identity(s.WithOpName("c"), b);
Output d = ops::Identity(s.WithOpName("d"), c);
Output e = ops::AddN(s.WithOpName("e"), {d});
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
EXPECT_EQ("c", item.graph.node(2).name());
EXPECT_EQ("e", item.graph.node(4).name());
*item.graph.mutable_node(4)->add_input() = "^c";
std::unique_ptr<VirtualCluster> cluster(CreateVirtualCluster());
std::unordered_map<const NodeDef*, Costs::NanoSeconds> completion_times;
Status status =
EstimateEarliestExecutionTimes(item, cluster.get(), &completion_times);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), completion_times.size());
std::vector<Costs::NanoSeconds> ordered_times =
GetOrderedTimes(completion_times);
for (int i = 0; i < ordered_times.size(); ++i) {
if (i > 0) {
EXPECT_GT(ordered_times[i], ordered_times[i - 1]);
}
}
EXPECT_EQ(ordered_times[0], Costs::NanoSeconds(1));
std::vector<std::string> ordered_node_names =
GetOrderedNodeNames(completion_times);
EXPECT_EQ(ordered_node_names,
(std::vector<std::string>{"a", "b", "c", "d", "e"}));
}
TEST_F(StaticScheduleTest, RequiredTimes) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"CPU:0"});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
std::unique_ptr<VirtualCluster> cluster(CreateVirtualCluster());
std::unordered_map<const NodeDef*, Costs::NanoSeconds> execution_times;
for (const NodeDef& node : item.graph.node()) {
execution_times[&node] = 0;
}
std::unordered_map<const NodeDef*, Costs::NanoSeconds> required_times;
Status status = EstimateRequiredTimes(item, cluster.get(), execution_times,
&required_times);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), required_times.size());
std::vector<Costs::NanoSeconds> ordered_times =
GetOrderedTimes(required_times);
for (int i = 0; i < ordered_times.size(); ++i) {
if (i > 0) {
EXPECT_GT(ordered_times[i], ordered_times[i - 1]);
}
}
EXPECT_EQ(ordered_times[ordered_times.size() - 1], Costs::NanoSeconds(0));
std::vector<std::string> ordered_node_names =
GetOrderedNodeNames(required_times);
EXPECT_EQ(ordered_node_names,
(std::vector<std::string>{"Const/Const", "x", "Sign", "Sign_1",
"Sign_2", "Sign_3", "y"}));
}
}
}
} |
1,395 | cpp | tensorflow/tensorflow | graph_optimizer_stage | tensorflow/core/grappler/optimizers/graph_optimizer_stage.cc | tensorflow/core/grappler/optimizers/graph_optimizer_stage_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_GRAPH_OPTIMIZER_STAGE_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_GRAPH_OPTIMIZER_STAGE_H_
#include <unordered_map>
#include <unordered_set>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace grappler {
struct NodeScopeAndName {
string scope;
string name;
};
const NodeScopeAndName ParseNodeScopeAndName(const string& node_name);
struct GraphOptimizerContext {
GraphOptimizerContext(const std::unordered_set<string>* nodes_to_preserve,
GraphDef* optimized_graph,
GraphProperties* graph_properties, NodeMap* node_map,
gtl::FlatSet<string>* feed_nodes,
RewriterConfig::Toggle opt_level)
: nodes_to_preserve(nodes_to_preserve),
optimized_graph(optimized_graph),
graph_properties(graph_properties),
node_map(node_map),
feed_nodes(feed_nodes),
opt_level(opt_level) {}
const std::unordered_set<string>* nodes_to_preserve;
GraphDef* optimized_graph;
GraphProperties* graph_properties;
NodeMap* node_map;
gtl::FlatSet<string>* feed_nodes;
RewriterConfig::Toggle opt_level;
};
Status GetInputNode(const GraphOptimizerContext& ctx, const string& input,
NodeDef** node);
Status GetTensorProperties(const GraphOptimizerContext& ctx,
const string& tensor,
const OpInfo::TensorProperties** properties);
NodeDef* AddCopyNode(const GraphOptimizerContext& ctx, const string& name,
const NodeDef* node_to_copy);
NodeDef* AddEmptyNode(const GraphOptimizerContext& ctx, const string& name);
const string MakeOptimizedNodeName(const NodeScopeAndName& node,
const string& sub_scope,
const string& prefix);
const string MakeOptimizedNodeName(const NodeScopeAndName& root,
const std::vector<string>& node_names,
const string& sub_scope,
const string& prefix);
template <typename Result>
class GraphOptimizerStage {
public:
explicit GraphOptimizerStage(const string& optimizer_name,
const string& stage_name,
const GraphOptimizerContext& ctx)
: optimizer_name_(optimizer_name), stage_name_(stage_name), ctx_(ctx) {}
virtual ~GraphOptimizerStage() = default;
const string& stage_name() const { return stage_name_; }
const string& optimizer_name() const { return optimizer_name_; }
virtual bool IsSupported(const NodeDef* node) const = 0;
virtual Status TrySimplify(NodeDef* node, Result* result) = 0;
Status EnsureNodeIsSupported(const NodeDef* node) const {
return IsSupported(node)
? absl::OkStatus()
: errors::InvalidArgument(
"Node ", node->name(), " is not supported by optimizer ",
optimizer_name_, " and stage ", stage_name_);
}
const string OptimizedNodeName(const NodeScopeAndName& node) const {
return MakeOptimizedNodeName(node, optimizer_name_, stage_name_);
}
const string OptimizedNodeName(const NodeScopeAndName& root,
const std::vector<string>& nodes) const {
return MakeOptimizedNodeName(root, nodes, optimizer_name_, stage_name_);
}
const string OptimizedNodeName(const NodeScopeAndName& node,
const string& rewrite_rule) const {
const string prefix = strings::StrCat(stage_name_, "_", rewrite_rule);
return MakeOptimizedNodeName(node, optimizer_name_, prefix);
}
const string UniqueOptimizedNodeName(const NodeScopeAndName& node) {
const string node_name = OptimizedNodeName(node);
return UniqueNodeName(node_name);
}
const string UniqueOptimizedNodeName(const NodeScopeAndName& node,
const string& rewrite_rule) {
const string node_name = OptimizedNodeName(node, rewrite_rule);
return UniqueNodeName(node_name);
}
Status GetInputNode(const string& input, NodeDef** node) const {
return ::tensorflow::grappler::GetInputNode(ctx_, input, node);
}
Status GetTensorProperties(
const string& tensor, const OpInfo::TensorProperties** properties) const {
return ::tensorflow::grappler::GetTensorProperties(ctx_, tensor,
properties);
}
NodeDef* AddCopyNode(const string& name, const NodeDef* node_to_copy) {
return ::tensorflow::grappler::AddCopyNode(ctx_, name, node_to_copy);
}
NodeDef* AddEmptyNode(const string& name) {
return ::tensorflow::grappler::AddEmptyNode(ctx_, name);
}
protected:
const GraphOptimizerContext& ctx() const { return ctx_; }
private:
const string UniqueNodeName(absl::string_view name) {
string node_name = string(name);
while (ctx_.node_map->NodeExists(node_name)) {
node_name = absl::StrCat(name, "_unique",
optimized_node_name_counter_.fetch_add(1));
}
return node_name;
}
const string optimizer_name_;
const string stage_name_;
const GraphOptimizerContext ctx_;
std::atomic<int64_t> optimized_node_name_counter_ = {0};
};
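// Runs a sequence of stages over a node until `break_predicate_` fires.
// Minimal usage sketch (MyStage is a hypothetical
// GraphOptimizerStage<MyResult> subclass taking the same constructor
// arguments as the stages in this file):
//
//   GraphOptimizerStagePipeline<MyResult> pipeline(
//       [](const MyResult&) { return false; });  // never stop early
//   MyStage& stage = pipeline.AddStage<MyStage>("optimizer", "stage", ctx);
//   MyResult result;
//   pipeline.PassThroughAllStages(node, &result);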
template <typename Result>
class GraphOptimizerStagePipeline {
public:
explicit GraphOptimizerStagePipeline(
const std::function<bool(const Result&)> break_predicate)
: break_predicate_(break_predicate) {}
template <typename T, typename... Args>
T& AddStage(Args&&... args) {
auto stage = new T(std::forward<Args>(args)...);
stages_.push_back(std::unique_ptr<T>(stage));
return *stage;
}
bool PassThroughAllStages(NodeDef* node, Result* result) {
for (auto& stage : stages_) {
if (stage->IsSupported(node)) {
const Status stage_status = stage->TrySimplify(node, result);
if (!stage_status.ok()) {
VLOG(2) << "Failed to run optimizer " << stage->optimizer_name()
<< ", stage " << stage->stage_name() << " node "
<< node->name() << ". Error: " << stage_status.message();
}
if (break_predicate_(*result)) return true;
}
}
return false;
}
Status PassThroughAllStagesWithStatus(NodeDef* node, Result* result) {
for (auto& stage : stages_) {
if (!stage->IsSupported(node)) {
continue;
}
const Status stage_status = stage->TrySimplify(node, result);
if (!stage_status.ok()) {
return stage_status;
} else if (break_predicate_(*result)) {
break;
}
}
return absl::OkStatus();
}
std::size_t NumStages() { return stages_.size(); }
std::vector<string> StageNames() {
std::vector<string> names;
names.reserve(stages_.size());
for (const auto& stage : stages_) {
names.push_back(stage->stage_name());
}
return names;
}
private:
std::vector<std::unique_ptr<GraphOptimizerStage<Result>>> stages_;
std::function<bool(const Result&)> break_predicate_;
GraphOptimizerStagePipeline(const GraphOptimizerStagePipeline&) = delete;
void operator=(const GraphOptimizerStagePipeline&) = delete;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/graph_optimizer_stage.h"
#include "tensorflow/core/graph/tensor_id.h"
namespace tensorflow {
namespace grappler {
const NodeScopeAndName ParseNodeScopeAndName(const string& node_name) {
auto pos = node_name.find_last_of('/');
if (pos == string::npos) {
return {"", node_name};
} else {
return {node_name.substr(0, pos), node_name.substr(pos + 1)};
}
}
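// Example: ParseNodeScopeAndName("a/b/c/Add") returns {"a/b/c", "Add"};
// a name with no '/' returns an empty scope, e.g. {"", "Add"}.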
Status GetInputNode(const GraphOptimizerContext& ctx, const string& input,
NodeDef** node) {
string node_name = NodeName(input);
NodeDef* node_by_name = ctx.node_map->GetNode(node_name);
if (node_by_name == nullptr) {
return errors::FailedPrecondition("Node ", node_name,
" doesn't exists in a node map");
}
*node = node_by_name;
return absl::OkStatus();
}
Status GetTensorProperties(const GraphOptimizerContext& ctx,
const string& tensor,
const OpInfo::TensorProperties** properties) {
if (ctx.graph_properties == nullptr) {
return errors::InvalidArgument("Graph properties are unknown.");
}
SafeTensorId tensor_id = ParseTensorName(tensor);
if (tensor_id.index() < 0) {
return errors::InvalidArgument(
"Can't get tensor properties of control dependency ", tensor);
}
const auto& output_properties =
ctx.graph_properties->GetOutputProperties(tensor_id.node());
int num_outputs = output_properties.size();
if (num_outputs == 0 || tensor_id.index() > num_outputs - 1) {
return errors::InvalidArgument(
"Node ", tensor_id.node(),
" is missing output properties at position :", tensor_id.index(),
" (num_outputs=", num_outputs, ")");
}
*properties = &output_properties[tensor_id.index()];
return absl::OkStatus();
}
NodeDef* AddCopyNode(const GraphOptimizerContext& ctx, const string& name,
const NodeDef* node_to_copy) {
CHECK(node_to_copy != nullptr);
CHECK(!ctx.node_map->NodeExists(name))
<< "Node " << name << " already exists in a graph";
NodeDef* new_node = ctx.optimized_graph->add_node();
*new_node = *node_to_copy;
new_node->set_name(name);
ctx.node_map->AddNode(name, new_node);
return new_node;
}
NodeDef* AddEmptyNode(const GraphOptimizerContext& ctx, const string& name) {
std::string new_name = name;
for (int count = 0; ctx.node_map->NodeExists(new_name); ++count) {
LOG(WARNING) << name << " already exists in the graph.";
new_name = absl::StrCat(name, "_", count);
}
NodeDef* new_node = ctx.optimized_graph->add_node();
new_node->set_name(new_name);
ctx.node_map->AddNode(new_name, new_node);
return new_node;
}
const string MakeOptimizedNodeName(const NodeScopeAndName& node,
const string& sub_scope,
const string& prefix) {
CHECK(!sub_scope.empty() || !prefix.empty())
<< "Either optimized node name prefix or sub-scope must be non-empty";
string optimized_node_name;
if (!node.scope.empty()) {
strings::StrAppend(&optimized_node_name, node.scope, "/");
}
if (!sub_scope.empty()) {
strings::StrAppend(&optimized_node_name, sub_scope, "/");
}
if (!prefix.empty()) {
strings::StrAppend(&optimized_node_name, prefix, "_");
}
strings::StrAppend(&optimized_node_name, node.name);
return optimized_node_name;
}
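// Example for the overload above:
//   MakeOptimizedNodeName({"a/b/c", "Add"}, "my_opt", "my_stg")
// returns "a/b/c/my_opt/my_stg_Add".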
const string MakeOptimizedNodeName(const NodeScopeAndName& root,
const std::vector<string>& node_names,
const string& sub_scope,
const string& prefix) {
string optimized_node_name = MakeOptimizedNodeName(root, sub_scope, prefix);
for (const string& node_name : node_names) {
auto name_and_scope = ParseNodeScopeAndName(node_name);
strings::StrAppend(&optimized_node_name, "_", name_and_scope.name);
}
return optimized_node_name;
}
}
} | #include "tensorflow/core/grappler/optimizers/graph_optimizer_stage.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
class GraphOptimizerStageTest : public ::testing::Test {};
struct FakeResult {};
class FakeOptimizerStage : public GraphOptimizerStage<FakeResult> {
public:
explicit FakeOptimizerStage(const string& optimizer_name,
const string& stage_name,
const GraphOptimizerContext& ctx)
: GraphOptimizerStage(optimizer_name, stage_name, ctx) {}
~FakeOptimizerStage() override = default;
bool IsSupported(const NodeDef* node) const override { return true; }
Status TrySimplify(NodeDef* node, FakeResult* result) override {
return absl::OkStatus();
}
};
TEST_F(GraphOptimizerStageTest, ParseNodeNameAndScopeInRoot) {
const auto scope_and_name = ParseNodeScopeAndName("Add");
EXPECT_EQ(scope_and_name.scope, "");
EXPECT_EQ(scope_and_name.name, "Add");
}
TEST_F(GraphOptimizerStageTest, ParseNodeNameAndScopeInScope) {
const auto scope_and_name = ParseNodeScopeAndName("a/b/c/Add");
EXPECT_EQ(scope_and_name.scope, "a/b/c");
EXPECT_EQ(scope_and_name.name, "Add");
}
TEST_F(GraphOptimizerStageTest, OptimizedNodeName) {
GraphOptimizerContext ctx(/*nodes_to_preserve=*/nullptr,
/*optimized_graph=*/nullptr,
/*graph_properties=*/nullptr,
/*node_map=*/nullptr,
/*feed_nodes=*/nullptr,
RewriterConfig::ON);
FakeOptimizerStage stage("my_opt", "my_stg", ctx);
const auto node = ParseNodeScopeAndName("a/b/c/Add");
EXPECT_EQ(stage.OptimizedNodeName(node), "a/b/c/my_opt/my_stg_Add");
EXPECT_EQ(stage.OptimizedNodeName(node, std::vector<string>({"Mul", "Sqrt"})),
"a/b/c/my_opt/my_stg_Add_Mul_Sqrt");
const string rewrite = "my_rewrite";
EXPECT_EQ(stage.OptimizedNodeName(node, rewrite),
"a/b/c/my_opt/my_stg_my_rewrite_Add");
}
TEST_F(GraphOptimizerStageTest, UniqueOptimizedNodeName) {
GraphDef graph =
GDef({NDef("a/b/c/A", "NotImportant", {}),
NDef("a/b/c/my_opt/my_stg_A", "NotImportant", {}),
NDef("a/b/c/my_opt/my_stg_my_rewrite_A", "NotImportant", {})},
{});
NodeMap node_map(&graph);
GraphOptimizerContext ctx(/*nodes_to_preserve=*/nullptr,
/*optimized_graph=*/nullptr,
/*graph_properties=*/nullptr,
/*node_map=*/&node_map,
/*feed_nodes=*/nullptr,
RewriterConfig::ON);
FakeOptimizerStage stage("my_opt", "my_stg", ctx);
const auto node = ParseNodeScopeAndName("a/b/c/A");
EXPECT_EQ(stage.UniqueOptimizedNodeName(node),
"a/b/c/my_opt/my_stg_A_unique0");
const string rewrite = "my_rewrite";
EXPECT_EQ(stage.UniqueOptimizedNodeName(node, rewrite),
"a/b/c/my_opt/my_stg_my_rewrite_A_unique1");
}
TEST_F(GraphOptimizerStageTest, UniqueOptimizedNodeNameWithUsedNodeNames) {
GraphDef graph = GDef(
{NDef("a/b/c/A", "NotImportant", {}),
NDef("a/b/c/my_opt/my_stg_A", "NotImportant", {}),
NDef("a/b/c/my_opt/my_stg_A_unique0", "NotImportant", {}),
NDef("a/b/c/my_opt/my_stg_my_rewrite_A", "NotImportant", {}),
NDef("a/b/c/my_opt/my_stg_my_rewrite_A_unique1", "NotImportant", {})},
{});
NodeMap node_map(&graph);
GraphOptimizerContext ctx(/*nodes_to_preserve=*/nullptr,
/*optimized_graph=*/nullptr,
/*graph_properties=*/nullptr,
/*node_map=*/&node_map,
/*feed_nodes=*/nullptr,
RewriterConfig::ON);
FakeOptimizerStage stage("my_opt", "my_stg", ctx);
const auto node = ParseNodeScopeAndName("a/b/c/A");
EXPECT_EQ(stage.UniqueOptimizedNodeName(node),
"a/b/c/my_opt/my_stg_A_unique1");
const string rewrite = "my_rewrite";
EXPECT_EQ(stage.UniqueOptimizedNodeName(node, rewrite),
"a/b/c/my_opt/my_stg_my_rewrite_A_unique2");
}
TEST_F(GraphOptimizerStageTest, GetInputNodeAndProperties) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
auto b = ops::Variable(s.WithOpName("b"), {2, 2}, DT_FLOAT);
auto add = ops::Add(s.WithOpName("Add"), a, b);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_CHECK_OK(properties.InferStatically(/*assume_valid_feeds=*/false));
NodeMap node_map(&item.graph);
GraphOptimizerContext ctx(/*nodes_to_preserve=*/nullptr,
/*optimized_graph=*/&item.graph,
/*graph_properties=*/&properties,
/*node_map=*/&node_map,
/*feed_nodes=*/nullptr,
RewriterConfig::ON);
FakeOptimizerStage stage("my_opt", "my_stg", ctx);
NodeDef* add_node;
TF_CHECK_OK(stage.GetInputNode("Add", &add_node));
ASSERT_EQ(add_node->input_size(), 2);
EXPECT_EQ(add_node->input(0), "a");
EXPECT_EQ(add_node->input(1), "b");
const OpInfo::TensorProperties* add_properties;
TF_CHECK_OK(stage.GetTensorProperties("Add", &add_properties));
EXPECT_EQ(add_properties->dtype(), DT_FLOAT);
const OpInfo::TensorProperties* a_properties;
TF_CHECK_OK(stage.GetTensorProperties("a:0", &a_properties));
EXPECT_EQ(a_properties->dtype(), DT_FLOAT_REF);
const OpInfo::TensorProperties* b_properties;
TF_CHECK_OK(stage.GetTensorProperties("b:0", &b_properties));
EXPECT_EQ(b_properties->dtype(), DT_FLOAT_REF);
}
TEST_F(GraphOptimizerStageTest, AddNodes) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
auto b = ops::Variable(s.WithOpName("b"), {2, 2}, DT_FLOAT);
auto add = ops::Add(s.WithOpName("Add"), a, b);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_CHECK_OK(properties.InferStatically(/*assume_valid_feeds=*/false));
NodeMap node_map(&item.graph);
GraphOptimizerContext ctx(/*nodes_to_preserve=*/nullptr,
/*optimized_graph=*/&item.graph,
/*graph_properties=*/&properties,
/*node_map=*/&node_map,
/*feed_nodes=*/nullptr,
RewriterConfig::ON);
FakeOptimizerStage stage("my_opt", "my_stg", ctx);
NodeDef* add_node;
TF_CHECK_OK(stage.GetInputNode("Add", &add_node));
NodeDef* add_node_copy = stage.AddCopyNode("Add_1", add_node);
EXPECT_EQ(add_node_copy->name(), "Add_1");
EXPECT_EQ(add_node_copy->op(), "Add");
ASSERT_EQ(add_node->input_size(), 2);
EXPECT_EQ(add_node_copy->input(0), "a");
EXPECT_EQ(add_node_copy->input(1), "b");
NodeDef* add_node_copy_by_name;
TF_CHECK_OK(stage.GetInputNode("Add_1", &add_node_copy_by_name));
EXPECT_EQ(add_node_copy, add_node_copy_by_name);
NodeDef* empty_node = stage.AddEmptyNode("Add_2");
EXPECT_EQ(empty_node->name(), "Add_2");
EXPECT_EQ(empty_node->input_size(), 0);
NodeDef* empty_node_by_name;
TF_CHECK_OK(stage.GetInputNode("Add_2", &empty_node_by_name));
EXPECT_EQ(empty_node, empty_node_by_name);
NodeDef* unique_empty_node = stage.AddEmptyNode("Add_2");
EXPECT_EQ(unique_empty_node->name(), "Add_2_0");
}
}
}
} |
1,396 | cpp | tensorflow/tensorflow | meta_optimizer | tensorflow/core/grappler/optimizers/data/meta_optimizer.cc | tensorflow/core/grappler/optimizers/meta_optimizer_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_META_OPTIMIZER_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_META_OPTIMIZER_H_
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
namespace tensorflow {
namespace grappler {
class TFDataMetaOptimizer : public CustomGraphOptimizer {
public:
TFDataMetaOptimizer() = default;
~TFDataMetaOptimizer() override = default;
string name() const override { return "tf_data_meta_optimizer"; };
bool UsesFunctionLibrary() const override { return true; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override;
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* output) override;
private:
absl::flat_hash_map<string, std::unique_ptr<GraphOptimizer>>
enabled_optimizers_;
Status ApplyOptimization(const string& name, Cluster* cluster,
GrapplerItem* item) const;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/meta_optimizer.h"
#include <array>
#include "absl/status/status.h"
#include "absl/strings/str_split.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/utils/functions.h"
#include "tensorflow/core/lib/gtl/map_util.h"
namespace tensorflow {
namespace grappler {
namespace {
using ConfigMap =
std::map<string, tensorflow::RewriterConfig_CustomGraphOptimizer>;
constexpr std::array<const char*, 22> kTFDataOptimizations = {
"noop_elimination",
"disable_intra_op_parallelism",
"use_private_thread_pool",
"shuffle_and_repeat_fusion",
"map_parallelization",
"map_fusion",
"filter_fusion",
"map_and_filter_fusion",
"map_and_batch_fusion",
"batch_parallelization",
"filter_parallelization",
"make_sloppy",
"parallel_batch",
"slack",
"autotune_buffer_sizes",
"seq_interleave_prefetch",
"inject_prefetch",
"inject_io_prefetch_eligible",
"inject_io_prefetch",
"disable_prefetch_legacy_autotune",
"enable_gradient_descent",
"make_deterministic"};
Status ToConfigMap(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config,
ConfigMap* result) {
auto found = gtl::FindOrNull(config->parameter_map(), "optimizer_configs");
if (!found) return absl::OkStatus();
auto& options = found->list().s();
for (const auto& option_string : options) {
std::vector<string> split = absl::StrSplit(option_string, ':');
if (split.size() != 3) {
return errors::Internal(
"Wrong format for optimizer options. Expect <optimizer name>:<config "
"key>:<config value>, received: ",
option_string);
}
const string& optimizer_name = split[0];
const string& config_key = split[1];
const string& config_value = split[2];
auto optimizer_config = gtl::FindOrNull(*result, optimizer_name);
if (!optimizer_config) {
(*result)[optimizer_name] =
tensorflow::RewriterConfig_CustomGraphOptimizer();
optimizer_config = gtl::FindOrNull(*result, optimizer_name);
}
(*optimizer_config->mutable_parameter_map())[config_key].set_s(
config_value);
}
return absl::OkStatus();
}
}
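// Applies every tf.data optimization in the fixed order above, then
// recursively optimizes the bodies of reachable tf.data functions and, if
// any changed, writes the rewritten library back into the output graph.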
Status TFDataMetaOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* output) {
GrapplerItem optimized_item = item;
for (const auto& optimization : kTFDataOptimizations) {
tensorflow::metrics::ScopedCounter<2> timings(
tensorflow::metrics::GetGraphOptimizationCounter(),
{"TFData", optimization});
Status status = ApplyOptimization(optimization, cluster, &optimized_item);
timings.ReportAndStop();
if (!status.ok()) return status;
}
output->Swap(&optimized_item.graph);
FunctionLibraryDefinition flib =
FunctionLibraryDefinition(OpRegistry::Global(), output->library())
.ReachableDefinitions(*output);
const auto producer = output->versions().producer();
bool optimized_functions = false;
for (const auto& name : flib.ListFunctionNames()) {
auto* func = flib.Find(name);
if (!data::IsTFDataFunction(*func)) continue;
VLOG(3) << "Optimize function: function=" << func->signature().name();
optimized_functions = true;
GrapplerFunctionItem func_item;
TF_RETURN_IF_ERROR(
MakeGrapplerFunctionItem(*func, flib, producer, &func_item));
GraphDef optimized_func_graph;
TF_RETURN_IF_ERROR(Optimize(cluster, func_item, &optimized_func_graph));
for (const FunctionDef& func_def :
optimized_func_graph.library().function()) {
if (flib.Find(func_def.signature().name()) == nullptr) {
TF_RETURN_IF_ERROR(flib.AddFunctionDef(func_def));
}
}
FunctionDef optimized_func;
func_item.SwapFunctionBody(std::move(optimized_func_graph));
TF_RETURN_IF_ERROR(MakeFunctionDef(func_item, flib, &optimized_func));
TF_RETURN_IF_ERROR(
flib.ReplaceFunction(func->signature().name(), optimized_func));
}
if (optimized_functions) {
*output->mutable_library() = flib.ToProto();
}
return absl::OkStatus();
}
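// Runs a single optimization by name if it was enabled in Init(); unknown or
// disabled names are a no-op. A kAborted status from an optimizer is treated
// as "keep the graph unchanged" rather than as an error.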
Status TFDataMetaOptimizer::ApplyOptimization(const string& name,
Cluster* cluster,
GrapplerItem* item) const {
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
const auto* optimizer = gtl::FindOrNull(enabled_optimizers_, name);
if (!optimizer) {
return absl::OkStatus();
}
GraphDef result;
(*optimizer)->set_deadline_usec(this->deadline_usec());
Status status = (*optimizer)->Optimize(cluster, *item, &result);
if (status.ok()) {
item->graph.Swap(&result);
} else if (absl::IsAborted(status)) {
status = absl::OkStatus();
}
return status;
}
Status TFDataMetaOptimizer::Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) {
if (!config) return absl::OkStatus();
auto& optimizers = config->parameter_map().at("optimizers").list().s();
ConfigMap optimizer_configs;
TF_RETURN_IF_ERROR(ToConfigMap(config, &optimizer_configs));
for (const auto& optimizer_name : optimizers) {
auto optimizer =
CustomGraphOptimizerRegistry::CreateByNameOrNull(optimizer_name);
if (optimizer) {
TF_RETURN_IF_ERROR(
optimizer->Init(gtl::FindOrNull(optimizer_configs, optimizer_name)));
enabled_optimizers_[optimizer_name] = std::move(optimizer);
} else {
return errors::Internal(
"Tried to register a dataset optimizer that doesn't exist: ",
optimizer_name);
}
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(TFDataMetaOptimizer, "tf_data_meta_optimizer");
}
} | #include "tensorflow/core/grappler/optimizers/meta_optimizer.h"
#include <atomic>
#include "absl/strings/match.h"
#include "absl/strings/substitute.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kDevice[] = "/device:CPU:0";
class TestOptimizer : public CustomGraphOptimizer {
public:
static void SetOptimized(const bool flag_value) { optimized_ = flag_value; }
static bool IsOptimized() { return optimized_; }
TestOptimizer() {}
string name() const override { return "test_optimizer"; }
bool UsesFunctionLibrary() const override { return false; }
Status Init(const tensorflow::RewriterConfig_CustomGraphOptimizer* config =
nullptr) override {
return absl::OkStatus();
}
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override {
optimized_ = true;
*optimized_graph = item.graph;
return absl::OkStatus();
}
private:
static bool optimized_;
};
bool TestOptimizer::optimized_;
REGISTER_GRAPH_OPTIMIZER(TestOptimizer);
class TestGraphOptimizer : public TestOptimizer {
public:
string name() const override { return "test_graph_optimizer"; }
};
REGISTER_GRAPH_OPTIMIZER(TestGraphOptimizer);
class TestOptimizerWithParams : public TestOptimizer {
public:
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
CHECK(config != nullptr);
return absl::OkStatus();
}
};
REGISTER_GRAPH_OPTIMIZER(TestOptimizerWithParams);
class GrapplerItemPropertiesAccumulator : public CustomGraphOptimizer {
public:
static void SetOptimizationOptions(
gtl::FlatMap<string, GrapplerItem::OptimizationOptions>*
optimization_options) {
optimization_options_ = optimization_options;
}
static void ResetOptimizationOptions() { optimization_options_ = nullptr; }
GrapplerItemPropertiesAccumulator() {}
string name() const override {
return "grappler_item_properties_accumulator";
}
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override {
*optimized_graph = item.graph;
if (optimization_options_) {
optimization_options_->insert({item.id, item.optimization_options()});
}
return absl::OkStatus();
}
private:
static gtl::FlatMap<string, GrapplerItem::OptimizationOptions>*
optimization_options_;
};
gtl::FlatMap<string, GrapplerItem::OptimizationOptions>*
GrapplerItemPropertiesAccumulator::optimization_options_;
REGISTER_GRAPH_OPTIMIZER(GrapplerItemPropertiesAccumulator);
class MetaOptimizerTest : public GrapplerTest {};
TEST_F(MetaOptimizerTest, RunsCustomOptimizer) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
TestOptimizer::SetOptimized(false);
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.add_optimizers("TestOptimizer");
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_TRUE(TestOptimizer::IsOptimized());
}
TEST_F(MetaOptimizerTest, RunsCustomOptimizerWithParams) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
TestOptimizer::SetOptimized(false);
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.add_optimizers("TestOptimizerWithParams");
auto* custom_config = rewriter_config.add_custom_optimizers();
custom_config->set_name("TestOptimizerWithParams");
(*custom_config->mutable_parameter_map())["foo"] = AttrValue();
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_TRUE(TestOptimizer::IsOptimized());
}
TEST_F(MetaOptimizerTest, RunsCustomOptimizerAndCustomGraphOptimizer) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
TestOptimizer::SetOptimized(false);
TestGraphOptimizer::SetOptimized(false);
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.add_optimizers("TestOptimizer");
auto customGraphOptimizer = rewriter_config.add_custom_optimizers();
customGraphOptimizer->set_name("TestGraphOptimizer");
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_TRUE(TestOptimizer::IsOptimized());
EXPECT_TRUE(TestGraphOptimizer::IsOptimized());
}
TEST_F(MetaOptimizerTest, RunsPluginOptimizer) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"/device:GPU:0"});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
TestOptimizer::SetOptimized(false);
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.set_min_graph_nodes(-1);
const auto creator = []() { return new TestOptimizer; };
ConfigList config_list;
config_list.disable_model_pruning = true;
PluginGraphOptimizerRegistry::RegisterPluginOptimizerOrDie(creator, "GPU",
config_list);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_TRUE(TestOptimizer::IsOptimized());
}
TEST_F(MetaOptimizerTest, RunOptimizersTwice) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
}
TEST_F(MetaOptimizerTest, RunToggleOptimizersAndCustomGraphOptimizerTwice) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
auto customGraphOptimizer = rewriter_config.add_custom_optimizers();
customGraphOptimizer->set_name("TestGraphOptimizer");
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_TRUE(TestGraphOptimizer::IsOptimized());
}
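// Verifies that function optimization specializes library functions per call
// site, producing names of the form
// "<function>_specialized_for_<node>_at_<graph id>", and that the optimized
// graph still produces the same values as the original.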
TEST_F(MetaOptimizerTest, OptimizeFunctionLibrary) {
using test::function::NDef;
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
rewriter_config.set_function_optimization(RewriterConfig::ON);
rewriter_config.add_optimizers("function");
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "mul:z:0"}});
FunctionDef square_func = FunctionDefHelper::Create(
"MySquare", {"x:T"}, {"z:T"}, {"T: {float, double}"},
{{{"my_mul"}, "MyMul", {"x", "x"}, {{"T", "$T"}}}},
{{"z", "my_mul:z:0"}});
(*square_func.mutable_attr())["_noinline"].set_b(true);
FunctionDef quadratic_func = FunctionDefHelper::Create(
"MyQuadratic", {"x:T"}, {"z:T"}, {"T: {float, double}"},
{{{"square"}, "MySquare", {"x"}, {{"T", "$T"}}},
{{"quadratic"}, "MySquare", {"square:z"}, {{"T", "$T"}}}},
{{"z", "quadratic:z:0"}});
(*quadratic_func.mutable_attr())["_noinline"].set_b(true);
GrapplerItem item;
item.id = "tf_graph";
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_INT32}}, kDevice),
NDef("square", "MySquare", {"a"}, {{"T", DT_FLOAT}}, kDevice),
NDef("quadratic", "MyQuadratic", {"b"}, {{"T", DT_INT32}}, kDevice),
NDef("out_s", "Identity", {"square:0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("out_q", "Identity", {"quadratic:0"}, {{"T", DT_INT32}}, kDevice)},
{mul_func, square_func, quadratic_func});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
FunctionLibraryDefinition optimized_flib(OpRegistry::Global(),
output.library());
EXPECT_EQ(3, optimized_flib.num_functions());
const auto specialized_name = [](const string& fn, const string& node,
const string& id) {
return absl::Substitute("$0_specialized_for_$1_at_$2", fn, node, id);
};
const string optimized_0 =
specialized_name("MyQuadratic", "quadratic", "tf_graph");
const string optimized_1 = specialized_name("MySquare", "square", "tf_graph");
const string optimized_2 =
specialized_name("MySquare", "square", optimized_0);
const FunctionDef* optimized_func_0 = optimized_flib.Find(optimized_0);
const FunctionDef* optimized_func_1 = optimized_flib.Find(optimized_1);
const FunctionDef* optimized_func_2 = optimized_flib.Find(optimized_2);
ASSERT_NE(optimized_func_0, nullptr);
ASSERT_NE(optimized_func_1, nullptr);
ASSERT_NE(optimized_func_2, nullptr);
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "square" && ++count) {
EXPECT_EQ(optimized_1, node.op());
} else if (node.name() == "quadratic" && ++count) {
EXPECT_EQ(optimized_0, node.op());
}
}
EXPECT_EQ(2, count);
count = 0;
for (const NodeDef& node : optimized_func_0->node_def()) {
if (node.name() == "square" && ++count) {
EXPECT_EQ(optimized_2, node.op());
} else if (node.name() == "quadratic" && ++count) {
EXPECT_EQ(optimized_2, node.op());
}
}
EXPECT_EQ(2, count);
const std::vector<const FunctionDef*> optimized_funcs = {optimized_func_1,
optimized_func_2};
for (const FunctionDef* optimized_func : optimized_funcs) {
count = 0;
for (const NodeDef& node : optimized_func->node_def()) {
if (node.name() == "Func/my_mul/input/_0" && ++count) {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("x", node.input(0));
} else if (node.name() == "Func/my_mul/input/_1" && ++count) {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("x", node.input(0));
} else if (node.name() == "my_mul/mul" && ++count) {
EXPECT_EQ("Mul", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("Func/my_mul/input/_0:output:0", node.input(0));
EXPECT_EQ("Func/my_mul/input/_1:output:0", node.input(1));
}
EXPECT_TRUE(node.device().empty());
}
EXPECT_EQ(3, count);
ASSERT_EQ(1, optimized_func->ret().size());
EXPECT_EQ("Func/my_mul/output/_2:output:0", optimized_func->ret().at("z"));
}
item.fetch = {"out_s", "out_q"};
item.feed.emplace_back("a", test::AsScalar<float>(2.0f));
item.feed.emplace_back("b", test::AsScalar<int>(4));
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
test::ExpectTensorEqual<int>(tensors_expected[1], tensors[1]);
}
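// Verifies that function specialization prunes function outputs that no call
// site consumes: only the fetched output z2 survives in the specialized
// MyMul and Fwd functions.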
TEST_F(MetaOptimizerTest, OptimizeFunctionLibraryPruneUnusedOutputs) {
using test::function::NDef;
ConfigProto config_proto;
MetaOptimizer optimizer(nullptr, config_proto);
FunctionDef my_mul = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z0:T", "z1:T", "z2:T"}, {"T: {float, int32}"},
{{{"output0"}, "Mul", {"x", "y"}, {{"T", "$T"}}},
{{"output1"}, "Mul", {"x", "y"}, {{"T", "$T"}}},
{{"output2"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z0", "output0:z:0"}, {"z1", "output1:z:0"}, {"z2", "output2:z:0"}});
FunctionDef my_fwd = FunctionDefHelper::Create(
"Fwd", {"x:T", "y:T"}, {"z0:T", "z1:T", "z2:T"}, {"T: {float, int32}"},
{{{"output"}, "MyMul", {"x", "y"}, {{"T", "$T"}}}},
{{"z0", "output:z0:0"}, {"z1", "output:z1:0"}, {"z2", "output:z2:0"}});
(*my_mul.mutable_attr())["_noinline"].set_b(true);
(*my_fwd.mutable_attr())["_noinline"].set_b(true);
std::vector<FunctionDef> function_library = {my_mul, my_fwd};
GrapplerItem item;
item.id = "tf_graph";
item.fetch = {"ret"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("fwd", "Fwd", {"a", "b"}, {{"T", DT_FLOAT}}, kDevice),
NDef("ret", "Identity", {"fwd:2"}, {{"T", DT_FLOAT}}, kDevice)},
function_library);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
FunctionLibraryDefinition optimized_flib(OpRegistry::Global(),
output.library());
EXPECT_EQ(2, optimized_flib.num_functions());
const string specialized_my_fwd = "Fwd_specialized_for_fwd_at_tf_graph";
const string specialized_my_mul =
absl::StrCat("MyMul_specialized_for_output_at_", specialized_my_fwd);
FunctionDef expected_my_mul = FunctionDefHelper::Create(
specialized_my_mul, {"x:float", "y:float"}, {"z2:float"}, {},
{{{"output2"}, "Mul", {"x", "y"}, {{"T", DT_FLOAT}}}},
{{"z2", "output2:z:0"}});
FunctionDef expected_my_fwd = FunctionDefHelper::Create(
specialized_my_fwd, {"x:float", "y:float"}, {"z2:float"}, {},
{{{"output"}, specialized_my_mul, {"x", "y"}, {{"T", DT_FLOAT}}}},
{{"z2", "output:z2:0"}});
const FunctionDef* my_mul_spec = optimized_flib.Find(specialized_my_mul);
const FunctionDef* my_fwd_spec = optimized_flib.Find(specialized_my_fwd);
ASSERT_NE(my_mul_spec, nullptr);
ASSERT_NE(my_fwd_spec, nullptr);
CompareFunctions(expected_my_mul, *my_mul_spec);
CompareFunctions(expected_my_fwd, *my_fwd_spec);
item.feed.emplace_back("a", test::AsScalar<float>(2.0f));
item.feed.emplace_back("b", test::AsScalar<float>(4.0f));
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
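// Verifies that pruning inside specialized function bodies keeps only the
// nodes needed for the outputs each call site actually uses.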
TEST_F(MetaOptimizerTest, OptimizeFunctionLibraryPruneFunctionBody) {
using test::function::NDef;
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
rewriter_config.set_function_optimization(RewriterConfig::ON);
rewriter_config.add_optimizers("function");
rewriter_config.add_optimizers("pruning");
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
FunctionDef my_func = FunctionDefHelper::Create(
"MyFunc", {"x:T", "y:T"}, {"z1:T", "z2:T"}, {"T: {float, double}"},
{{{"mul1"}, "Mul", {"x", "y"}, {{"T", "$T"}}},
{{"mul2"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z1", "mul1:z:0"}, {"z2", "mul2:z:0"}});
(*my_func.mutable_attr())["_noinline"].set_b(true);
GrapplerItem item;
item.id = "tf_graph";
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("fn1", "MyFunc", {"a", "b"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn2", "MyFunc", {"a", "b"}, {{"T", DT_FLOAT}}, kDevice),
NDef("out_fn1", "Identity", {"fn1:0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("out_fn2", "Identity", {"fn2:1"}, {{"T", DT_FLOAT}}, kDevice)},
{my_func});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
FunctionLibraryDefinition optimized_flib(OpRegistry::Global(),
output.library());
EXPECT_EQ(2, optimized_flib.num_functions());
const string optimized_fn1 = "MyFunc_specialized_for_fn1_at_tf_graph";
const string optimized_fn2 = "MyFunc_specialized_for_fn2_at_tf_graph";
const FunctionDef* optimized_func_fn1 = optimized_flib.Find(optimized_fn1);
const FunctionDef* optimized_func_fn2 = optimized_flib.Find(optimized_fn2);
ASSERT_NE(optimized_func_fn1, nullptr);
ASSERT_NE(optimized_func_fn2, nullptr);
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "fn1" && ++count) {
EXPECT_EQ(optimized_fn1, node.op());
} else if (node.name() == "fn2" && ++count) {
EXPECT_EQ(optimized_fn2, node.op());
}
}
EXPECT_EQ(2, count);
ASSERT_EQ(1, optimized_func_fn1->node_def_size());
EXPECT_EQ(1, optimized_func_fn1->signature().output_arg_size());
EXPECT_EQ("z1", optimized_func_fn1->signature().output_arg(0).name());
EXPECT_EQ("mul1", optimized_func_fn1->node_def(0).name());
ASSERT_EQ(1, optimized_func_fn2->node_def_size());
EXPECT_EQ(1, optimized_func_fn2->signature().output_arg_size());
EXPECT_EQ("z2", optimized_func_fn2->signature().output_arg(0).name());
EXPECT_EQ("mul2", optimized_func_fn2->node_def(0).name());
item.fetch = {"out_fn1", "out_fn2"};
item.feed.emplace_back("a", test::AsScalar<float>(2.0f));
item.feed.emplace_back("b", test::AsScalar<float>(3.123f));
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
test::ExpectTensorEqual<float>(tensors_expected[1], tensors[1]);
}
TEST_F(MetaOptimizerTest, OptimizeFunctionLibraryWithRestrictions) {
using test::function::NDef;
using FDH = FunctionDefHelper;
gtl::FlatMap<string, GrapplerItem::OptimizationOptions> optimization_options;
GrapplerItemPropertiesAccumulator::SetOptimizationOptions(
&optimization_options);
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
rewriter_config.add_optimizers("GrapplerItemPropertiesAccumulator");
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
FunctionDef mul_func_1 = FunctionDefHelper::Create(
"MyMul1", {"x:float", "y:float"}, {"z:float"}, {},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", DT_FLOAT}}}},
{{"z", "mul:z:0"}});
FunctionDef mul_func_2 = FunctionDefHelper::Create(
"MyMul2", {"x:float", "y:float"}, {"z:float"}, {},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", DT_FLOAT}}}},
{{"z", "mul:z:0"}});
GrapplerItem item;
item.id = "main";
item.graph = test::function::GDef(
{NDef("x0", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("x1", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("dy", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("mul_1", "MyMul1", {"x0", "x1"}, {}, kDevice),
NDef("mul_2", "MyMul2", {"x0", "x1"}, {}, kDevice),
NDef("dx", "SymbolicGradient", {"x0", "x1", "dy"},
{{"f", FDH::FunctionRef("MyMul2", {})},
{"Tin", DataTypeSlice{DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT, DT_FLOAT}}},
kDevice)},
{mul_func_1, mul_func_2});
item.fetch = {"mul_1", "mul_2", "dx"};
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
ASSERT_EQ(optimization_options.size(), 3);
auto optimization_options_main =
gtl::FindOrNull(optimization_options, "main");
ASSERT_NE(optimization_options_main, nullptr);
EXPECT_TRUE(optimization_options_main->allow_non_differentiable_rewrites);
auto optimization_options_my_mul_1 =
gtl::FindOrNull(optimization_options, "MyMul1");
ASSERT_NE(optimization_options_my_mul_1, nullptr);
EXPECT_TRUE(optimization_options_my_mul_1->allow_non_differentiable_rewrites);
auto optimization_options_my_mul_2 =
gtl::FindOrNull(optimization_options, "MyMul2");
ASSERT_NE(optimization_options_my_mul_2, nullptr);
EXPECT_FALSE(
optimization_options_my_mul_2->allow_non_differentiable_rewrites);
}
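// Test-only optimizer that sleeps for one second per invocation so the
// meta-optimizer deadline can be exercised; if the deadline has not expired
// yet, it appends one empty node to the graph as an observable side effect.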
class SleepingOptimizer : public CustomGraphOptimizer {
public:
SleepingOptimizer() {}
string name() const override { return "test_optimizer"; }
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override {
*optimized_graph = item.graph;
Env::Default()->SleepForMicroseconds(1000000);
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
optimized_graph->add_node();
return absl::OkStatus();
}
};
REGISTER_GRAPH_OPTIMIZER(SleepingOptimizer);
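// With a 500ms deadline and a 1s optimizer, the very first pass exceeds the
// deadline, so the original graph must be returned unchanged.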
TEST_F(MetaOptimizerTest, OptimizerTimesOut) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ConfigProto config;
RewriterConfig& rewriter_config =
*config.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.add_optimizers("SleepingOptimizer");
rewriter_config.set_min_graph_nodes(-1);
rewriter_config.set_meta_optimizer_timeout_ms(500);
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::ONE);
GraphDef output;
GraphDef original = item.graph;
const Status status =
RunMetaOptimizer(std::move(item), config, nullptr, nullptr, &output);
EXPECT_EQ(status.message(), "meta_optimizer exceeded deadline.");
CompareGraphs(original, output);
}
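// With a 1.5s deadline and two meta-optimizer iterations, the first pass
// finishes and adds one node, while the second pass trips the deadline.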
TEST_F(MetaOptimizerTest, MetaOptimizerTimesOut) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ConfigProto config;
RewriterConfig& rewriter_config =
*config.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.add_optimizers("SleepingOptimizer");
rewriter_config.set_min_graph_nodes(-1);
rewriter_config.set_meta_optimizer_timeout_ms(1500);
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
GraphDef output;
const int original_node_size = item.graph.node_size();
const Status status =
RunMetaOptimizer(std::move(item), config, nullptr, nullptr, &output);
EXPECT_EQ(status.message(), "meta_optimizer exceeded deadline.");
EXPECT_EQ(original_node_size + 1, output.node_size());
}
TEST_F(MetaOptimizerTest, OptimizerDoesNotTimeOut) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ConfigProto config;
RewriterConfig& rewriter_config =
*config.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.add_optimizers("SleepingOptimizer");
rewriter_config.set_min_graph_nodes(-1);
rewriter_config.set_meta_optimizer_timeout_ms(2500);
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
GraphDef output;
const int original_node_size = item.graph.node_size();
const Status status =
RunMetaOptimizer(std::move(item), config, nullptr, nullptr, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(original_node_size + 2, output.node_size());
}
TEST_F(MetaOptimizerTest, RunPostOptimizationVerifiersOnValidGraph) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ConfigProto config_proto;
auto& post_optimization_verifier_config =
*config_proto.mutable_graph_options()
->mutable_rewrite_options()
->mutable_post_optimization_verifier_config();
post_optimization_verifier_config.set_structure_verifier(VerifierConfig::ON);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
}
TEST_F(MetaOptimizerTest, RunInterOptimizerVerifiersOnValidGraph) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ConfigProto config_proto;
auto& inter_optimizer_verifier_config =
*config_proto.mutable_graph_options()
->mutable_rewrite_options()
->mutable_inter_optimizer_verifier_config();
inter_optimizer_verifier_config.set_structure_verifier(VerifierConfig::ON);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
}
TEST_F(MetaOptimizerTest, RunPostOptimizationVerifiersOnInvalidGraph) {
using test::function::NDef;
using FDH = FunctionDefHelper;
gtl::FlatMap<string, GrapplerItem::OptimizationOptions> optimization_options;
GrapplerItemPropertiesAccumulator::SetOptimizationOptions(
&optimization_options);
FunctionDef mul_func_1 =
FunctionDefHelper::Create("MyMul1", {"x:float", "y:float"}, {"z:float"},
{}, {{{"mul"}, "Mul", {"x", "y"}, {}}},
{{"z", "mul:z:0"}});
FunctionDef mul_func_2 =
FunctionDefHelper::Create("MyMul2", {"x:float", "y:float"}, {"z:float"},
{}, {{{"mul"}, "Mul", {"x", "y"}, {}}},
{{"z", "mul:z:0"}});
GrapplerItem item;
item.id = "main";
item.graph = test::function::GDef(
{NDef("x0", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("x1", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("dy", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("mul_1", "MyM |
1,397 | cpp | tensorflow/tensorflow | debug_stripper | tensorflow/core/grappler/optimizers/debug_stripper.cc | tensorflow/core/grappler/optimizers/debug_stripper_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DEBUG_STRIPPER_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DEBUG_STRIPPER_H_
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
namespace tensorflow {
namespace grappler {
class DebugStripper : public GraphOptimizer {
public:
DebugStripper() {}
~DebugStripper() override {}
string name() const override { return "debug_stripper"; };
bool UsesFunctionLibrary() const override { return false; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* output) override;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/debug_stripper.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
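// Strips debug ops from the graph. Assert and PrintV2 nodes become NoOps
// whose data inputs are demoted to control dependencies, while CheckNumerics
// and Print nodes become Identity ops that forward their first input and
// keep the remaining inputs only as control dependencies. If the graph
// contains none of these ops, the pass aborts early with "Nothing to do."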
Status DebugStripper::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* output) {
bool can_optimize = false;
for (const NodeDef& node : item.graph.node()) {
if (IsAssert(node) || IsCheckNumerics(node) || IsPrint(node)) {
can_optimize = true;
break;
}
}
if (!can_optimize) {
return errors::Aborted("Nothing to do.");
}
*output = item.graph;
for (NodeDef& node : *output->mutable_node()) {
if (IsAssert(node) || node.op() == "PrintV2") {
node.set_op("NoOp");
EraseRegularNodeAttributes(&node);
for (string& inp : *node.mutable_input()) {
if (!IsControlInput(inp)) {
inp = AsControlDependency(NodeName(inp));
}
}
} else if (IsCheckNumerics(node) || node.op() == "Print") {
node.set_op("Identity");
protobuf::Map<string, AttrValue> new_attr;
if (node.attr().find("T") != node.attr().end()) {
new_attr.insert({"T", node.attr().at("T")});
}
node.mutable_attr()->swap(new_attr);
for (int i = 1, end = node.input_size(); i < end; ++i) {
if (!IsControlInput(node.input(i))) {
*node.mutable_input(i) = AsControlDependency(NodeName(node.input(i)));
}
}
}
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/debug_stripper.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class DebugStripperTest : public GrapplerTest {};
TEST_F(DebugStripperTest, OutputEqualToInput) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({}));
Output y = ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({}));
Output add = ops::Add(s, x, y);
Output result = ops::Identity(s, add);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DebugStripper optimizer;
GraphDef output;
EXPECT_EQ(optimizer.Optimize(nullptr, item, &output),
errors::Aborted("Nothing to do."));
}
TEST_F(DebugStripperTest, StripAssertOnTwoOutputs) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape({6}));
auto split =
ops::Split(s.WithOpName("split"), 0, input, 2);
Output x = split[0];
Output y = split[1];
Output ge = ops::GreaterEqual(s.WithOpName("GreaterEqual"), x, y);
auto assert = ops::Assert(s.WithOpName("Assert"), ge, {x, y});
Output add = ops::Add(
s.WithOpName("add").WithControlDependencies({assert.operation}), x, y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DebugStripper optimizer;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
for (const string& input : node.input()) {
if (IsControlInput(input)) {
        EXPECT_EQ(input.find(':'), string::npos);
}
}
}
}
TEST_F(DebugStripperTest, StripAssertFromGraph) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Placeholder(s.WithOpName("x"), DT_FLOAT,
ops::Placeholder::Shape({}));
Output y = ops::Placeholder(s.WithOpName("y"), DT_FLOAT,
ops::Placeholder::Shape({}));
auto greaterequal = ops::GreaterEqual(s.WithOpName("GreaterEqual"), x, y);
auto assert = ops::Assert(s.WithOpName("Assert"), greaterequal, {x, y});
Output add = ops::Add(
s.WithOpName("z").WithControlDependencies({assert.operation}), x, y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DebugStripper optimizer;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
count++;
EXPECT_EQ("Placeholder", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "y") {
count++;
EXPECT_EQ("Placeholder", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "GreaterEqual") {
count++;
EXPECT_EQ("GreaterEqual", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("y", node.input(1));
} else if (node.name() == "Assert") {
count++;
EXPECT_EQ("NoOp", node.op());
EXPECT_EQ(3, node.input_size());
EXPECT_EQ("^GreaterEqual", node.input(0));
EXPECT_EQ("^x", node.input(1));
EXPECT_EQ("^y", node.input(2));
} else if (node.name() == "z") {
count++;
EXPECT_EQ("Add", node.op());
EXPECT_EQ(3, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("y", node.input(1));
EXPECT_EQ("^Assert", node.input(2));
}
}
EXPECT_EQ(5, count);
Tensor x_t(DT_FLOAT, TensorShape({}));
Tensor y_t(DT_FLOAT, TensorShape({}));
x_t.flat<float>()(0) = 1.0f;
y_t.flat<float>()(0) = 0.5f;
std::vector<Tensor> expected =
EvaluateNodes(item.graph, {"z"}, {{"x", x_t}, {"y", y_t}});
std::vector<Tensor> optimized =
EvaluateNodes(output, {"z"}, {{"x", x_t}, {"y", y_t}});
test::ExpectTensorEqual<float>(expected[0], optimized[0]);
}
TEST_F(DebugStripperTest, StripCheckNumericsFromGraph) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Placeholder(s.WithOpName("x"), DT_FLOAT,
ops::Placeholder::Shape({}));
Output y = ops::Placeholder(s.WithOpName("y"), DT_FLOAT,
ops::Placeholder::Shape({}));
auto check1 = ops::CheckNumerics(s.WithOpName("CheckNumerics1"), x, "foo");
auto check2 = ops::CheckNumerics(s.WithOpName("CheckNumerics2"), y, "foo");
Output add = ops::Add(s.WithOpName("z"), check1, check2);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DebugStripper optimizer;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
count++;
EXPECT_EQ("Placeholder", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "y") {
count++;
EXPECT_EQ("Placeholder", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "CheckNumerics1") {
count++;
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ(1, node.attr_size());
} else if (node.name() == "CheckNumerics2") {
count++;
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("y", node.input(0));
EXPECT_EQ(1, node.attr_size());
} else if (node.name() == "z") {
count++;
EXPECT_EQ("Add", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("CheckNumerics1", node.input(0));
EXPECT_EQ("CheckNumerics2", node.input(1));
}
}
EXPECT_EQ(5, count);
Tensor x_t(DT_FLOAT, TensorShape({}));
Tensor y_t(DT_FLOAT, TensorShape({}));
x_t.flat<float>()(0) = 1.0f;
y_t.flat<float>()(0) = 0.5f;
std::vector<Tensor> expected =
EvaluateNodes(item.graph, {"z"}, {{"x", x_t}, {"y", y_t}});
std::vector<Tensor> optimized =
EvaluateNodes(output, {"z"}, {{"x", x_t}, {"y", y_t}});
test::ExpectTensorEqual<float>(expected[0], optimized[0]);
}
TEST_F(DebugStripperTest, StripPrintFromGraph) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Placeholder(s.WithOpName("x"), DT_FLOAT,
ops::Placeholder::Shape({}));
Output print = ops::Print(s.WithOpName("Print"), x, {x});
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DebugStripper optimizer;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
EXPECT_EQ("Placeholder", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "Print") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("^x", node.input(1));
EXPECT_EQ(1, node.attr_size());
}
}
EXPECT_EQ(2, output.node_size());
Tensor x_t(DT_FLOAT, TensorShape({}));
x_t.flat<float>()(0) = 1.0f;
std::vector<Tensor> expected =
EvaluateNodes(item.graph, {"Print"}, {{"x", x_t}});
std::vector<Tensor> optimized =
EvaluateNodes(output, {"Print"}, {{"x", x_t}});
test::ExpectTensorEqual<float>(expected[0], optimized[0]);
}
TEST_F(DebugStripperTest, StripPrintV2FromGraph) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Const(s.WithOpName("x"), string("Hello"), {});
Operation print = ops::PrintV2(s.WithOpName("PrintV2"), x);
Output y =
ops::Identity(s.WithOpName("y").WithControlDependencies({print}), x);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DebugStripper optimizer;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "PrintV2") {
EXPECT_EQ("NoOp", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("^x", node.input(0));
EXPECT_EQ(0, node.attr_size());
} else if (node.name() == "y") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("^PrintV2", node.input(1));
}
}
EXPECT_EQ(3, output.node_size());
Tensor expected = EvaluateNodes(item.graph, {"y"}, {})[0];
Tensor optimized = EvaluateNodes(output, {"y"}, {})[0];
EXPECT_EQ(expected.scalar<tstring>()(), optimized.scalar<tstring>()());
}
}
}
} |
1,398 | cpp | tensorflow/tensorflow | pin_to_host_optimizer | tensorflow/core/grappler/optimizers/pin_to_host_optimizer.cc | tensorflow/core/grappler/optimizers/pin_to_host_optimizer_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_PIN_TO_HOST_OPTIMIZER_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_PIN_TO_HOST_OPTIMIZER_H_
#include <unordered_set>
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace grappler {
namespace internal {
string TryFindHostDevice(const gtl::FlatSet<string>& devices,
bool has_device_cpu, const string& device);
}
class PinToHostOptimizer : public GraphOptimizer {
public:
PinToHostOptimizer() {}
explicit PinToHostOptimizer(RewriterConfig::Toggle opt_level) {}
~PinToHostOptimizer() override {}
string name() const override { return "pin_to_host_optimizer"; };
bool UsesFunctionLibrary() const override { return false; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/pin_to_host_optimizer.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/grappler/graph_view.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils/symbolic_shapes.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/grappler/utils/tpu.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
namespace grappler {
namespace internal {
constexpr int64_t kTensorMaxSize = 64;
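// Ops that must never be moved to the host: collectives, control flow, and
// NoOps.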
bool IsDenylisted(const NodeDef& node) {
  return IsCollective(node) || IsControlFlow(node) || IsNoOp(node);
}
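// A tensor is cheap enough to keep in host memory if it is a string, or an
// int32/int64/float tensor with a statically known element count of at most
// kTensorMaxSize.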
bool IsTensorSmall(const OpInfo::TensorProperties& prop) {
if (prop.dtype() == DataType::DT_STRING) {
return true;
}
if (prop.dtype() != DataType::DT_INT32 &&
prop.dtype() != DataType::DT_INT64 &&
prop.dtype() != DataType::DT_FLOAT) {
return false;
}
const int64_t size = NumCoefficients(prop.shape());
if (size < 0 || size > kTensorMaxSize) {
return false;
}
return true;
}
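// Tries the given device types in order and returns the first registered
// KernelDef for `node`, or NotFound if no listed device type has one.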
Status TryFindKernelDef(const std::vector<DeviceType>& devices,
const NodeDef& node, const KernelDef** kdef) {
for (const DeviceType& device : devices) {
const KernelDef* kernel = nullptr;
Status s = FindKernelDef(device, node, &kernel, nullptr);
if (s.ok()) {
if (kdef) {
*kdef = kernel;
}
return absl::OkStatus();
}
}
return errors::NotFound("Could not find KernelDef for op: ", node.op());
}
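// Determines whether the tensor produced on `port_id` of `node` is suitable
// for host memory: denylisted ops are rejected, shapes are inferred lazily,
// the tensor must be small, Identity-like nodes recurse into their fanins,
// CPU-placed nodes are accepted directly, and otherwise the kernel
// registration must list the output argument in host_memory_arg.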
Status IsNodeOutputPortHostFriendly(const GraphView& graph,
GraphProperties* properties,
const NodeDef& node, int port_id,
bool* is_candidate) {
*is_candidate = false;
if (IsDenylisted(node)) {
return absl::OkStatus();
}
if (!properties->has_properties()) {
    TF_RETURN_IF_ERROR(properties->InferStatically(false, false, false));
}
const auto& output_properties = properties->GetOutputProperties(node.name());
int output_properties_size = output_properties.size();
if (port_id >= output_properties_size) {
LOG(WARNING) << "port_id=" << port_id
<< " but output_properties.size()=" << output_properties.size()
<< "\n"
<< node.DebugString();
return absl::OkStatus();
}
if (!IsTensorSmall(output_properties[port_id])) {
return absl::OkStatus();
}
if (IsIdentity(node) || IsIdentityNSingleInput(node)) {
for (const auto& fanin : graph.GetFanins(node, false)) {
bool fanin_candidate = false;
TF_RETURN_IF_ERROR(IsNodeOutputPortHostFriendly(
graph, properties, *fanin.node, fanin.port_id, &fanin_candidate));
if (!fanin_candidate) {
return absl::OkStatus();
}
}
*is_candidate = true;
return absl::OkStatus();
}
if (absl::StrContains(node.device(), DEVICE_CPU)) {
*is_candidate = true;
return absl::OkStatus();
}
const OpDef* op = nullptr;
Status s = OpRegistry::Global()->LookUpOpDef(node.op(), &op);
if (!s.ok()) {
LOG(WARNING) << "Could not find OpDef for : " << node.op();
return absl::OkStatus();
}
const int output_arg_id = OpOutputPortIdToArgId(node, *op, port_id);
if (output_arg_id < 0) {
LOG(WARNING) << "Invalid port: " << port_id << "!\n"
<< node.DebugString() << "\n"
<< op->DebugString();
return absl::OkStatus();
}
const KernelDef* kernel = nullptr;
s = TryFindKernelDef({node.device().c_str(), DEVICE_GPU, DEVICE_CPU}, node,
&kernel);
if (!s.ok()) {
LOG(INFO) << "Could not find KernelDef for: " << node.op();
return absl::OkStatus();
}
for (const string& host_memory_arg : kernel->host_memory_arg()) {
if (op->output_arg(output_arg_id).name() == host_memory_arg) {
*is_candidate = true;
break;
}
}
return absl::OkStatus();
}
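// Determines whether the input on `port_id` of `node` is expected in host
// memory, either because the node is placed on CPU or because the kernel
// registration lists the matching input argument in host_memory_arg.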
bool IsNodeInputPortHostFriendly(const NodeDef& node, int port_id) {
if (absl::StrContains(node.device(), DEVICE_CPU)) {
return true;
}
const OpDef* op = nullptr;
Status s = OpRegistry::Global()->LookUpOpDef(node.op(), &op);
if (!s.ok()) {
LOG(WARNING) << "Could not find OpDef for : " << node.op();
return false;
}
const int input_arg_id = OpInputPortIdToArgId(node, *op, port_id);
const KernelDef* kernel = nullptr;
s = internal::TryFindKernelDef(
{node.device().c_str(), DEVICE_GPU, DEVICE_CPU}, node, &kernel);
if (!s.ok()) {
LOG(INFO) << "Could not find KernelDef for: " << node.op();
return false;
}
for (const string& host_memory_arg : kernel->host_memory_arg()) {
if (op->input_arg(input_arg_id).name() == host_memory_arg) {
return true;
}
}
return false;
}
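// A node is a candidate for host placement if it is already on CPU, or if it
// is not denylisted, has a registered CPU kernel, every fanin output port is
// host friendly, and all of its own outputs are small tensors.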
Status IsNodeHostCandidate(const GraphView& graph, GraphProperties* properties,
const NodeDef& node, bool* is_candidate) {
*is_candidate = false;
if (absl::StrContains(node.device(), DEVICE_CPU)) {
*is_candidate = true;
return absl::OkStatus();
}
if (IsDenylisted(node)) {
return absl::OkStatus();
}
Status s = TryFindKernelDef({DEVICE_CPU}, node, nullptr);
if (!s.ok()) {
return absl::OkStatus();
}
for (const GraphView::OutputPort& fanin :
graph.GetFanins(node, false)) {
bool fanin_candidate = false;
TF_RETURN_IF_ERROR(IsNodeOutputPortHostFriendly(
graph, properties, *fanin.node, fanin.port_id, &fanin_candidate));
if (!fanin_candidate) {
return absl::OkStatus();
}
}
if (!properties->has_properties()) {
    TF_RETURN_IF_ERROR(properties->InferStatically(false, false, false));
}
for (const auto& prop : properties->GetOutputProperties(node.name())) {
if (!IsTensorSmall(prop)) {
return absl::OkStatus();
}
}
*is_candidate = true;
return absl::OkStatus();
}
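// Maps a device string to a matching host device: an empty device falls back
// to "/device:CPU:0" when one exists, and a GPU device is rewritten to the
// CPU device in the same job/task when that device is known. Returns the
// empty string if no suitable host device is found.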
string TryFindHostDevice(const gtl::FlatSet<string>& devices,
bool has_device_cpu, const string& device) {
if (device.empty() && has_device_cpu) {
return "/device:CPU:0";
} else if (absl::StrContains(device, DEVICE_GPU)) {
for (const auto& device_match :
{std::pair<string, string>("GPU", "CPU:0"),
std::pair<string, string>("/device", "/device:CPU:0")}) {
const string device_host =
strings::StrCat(device.substr(0, device.rfind(device_match.first)),
device_match.second);
if (devices.find(device_host) != devices.end()) {
return device_host;
}
}
}
return "";
}
}
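// Moves small, host-friendly ops onto a CPU device. Legacy TPU bridge graphs
// are left untouched. Const nodes that were moved get swapped back to their
// original device if any consumer expects the tensor in device memory, to
// avoid introducing extra host-to-device copies.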
Status PinToHostOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) {
*optimized_graph = item.graph;
if (IsLegacyTPUBridgeGraphDef(*optimized_graph)) {
return absl::OkStatus();
}
GraphProperties properties(item);
GraphView graph(optimized_graph);
gtl::FlatSet<string> devices;
if (cluster) {
const std::vector<string> device_names = cluster->GetDeviceNames();
devices.insert(device_names.begin(), device_names.end());
} else {
devices = {"/device:CPU:0"};
}
const bool has_device_cpu = devices.find("/device:CPU:0") != devices.end();
TF_RETURN_IF_ERROR(TopologicalSort(optimized_graph));
std::vector<std::pair<NodeDef*, string>> const_nodes;
for (auto& node : *optimized_graph->mutable_node()) {
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
bool is_candidate = false;
TF_RETURN_IF_ERROR(
internal::IsNodeHostCandidate(graph, &properties, node, &is_candidate));
if (!is_candidate) {
continue;
}
string device =
internal::TryFindHostDevice(devices, has_device_cpu, node.device());
if (!device.empty()) {
if (IsConstant(node)) {
const_nodes.emplace_back(&node, node.device());
}
VLOG(2) << "Moving node " << node.name() << " to device " << device;
*node.mutable_device() = std::move(device);
}
}
for (auto& it : const_nodes) {
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
NodeDef* node = it.first;
const string& device = it.second;
for (const GraphView::InputPort& fanout : graph.GetFanouts(*node, false)) {
if (!internal::IsNodeInputPortHostFriendly(*fanout.node,
fanout.port_id)) {
VLOG(2) << "Swapping node " << node->name() << " back to device "
<< device;
node->set_device(device);
break;
}
}
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/pin_to_host_optimizer.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class PinToHostOptimizerTest : public GrapplerTest {};
TEST_F(PinToHostOptimizerTest, TryFindHostDeviceNoDevices) {
gtl::FlatSet<string> devices = {};
EXPECT_EQ(internal::TryFindHostDevice(devices, false, "ABC"), "");
}
TEST_F(PinToHostOptimizerTest, TryFindHostDeviceCpuXlaGpu) {
gtl::FlatSet<string> devices = {"/device:CPU:0", "/device:XLA_GPU:0"};
EXPECT_EQ(internal::TryFindHostDevice(devices, true, ""), "/device:CPU:0");
EXPECT_EQ(internal::TryFindHostDevice(devices, true, "/device:XLA_GPU:0"),
"/device:CPU:0");
EXPECT_EQ(internal::TryFindHostDevice(devices, true, "/device:XLA_GPU:*"),
"/device:CPU:0");
}
TEST_F(PinToHostOptimizerTest, OptimizeSmallOpsToHost) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 1, {1024, 1024});
Output c = ops::Shape(s.WithOpName("c"), a);
Output d = ops::Const(s.WithOpName("d"), 0, {1});
Output e = ops::ReduceProd(s.WithOpName("e"), c, d);
int num_int32 = 4;
Output f = ops::Const(s.WithOpName("f"), {"test"});
GrapplerItem item;
item.fetch = {"a", "c", "d", "e", "f"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
PinToHostOptimizer optimizer(RewriterConfig::ON);
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto tensors = EvaluateNodes(item.graph, item.fetch);
EXPECT_EQ(tensors_expected.size(), tensors.size());
for (int i = 0; i < tensors.size(); ++i) {
if (i < num_int32) {
test::ExpectTensorEqual<int32>(tensors[i], tensors_expected[i]);
} else {
test::ExpectTensorEqual<tstring>(tensors[i], tensors_expected[i]);
}
}
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "a" || node.name() == "c") {
EXPECT_TRUE(node.device().empty());
} else if (node.name() == "d" || node.name() == "e" || node.name() == "f") {
EXPECT_EQ(node.device(), "/device:CPU:0");
}
++found;
}
EXPECT_EQ(found, 5);
}
TEST_F(PinToHostOptimizerTest, OptimizeSmallFloatOpsToHost) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 0.0f, {1024, 1024});
Output input_min = ops::Const(s.WithOpName("input_min"), 0.0f);
Output input_max = ops::Const(s.WithOpName("input_max"), 6.0f);
Output b =
ops::QuantizeAndDequantizeV2(s.WithOpName("b"), a, input_min, input_max);
GrapplerItem item;
item.fetch = {"b"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
PinToHostOptimizer optimizer(RewriterConfig::ON);
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto tensors = EvaluateNodes(item.graph, item.fetch);
EXPECT_EQ(tensors_expected.size(), tensors.size());
for (int i = 0; i < tensors.size(); ++i) {
test::ExpectTensorEqual<float>(tensors[i], tensors_expected[i]);
}
for (const NodeDef& node : output.node()) {
if (node.name() == "input_min" || node.name() == "input_max") {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
EXPECT_EQ(node.device(), "/device:CPU:0");
#else
EXPECT_TRUE(node.device().empty());
#endif
}
}
}
TEST_F(PinToHostOptimizerTest, TopologicalSort) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 1, {1024, 1024});
Output c = ops::Shape(s.WithOpName("c"), a);
Output d = ops::Const(s.WithOpName("d"), 0, {1});
Output e = ops::ReduceProd(s.WithOpName("e"), c, d);
GrapplerItem item;
item.fetch = {"a", "c", "d", "e"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
std::reverse(item.graph.mutable_node()->begin(),
item.graph.mutable_node()->end());
GraphDef output;
PinToHostOptimizer optimizer(RewriterConfig::ON);
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto tensors = EvaluateNodes(item.graph, item.fetch);
EXPECT_EQ(tensors_expected.size(), tensors.size());
for (int i = 0; i < tensors.size(); ++i) {
test::ExpectTensorEqual<int32>(tensors[i], tensors_expected[i]);
}
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "a" || node.name() == "c") {
EXPECT_TRUE(node.device().empty());
} else if (node.name() == "d" || node.name() == "e") {
EXPECT_EQ(node.device(), "/device:CPU:0");
}
++found;
}
EXPECT_EQ(found, 4);
}
TEST_F(PinToHostOptimizerTest, NoSwap) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 1, {1, 1});
Output b = ops::Const(s.WithOpName("b"), 1, {1, 1024 * 1024});
Output c = ops::MatMul(s.WithOpName("c"), a, b);
GrapplerItem item;
item.fetch = {"a", "b", "c"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
PinToHostOptimizer optimizer(RewriterConfig::ON);
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto tensors = EvaluateNodes(item.graph, item.fetch);
EXPECT_EQ(tensors_expected.size(), tensors.size());
for (int i = 0; i < tensors.size(); ++i) {
test::ExpectTensorEqual<int32>(tensors[i], tensors_expected[i]);
}
int found = 0;
for (const NodeDef& node : output.node()) {
EXPECT_TRUE(node.device().empty());
++found;
}
EXPECT_EQ(found, 3);
}
TEST_F(PinToHostOptimizerTest, Identity) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a =
ops::Const(s.WithOpName("a").WithDevice("/device:GPU:0"), 1, {64, 64});
Output b = ops::Const(s.WithOpName("b"), {0, 1}, {2});
Output c =
ops::ReduceProd(s.WithOpName("c").WithDevice("/device:GPU:0"), a, b);
Output d = ops::Identity(s.WithDevice("/device:CPU:0").WithOpName("d"), c);
Output e = ops::Multiply(s.WithOpName("e"), d, d);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphDef output;
PinToHostOptimizer optimizer(RewriterConfig::ON);
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "a" || node.name() == "c") {
EXPECT_EQ(node.device(), "/device:GPU:0");
} else if (node.name() == "b") {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
EXPECT_EQ(node.device(), "/device:CPU:0");
#else
EXPECT_TRUE(node.device().empty());
#endif
} else if (node.name() == "d") {
EXPECT_EQ(node.device(), "/device:CPU:0");
} else if (node.name() == "e") {
EXPECT_TRUE(node.device().empty());
}
++found;
}
EXPECT_EQ(found, 5);
}
TEST_F(PinToHostOptimizerTest, PortIdToArgId) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 1, {1, 2, 3});
ops::ShapeN b(s.WithOpName("b"), {a, a, a});
GrapplerItem item;
item.fetch = {"a", "b"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
PinToHostOptimizer optimizer(RewriterConfig::ON);
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto tensors = EvaluateNodes(item.graph, item.fetch);
EXPECT_EQ(tensors_expected.size(), tensors.size());
for (int i = 0; i < tensors.size(); ++i) {
test::ExpectTensorEqual<int32>(tensors[i], tensors_expected[i]);
}
int found = 0;
for (const NodeDef& node : output.node()) {
EXPECT_EQ(node.device(), "/device:CPU:0");
++found;
}
EXPECT_EQ(found, 2);
}
}
}
} |
1,399 | cpp | tensorflow/tensorflow | arithmetic_optimizer | tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc | tensorflow/core/grappler/optimizers/arithmetic_optimizer_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_ARITHMETIC_OPTIMIZER_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_ARITHMETIC_OPTIMIZER_H_
#include <unordered_set>
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace grappler {
constexpr char kArithmeticOptimizer[] = "ArithmeticOptimizer";
class ArithmeticOptimizer : public GraphOptimizer {
public:
ArithmeticOptimizer()
: opt_level_(RewriterConfig::ON),
options_(ArithmeticOptimizerOptions::Default(RewriterConfig::ON)) {}
explicit ArithmeticOptimizer(RewriterConfig::Toggle opt_level)
: opt_level_(opt_level),
options_(ArithmeticOptimizerOptions::Default(opt_level)) {}
~ArithmeticOptimizer() override {}
string name() const override { return "arithmetic_optimizer"; };
bool UsesFunctionLibrary() const override { return false; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override;
private:
friend class ArithmeticOptimizerTest;
struct ArithmeticOptimizerOptions {
bool combine_add_to_addn = true;
bool convert_sqrt_div_to_rsqrt_mul = true;
bool dedup_computations = true;
bool fold_conjugate_into_transpose = true;
bool fold_multiply_into_conv = true;
bool fold_transpose_into_matmul = true;
bool fuse_squared_diff = true;
bool hoist_common_factor_out_of_aggregation = true;
bool hoist_cwise_unary_chains = true;
bool minimize_broadcasts = true;
bool optimize_max_or_min_of_monotonic = true;
bool remove_idempotent = true;
bool remove_identity_transpose = true;
bool remove_involution = true;
bool remove_logical_not = true;
bool remove_negation = true;
bool remove_redundant_bitcast = true;
bool remove_redundant_cast = true;
bool remove_redundant_reshape = true;
bool reduce_upsampling_dims = true;
bool reorder_cast_like_and_value_preserving = true;
bool replace_mul_with_tile = true;
bool replace_mul_with_square = true;
bool replace_pack_with_tile_reshape = true;
bool convert_pow = true;
bool convert_log1p = true;
bool convert_log_softmax = true;
bool convert_expm1 = true;
bool unary_ops_composition = true;
bool remove_stack_slice_same_axis = true;
bool simplify_aggregation = true;
bool simplify_embedding_lookup = true;
bool remove_cast_into_segment_reduction = true;
static ArithmeticOptimizerOptions Default(
RewriterConfig::Toggle opt_level) {
ArithmeticOptimizerOptions options;
return options;
}
};
bool CanDedup(const NodeDef& node) const;
void DedupComputations();
void ForwardControlDependencies(NodeDef* target_node,
const std::vector<const NodeDef*>& src_nodes);
Status SimplifyArithmeticOps(bool can_use_shapes);
string TrySimplifyAndReplaceUses(const NodeDef* node,
SetVector<NodeDef*>* nodes_to_simplify);
RewriterConfig::Toggle opt_level_;
ArithmeticOptimizerOptions options_;
bool fetch_nodes_known_ = false;
std::unordered_set<string> nodes_to_preserve_;
std::unique_ptr<NodeMap> node_map_;
std::unique_ptr<GraphProperties> graph_properties_;
GraphDef* optimized_graph_ = nullptr;
gtl::FlatSet<string> feed_nodes_;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/arithmetic_optimizer.h"
#include <algorithm>
#include <deque>
#include <limits>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/graph_topology_view.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/constant_folding.h"
#include "tensorflow/core/grappler/optimizers/graph_optimizer_stage.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/canonicalizer.h"
#include "tensorflow/core/grappler/utils/symbolic_shapes.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/grappler/utils/traversal.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/tensor_coding.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/saved_tensor_slice_util.h"
#include "tensorflow/core/util/strided_slice_op.h"
using tensorflow::strings::StrCat;
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kAddOpsRewriteTag[] =
"_grappler_ArithmeticOptimizer_AddOpsRewriteStage";
constexpr char kMinimizeBroadcastsTag[] =
"_grappler_ArithmeticOptimizer_MinimizeBroadcasts";
template <typename T>
bool ValuesFromConstNode(const NodeDef& node, std::vector<T>* values) {
if (node.op() != "Const") {
return false;
}
if (node.attr().count("dtype") == 0 || node.attr().count("value") == 0 ||
node.attr().at("dtype").type() != DataTypeToEnum<T>::value) {
return false;
}
const TensorProto& tensor = node.attr().at("value").tensor();
typename checkpoint::SaveTypeTraits<T>::RepeatedField* tensor_values =
checkpoint::MutableTensorProtoData<T>(const_cast<TensorProto*>(&tensor));
if (!tensor_values->empty() && tensor.has_tensor_shape()) {
const TensorShapeProto& shape = tensor.tensor_shape();
if (shape.dim_size() == 1 && shape.dim(0).size() == tensor_values->size()) {
values->insert(values->end(), tensor_values->begin(),
tensor_values->end());
return true;
}
}
const auto tensor_content_size = tensor.tensor_content().size();
if (tensor_content_size > 0) {
CHECK_EQ(0, tensor_content_size % sizeof(T))
<< "tensor_content_size (" << tensor_content_size
<< ") is not a multiple of " << sizeof(T);
values->resize(tensor_content_size / sizeof(T));
port::CopyToArray(tensor.tensor_content(),
reinterpret_cast<char*>(values->data()));
return true;
}
return false;
}
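// Adds a control dependency on `new_input` to `node` unless an equivalent
// data or control edge already exists. Returns true iff an edge was added.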
bool MaybeAddControlInput(const string& new_input, NodeDef* node,
GraphDef* graph, NodeMap* node_map) {
bool already_exists = false;
for (const string& input : node->input()) {
if (input == new_input || AsControlDependency(input) == new_input) {
already_exists = true;
break;
}
}
if (!already_exists) {
const string ctrl_dep =
ConstantFolding::AddControlDependency(new_input, graph, node_map);
node->add_input(ctrl_dep);
node_map->AddOutput(NodeName(new_input), node->name());
}
return !already_exists;
}
void SetDataTypeToAttr(DataType dtype, const string& attr_name, NodeDef* node) {
(*node->mutable_attr())[attr_name].set_type(dtype);
}
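// Walks upstream from `node` through ops that are value-preserving, have
// exactly one non-control output, and are not in the preserve set, returning
// the last node of that chain.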
NodeDef* GetTailOfValuePreservingChain(
const NodeDef& node, const NodeMap& node_map,
const std::unordered_set<string>& nodes_to_preserve) {
auto is_value_preserving_non_branching = [&](const NodeDef& node) {
return nodes_to_preserve.find(node.name()) == nodes_to_preserve.end() &&
IsValuePreserving(node) && NumNonControlOutputs(node, node_map) == 1;
};
return GetTailOfChain(node, node_map, false,
is_value_preserving_non_branching);
}
NodeDef* GetTailOfIdempotentChain(
const NodeDef& node, const NodeMap& node_map,
const std::unordered_set<string>& nodes_to_preserve) {
auto is_idempotent_non_branching = [&](const NodeDef& node) {
return nodes_to_preserve.find(node.name()) == nodes_to_preserve.end() &&
IsIdempotent(node) && NumNonControlOutputs(node, node_map) == 1;
};
return GetTailOfChain(node, node_map, false,
is_idempotent_non_branching);
}
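// Reads element `i` of tensor `t` as a complex128, but only for the dtypes
// listed in `dtypes`; returns false for any other type. Deliberately not
// exhaustive over all TensorFlow dtypes, hence the name.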
bool GetElementUnexhaustive(const Tensor& t, int i, const std::set<int>& dtypes,
complex128* element) {
if (dtypes.find(t.dtype()) == dtypes.end()) return false;
switch (t.dtype()) {
case DT_BFLOAT16:
*element = complex128(t.flat<bfloat16>()(i));
return true;
case DT_HALF:
*element = complex128(static_cast<double>(t.flat<Eigen::half>()(i)), 0);
return true;
case DT_INT32:
*element = complex128(t.flat<int32>()(i));
return true;
case DT_INT64:
*element = complex128(t.flat<int64_t>()(i));
return true;
case DT_FLOAT:
*element = complex128(t.flat<float>()(i));
return true;
case DT_DOUBLE:
*element = complex128(t.flat<double>()(i));
return true;
case DT_COMPLEX64:
*element = complex128(t.flat<complex64>()(i));
return true;
case DT_COMPLEX128:
*element = t.flat<complex128>()(i);
return true;
default:
return false;
}
}
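// Returns true if the node's assigned device string resolves to a CPU
// device.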
bool NodeIsOnCpu(const NodeDef& node) {
string task;
string device;
return DeviceNameUtils::SplitDeviceName(node.device(), &task, &device) &&
absl::StrContains(device, DEVICE_CPU);
}
bool AllRegularInputsEqual(const NodeDef& node) {
if (!HasRegularInputs(node)) return true;
for (int i = 1; i < node.input_size(); ++i) {
if (IsControlInput(node.input(i))) {
break;
}
if (node.input(0) != node.input(i)) {
return false;
}
}
return true;
}
void ReplaceWithNoOp(NodeDef* node, const GraphOptimizerContext& ctx) {
ctx.node_map->RemoveInputs(node->name());
ctx.graph_properties->ClearInputProperties(node->name());
ctx.graph_properties->ClearOutputProperties(node->name());
ChangeToNoOp(node);
EraseRegularNodeAttributes(node);
node->clear_input();
}
struct ArithmeticOptimizerContext {
explicit ArithmeticOptimizerContext(SetVector<NodeDef*>* nodes_to_simplify)
: nodes_to_simplify(nodes_to_simplify) {}
SetVector<NodeDef*>* nodes_to_simplify;
};
class ArithmeticOptimizerStage : public GraphOptimizerStage<string> {
public:
explicit ArithmeticOptimizerStage(const string& name,
const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext ctx_ext)
: GraphOptimizerStage("ArithmeticOptimizer", name, ctx),
ctx_ext_(ctx_ext) {}
~ArithmeticOptimizerStage() override = default;
protected:
void AddToOptimizationQueue(NodeDef* node) {
ctx_ext_.nodes_to_simplify->PushBack(node);
}
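  // Rewrites every consumer of `node` to read from `new_input` instead,
  // while preserving control semantics: a data input may not be replaced by
  // a control input. Updated consumers are re-queued for simplification.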
Status UpdateConsumers(NodeDef* node, const string& new_input) {
const auto consumers = ctx().node_map->GetOutputs(node->name());
if (consumers.empty()) return absl::OkStatus();
const TensorId new_tensor = ParseTensorName(new_input);
for (NodeDef* consumer : consumers) {
if (consumer->name() == new_tensor.node()) continue;
bool updated = false;
for (int i = 0; i < consumer->input_size(); ++i) {
const TensorId input_tensor = ParseTensorName(consumer->input(i));
if (input_tensor.node() == node->name()) {
if (new_tensor.index() < 0 && input_tensor.index() >= 0) {
return errors::InvalidArgument(
"Cannot override data input ", input_tensor.ToString(),
" with control input ", new_tensor.ToString());
}
consumer->set_input(i, input_tensor.index() < 0
? absl::StrCat("^", new_tensor.node())
: new_input);
ctx().node_map->UpdateInput(consumer->name(), node->name(),
new_input);
updated = true;
}
}
if (updated) {
DedupControlInputs(consumer);
AddToOptimizationQueue(consumer);
}
}
return absl::OkStatus();
}
void ForwardControlDependencies(
NodeDef* target_node, const std::vector<const NodeDef*>& src_nodes) {
for (const auto& src : src_nodes) {
for (int i = src->input_size() - 1; i >= 0; --i) {
if (IsControlInput(src->input(i))) {
*target_node->add_input() = src->input(i);
ctx().node_map->AddOutput(NodeName(src->input(i)),
target_node->name());
} else {
break;
}
}
}
DedupControlInputs(target_node);
}
bool IsReallyConstant(const NodeDef& node) const {
if (!IsConstant(node)) {
return false;
}
return ctx().feed_nodes->find(node.name()) == ctx().feed_nodes->end();
}
bool IsInPreserveSet(const NodeDef& node) const {
return ctx().nodes_to_preserve->find(node.name()) !=
ctx().nodes_to_preserve->end();
}
bool IsDrivenByControlDependency(const NodeDef& node) const {
return std::any_of(
node.input().begin(), node.input().end(),
[](const string& input) { return IsControlInput(input); });
}
bool DrivesControlDependency(const NodeDef& node) const {
for (const NodeDef* output : ctx().node_map->GetOutputs(node.name())) {
for (int i = 0; i < output->input_size(); ++i) {
const TensorId tensor = ParseTensorName(output->input(i));
if (tensor.node() == node.name() && tensor.index() < 0) {
return true;
}
}
}
return false;
}
bool GetTensorFromConstNode(const string& node_name_or_input,
Tensor* tensor) {
const NodeDef* node = ctx().node_map->GetNode(node_name_or_input);
return node != nullptr && IsReallyConstant(*node) &&
CheckAttrExists(*node, "value").ok() &&
tensor->FromProto(node->attr().at("value").tensor());
}
private:
const ArithmeticOptimizerContext ctx_ext_;
};
class ArithmeticNodesGroupOptimizerStage : public ArithmeticOptimizerStage {
public:
explicit ArithmeticNodesGroupOptimizerStage(
const string& name, const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext ctx_ext)
: ArithmeticOptimizerStage(name, ctx, ctx_ext) {}
~ArithmeticNodesGroupOptimizerStage() override = default;
struct InputAndShape {
InputAndShape(const string& input, const TensorShapeProto& shape)
: input(input), shape(shape) {}
string input;
TensorShapeProto shape;
};
struct OptimizedNodesGroup {
NodeDef* root_node;
TensorShapeProto root_shape;
std::vector<NodeDef*> optimized_nodes;
std::vector<InputAndShape> inputs;
};
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
TF_RETURN_IF_ERROR(EnsureNodeIsSupported(node));
OptimizedNodesGroup group;
TF_RETURN_IF_ERROR(CreateOptimizedNodesGroup(node, &group));
if (!group.optimized_nodes.empty()) {
*simplified_node_name = RewriteOptimizedNodesGroup(group);
}
return absl::OkStatus();
}
protected:
virtual string RewriteOptimizedNodesGroup(
const OptimizedNodesGroup& group) = 0;
virtual bool IsAbsorbableByOptimizedNodesGroup(
const OptimizedNodesGroup& group, const NodeDef& node) const = 0;
Status AbsorbInputByOptimizedNodesGroup(const string& input,
OptimizedNodesGroup* group) const {
std::deque<const string*> input_tensors;
input_tensors.push_front(&input);
while (!input_tensors.empty()) {
const string* input_tensor = input_tensors.front();
input_tensors.pop_front();
NodeDef* input_node;
TF_RETURN_IF_ERROR(GetInputNode(*input_tensor, &input_node));
if (IsAbsorbableByOptimizedNodesGroup(*group, *input_node)) {
group->optimized_nodes.push_back(input_node);
for (int i = input_node->input_size() - 1; i >= 0; --i) {
const string& absorbed_node_input = input_node->input(i);
if (IsControlInput(absorbed_node_input)) continue;
input_tensors.push_front(&absorbed_node_input);
}
} else {
const OpInfo::TensorProperties* properties;
TF_RETURN_IF_ERROR(GetTensorProperties(*input_tensor, &properties));
group->inputs.emplace_back(*input_tensor, properties->shape());
}
}
return absl::OkStatus();
}
Status CreateOptimizedNodesGroup(NodeDef* root_node,
OptimizedNodesGroup* group) const {
const OpInfo::TensorProperties* root_node_output_properties;
TF_RETURN_IF_ERROR(
GetTensorProperties(root_node->name(), &root_node_output_properties));
group->root_node = root_node;
group->root_shape = root_node_output_properties->shape();
group->optimized_nodes.reserve(root_node->input_size());
for (int i = 0; i < root_node->input_size(); ++i) {
const string& input_i = root_node->input(i);
if (IsControlInput(input_i)) continue;
TF_RETURN_IF_ERROR(AbsorbInputByOptimizedNodesGroup(input_i, group));
}
return absl::OkStatus();
}
bool HasAllInputsBroadcastableToShape(
const NodeDef& node, const OpInfo::TensorProperties& properties) const {
auto is_broadcastable = [this, &properties](const string& input) {
const OpInfo::TensorProperties* input_props;
Status has_input_properties = GetTensorProperties(input, &input_props);
return has_input_properties.ok() &&
ShapesBroadcastable(properties, *input_props);
};
return std::all_of(node.input().begin(), node.input().end(),
is_broadcastable);
}
string ShapeSignature(const TensorShapeProto& shape) const {
string signature = strings::StrCat("rank:", shape.dim_size(), ":dim");
for (int i = 0; i < shape.dim_size(); ++i)
strings::StrAppend(&signature, ":", shape.dim(i).size());
return signature;
}
void MarkWithTag(const StringPiece tag, NodeDef* node) {
AddNodeAttr(tag, true, node);
}
void MarkAllMembersWithTag(const OptimizedNodesGroup& group,
const StringPiece tag) const {
AddNodeAttr(tag, true, group.root_node);
for (NodeDef* optimized_node : group.optimized_nodes) {
AddNodeAttr(tag, true, optimized_node);
}
}
bool IsOnTheSameDevice(const OptimizedNodesGroup& group,
const NodeDef& node) const {
return group.root_node->device() == node.device();
}
bool IsInPreserveSet(const NodeDef& node) const {
return ctx().nodes_to_preserve->find(node.name()) !=
ctx().nodes_to_preserve->end();
}
bool IsMarkedWithTag(const NodeDef& node, const StringPiece tag) const {
return HasNodeAttr(node, tag);
}
bool IsMarkedWithAnyTag(const NodeDef& node, const StringPiece tag1,
const StringPiece tag2) const {
return IsMarkedWithTag(node, tag1) || IsMarkedWithTag(node, tag2);
}
};
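// Rewrites a tree of Add/AddN ops into a minimal set of aggregations: inputs
// of identical symbolic shape are summed with a single AddN, and the per-shape
// partial sums are then combined with broadcasting Adds, smaller shapes first.
// Rough sketch (shapes in brackets):
//   Add(Add(x[2,2], y[2,2]), z[2]) -> Add(AddN(x, y), z)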
class AddOpsRewriteStage : public ArithmeticNodesGroupOptimizerStage {
public:
explicit AddOpsRewriteStage(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticNodesGroupOptimizerStage("AddOpsRewrite", ctx, ctx_ext) {}
~AddOpsRewriteStage() override = default;
bool IsSupported(const NodeDef* node) const override {
if (!CanOptimize(*node)) return false;
const OpInfo::TensorProperties* properties;
Status has_properties = GetTensorProperties(node->name(), &properties);
return has_properties.ok() && ShapeIsSymbolicallyDefined(*properties) &&
HasAllInputsBroadcastableToShape(*node, *properties);
}
protected:
bool IsAbsorbableByOptimizedNodesGroup(const OptimizedNodesGroup& group,
const NodeDef& node) const override {
if (!CanOptimize(node)) return false;
if (!IsOnTheSameDevice(group, node)) {
return false;
}
if (NumNonControlDataOutputs(node, *ctx().node_map) != 1) {
return false;
}
const OpInfo::TensorProperties* properties;
Status has_properties = GetTensorProperties(node.name(), &properties);
return has_properties.ok() &&
HasAllInputsBroadcastableToShape(node, *properties);
}
bool CanOptimize(const NodeDef& node) const {
if (!IsAdd(node) && !IsAddN(node)) {
return false;
}
if (IsInPreserveSet(node) || IsMarkedWithTag(node, kAddOpsRewriteTag)) {
return false;
}
return !(IsDrivenByControlDependency(node) ||
DrivesControlDependency(node));
}
string RewriteOptimizedNodesGroup(const OptimizedNodesGroup& group) override {
VLOG(2) << "Collapse Add/AddN: root=" << group.root_node->name()
<< " op=" << group.root_node->op()
<< " num_optimized_nodes=" << group.optimized_nodes.size()
<< " num_inputs=" << group.inputs.size();
MarkAllMembersWithTag(group, kAddOpsRewriteTag);
auto root_scope_and_name = ParseNodeScopeAndName(group.root_node->name());
std::unordered_map<string, std::vector<InputAndShape>> shape_sig_to_inputs;
for (const auto& input : group.inputs) {
shape_sig_to_inputs[ShapeSignature(input.shape)].push_back(input);
}
using SigKV = decltype(shape_sig_to_inputs)::value_type;
VLOG(3) << "Add/AddN group has " << shape_sig_to_inputs.size()
<< " unique shapes: "
<< absl::StrJoin(shape_sig_to_inputs, ", ",
[](string* out, SigKV p) {
strings::StrAppend(out, p.first);
});
std::vector<TensorShapeProto> shapes;
shapes.reserve(shape_sig_to_inputs.size());
for (const auto& el : shape_sig_to_inputs)
shapes.push_back(el.second[0].shape);
if (shapes.size() == 1) {
string node_name = UniqueOptimizedNodeName(root_scope_and_name);
AddInputsOfSymbolicallyEqualShape(*group.root_node, node_name,
group.inputs);
return node_name;
}
std::sort(shapes.begin(), shapes.end(),
[](const TensorShapeProto& left, const TensorShapeProto& right) {
return CompareSymbolicallyShapedTensorSizes(left, right);
});
auto leaf_node_name = [&root_scope_and_name, this](int i) {
return UniqueOptimizedNodeName(root_scope_and_name,
strings::StrCat("Leaf_", i));
};
auto internal_node_name = [&root_scope_and_name, this](int i) {
return UniqueOptimizedNodeName(root_scope_and_name,
strings::StrCat("Internal_", i));
};
std::deque<InputAndShape> add_ops;
for (int i = 0, end = shapes.size(); i < end; ++i) {
const auto node_name = leaf_node_name(i);
const auto& inputs = shape_sig_to_inputs[ShapeSignature(shapes[i])];
add_ops.push_back(AddInputsOfSymbolicallyEqualShape(*group.root_node,
node_name, inputs));
}
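// Reduce the per-shape partial sums into a left-deep binary tree of
// broadcasting Adds. E.g. for leaves [L0, L1, L2]:
//   Internal_0 = Add(L0, L1); root = Add(Internal_0, L2).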
int internal_nodes = 0;
do {
const InputAndShape lhs = add_ops.front();
add_ops.pop_front();
const InputAndShape rhs = add_ops.front();
add_ops.pop_front();
string name = add_ops.empty()
? UniqueOptimizedNodeName(root_scope_and_name)
: internal_node_name(internal_nodes++);
InputAndShape add = AddAggregatedInputs(*group.root_node, name, lhs, rhs);
add_ops.push_front(add);
} while (add_ops.size() > 1);
InputAndShape optimized_root_node = add_ops.front();
return optimized_root_node.input;
}

#include "tensorflow/core/grappler/optimizers/arithmetic_optimizer.h"
#include <complex>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/optimizers/arithmetic_optimizer_test_utils.h"
#include "tensorflow/core/grappler/optimizers/model_pruner.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kHoistFactorOptimizerDiv[] =
"ArithmeticOptimizer/HoistCommonFactor_Div_";
constexpr char kHoistFactorOptimizerMul[] =
"ArithmeticOptimizer/HoistCommonFactor_Mul_";
constexpr char kHoistFactorOptimizerAdd[] =
"ArithmeticOptimizer/HoistCommonFactor_AddV2_";
constexpr char kSimplifyAggregationConst[] =
"ArithmeticOptimizer/SimplifyAggregation_Const_";
constexpr char kSimplifyAggregationMul[] =
"ArithmeticOptimizer/SimplifyAggregation_Mul_";
string HoistMulName(const string& name) {
return AddPrefixToNodeName(name, kHoistFactorOptimizerMul, "");
}
string HoistDivName(const string& name) {
return AddPrefixToNodeName(name, kHoistFactorOptimizerDiv, "");
}
string HoistAddName(const string& name) {
return AddPrefixToNodeName(name, kHoistFactorOptimizerAdd, "");
}
string AggregationConstName(const string& name) {
return AddPrefixToNodeName(name, kSimplifyAggregationConst, "");
}
string AggregationMulName(const string& name) {
return AddPrefixToNodeName(name, kSimplifyAggregationMul, "");
}
void VerifyGraphsMatch(const GraphDef& original_graph,
const GraphDef& optimized_graph, int line) {
EXPECT_EQ(original_graph.node_size(), optimized_graph.node_size()) << line;
for (int i = 0; i < original_graph.node_size(); ++i) {
const NodeDef& original = original_graph.node(i);
const NodeDef& optimized = optimized_graph.node(i);
EXPECT_EQ(original.name(), optimized.name()) << line;
EXPECT_EQ(original.op(), optimized.op()) << line;
EXPECT_EQ(original.input_size(), optimized.input_size()) << line;
for (int j = 0; j < original.input_size(); ++j) {
EXPECT_EQ(original.input(j), optimized.input(j)) << line;
}
}
}
}
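// Most tests below isolate one rewrite stage via the EnableOnly* helpers from
// arithmetic_optimizer_test_utils.h, then check both the rewritten graph
// structure and numerical equivalence against the unoptimized graph.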
TEST_F(ArithmeticOptimizerTest, NoOp) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"CPU:0"});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
ArithmeticOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
VerifyGraphsMatch(item.graph, output, __LINE__);
}
TEST_F(ArithmeticOptimizerTest, ReplaceMulWithBroadcastByTile) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input =
ops::Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape({1, 44, 1, 96, 1, 64}));
Output ones = ops::Const(s.WithOpName("ones"), 1.0f, {1, 1, 2, 1, 2, 1});
Output multiply = ops::Mul(s.WithOpName("mul"), input, ones);
Output output = ops::Identity(s.WithOpName("output"), multiply);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensor =
GenerateRandomTensor<DT_FLOAT>(TensorShape({1, 44, 1, 96, 1, 64}));
auto expected = EvaluateNodes(item.graph, item.fetch, {{"input", tensor}});
ASSERT_EQ(expected.size(), 1);
GraphDef g;
ArithmeticOptimizer optimizer;
EnableOnlyReplaceMulWithBroadcastByTile(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &g);
EXPECT_EQ(g.node_size(), 4);
ASSERT_EQ(CountOpNodes(g, "Mul"), 0);
ASSERT_EQ(CountOpNodes(g, "Tile"), 1);
NodeMap node_map(&g);
const string p = "ArithmeticOptimizer/ReplaceMulWithBroadcastByTile";
const NodeDef* t = node_map.GetNode(absl::StrCat(p, "_", "Tile_mul"));
const NodeDef* c = node_map.GetNode(absl::StrCat(p, "_", "Const_mul"));
ASSERT_NE(t, nullptr);
ASSERT_NE(c, nullptr);
EXPECT_EQ(t->op(), "Tile");
ASSERT_EQ(t->input_size(), 2);
EXPECT_EQ(t->input(0), "input");
EXPECT_EQ(t->input(1), c->name());
EXPECT_EQ(t->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(t->attr().at("Tmultiples").type(), c->attr().at("dtype").type());
auto result = EvaluateNodes(g, item.fetch, {{"input", tensor}});
ASSERT_EQ(result.size(), 1);
test::ExpectTensorNear<float>(result[0], expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, ReplaceMulWithBroadcastByTilePreserveControl) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape({1, 1, 1}));
Output ones = ops::Const(s.WithOpName("ones").WithControlDependencies(input),
1.0f, {1, 2, 1});
Output multiply = ops::Mul(s.WithOpName("mul"), input, ones);
Output output = ops::Identity(s.WithOpName("output"), multiply);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensor = GenerateRandomTensor<DT_FLOAT>(TensorShape({1, 1, 1}));
auto expected = EvaluateNodes(item.graph, item.fetch, {{"input", tensor}});
ASSERT_EQ(expected.size(), 1);
GraphDef g;
ArithmeticOptimizer optimizer;
EnableOnlyReplaceMulWithBroadcastByTile(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &g);
EXPECT_EQ(g.node_size(), 4);
ASSERT_EQ(CountOpNodes(g, "Mul"), 0);
ASSERT_EQ(CountOpNodes(g, "Tile"), 1);
NodeMap node_map(&g);
const string p = "ArithmeticOptimizer/ReplaceMulWithBroadcastByTile";
const NodeDef* c = node_map.GetNode(absl::StrCat(p, "_", "Const_mul"));
ASSERT_NE(c, nullptr);
ASSERT_EQ(c->input_size(), 1);
EXPECT_TRUE(IsControlInput(c->input(0)));
EXPECT_EQ(c->input(0), "^input");
}
TEST_F(ArithmeticOptimizerTest, ReplaceMulWithBroadcastByTileNoBroadcast) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input =
ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({1, 2, 1}));
Output ones = ops::Const(s.WithOpName("ones"), 1.0f, {1, 2, 1});
Output multiply = ops::Mul(s.WithOpName("multiply"), input, ones);
Output output = ops::Identity(s.WithOpName("output"), multiply);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensor = GenerateRandomTensor<DT_FLOAT>(TensorShape({1, 2, 1}));
auto expected =
EvaluateNodes(item.graph, item.fetch, {{"Placeholder", tensor}});
ASSERT_EQ(expected.size(), 1);
GraphDef g;
ArithmeticOptimizer optimizer;
EnableOnlyReplaceMulWithBroadcastByTile(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &g);
EXPECT_EQ(g.node_size(), 4);
VerifyGraphsMatch(item.graph, g, __LINE__);
auto result = EvaluateNodes(g, item.fetch, {{"Placeholder", tensor}});
ASSERT_EQ(result.size(), 1);
test::ExpectTensorNear<float>(result[0], expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, ReplaceMulWithBroadcastByTileNotConst) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input1 = ops::Placeholder(s.WithOpName("input1"), DT_FLOAT,
ops::Placeholder::Shape({1, 1, 1}));
Output input2 = ops::Placeholder(s.WithOpName("input2"), DT_FLOAT,
ops::Placeholder::Shape({1, 2, 1}));
Output multiply = ops::Mul(s.WithOpName("multiply"), input1, input2);
Output output = ops::Identity(s.WithOpName("output"), multiply);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensor1 = GenerateRandomTensor<DT_FLOAT>(TensorShape({1, 1, 1}));
auto tensor2 = GenerateRandomTensor<DT_FLOAT>(TensorShape({1, 2, 1}));
auto expected = EvaluateNodes(item.graph, item.fetch,
{{"input1", tensor1}, {"input2", tensor2}});
ASSERT_EQ(expected.size(), 1);
GraphDef g;
ArithmeticOptimizer optimizer;
EnableOnlyReplaceMulWithBroadcastByTile(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &g);
EXPECT_EQ(g.node_size(), 4);
VerifyGraphsMatch(item.graph, g, __LINE__);
auto result = EvaluateNodes(item.graph, item.fetch,
{{"input1", tensor1}, {"input2", tensor2}});
ASSERT_EQ(result.size(), 1);
test::ExpectTensorNear<float>(result[0], expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, ReplaceMulWithBroadcastByTileNotOnes) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input =
ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({1, 1, 1}));
Output ones = ops::Const(s.WithOpName("ones"), 2.0f, {1, 2, 1});
Output multiply = ops::Mul(s.WithOpName("multiply"), input, ones);
Output output = ops::Identity(s.WithOpName("output"), multiply);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensor = GenerateRandomTensor<DT_FLOAT>(TensorShape({1, 1, 1}));
auto expected =
EvaluateNodes(item.graph, item.fetch, {{"Placeholder", tensor}});
ASSERT_EQ(expected.size(), 1);
GraphDef g;
ArithmeticOptimizer optimizer;
EnableOnlyReplaceMulWithBroadcastByTile(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &g);
EXPECT_EQ(g.node_size(), 4);
VerifyGraphsMatch(item.graph, g, __LINE__);
auto result = EvaluateNodes(g, item.fetch, {{"Placeholder", tensor}});
ASSERT_EQ(result.size(), 1);
test::ExpectTensorNear<float>(result[0], expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, ReduceUpsamplingDims) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape({1, 22, 48, 64}));
Output reshape_a = ops::Reshape(
s.WithOpName("reshape_a"), input,
ops::Const(s.WithOpName("shape_a"), {1, 22, 1, 48, 1, 64}, {6}));
Output tile =
ops::Tile(s.WithOpName("tile"), reshape_a,
ops::Const(s.WithOpName("multiples"), {1, 1, 2, 1, 2, 1}, {6}));
Output reshape_b =
ops::Reshape(s.WithOpName("reshape_b"), tile,
ops::Const(s.WithOpName("shape_b"), {1, 44, 96, 64}));
Output output = ops::Identity(s.WithOpName("output"), reshape_b);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensor = GenerateRandomTensor<DT_FLOAT>(TensorShape({1, 22, 48, 64}));
auto expected = EvaluateNodes(item.graph, item.fetch, {{"input", tensor}});
ASSERT_EQ(expected.size(), 1);
GraphDef g;
ArithmeticOptimizer optimizer;
EnableOnlyReduceUpsamplingDims(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &g);
EXPECT_EQ(g.node_size(), 8);
ASSERT_EQ(CountOpNodes(g, "Tile"), 1);
ASSERT_EQ(CountOpNodes(g, "Reshape"), 2);
ASSERT_EQ(CountOpNodes(g, "Const"), 3);
NodeMap node_map(&g);
const string p = "ArithmeticOptimizer/ReduceUpsamplingDims";
const NodeDef* ra =
node_map.GetNode(absl::StrCat(p, "_", "Reshape_reshape_b"));
const NodeDef* rb = node_map.GetNode("reshape_b");
const NodeDef* t = node_map.GetNode(absl::StrCat(p, "_", "Tile_reshape_b"));
ASSERT_NE(ra, nullptr);
ASSERT_NE(rb, nullptr);
ASSERT_NE(t, nullptr);
ASSERT_EQ(rb->input_size(), 2);
EXPECT_EQ(rb->input(0), t->name());
ASSERT_EQ(t->input_size(), 2);
EXPECT_EQ(t->input(0), ra->name());
ASSERT_EQ(ra->input_size(), 2);
EXPECT_EQ(ra->input(0), "input");
{
auto result = EvaluateNodes(g, item.fetch, {{"input", tensor}});
ASSERT_EQ(result.size(), 1);
test::ExpectTensorNear<float>(result[0], expected[0], 1e-6);
}
EnableOnlyRemoveRedundantReshape(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &g);
EXPECT_EQ(g.node_size(), 6);
{
auto result = EvaluateNodes(g, item.fetch, {{"input", tensor}});
ASSERT_EQ(result.size(), 1);
test::ExpectTensorNear<float>(result[0], expected[0], 1e-6);
}
}
TEST_F(ArithmeticOptimizerTest, ReplaceMulWithSquare) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
Output d = ops::Const(s.WithOpName("d"), {3.0f, 4.0f}, {1, 2});
Output mul = ops::Mul(s.WithControlDependencies(d).WithOpName("mul"), c, c);
Output mul_no_nan = ops::MulNoNan(s.WithOpName("mul_no_nan"), d, d);
Output id = ops::Identity(s.WithOpName("id"), mul);
Output id2 = ops::Identity(s.WithOpName("id2"), mul_no_nan);
GrapplerItem item;
item.fetch = {"id", "id2"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 2);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyReplaceMulWithSquare(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
EXPECT_EQ(output.node_size(), 6);
NodeMap node_map(&output);
const string p = "ArithmeticOptimizer/ReplaceMulWithSquare";
const NodeDef* square_node = node_map.GetNode(absl::StrCat(p, "_", "mul"));
ASSERT_NE(square_node, nullptr);
EXPECT_EQ(square_node->op(), "Square");
ASSERT_EQ(square_node->input_size(), 2);
EXPECT_EQ(square_node->input(0), "c");
EXPECT_EQ(square_node->input(1), "^d");
const NodeDef* square_node2 =
node_map.GetNode(absl::StrCat(p, "_", "mul_no_nan"));
ASSERT_NE(square_node2, nullptr);
EXPECT_EQ(square_node2->op(), "Square");
ASSERT_EQ(square_node2->input_size(), 1);
EXPECT_EQ(square_node2->input(0), "d");
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 2);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, ReplacePackWithTileReshape) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Placeholder(s.WithOpName("a"), DT_FLOAT,
ops::Placeholder::Shape({3, 5, 7, 11}));
Output b = ops::Stack(s.WithOpName("b"), {a, a}, ops::Stack::Axis(3));
Output c = ops::Stack(s.WithOpName("c"), {b, b}, ops::Stack::Axis(2));
Output o = ops::Identity(s.WithOpName("output"), c);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 5, 7, 11}));
auto expected = EvaluateNodes(item.graph, item.fetch, {{"a", a_t}});
ASSERT_EQ(expected.size(), 1);
GraphDef g;
ArithmeticOptimizer optimizer;
EnableOnlyReplacePackWithTileReshape(&optimizer);
OptimizeAndPrune(&optimizer, &item, &g);
EXPECT_EQ(g.node_size(), 6);
EXPECT_EQ(CountOpNodes(g, "Pack"), 0);
EXPECT_EQ(CountOpNodes(g, "Tile"), 1);
EXPECT_EQ(CountOpNodes(g, "Const"), 2);
EXPECT_EQ(CountOpNodes(g, "Reshape"), 1);
NodeMap node_map(&g);
const string p = "ArithmeticOptimizer/ReplacePackWithTileReshape";
const NodeDef* t_node = node_map.GetNode(absl::StrCat(p, "_", "Tile_c"));
const NodeDef* c_node = node_map.GetNode(absl::StrCat(p, "_", "Multiples_c"));
const NodeDef* s_node = node_map.GetNode(absl::StrCat(p, "_", "Shape_c"));
const NodeDef* r_node = node_map.GetNode(absl::StrCat(p, "_", "Reshape_c"));
const NodeDef* a_node = node_map.GetNode("a");
ASSERT_NE(t_node, nullptr);
ASSERT_NE(c_node, nullptr);
ASSERT_NE(s_node, nullptr);
ASSERT_NE(r_node, nullptr);
ASSERT_NE(a_node, nullptr);
EXPECT_EQ(c_node->op(), "Const");
EXPECT_EQ(s_node->op(), "Const");
ASSERT_EQ(r_node->input_size(), 2);
EXPECT_EQ(r_node->op(), "Reshape");
EXPECT_EQ(r_node->input(0), t_node->name());
EXPECT_EQ(r_node->input(1), s_node->name());
ASSERT_EQ(t_node->input_size(), 2);
EXPECT_EQ(t_node->op(), "Tile");
EXPECT_EQ(t_node->input(0), a_node->name());
EXPECT_EQ(t_node->input(1), c_node->name());
EXPECT_EQ(t_node->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(t_node->attr().at("Tmultiples").type(),
c_node->attr().at("dtype").type());
auto result = EvaluateNodes(g, item.fetch, {{"a", a_t}});
ASSERT_EQ(result.size(), 1);
test::ExpectTensorNear<float>(result[0], expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, ReplacePackWithTileReshapeControlDeps) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Placeholder(s.WithOpName("a"), DT_FLOAT,
ops::Placeholder::Shape({3, 5, 7, 11}));
Output x = ops::Identity(s.WithOpName("x"), a);
Output y = ops::Identity(s.WithOpName("y"), a);
Output b = ops::Stack(s.WithOpName("b").WithControlDependencies(x), {a, a},
ops::Stack::Axis(3));
Output c = ops::Stack(s.WithOpName("c").WithControlDependencies(y), {b, b},
ops::Stack::Axis(2));
Output o = ops::Identity(s.WithOpName("output"), c);
GrapplerItem item;
item.fetch = {"output"};
item.keep_ops = {"x", "y"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 5, 7, 11}));
auto expected = EvaluateNodes(item.graph, item.fetch, {{"a", a_t}});
ASSERT_EQ(expected.size(), 1);
GraphDef g;
ArithmeticOptimizer optimizer;
EnableOnlyReplacePackWithTileReshape(&optimizer);
OptimizeAndPrune(&optimizer, &item, &g);
EXPECT_EQ(g.node_size(), 8);
EXPECT_EQ(CountOpNodes(g, "Pack"), 0);
EXPECT_EQ(CountOpNodes(g, "Tile"), 1);
EXPECT_EQ(CountOpNodes(g, "Const"), 2);
EXPECT_EQ(CountOpNodes(g, "Reshape"), 1);
EXPECT_EQ(CountOpNodes(g, "Identity"), 3);
NodeMap node_map(&g);
const string p = "ArithmeticOptimizer/ReplacePackWithTileReshape";
const NodeDef* t_node = node_map.GetNode(absl::StrCat(p, "_", "Tile_c"));
const NodeDef* c_node = node_map.GetNode(absl::StrCat(p, "_", "Multiples_c"));
const NodeDef* s_node = node_map.GetNode(absl::StrCat(p, "_", "Shape_c"));
const NodeDef* a_node = node_map.GetNode("a");
ASSERT_NE(t_node, nullptr);
ASSERT_NE(c_node, nullptr);
ASSERT_NE(s_node, nullptr);
ASSERT_NE(a_node, nullptr);
ASSERT_EQ(t_node->input_size(), 4);
EXPECT_EQ(t_node->op(), "Tile");
EXPECT_EQ(t_node->input(0), a_node->name());
EXPECT_EQ(t_node->input(1), c_node->name());
EXPECT_EQ(t_node->input(2), "^y");
EXPECT_EQ(t_node->input(3), "^x");
ASSERT_EQ(c_node->input_size(), 1);
EXPECT_EQ(c_node->input(0), "^a");
ASSERT_EQ(s_node->input_size(), 1);
ASSERT_EQ(s_node->input(0), "^a");
auto result = EvaluateNodes(g, item.fetch, {{"a", a_t}});
ASSERT_EQ(result.size(), 1);
test::ExpectTensorNear<float>(result[0], expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, ReplacePackWithTileRemoveReshape) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Placeholder(s.WithOpName("a"), DT_FLOAT,
ops::Placeholder::Shape({3, 5, 7, 11}));
Output b = ops::Stack(s.WithOpName("b"), {a, a}, ops::Stack::Axis(3));
Output c = ops::Stack(s.WithOpName("c"), {b, b}, ops::Stack::Axis(2));
Output r =
ops::Reshape(s.WithOpName("r"), c, ops::Const(s, {3, 10, 14, 11}, {4}));
Output o = ops::Identity(s.WithOpName("output"), r);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 5, 7, 11}));
auto expected = EvaluateNodes(item.graph, item.fetch, {{"a", a_t}});
ASSERT_EQ(expected.size(), 1);
GraphDef g;
ArithmeticOptimizer optimizer;
EnableOnlyReplacePackWithTileReshape(&optimizer);
OptimizeAndPrune(&optimizer, &item, &g);
EXPECT_EQ(g.node_size(), 8);
EXPECT_EQ(CountOpNodes(g, "Pack"), 0);
EXPECT_EQ(CountOpNodes(g, "Tile"), 1);
EXPECT_EQ(CountOpNodes(g, "Const"), 3);
EXPECT_EQ(CountOpNodes(g, "Reshape"), 2);
EnableOnlyRemoveRedundantReshape(&optimizer);
OptimizeAndPrune(&optimizer, &item, &g);
EXPECT_EQ(g.node_size(), 6);
EXPECT_EQ(CountOpNodes(g, "Pack"), 0);
EXPECT_EQ(CountOpNodes(g, "Tile"), 1);
EXPECT_EQ(CountOpNodes(g, "Const"), 2);
EXPECT_EQ(CountOpNodes(g, "Reshape"), 1);
auto result = EvaluateNodes(g, item.fetch, {{"a", a_t}});
ASSERT_EQ(result.size(), 1);
test::ExpectTensorNear<float>(result[0], expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, ReplacePackWithTileReshapeOutOfRange) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Placeholder(s.WithOpName("a"), DT_FLOAT,
ops::Placeholder::Shape({3, 5, 7, 11}));
Output b = ops::Stack(s.WithOpName("b"), {a, a}, ops::Stack::Axis(4));
Output o = ops::Identity(s.WithOpName("output"), b);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphDef g;
ArithmeticOptimizer optimizer;
EnableOnlyReplacePackWithTileReshape(&optimizer);
OptimizeAndPrune(&optimizer, &item, &g);
VerifyGraphsMatch(item.graph, g, __LINE__);
}
TEST_F(ArithmeticOptimizerTest, RemoveInvolutionAdjacentNodes) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
auto neg1 = ops::Neg(s.WithOpName("neg1"), c);
auto neg2 = ops::Neg(s.WithOpName("neg2"), neg1);
auto recip1 = ops::Reciprocal(s.WithOpName("recip1"), neg2);
auto recip2 = ops::Reciprocal(s.WithOpName("recip2"), recip1);
auto id = ops::Identity(s.WithOpName("id"), recip2);
GrapplerItem item;
item.fetch = {"id"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveInvolution(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
ASSERT_EQ(output.node_size(), 2);
EXPECT_EQ(output.node(1).name(), "id");
ASSERT_EQ(output.node(1).input_size(), 1);
EXPECT_EQ(output.node(1).input(0), "c");
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, RemoveInvolutionAroundValuePreservingChain) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
auto recip1 = ops::Reciprocal(s.WithOpName("recip1"), c);
auto id1 = ops::Identity(s.WithOpName("id1"), recip1);
auto squeeze = ops::Squeeze(s.WithOpName("squeeze"), id1);
auto recip2 = ops::Reciprocal(s.WithOpName("recip2"), squeeze);
auto id2 = ops::Identity(s.WithOpName("id2"), recip2);
std::vector<string> fetch = {"id2"};
GrapplerItem item;
item.fetch = fetch;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, fetch);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveInvolution(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &output);
EXPECT_EQ(output.node_size(), 3);
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "squeeze") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "c");
found++;
} else if (node.name() == "id2") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "squeeze");
found++;
}
}
EXPECT_EQ(found, 2);
auto tensors = EvaluateNodes(output, fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, RemoveInvolutionSkipControlDependencies) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
auto recip1 = ops::Reciprocal(s.WithOpName("recip1"), c);
auto id1 = ops::Identity(s.WithOpName("id1"), recip1);
auto squeeze = ops::Squeeze(s.WithOpName("squeeze"), id1);
auto recip2 = ops::Reciprocal(
s.WithOpName("recip2").WithControlDependencies(squeeze), c);
auto id2 = ops::Identity(s.WithOpName("id2"), recip2);
std::vector<string> fetch = {"id2"};
GrapplerItem item;
item.fetch = fetch;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, fetch);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveInvolution(&optimizer);
OptimizeTwice(&optimizer, &item, &output);
VerifyGraphsMatch(item.graph, output, __LINE__);
auto tensors = EvaluateNodes(output, fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, TrivialSumsSimple) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
Output add = ops::Add(s.WithOpName("add"), x, x);
Output id = ops::Identity(s.WithOpName("id"), add);
GrapplerItem item;
item.fetch = {"id"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
ArithmeticOptimizer optimizer;
GraphDef output;
OptimizeTwice(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 5);
const string optimized_const_name = AggregationConstName("add");
const string optimized_mul_name = AggregationMulName("add");
const NodeDef* new_const = node_map.GetNode(optimized_const_name);
ASSERT_NE(new_const, nullptr);
ASSERT_EQ(new_const->input_size(), 1);
EXPECT_EQ(new_const->input(0), "^x");
EXPECT_EQ(new_const->attr().at("value").tensor().tensor_content(),
string("\0\0\0@", 4));
const NodeDef* new_mul = node_map.GetNode(optimized_mul_name);
ASSERT_NE(new_mul, nullptr);
ASSERT_EQ(new_mul->input_size(), 2);
EXPECT_EQ(new_mul->input(0), optimized_const_name);
EXPECT_EQ(new_mul->input(1), "x");
const NodeDef* new_id = node_map.GetNode("id");
ASSERT_NE(new_id, nullptr);
ASSERT_EQ(new_id->input_size(), 1);
EXPECT_EQ(new_id->input(0), optimized_mul_name);
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, TrivialSumsSimpleWithControlDep) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output y = ops::Const(s.WithOpName("y"), {1.0f, 2.0f}, {1, 2});
Output x = ops::Const(s.WithOpName("x"), {3.0f, 4.0f}, {1, 2});
Output add = ops::Add(s.WithOpName("add").WithControlDependencies(y), x, x);
Output id = ops::Identity(s.WithOpName("id"), add);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
std::vector<string> fetch = {"id"};
auto tensors_expected = EvaluateNodes(item.graph, fetch);
ASSERT_EQ(tensors_expected.size(), 1);
ArithmeticOptimizer optimizer;
GraphDef output;
OptimizeTwice(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 6);
const string optimized_const_name = AggregationConstName("add");
const string optimized_mul_name = AggregationMulName("add");
const NodeDef* new_const = node_map.GetNode(optimized_const_name);
ASSERT_NE(new_const, nullptr);
ASSERT_EQ(new_const->input_size(), 1);
EXPECT_EQ(new_const->input(0), "^x");
EXPECT_EQ(new_const->attr().at("value").tensor().tensor_content(),
string("\0\0\0@", 4));
const NodeDef* new_mul = node_map.GetNode(optimized_mul_name);
ASSERT_NE(new_mul, nullptr);
ASSERT_EQ(new_mul->input_size(), 3);
EXPECT_EQ(new_mul->input(0), optimized_const_name);
EXPECT_EQ(new_mul->input(1), "x");
EXPECT_EQ(new_mul->input(2), "^y");
const NodeDef* new_id = node_map.GetNode("id");
ASSERT_NE(new_id, nullptr);
ASSERT_EQ(new_id->input_size(), 1);
EXPECT_EQ(new_id->input(0), optimized_mul_name);
auto tensors = EvaluateNodes(output, fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, TrivialSumsRepeatedAdd) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output p = ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({10, 10}));
Output add = ops::Add(s.WithOpName("Add"), p, p);
Output add1 = ops::Add(s.WithOpName("Add_1"), p, p);
Output add4 = ops::Add(s.WithOpName("Add_4"), add, add1);
Output add5 = ops::Add(s.WithOpName("Add_5"), add, add1);
Output add6 = ops::Add(s.WithOpName("Add_6"), add4, add5);
Output id = ops::Identity(s.WithOpName("id"), add6);
GrapplerItem item;
item.fetch = {"id"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
const std::vector<string> devices{
"/device:CPU:0", "/device:GPU:0", "/device:CPU:0", "/device:GPU:1",
"/device:CPU:0", "/device:CPU:0", "/device:CPU:0",
};
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device(devices[i]);
}
ArithmeticOptimizer optimizer(RewriterConfig::AGGRESSIVE);
DisableAddToAddNCombining(&optimizer);
GraphDef output;
DedupAndOptimizeTwiceAndPrune(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 8);
const NodeDef* id_node = node_map.GetNode("id");
ASSERT_NE(id_node, nullptr);
ASSERT_EQ(id_node->input_size(), 1);
EXPECT_EQ(id_node->input(0), HoistMulName("Add_6"));
const NodeDef* mul_node = node_map.GetNode(HoistMulName("Add_6"));
ASSERT_NE(mul_node, nullptr);
ASSERT_EQ(mul_node->input_size(), 2);
EXPECT_EQ(mul_node->input(0), "Placeholder");
EXPECT_EQ(mul_node->input(1), HoistAddName("Add_6"));
const NodeDef* add_6_node = node_map.GetNode(HoistAddName("Add_6"));
ASSERT_NE(add_6_node, nullptr);
ASSERT_EQ(add_6_node->input_size(), 2);
EXPECT_EQ(add_6_node->input(0), HoistAddName("Add_4"));
EXPECT_EQ(add_6_node->input(1), HoistAddName("Add_5"));
const NodeDef* add_4_node = node_map.GetNode(HoistAddName("Add_4"));
ASSERT_NE(add_4_node, nullptr);
EXPECT_EQ(add_4_node->op(), "Add");
ASSERT_EQ(2, add_4_node->input_size());
EXPECT_EQ(add_4_node->input(0), AggregationConstName("Add"));
EXPECT_EQ(add_4_node->input(1), AggregationConstName("Add_1"));
const NodeDef* add_5_node = node_map.GetNode(HoistAddName("Add_5"));
ASSERT_NE(add_5_node, nullptr);
EXPECT_EQ(add_5_node->op(), "Add");
ASSERT_EQ(add_5_node->input_size(), 2);
EXPECT_EQ(add_5_node->input(0), AggregationConstName("Add"));
EXPECT_EQ(add_5_node->input(1), AggregationConstName("Add_1"));
const NodeDef* add_const_node = node_map.GetNode(AggregationConstName("Add"));
ASSERT_NE(add_const_node, nullptr);
EXPECT_EQ(add_const_node->op(), "Const");
ASSERT_EQ(add_const_node->input_size(), 1);
EXPECT_EQ(add_const_node->input(0), "^Placeholder");
const NodeDef* add_1_const_node =
node_map.GetNode(AggregationConstName("Add_1"));
ASSERT_NE(add_1_const_node, nullptr);
EXPECT_EQ(add_1_const_node->op(), "Const");