ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---
36f457be-4d7f-4734-a657-1bf3fcf01002 | cpp | tensorflow/tensorflow | spectral_ops | tensorflow/core/ops/spectral_ops.cc | tensorflow/core/ops/spectral_ops_test.cc |
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
REGISTER_OP("FFT")
.Input("input: Tcomplex")
.Output("output: Tcomplex")
.Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
.SetShapeFn([](InferenceContext* c) {
return shape_inference::UnchangedShapeWithRankAtLeast(c, 1);
});
REGISTER_OP("IFFT")
.Input("input: Tcomplex")
.Output("output: Tcomplex")
.Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
.SetShapeFn([](InferenceContext* c) {
return shape_inference::UnchangedShapeWithRankAtLeast(c, 1);
});
REGISTER_OP("FFT2D")
.Input("input: Tcomplex")
.Output("output: Tcomplex")
.Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
.SetShapeFn([](InferenceContext* c) {
return shape_inference::UnchangedShapeWithRankAtLeast(c, 2);
});
REGISTER_OP("IFFT2D")
.Input("input: Tcomplex")
.Output("output: Tcomplex")
.Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
.SetShapeFn([](InferenceContext* c) {
return shape_inference::UnchangedShapeWithRankAtLeast(c, 2);
});
REGISTER_OP("FFT3D")
.Input("input: Tcomplex")
.Output("output: Tcomplex")
.Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
.SetShapeFn([](InferenceContext* c) {
return shape_inference::UnchangedShapeWithRankAtLeast(c, 3);
});
REGISTER_OP("IFFT3D")
.Input("input: Tcomplex")
.Output("output: Tcomplex")
.Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
.SetShapeFn([](InferenceContext* c) {
return shape_inference::UnchangedShapeWithRankAtLeast(c, 3);
});
REGISTER_OP("FFTND")
.Input("input: Tcomplex")
.Input("fft_length: int32")
.Input("axes: int32")
.Output("output: Tcomplex")
.Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
.SetShapeFn([](InferenceContext* c) {
return shape_inference::UnchangedShapeWithRankAtLeast(c, 1);
});
REGISTER_OP("IFFTND")
.Input("input: Tcomplex")
.Input("fft_length: int32")
.Input("axes: int32")
.Output("output: Tcomplex")
.Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
.SetShapeFn([](InferenceContext* c) {
return shape_inference::UnchangedShapeWithRankAtLeast(c, 1);
});
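// Note (added commentary, not in the original source): all of the complex
// FFT ops above share one shape function. An N-dimensional transform is
// applied over the innermost N dimensions, so the output shape equals the
// input shape and the only constraint is rank >= N; any outer dimensions
// are treated as batch dimensions.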
Status RFFTShape(InferenceContext* c, const bool forward, const int rank) {
ShapeHandle out;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), rank, &out));
ShapeHandle unused_shape;
DimensionHandle unused_dim;
ShapeHandle fft_length_input = c->input(1);
TF_RETURN_IF_ERROR(c->WithRank(fft_length_input, 1, &unused_shape));
TF_RETURN_IF_ERROR(
c->WithValue(c->Dim(fft_length_input, 0), rank, &unused_dim));
const Tensor* fft_length_tensor = c->input_tensor(1);
if (fft_length_tensor == nullptr) {
for (int i = 0; i < rank; ++i) {
TF_RETURN_IF_ERROR(c->ReplaceDim(out, -rank + i, c->UnknownDim(), &out));
}
} else {
auto fft_length_as_vec = fft_length_tensor->vec<int32>();
for (int i = 0; i < rank; ++i) {
auto dim = forward && i == rank - 1 && fft_length_as_vec(i) != 0
? fft_length_as_vec(i) / 2 + 1
: fft_length_as_vec(i);
TF_RETURN_IF_ERROR(c->ReplaceDim(out, -rank + i, c->MakeDim(dim), &out));
}
}
c->set_output(0, out);
return absl::OkStatus();
}
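// Worked example (added commentary): a forward real-to-complex FFT stores
// only the non-redundant half of the Hermitian-symmetric spectrum, so the
// innermost output dimension is fft_length / 2 + 1. For fft_length = 10
// that is 6; for 11 it is also 6 (integer division); for 12 it is 7. The
// inverse direction (forward == false) restores the full fft_length, which
// is exactly what the RFFT/IRFFT cases in the unit test exercise.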
REGISTER_OP("RFFT")
.Input("input: Treal")
.Input("fft_length: int32")
.Output("output: Tcomplex")
.Attr("Treal: {float32, float64} = DT_FLOAT")
.Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
.SetShapeFn([](InferenceContext* c) { return RFFTShape(c, true, 1); });
REGISTER_OP("IRFFT")
.Input("input: Tcomplex")
.Input("fft_length: int32")
.Output("output: Treal")
.Attr("Treal: {float32, float64} = DT_FLOAT")
.Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
.SetShapeFn([](InferenceContext* c) { return RFFTShape(c, false, 1); });
REGISTER_OP("RFFT2D")
.Input("input: Treal")
.Input("fft_length: int32")
.Output("output: Tcomplex")
.Attr("Treal: {float32, float64} = DT_FLOAT")
.Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
.SetShapeFn([](InferenceContext* c) { return RFFTShape(c, true, 2); });
REGISTER_OP("IRFFT2D")
.Input("input: Tcomplex")
.Input("fft_length: int32")
.Output("output: Treal")
.Attr("Treal: {float32, float64} = DT_FLOAT")
.Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
.SetShapeFn([](InferenceContext* c) { return RFFTShape(c, false, 2); });
REGISTER_OP("RFFT3D")
.Input("input: Treal")
.Input("fft_length: int32")
.Output("output: Tcomplex")
.Attr("Treal: {float32, float64} = DT_FLOAT")
.Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
.SetShapeFn([](InferenceContext* c) { return RFFTShape(c, true, 3); });
REGISTER_OP("IRFFT3D")
.Input("input: Tcomplex")
.Input("fft_length: int32")
.Output("output: Treal")
.Attr("Treal: {float32, float64} = DT_FLOAT")
.Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
.SetShapeFn([](InferenceContext* c) { return RFFTShape(c, false, 3); });
REGISTER_OP("RFFTND")
.Input("input: Treal")
.Input("fft_length: int32")
.Input("axes: int32")
.Output("output: Tcomplex")
.Attr("Treal: {float32, float64} = DT_FLOAT")
.Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
.SetShapeFn([](InferenceContext* c) {
return shape_inference::UnchangedShapeWithRankAtLeast(c, 1);
});
REGISTER_OP("IRFFTND")
.Input("input: Tcomplex")
.Input("fft_length: int32")
.Input("axes: int32")
.Output("output: Treal")
.Attr("Treal: {float32, float64} = DT_FLOAT")
.Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64")
.SetShapeFn([](InferenceContext* c) {
return shape_inference::UnchangedShapeWithRankAtLeast(c, 1);
});
REGISTER_OP("BatchFFT")
.Input("input: complex64")
.Output("output: complex64")
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(15, "Use FFT");
REGISTER_OP("BatchIFFT")
.Input("input: complex64")
.Output("output: complex64")
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(15, "Use IFFT");
REGISTER_OP("BatchFFT2D")
.Input("input: complex64")
.Output("output: complex64")
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(15, "Use FFT2D");
REGISTER_OP("BatchIFFT2D")
.Input("input: complex64")
.Output("output: complex64")
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(15, "Use IFFT2D");
REGISTER_OP("BatchFFT3D")
.Input("input: complex64")
.Output("output: complex64")
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(15, "Use FFT3D");
REGISTER_OP("BatchIFFT3D")
.Input("input: complex64")
.Output("output: complex64")
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(15, "Use IFFT3D");
} |
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
namespace tensorflow {
TEST(MathOpsTest, FFT_ShapeFn) {
for (const auto* op_name : {"FFT", "IFFT"}) {
ShapeInferenceTestOp op(op_name);
INFER_OK(op, "?", "in0");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[]");
INFER_OK(op, "[?]", "in0");
INFER_OK(op, "[1]", "in0");
INFER_OK(op, "[1,2,3,4,5,6,7]", "in0");
}
for (const auto* op_name : {"FFT2D", "IFFT2D"}) {
ShapeInferenceTestOp op(op_name);
INFER_OK(op, "?", "in0");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]");
INFER_OK(op, "[?,1]", "in0");
INFER_OK(op, "[1,2]", "in0");
INFER_OK(op, "[1,2,3,4,5,6,7]", "in0");
}
for (const auto* op_name : {"FFT3D", "IFFT3D"}) {
ShapeInferenceTestOp op(op_name);
INFER_OK(op, "?", "in0");
INFER_ERROR("Shape must be at least rank 3 but is rank 2", op, "[1,2]");
INFER_OK(op, "[?,1,?]", "in0");
INFER_OK(op, "[1,2,3]", "in0");
INFER_OK(op, "[1,2,3,4,5,6,7]", "in0");
}
}
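// A hedged sketch, not part of the original test file: FFTND/IFFTND take
// two extra int32 inputs (fft_length and axes) but register the same
// rank-at-least-1 shape function, so a test in the style above might look
// like this.
TEST(MathOpsTest, FFTND_ShapeFn) {
for (const auto* op_name : {"FFTND", "IFFTND"}) {
ShapeInferenceTestOp op(op_name);
INFER_OK(op, "?;?;?", "in0");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[];[1];[1]");
INFER_OK(op, "[1,2,3];[3];[3]", "in0");
}
}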
TEST(MathOpsTest, RFFT_ShapeFn) {
for (const bool forward : {true, false}) {
ShapeInferenceTestOp op(forward ? "RFFT" : "IRFFT");
INFER_OK(op, "?;?", "?");
INFER_OK(op, "?;[1]", "?");
INFER_OK(op, "[1];?", "[?]");
INFER_OK(op, "[1];[1]", "[?]");
INFER_OK(op, "[?];[1]", "[?]");
INFER_OK(op, "[1,2,3,4];[1]", "[d0_0,d0_1,d0_2,?]");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[];?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[1];[1,1]");
INFER_ERROR("Dimension must be 1 but is 2", op, "[1];[2]");
op.input_tensors.resize(2);
Tensor fft_length = test::AsTensor<int32>({10});
op.input_tensors[1] = &fft_length;
if (forward) {
INFER_OK(op, "[?];[1]", "[6]");
INFER_OK(op, "[1];[1]", "[6]");
INFER_OK(op, "[1,1];[1]", "[d0_0,6]");
} else {
INFER_OK(op, "[?];[1]", "[10]");
INFER_OK(op, "[1];[1]", "[10]");
INFER_OK(op, "[1,1];[1]", "[d0_0,10]");
}
fft_length = test::AsTensor<int32>({11});
if (forward) {
INFER_OK(op, "[?];[1]", "[6]");
INFER_OK(op, "[1];[1]", "[6]");
INFER_OK(op, "[1,1];[1]", "[d0_0,6]");
} else {
INFER_OK(op, "[?];[1]", "[11]");
INFER_OK(op, "[1];[1]", "[11]");
INFER_OK(op, "[1,1];[1]", "[d0_0,11]");
}
fft_length = test::AsTensor<int32>({12});
if (forward) {
INFER_OK(op, "[?];[1]", "[7]");
INFER_OK(op, "[1];[1]", "[7]");
INFER_OK(op, "[1,1];[1]", "[d0_0,7]");
} else {
INFER_OK(op, "[?];[1]", "[12]");
INFER_OK(op, "[1];[1]", "[12]");
INFER_OK(op, "[1,1];[1]", "[d0_0,12]");
}
}
for (const bool forward : {true, false}) {
ShapeInferenceTestOp op(forward ? "RFFT2D" : "IRFFT2D");
INFER_OK(op, "?;?", "?");
INFER_OK(op, "?;[2]", "?");
INFER_OK(op, "[1,1];?", "[?,?]");
INFER_OK(op, "[1,1];[2]", "[?,?]");
INFER_OK(op, "[?,?];[2]", "[?,?]");
INFER_OK(op, "[1,2,3,4];[2]", "[d0_0,d0_1,?,?]");
INFER_ERROR("Shape must be at least rank 2 but is rank 0", op, "[];?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[1,1];[1,1]");
INFER_ERROR("Dimension must be 2 but is 3", op, "[1,1];[3]");
op.input_tensors.resize(2);
Tensor fft_length = test::AsTensor<int32>({9, 10});
op.input_tensors[1] = &fft_length;
if (forward) {
INFER_OK(op, "[?,?];[2]", "[9,6]");
INFER_OK(op, "[1,1];[2]", "[9,6]");
INFER_OK(op, "[1,1,1];[2]", "[d0_0,9,6]");
} else {
INFER_OK(op, "[?,?];[2]", "[9,10]");
INFER_OK(op, "[1,1];[2]", "[9,10]");
INFER_OK(op, "[1,1,1];[2]", "[d0_0,9,10]");
}
fft_length = test::AsTensor<int32>({10, 11});
if (forward) {
INFER_OK(op, "[?,?];[2]", "[10,6]");
INFER_OK(op, "[1,1];[2]", "[10,6]");
INFER_OK(op, "[1,1,1];[2]", "[d0_0,10,6]");
} else {
INFER_OK(op, "[?,?];[2]", "[10,11]");
INFER_OK(op, "[1,1];[2]", "[10,11]");
INFER_OK(op, "[1,1,1];[2]", "[d0_0,10,11]");
}
fft_length = test::AsTensor<int32>({11, 12});
if (forward) {
INFER_OK(op, "[?,?];[2]", "[11,7]");
INFER_OK(op, "[1,1];[2]", "[11,7]");
INFER_OK(op, "[1,1,1];[2]", "[d0_0,11,7]");
} else {
INFER_OK(op, "[?,?];[2]", "[11,12]");
INFER_OK(op, "[1,1];[2]", "[11,12]");
INFER_OK(op, "[1,1,1];[2]", "[d0_0,11,12]");
}
}
for (const bool forward : {true, false}) {
ShapeInferenceTestOp op(forward ? "RFFT3D" : "IRFFT3D");
INFER_OK(op, "?;?", "?");
INFER_OK(op, "?;[3]", "?");
INFER_OK(op, "[1,1,1];?", "[?,?,?]");
INFER_OK(op, "[1,1,1];[3]", "[?,?,?]");
INFER_OK(op, "[?,?,?];[3]", "[?,?,?]");
INFER_OK(op, "[1,2,3,4];[3]", "[d0_0,?,?,?]");
INFER_ERROR("Shape must be at least rank 3 but is rank 0", op, "[];?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[1,1,1];[1,1]");
INFER_ERROR("Dimension must be 3 but is 4", op, "[1,1,1];[4]");
op.input_tensors.resize(2);
Tensor fft_length = test::AsTensor<int32>({10, 11, 12});
op.input_tensors[1] = &fft_length;
if (forward) {
INFER_OK(op, "[?,?,?];[3]", "[10,11,7]");
INFER_OK(op, "[1,1,1];[3]", "[10,11,7]");
INFER_OK(op, "[1,1,1,1];[3]", "[d0_0,10,11,7]");
} else {
INFER_OK(op, "[?,?,?];[3]", "[10,11,12]");
INFER_OK(op, "[1,1,1];[3]", "[10,11,12]");
INFER_OK(op, "[1,1,1,1];[3]", "[d0_0,10,11,12]");
}
fft_length = test::AsTensor<int32>({11, 12, 13});
if (forward) {
INFER_OK(op, "[?,?,?];[3]", "[11,12,7]");
INFER_OK(op, "[1,1,1];[3]", "[11,12,7]");
INFER_OK(op, "[1,1,1,1];[3]", "[d0_0,11,12,7]");
} else {
INFER_OK(op, "[?,?,?];[3]", "[11,12,13]");
INFER_OK(op, "[1,1,1];[3]", "[11,12,13]");
INFER_OK(op, "[1,1,1,1];[3]", "[d0_0,11,12,13]");
}
fft_length = test::AsTensor<int32>({12, 13, 14});
if (forward) {
INFER_OK(op, "[?,?,?];[3]", "[12,13,8]");
INFER_OK(op, "[1,1,1];[3]", "[12,13,8]");
INFER_OK(op, "[1,1,1,1];[3]", "[d0_0,12,13,8]");
} else {
INFER_OK(op, "[?,?,?];[3]", "[12,13,14]");
INFER_OK(op, "[1,1,1];[3]", "[12,13,14]");
INFER_OK(op, "[1,1,1,1];[3]", "[d0_0,12,13,14]");
}
}
}
} |
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/spectral_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/spectral_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea

f4324f89-eb93-434c-987d-d0f5ebdbb8c2 | cpp | tensorflow/tensorflow | data_flow_ops | tensorflow/core/ops/data_flow_ops.cc | tensorflow/core/ops/data_flow_ops_test.cc |
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
namespace {
Status DequeueManyV2Shape(InferenceContext* c, ShapeHandle n_shape) {
auto* t = c->input_handle_shapes_and_types(0);
if (t != nullptr && t->size() == c->num_outputs()) {
for (int i = 0; i < c->num_outputs(); ++i) {
ShapeHandle combined_shape;
TF_RETURN_IF_ERROR(
c->Concatenate(n_shape, (*t)[i].shape, &combined_shape));
c->set_output(i, combined_shape);
}
return absl::OkStatus();
} else {
return shape_inference::UnknownShape(c);
}
}
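// Worked example (added commentary): if the queue handle carries component
// shapes [1,3] and [2] and n_shape is [n], the outputs become [n,1,3] and
// [n,2]. When the recorded shape count does not match the number of
// outputs, everything falls back to unknown shapes.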
}
REGISTER_OP("DynamicPartition")
.Input("data: T")
.Input("partitions: int32")
.Output("outputs: num_partitions * T")
.Attr("num_partitions: int")
.Attr("T: type")
.SetShapeFn([](InferenceContext* c) {
int64_t num_partitions;
TF_RETURN_IF_ERROR(c->GetAttr("num_partitions", &num_partitions));
ShapeHandle data_shape = c->input(0);
ShapeHandle partitions_shape = c->input(1);
if (!c->RankKnown(partitions_shape)) {
return shape_inference::UnknownShape(c);
}
const int64_t rank = c->Rank(partitions_shape);
ShapeHandle unused;
TF_RETURN_IF_ERROR(
c->MergePrefix(data_shape, partitions_shape, &unused, &unused));
ShapeHandle unknown_dim0 = c->MakeShape({c->UnknownDim()});
ShapeHandle data_suffix_shape;
TF_RETURN_IF_ERROR(c->Subshape(data_shape, rank, &data_suffix_shape));
ShapeHandle result_shape;
TF_RETURN_IF_ERROR(
c->Concatenate(unknown_dim0, data_suffix_shape, &result_shape));
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, result_shape);
}
return absl::OkStatus();
});
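// Worked example (added commentary): for DynamicPartition with data [3,4,5]
// and partitions [3,4], the partitions shape must be a prefix of the data
// shape; each of the num_partitions outputs is [?,5], i.e. an unknown
// leading dimension followed by the data suffix that remains after dropping
// the partitions rank.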
namespace {
Status DynamicStitchShapeFunction(InferenceContext* c) {
int32_t num_partitions;
TF_RETURN_IF_ERROR(c->GetAttr("N", &num_partitions));
bool all_indices_constant = true;
int32_t max_index = -1;
ShapeHandle extra_shape = c->UnknownShape();
for (int i = 0; i < num_partitions; ++i) {
const Tensor* indices_t = c->input_tensor(i);
if (indices_t == nullptr) {
all_indices_constant = false;
}
ShapeHandle indices_shape = c->input(i);
ShapeHandle data_shape = c->input(i + num_partitions);
if (!c->RankKnown(indices_shape)) {
continue;
}
const int64_t indices_rank = c->Rank(indices_shape);
ShapeHandle unused;
TF_RETURN_IF_ERROR(
c->MergePrefix(data_shape, indices_shape, &unused, &unused));
ShapeHandle rest;
TF_RETURN_IF_ERROR(c->Subshape(data_shape, indices_rank, &rest));
TF_RETURN_IF_ERROR(c->Merge(extra_shape, rest, &extra_shape));
if (indices_t != nullptr) {
const int32* indices = indices_t->flat<int32>().data();
int64_t count = indices_t->NumElements();
for (int64_t i = 0; i < count; ++i) {
if (indices[i] > max_index) {
max_index = indices[i];
}
}
}
}
ShapeHandle output_shape = c->Vector(
all_indices_constant ? c->MakeDim(max_index + 1) : c->UnknownDim());
TF_RETURN_IF_ERROR(c->Concatenate(output_shape, extra_shape, &output_shape));
c->set_output(0, output_shape);
return absl::OkStatus();
}
}
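// Worked example (added commentary): DynamicStitch merges N index/data
// pairs. When every indices tensor is a compile-time constant, the leading
// output dimension is max_index + 1 (e.g. a largest index of 1000 yields
// 1001); otherwise it stays unknown. The trailing dimensions come from
// merging each data shape with its indices rank stripped off.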
REGISTER_OP("DynamicStitch")
.Input("indices: N * int32")
.Input("data: N * T")
.Output("merged: T")
.Attr("N : int >= 1")
.Attr("T : type")
.SetShapeFn(DynamicStitchShapeFunction);
REGISTER_OP("ParallelDynamicStitch")
.Input("indices: N * int32")
.Input("data: N * T")
.Output("merged: T")
.Attr("N : int >= 1")
.Attr("T : type")
.SetShapeFn(DynamicStitchShapeFunction);
namespace {
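// Added commentary: the helpers below encode the Ref(string) handle
// convention, in which a reference-typed handle is a 2-element string
// vector. This one checks every input against that shape and makes all
// outputs scalars.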
Status TwoElementVectorInputsAndScalarOutputs(InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_handle;
for (int i = 0; i < c->num_inputs(); ++i) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_handle));
}
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, c->Scalar());
}
return absl::OkStatus();
}
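// Added commentary: emits the 2-element vector shape used by ops that
// produce a Ref(string) handle.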
Status TwoElementOutput(InferenceContext* c) {
c->set_output(0, c->Vector(2));
return absl::OkStatus();
}
}
REGISTER_OP("RandomShuffleQueue")
.Output("handle: Ref(string)")
.Attr("component_types: list(type) >= 1")
.Attr("shapes: list(shape) >= 0 = []")
.Attr("capacity: int = -1")
.Attr("min_after_dequeue: int = 0")
.Attr("seed: int = 0")
.Attr("seed2: int = 0")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput);
REGISTER_OP("RandomShuffleQueueV2")
.Output("handle: resource")
.Attr("component_types: list(type) >= 1")
.Attr("shapes: list(shape) >= 0 = []")
.Attr("capacity: int = -1")
.Attr("min_after_dequeue: int = 0")
.Attr("seed: int = 0")
.Attr("seed2: int = 0")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("FIFOQueue")
.Output("handle: Ref(string)")
.Attr("component_types: list(type) >= 1")
.Attr("shapes: list(shape) >= 0 = []")
.Attr("capacity: int = -1")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput);
REGISTER_OP("FIFOQueueV2")
.Output("handle: resource")
.Attr("component_types: list(type) >= 1")
.Attr("shapes: list(shape) >= 0 = []")
.Attr("capacity: int = -1")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("PaddingFIFOQueue")
.Output("handle: Ref(string)")
.Attr("component_types: list(type) >= 1")
.Attr("shapes: list(shape) >= 0 = []")
.Attr("capacity: int = -1")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput);
REGISTER_OP("PaddingFIFOQueueV2")
.Output("handle: resource")
.Attr("component_types: list(type) >= 1")
.Attr("shapes: list(shape) >= 0 = []")
.Attr("capacity: int = -1")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("PriorityQueue")
.Output("handle: Ref(string)")
.Attr("component_types: list(type) >= 0 = []")
.Attr("shapes: list(shape) >= 0")
.Attr("capacity: int = -1")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput);
REGISTER_OP("PriorityQueueV2")
.Output("handle: resource")
.Attr("component_types: list(type) >= 0 = []")
.Attr("shapes: list(shape) >= 0")
.Attr("capacity: int = -1")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("FakeQueue")
.Input("resource: resource")
.Output("handle: Ref(string)")
.SetIsStateful()
.SetShapeFn(TwoElementOutput);
REGISTER_OP("QueueEnqueue")
.Input("handle: Ref(string)")
.Input("components: Tcomponents")
.Attr("Tcomponents: list(type) >= 1")
.Attr("timeout_ms: int = -1")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("QueueEnqueueV2")
.Input("handle: resource")
.Input("components: Tcomponents")
.Attr("Tcomponents: list(type) >= 1")
.Attr("timeout_ms: int = -1")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("QueueEnqueueMany")
.Input("handle: Ref(string)")
.Input("components: Tcomponents")
.Attr("Tcomponents: list(type) >= 1")
.Attr("timeout_ms: int = -1")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("QueueEnqueueManyV2")
.Input("handle: resource")
.Input("components: Tcomponents")
.Attr("Tcomponents: list(type) >= 1")
.Attr("timeout_ms: int = -1")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("QueueDequeue")
.Input("handle: Ref(string)")
.Output("components: component_types")
.Attr("component_types: list(type) >= 1")
.Attr("timeout_ms: int = -1")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("QueueDequeueV2")
.Input("handle: resource")
.Output("components: component_types")
.Attr("component_types: list(type) >= 1")
.Attr("timeout_ms: int = -1")
.SetShapeFn([](InferenceContext* c) {
auto* t = c->input_handle_shapes_and_types(0);
if (t != nullptr && t->size() == c->num_outputs()) {
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, (*t)[i].shape);
}
return absl::OkStatus();
} else {
return shape_inference::UnknownShape(c);
}
});
REGISTER_OP("QueueDequeueMany")
.Input("handle: Ref(string)")
.Input("n: int32")
.Output("components: component_types")
.Attr("component_types: list(type) >= 1")
.Attr("timeout_ms: int = -1")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("QueueDequeueManyV2")
.Input("handle: resource")
.Input("n: int32")
.Output("components: component_types")
.Attr("component_types: list(type) >= 1")
.Attr("timeout_ms: int = -1")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle n_shape;
if (c->input_tensor(1) == nullptr) {
n_shape = c->Vector(InferenceContext::kUnknownDim);
} else {
const int32_t n = c->input_tensor(1)->scalar<int32>()();
if (n < 0) {
return errors::InvalidArgument("Input 'n' must be >= 0, but is ", n);
}
n_shape = c->Vector(n);
}
return DequeueManyV2Shape(c, n_shape);
});
REGISTER_OP("QueueDequeueUpTo")
.Input("handle: Ref(string)")
.Input("n: int32")
.Output("components: component_types")
.Attr("component_types: list(type) >= 1")
.Attr("timeout_ms: int = -1")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("QueueDequeueUpToV2")
.Input("handle: resource")
.Input("n: int32")
.Output("components: component_types")
.Attr("component_types: list(type) >= 1")
.Attr("timeout_ms: int = -1")
.SetShapeFn([](InferenceContext* c) {
return DequeueManyV2Shape(c, c->Vector(InferenceContext::kUnknownDim));
});
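// Added commentary: unlike QueueDequeueManyV2, QueueDequeueUpToV2 may
// return fewer than n elements when the queue is closed, so its leading
// output dimension is always unknown rather than taken from a constant n.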
REGISTER_OP("QueueClose")
.Input("handle: Ref(string)")
.SetShapeFn(TwoElementVectorInputsAndScalarOutputs)
.Attr("cancel_pending_enqueues: bool = false");
REGISTER_OP("QueueCloseV2")
.Input("handle: resource")
.SetShapeFn(shape_inference::NoOutputs)
.Attr("cancel_pending_enqueues: bool = false");
REGISTER_OP("QueueIsClosed")
.Input("handle: Ref(string)")
.Output("is_closed: bool")
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("QueueIsClosedV2")
.Input("handle: resource")
.Output("is_closed: bool")
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("QueueSize")
.Input("handle: Ref(string)")
.Output("size: int32")
.SetShapeFn(TwoElementVectorInputsAndScalarOutputs);
REGISTER_OP("QueueSizeV2")
.Input("handle: resource")
.Output("size: int32")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("AccumulatorNumAccumulated")
.Input("handle: Ref(string)")
.Output("num_accumulated: int32")
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("AccumulatorSetGlobalStep")
.Input("handle: Ref(string)")
.Input("new_global_step: int64")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return absl::OkStatus();
});
REGISTER_OP("ConditionalAccumulator")
.Output("handle: Ref(string)")
.Attr("dtype: numbertype")
.Attr("shape: shape")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.Attr("reduction_type: { 'MEAN', 'SUM' } = 'MEAN' ")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
c->set_output(0, c->Vector(2));
return absl::OkStatus();
});
REGISTER_OP("AccumulatorApplyGradient")
.Input("handle: Ref(string)")
.Input("local_step: int64")
.Input("gradient: dtype")
.Attr("dtype: numbertype")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return absl::OkStatus();
});
REGISTER_OP("AccumulatorTakeGradient")
.Input("handle: Ref(string)")
.Input("num_required: int32")
.Output("average: dtype")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return shape_inference::UnknownShape(c);
})
.Attr("dtype: numbertype");
REGISTER_OP("ResourceAccumulatorNumAccumulated")
.Input("handle: resource")
.Output("num_accumulated: int32")
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("ResourceAccumulatorSetGlobalStep")
.Input("handle: resource")
.Input("new_global_step: int64")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return absl::OkStatus();
});
REGISTER_OP("ResourceConditionalAccumulator")
.Output("handle: resource")
.Attr("dtype: numbertype")
.Attr("shape: shape")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.Attr("reduction_type: { 'MEAN', 'SUM' } = 'MEAN' ")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
c->set_output(0, c->Vector(2));
return absl::OkStatus();
});
REGISTER_OP("ResourceAccumulatorApplyGradient")
.Input("handle: resource")
.Input("local_step: int64")
.Input("gradient: dtype")
.Attr("dtype: numbertype")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return absl::OkStatus();
});
REGISTER_OP("ResourceAccumulatorTakeGradient")
.Input("handle: resource")
.Input("num_required: int32")
.Output("average: dtype")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return shape_inference::UnknownShape(c);
})
.Attr("dtype: numbertype");
REGISTER_OP("SparseConditionalAccumulator")
.Output("handle: Ref(string)")
.Attr("dtype: numbertype")
.Attr("shape: shape")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.Attr("reduction_type: { 'MEAN', 'SUM' } = 'MEAN' ")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
c->set_output(0, c->Vector(2));
return absl::OkStatus();
});
REGISTER_OP("SparseAccumulatorApplyGradient")
.Input("handle: Ref(string)")
.Input("local_step: int64")
.Input("gradient_indices: int64")
.Input("gradient_values: dtype")
.Input("gradient_shape: int64")
.Attr("dtype: numbertype")
.Attr("has_known_shape: bool")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return absl::OkStatus();
});
REGISTER_OP("SparseAccumulatorTakeGradient")
.Input("handle: Ref(string)")
.Input("num_required: int32")
.Output("indices: int64")
.Output("values: dtype")
.Output("shape: int64")
.Attr("dtype: numbertype")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return shape_inference::UnknownShape(c);
});
REGISTER_OP("StackV2")
.Input("max_size: int32")
.Output("handle: resource")
.Attr("elem_type: type")
.Attr("stack_name: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput);
REGISTER_OP("StackPushV2")
.Input("handle: resource")
.Input("elem: T")
.Output("output: T")
.Attr("T: type")
.Attr("swap_memory: bool = false")
.SetShapeFn([](shape_inference::InferenceContext* c) {
c->set_output(0, c->input(1));
return absl::OkStatus();
});
REGISTER_OP("StackPopV2")
.Input("handle: resource")
.Output("elem: elem_type")
.Attr("elem_type: type")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("StackCloseV2")
.Input("handle: resource")
.SetShapeFn(TwoElementVectorInputsAndScalarOutputs);
REGISTER_OP("Stack")
.Output("handle: Ref(string)")
.Attr("elem_type: type")
.Attr("stack_name: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput);
REGISTER_OP("StackPush")
.Input("handle: Ref(string)")
.Input("elem: T")
.Output("output: T")
.Attr("T: type")
.Attr("swap_memory: bool = false")
.SetShapeFn([](shape_inference::InferenceContext* c) {
c->set_output(0, c->input(1));
return absl::OkStatus();
});
REGISTER_OP("StackPop")
.Input("handle: Ref(string)")
.Output("elem: elem_type")
.Attr("elem_type: type")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("StackClose")
.Input("handle: Ref(string)")
.SetShapeFn(TwoElementVectorInputsAndScalarOutputs);
REGISTER_OP("TensorArrayV3")
.Input("size: int32")
.Attr("dtype: type")
.Attr("element_shape: shape = { unknown_rank: true }")
.Attr("dynamic_size: bool = false")
.Attr("clear_after_read: bool = true")
.Attr("identical_element_shapes: bool = false")
.Attr("tensor_array_name: string = ''")
.Output("handle: resource")
.Output("flow: float")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
c->set_output(0, c->Vector(2));
c->set_output(1, c->Scalar());
bool identical_shapes;
TF_RETURN_IF_ERROR(
c->GetAttr("identical_element_shapes", &identical_shapes));
DataType t;
TF_RETURN_IF_ERROR(c->GetAttr("dtype", &t));
PartialTensorShape p;
TF_RETURN_IF_ERROR(c->GetAttr("element_shape", &p));
ShapeHandle s;
TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(p, &s));
if (c->FullyDefined(s) || identical_shapes) {
c->set_output_handle_shapes_and_types(
0, std::vector<shape_inference::ShapeAndType>{{s, t}});
}
return absl::OkStatus();
});
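// Added commentary: TensorArrayV3 exports its element shape through the
// resource handle only when the shape is fully defined or
// identical_element_shapes is set; downstream reads and gathers then
// recover per-element shapes from input_handle_shapes_and_types.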
REGISTER_OP("TensorArrayGradV3")
.Input("handle: resource")
.Input("flow_in: float")
.Output("grad_handle: resource")
.Output("flow_out: float")
.Attr("source: string")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
c->set_output(0, c->Vector(2));
c->set_output(1, c->Scalar());
if (c->input_handle_shapes_and_types(0)) {
c->set_output_handle_shapes_and_types(
0, *c->input_handle_shapes_and_types(0));
}
return absl::OkStatus();
});
REGISTER_OP("TensorArrayGradWithShape")
.Input("handle: resource")
.Input("flow_in: float")
.Input("shape_to_prepend: int32")
.Output("grad_handle: resource")
.Output("flow_out: float")
.Attr("source: string")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
c->set_output(0, c->Vector(2));
c->set_output(1, c->Scalar());
auto* shape_and_type = c->input_handle_shapes_and_types(0);
if (shape_and_type) {
auto input_shape = (*shape_and_type)[0].shape;
auto dtype = (*shape_and_type)[0].dtype;
int64_t prepend_rank = c->Value(c->Dim(c->input(2), 0));
if (c->RankKnown(input_shape) &&
prepend_rank != InferenceContext::kUnknownDim) {
int32_t input_rank = c->Rank(input_shape);
std::vector<DimensionHandle> dims;
dims.reserve(prepend_rank + input_rank);
for (int i = 0; i < prepend_rank; ++i) {
dims.push_back(c->UnknownDim());
}
for (int i = 0; i < input_rank; ++i) {
dims.push_back(c->Dim(input_shape, i));
}
c->set_output_handle_shapes_and_types(0,
{{c->MakeShape(dims), dtype}});
} else {
c->set_output_handle_shapes_and_types(0,
{{c->UnknownShape(), dtype}});
}
}
return absl::OkStatus();
});
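// Added commentary: TensorArrayGradWithShape prepends prepend_rank unknown
// dimensions (the element count of shape_to_prepend) to the stored element
// shape, since only the rank of that prefix is known statically.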
REGISTER_OP("TensorArrayWriteV3")
.Input("handle: resource")
.Input("index: int32")
.Input("value: T")
.Input("flow_in: float")
.Output("flow_out: float")
.Attr("T: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
auto* handle_data = c->input_handle_shapes_and_types(0);
if (handle_data != nullptr && !handle_data->empty()) {
shape_inference::ShapeAndType shape_and_type = (*handle_data)[0];
ShapeHandle value_shape = c->input(2);
TF_RETURN_IF_ERROR(
c->Merge(shape_and_type.shape, value_shape, &unused));
}
return shape_inference::ScalarShape(c);
});
REGISTER_OP("TensorArrayReadV3")
.Input("handle: resource")
.Input("index: int32")
.Input("flow_in: float")
.Output("value: dtype")
.Attr("dtype: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
auto shapes = c->input_handle_shapes_and_types(0);
if (shapes != nullptr && !shapes->empty()) {
ShapeHandle tensor_shape = shapes->at(0).shape;
c->set_output(0, tensor_shape);
return absl::OkStatus();
} else {
return shape_inference::UnknownShape(c);
}
});
REGISTER_OP("TensorArrayGatherV3")
.Input("handle: resource")
.Input("indices: int32")
.Input("flow_in: float")
.Output("value: dtype")
.Attr("dtype: type")
.Attr("element_shape: shape = { unknown_rank: true }")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle indices;
ShapeHandle unused;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &indices));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(c->input(0), 0), 2, &unused_dim));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
auto shapes = c->input_handle_shapes_and_types(0);
if (shapes != nullptr && !shapes->empty()) {
ShapeHandle tensor_shape = shapes->at(0).shape;
ShapeHandle output_shape;
TF_RETURN_IF_ERROR(
c->Concatenate(indices, tensor_shape, &output_shape));
c->set_output(0, output_shape);
return absl::OkStatus();
} else {
PartialTensorShape p;
TF_RETURN_IF_ERROR(c->GetAttr("element_shape", &p));
ShapeHandle s;
TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(p, &s));
ShapeHandle output_shape;
TF_RETURN_IF_ERROR(c->Concatenate(indices, s, &output_shape));
c->set_output(0, output_shape);
return absl::OkStatus();
}
});
REGISTER_OP("TensorArrayScatterV3")
.Input("handle: resource")
.Input("indices: int32")
.Input("value: T")
.Input("flow_in: float")
.Output("flow_out: float")
.Attr("T: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle indices;
ShapeHandle unused;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &indices));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(c->input(0), 0), 2, &unused_dim));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
ShapeHandle value_shape;
TF_RETURN_IF_ERROR(
c->MergePrefix(c->input(2), indices, &value_shape, &indices));
auto shapes = c->input_handle_shapes_and_types(0);
if (shapes != nullptr && !shapes->empty()) {
ShapeHandle tensor_shape = shapes->at(0).shape;
ShapeHandle fed_shape;
TF_RETURN_IF_ERROR(c->Subshape(value_shape, 1, &fed_shape));
TF_RETURN_IF_ERROR(c->Merge(tensor_shape, fed_shape, &fed_shape));
}
return shape_inference::ScalarShape(c);
});
REGISTER_OP("TensorArrayConcatV3")
.Input("handle: resource")
.Input("flow_in: float")
.Output("value: dtype")
.Output("lengths: int64")
.Attr("dtype: type")
.Attr("element_shape_except0: shape = { unknown_rank: true }")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
c->set_output(0, c->UnknownShape());
c->set_output(1, c->Vector(c->UnknownDim()));
return absl::OkStatus();
});
REGISTER_OP("TensorArraySplitV3")
.Input("handle: resource")
.Input("value: T")
.Input("lengths: int64")
.Input("flow_in: float")
.Output("flow_out: float")
.Attr("T: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
return shape_inference::ScalarShape(c);
});
REGISTER_OP("TensorArraySizeV3")
.Input("handle: resource")
.Input("flow_in: float")
.Output("size: int32")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
return shape_inference::ScalarShape(c);
});
REGISTER_OP("TensorArrayCloseV3")
.Input("handle: resource")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
return absl::OkStatus();
});
REGISTER_OP("TensorArray")
.Input("size: int32")
.Attr("dtype: type")
.Attr("dynamic_size: bool = false")
.Attr("clear_after_read: bool = true")
.Attr("tensor_array_name: string = ''")
.Attr("element_shape: shape = { unknown_rank: true }")
.Output("handle: Ref(string)")
.SetIsStateful()
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(16, "Use TensorArrayV3");
REGISTER_OP("TensorArrayV2")
.Input("size: int32")
.Attr("dtype: type")
.Attr("element_shape: shape = { unknown_rank: true }")
.Attr("dynamic_size: bool = false")
.Attr("clear_after_read: bool = true")
.Attr("tensor_array_name: string = ''")
.Output("handle: string")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
c->set_output(0, c->Vector(2));
return absl::OkStatus();
})
.Deprecated(26, "Use TensorArrayV3");
REGISTER_OP("TensorArrayGrad")
.Input("handle: string")
.Input("flow_in: float")
.Output("grad_handle: Ref(string)")
.Attr("source: string")
.SetIsStateful()
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(16, "Use TensorArrayGradV3");
REGISTER_OP("TensorArrayGradV2")
.Input("handle: string")
.Input("flow_in: float")
.Output("grad_handle: string")
.Attr("source: string")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
c->set_output(0, c->Vector(2));
return absl::OkStatus();
})
.Deprecated(26, "Use TensorArrayGradV3");
REGISTER_OP("TensorArrayWrite")
.Input("handle: Ref(string)")
.Input("index: int32")
.Input("value: T")
.Input("flow_in: float")
.Output("flow_out: float")
.Attr("T: type")
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(16, "Use TensorArrayWriteV3");
REGISTER_OP("TensorArrayWriteV2")
.Input("handle: string")
.Input("index: int32")
.Input("value: T")
.Input("flow_in: float")
.Output("flow_out: float")
.Attr("T: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
return shape_inference::ScalarShape(c);
})
.Deprecated(26, "Use TensorArrayWriteV3");
REGISTER_OP("TensorArrayRead")
.Input("handle: Ref(string)")
.Input("index: int32")
.Input("flow_in: float")
.Output("value: dtype")
.Attr("dtype: type")
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(16, "Use TensorArrayReadV3");
REGISTER_OP("TensorArrayReadV2")
.Input("handle: string")
.Input("index: int32")
.Input("flow_in: float")
.Output("value: dtype")
.Attr("dtype: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
return shape_inference::UnknownShape(c);
})
.Deprecated(26, "Use TensorArrayReadV3");
REGISTER_OP("TensorArrayPack")
.Input("handle: Ref(string)")
.Input("flow_in: float")
.Output("value: dtype")
.Attr("dtype: type")
.Attr("element_shape: shape = { unknown_rank: true }")
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(16, "Use TensorArrayGatherV3 with RangeOp");
REGISTER_OP("TensorArrayUnpack")
.Input("handle: Ref(string)")
.Input("value: T")
.Input("flow_in: float")
.Output("flow_out: float")
.Attr("T: type")
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(20, "Use TensorArrayScatterV3 with RangeOp");
REGISTER_OP("TensorArrayGather")
.Input("handle: Ref(string)")
.Input("indices: int32")
.Input("flow_in: float")
.Output("value: dtype")
.Attr("dtype: type")
.Attr("element_shape: shape = { unknown_rank: true }")
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(16, "Use TensorArrayGatherV3");
REGISTER_OP("TensorArrayGatherV2")
.Input("handle: string")
.Input("indices: int32")
.Input("flow_in: float")
.Output("value: dtype")
.Attr("dtype: type")
.Attr("element_shape: shape = { unknown_rank: true }")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(c->input(0), 0), 2, &unused_dim));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
return shape_inference::UnknownShape(c);
})
.Deprecated(26, "Use TensorArrayGatherV3");
REGISTER_OP("TensorArrayScatter")
.Input("handle: Ref(string)")
.Input("indices: int32")
.Input("value: T")
.Input("flow_in: float")
.Output("flow_out: float")
.Attr("T: type")
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(19, "Use TensorArrayGradV3");
REGISTER_OP("TensorArrayScatterV2")
.Input("handle: string")
.Input("indices: int32")
.Input("value: T")
.Input("flow_in: float")
.Output("flow_out: float")
.Attr("T: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(c->input(0), 0), 2, &unused_dim));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
return shape_inference::ScalarShape(c);
})
.Deprecated(26, "Use TensorArrayScatterV3");
REGISTER_OP("TensorArrayConcat")
.Input("handle: Ref(string)")
.Input("flow_in: float")
.Output("value: dtype")
.Output("lengths: int64")
.Attr("dtype: type")
.Attr("element_shape_except0: shape = { unknown_rank: true }")
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(16, "Use TensorArrayGradV3");
REGISTER_OP("TensorArrayConcatV2")
.Input("handle: string")
.Input("flow_in: float")
.Output("value: dtype")
.Output("lengths: int64")
.Attr("dtype: type")
.Attr("element_shape_except0: shape = { unknown_rank: true }")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
c->set_output(0, c->UnknownShape());
c->set_output(1, c->Vector(c->UnknownDim()));
return absl::OkStatus();
});
REGISTER_OP("TensorArraySplit")
.Input("handle: Ref(string)")
.Input("value: T")
.Input("lengths: int64")
.Input("flow_in: float")
.Output("flow_out: float")
.Attr("T: type")
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(16, "Use TensorArraySplitV3");
REGISTER_OP("TensorArraySplitV2")
.Input("handle: string")
.Input("value: T")
.Input("lengths: int64")
.Input("flow_in: float")
.Output("flow_out: float")
.Attr("T: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
return shape_inference::ScalarShape(c);
})
.Deprecated(26, "Use TensorArraySplitV3");
REGISTER_OP("TensorArraySize")
.Input("handle: Ref(string)")
.Input("flow_in: float")
.Output("size: int32")
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(16, "Use TensorArraySizeV3");
REGISTER_OP("TensorArraySizeV2")
.Input("handle: string")
.Input("flow_in: float")
.Output("size: int32")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
return shape_inference::ScalarShape(c);
})
.Deprecated(26, "Use TensorArraySizeV3");
REGISTER_OP("TensorArrayClose")
.Input("handle: Ref(string)")
.SetShapeFn([](InferenceContext* c) { return absl::OkStatus(); })
.Deprecated(16, "Use TensorArrayCloseV3");
REGISTER_OP("TensorArrayCloseV2")
.Input("handle: string")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
return absl::OkStatus();
})
.Deprecated(26, "Use TensorArrayCloseV3");
REGISTER_OP("Barrier")
.SetIsStateful()
.Output("handle: Ref(string)")
.Attr("component_types: list(type) >= 1")
.Attr("shapes: list(shape) >= 0 = []")
.Attr("capacity: int = -1")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(TwoElementOutput);
REGISTER_OP("BarrierInsertMany")
.Input("handle: Ref(string)")
.Input("keys: string")
.Input("values: T")
.Attr("T: type")
.Attr("component_index: int")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle keys = c->input(1);
ShapeHandle values = c->input(2);
ShapeHandle handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
TF_RETURN_IF_ERROR(c->WithRank(keys, 1, &keys));
TF_RETURN_IF_ERROR(c->WithRankAtLeast(values, 1, &values));
TF_RETURN_IF_ERROR(c->Merge(keys, c->Vector(c->Dim(values, 0)), &handle));
return absl::OkStatus();
});
REGISTER_OP("BarrierTakeMany")
.Input("handle: Ref(string)")
.Input("num_elements: int32")
.Output("indices: int64")
.Output("keys: string")
.Output("values: component_types")
.Attr("component_types: list(type) >= 1")
.Attr("allow_small_batch: bool = false")
.Attr("wait_for_incomplete: bool = false")
.Attr("timeout_ms: int = -1")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("BarrierClose")
.Input("handle: Ref(string)")
.SetShapeFn(TwoElementVectorInputsAndScalarOutputs)
.Attr("cancel_pending_enqueues: bool = false");
REGISTER_OP("BarrierReadySize")
.Input("handle: Ref(string)")
.Output("size: int32")
.SetShapeFn(TwoElementVectorInputsAndScalarOutputs);
REGISTER_OP("BarrierIncompleteSize")
.Input("handle: Ref(string)")
.Output("size: int32")
.SetShapeFn(TwoElementVectorInputsAndScalarOutputs);
REGISTER_OP("GetSessionHandle")
.Input("value: T")
.Output("handle: string")
.Attr("T: type")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("GetSessionHandleV2")
.Input("value: T")
.Output("handle: resource")
.Attr("T: type")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("GetSessionTensor")
.Input("handle: string")
.Output("value: dtype")
.Attr("dtype: type")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
return shape_inference::UnknownShape(c);
});
REGISTER_OP("DeleteSessionTensor")
.Input("handle: string")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
return absl::OkStatus();
});
REGISTER_OP("Stage")
.Input("values: dtypes")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(shape_inference::UnknownShape)
.SetIsStateful();
REGISTER_OP("Unstage")
.Output("values: dtypes")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(shape_inference::UnknownShape)
.SetIsStateful();
REGISTER_OP("StagePeek")
.Input("index: int32")
.Output("values: dtypes")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(shape_inference::UnknownShape)
.SetIsStateful();
REGISTER_OP("StageSize")
.Output("size: int32")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(shape_inference::ScalarShape)
.SetIsStateful();
REGISTER_OP("StageClear")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(shape_inference::UnknownShape)
.SetIsStateful();
REGISTER_OP("MapStage")
.Input("key: int64")
.Input("indices: int32")
.Input("values: fake_dtypes")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("fake_dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::NoOutputs)
.SetIsStateful();
REGISTER_OP("MapPeek")
.Input("key: int64")
.Input("indices: int32")
.Output("values: dtypes")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
.SetIsStateful();
REGISTER_OP("MapUnstage")
.Input("key: int64")
.Input("indices: int32")
.Output("values: dtypes")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
.SetIsStateful();
REGISTER_OP("MapUnstageNoKey")
.Input("indices: int32")
.Output("key: int64")
.Output("values: dtypes")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
.SetIsStateful();
REGISTER_OP("MapSize")
.Output("size: int32")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::ScalarShape)
.SetIsStateful();
REGISTER_OP("MapIncompleteSize")
.Output("size: int32")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::ScalarShape)
.SetIsStateful();
REGISTER_OP("MapClear")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::NoOutputs)
.SetIsStateful();
REGISTER_OP("OrderedMapStage")
.Input("key: int64")
.Input("indices: int32")
.Input("values: fake_dtypes")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("fake_dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::NoOutputs)
.SetIsStateful();
REGISTER_OP("OrderedMapPeek")
.Input("key: int64")
.Input("indices: int32")
.Output("values: dtypes")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
.SetIsStateful();
REGISTER_OP("OrderedMapUnstage")
.Input("key: int64")
.Input("indices: int32")
.Output("values: dtypes")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
.SetIsStateful();
REGISTER_OP("OrderedMapUnstageNoKey")
.Input("indices: int32")
.Output("key: int64")
.Output("values: dtypes")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
.SetIsStateful();
REGISTER_OP("OrderedMapSize")
.Output("size: int32")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::ScalarShape)
.SetIsStateful();
REGISTER_OP("OrderedMapIncompleteSize")
.Output("size: int32")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::ScalarShape)
.SetIsStateful();
REGISTER_OP("OrderedMapClear")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::NoOutputs)
.SetIsStateful();
REGISTER_OP("RecordInput")
.Output("records: string")
.Attr("file_pattern: string")
.Attr("file_random_seed: int = 301")
.Attr("file_shuffle_shift_ratio: float = 0")
.Attr("file_buffer_size: int = 10000")
.Attr("file_parallelism: int = 16")
.Attr("batch_size: int = 32")
.Attr("compression_type: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::UnknownShape);
} |
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(DataFlowOpsTest, LookupTableFind) {
ShapeInferenceTestOp op("LookupTableFind");
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "[2];[];[]", "?");
INFER_OK(op, "[?];[1,2,3];[]", "?");
INFER_ERROR("Shape must be at most rank 1 but is rank 2", op,
"[?];[1,2,3];[1,2]");
}
TEST(DataFlowOpsTest, LookupTableInsert) {
ShapeInferenceTestOp op("LookupTableInsert");
INFER_OK(op, "?;?;?", "");
INFER_OK(op, "[2];[];[]", "");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];[1,2,3];[]");
INFER_OK(op, "[2];[1,?,3];[?,2,?]", "");
}
TEST(DataFlowOpsTest, LookupTableSize) {
ShapeInferenceTestOp op("LookupTableSize");
INFER_OK(op, "?", "[]");
INFER_OK(op, "[2]", "[]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[]");
INFER_ERROR("Dimension must be 2 but is 3", op, "[3]");
}
TEST(DataFlowOpsTest, LookupTableExport) {
ShapeInferenceTestOp op("LookupTableExport");
INFER_OK(op, "?", "[?];?");
INFER_OK(op, "[2]", "[?];?");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[]");
}
TEST(DataFlowOpsTest, InitializeTable) {
ShapeInferenceTestOp op("InitializeTable");
INFER_OK(op, "?;?;?", "");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];[];[]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"?;[1];[2]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[2];[1,2];[1,2]");
}
TEST(DataFlowOpsTest, InitializeTableFromTextFile) {
ShapeInferenceTestOp op("InitializeTableFromTextFile");
INFER_OK(op, "?;?", "");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[2];[1]");
}
TEST(DataFlowOpsTest, DynamicPartition) {
ShapeInferenceTestOp op("DynamicPartition");
TF_ASSERT_OK(NodeDefBuilder("test", "DynamicPartition")
.Input("data", 0, DT_FLOAT_REF)
.Input("indices", 0, DT_INT32)
.Attr("num_partitions", 4)
.Finalize(&op.node_def));
INFER_OK(op, "?;?", "?;?;?;?");
INFER_OK(op, "[3,4,5];[3,4]", "[?,d0_2];[?,d0_2];[?,d0_2];[?,d0_2]");
TF_ASSERT_OK(NodeDefBuilder("test", "DynamicPartition")
.Input("data", 0, DT_FLOAT)
.Input("indices", 0, DT_INT32)
.Attr("num_partitions", 2)
.Finalize(&op.node_def));
INFER_OK(op, "[3,4,5,6];[3,4]", "[?,d0_2,d0_3];[?,d0_2,d0_3]");
INFER_ERROR("Dimensions must be equal, but are 4 and 100", op,
"[3,4,5];[3,100]");
}
TEST(DataFlowOpsTest, DynamicStitch) {
ShapeInferenceTestOp op("DynamicStitch");
TF_ASSERT_OK(
NodeDefBuilder("test", "DynamicStitch")
.Input({{"indices", 0, DT_INT32}, {"indices_2", 1, DT_INT32}})
.Input({{"data", 0, DT_FLOAT}, {"data_2", 1, DT_FLOAT}})
.Attr("N", 2)
.Finalize(&op.node_def));
INFER_ERROR("Dimensions must be equal, but are 10 and 5", op,
"[2,3];[5,6];[2,3,4,5];[10,11,4,5]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 4 and 13", op,
"[2,3];[5,6];[2,3,4,5];[5,6,13,14]");
INFER_OK(op, "[2,3];[5,6];[2,3,4,5];[5,6,4,5]", "[?,d2_2,d2_3]");
Tensor tensor_2 = test::AsTensor<int32>(
std::vector<int32>{2, 4, 6, 0, 10, 11}, TensorShape({2, 3}));
Tensor tensor_5 = test::AsTensor<int32>(
std::vector<int32>{0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
1000, 21, 22, 23, 24, 25, 26, 27, 28, 29},
TensorShape({5, 6}));
op.input_tensors.push_back(nullptr);
op.input_tensors.push_back(&tensor_5);
INFER_OK(op, "[2,3];[5,6];[2,3,4,5];[5,6,4,5]", "[?,d2_2,d2_3]");
op.input_tensors[0] = &tensor_2;
op.input_tensors[1] = nullptr;
INFER_OK(op, "[2,3];[5,6];[2,3,4,5];[5,6,4,5]", "[?,d2_2,d2_3]");
INFER_OK(op, "[2,3];?;[2,3,4,5];[5,6,4,5]", "[?,d2_2,d2_3]");
op.input_tensors[1] = &tensor_5;
INFER_OK(op, "[2,3];[5,6];[2,3,4,5];[5,6,4,5]", "[1001,d2_2,d2_3]");
tensor_2.flat<int32>()(3) = 10000;
INFER_OK(op, "[2,3];[5,6];[2,3,4,5];[5,6,4,5]", "[10001,d2_2,d2_3]");
}
TEST(DataFlowOpsTest, ParallelDynamicStitch) {
ShapeInferenceTestOp op("ParallelDynamicStitch");
TF_ASSERT_OK(
NodeDefBuilder("test", "ParallelDynamicStitch")
.Input({{"indices", 0, DT_INT32}, {"indices_2", 1, DT_INT32}})
.Input({{"data", 0, DT_FLOAT}, {"data_2", 1, DT_FLOAT}})
.Attr("N", 2)
.Finalize(&op.node_def));
INFER_OK(op, "[2,3];[5,6];[2,3,4,5];[5,6,4,5]", "[?,d2_2,d2_3]");
INFER_ERROR("Dimensions must be equal, but are 10 and 5", op,
"[2,3];[5,6];[2,3,4,5];[10,11,4,5]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 4 and 13", op,
"[2,3];[5,6];[2,3,4,5];[5,6,13,14]");
}
TEST(DataFlowOpsTest, TensorArrayV3) {
ShapeInferenceTestOp op("TensorArrayV3");
TF_ASSERT_OK(NodeDefBuilder("test", "TensorArrayV3")
.Input({"size", 0, DT_INT32})
.Attr("dtype", DT_FLOAT)
.Finalize(&op.node_def));
INFER_OK(op, "[]", "[2];[]");
INFER_OK(op, "?", "[2];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[2]");
}
TEST(DataFlowOpsTest, QueueDequeueV2ShapeFn) {
ShapeInferenceTestOp op("QueueDequeueV2");
TF_ASSERT_OK(NodeDefBuilder("test", op.name)
.Input("handle", 0, DT_RESOURCE)
.Attr("component_types", {DT_FLOAT, DT_INT32})
.Finalize(&op.node_def));
INFER_OK(op, "?", "?;?");
std::vector<ShapeInferenceTestOp::ShapeAndType> shapes_and_types;
op.input_resource_handle_shapes_and_types.push_back(&shapes_and_types);
INFER_OK(op, "?", "?;?");
shapes_and_types.emplace_back("[1,?,3]", DT_FLOAT);
INFER_OK(op, "?", "?;?");
shapes_and_types.emplace_back("[?,2]", DT_FLOAT);
INFER_OK(op, "?", "[1,?,3];[?,2]");
}
TEST(DataFlowOpsTest, QueueDequeueManyV2ShapeFn) {
ShapeInferenceTestOp op("QueueDequeueManyV2");
TF_ASSERT_OK(NodeDefBuilder("test", op.name)
.Input("handle", 0, DT_RESOURCE)
.Input("n", 0, DT_INT32)
.Attr("component_types", {DT_FLOAT, DT_INT32})
.Finalize(&op.node_def));
INFER_OK(op, "?;?", "?;?");
std::vector<ShapeInferenceTestOp::ShapeAndType> shapes_and_types;
op.input_resource_handle_shapes_and_types.push_back(&shapes_and_types);
op.input_resource_handle_shapes_and_types.push_back(nullptr);
shapes_and_types.emplace_back("[1,?,3]", DT_FLOAT);
INFER_OK(op, "?;?", "?;?");
shapes_and_types.emplace_back("[?,2]", DT_FLOAT);
INFER_OK(op, "?;?", "[?,1,?,3];[?,?,2]");
Tensor n_tensor = test::AsScalar(12);
op.input_tensors.push_back(nullptr);
op.input_tensors.push_back(&n_tensor);
op.input_resource_handle_shapes_and_types.clear();
shapes_and_types.clear();
INFER_OK(op, "?;?", "?;?");
op.input_resource_handle_shapes_and_types.push_back(&shapes_and_types);
op.input_resource_handle_shapes_and_types.push_back(nullptr);
shapes_and_types.emplace_back("[1,?,3]", DT_FLOAT);
INFER_OK(op, "?;?", "?;?");
shapes_and_types.emplace_back("[?,2]", DT_FLOAT);
INFER_OK(op, "?;?", "[12,1,?,3];[12,?,2]");
n_tensor = test::AsScalar<int32>(-1);
INFER_ERROR("must be >= 0", op, "?;?");
}
TEST(DataFlowOpsTest, QueueDequeueUpToV2ShapeFn) {
for (int pass = 0; pass < 2; ++pass) {
ShapeInferenceTestOp op("QueueDequeueUpToV2");
TF_ASSERT_OK(NodeDefBuilder("test", op.name)
.Input("handle", 0, DT_RESOURCE)
.Input("n", 0, DT_INT32)
.Attr("component_types", {DT_FLOAT, DT_INT32})
.Finalize(&op.node_def));
Tensor n_tensor = test::AsScalar(12);
if (pass == 1) {
op.input_tensors.push_back(nullptr);
op.input_tensors.push_back(&n_tensor);
}
INFER_OK(op, "?;?", "?;?");
std::vector<ShapeInferenceTestOp::ShapeAndType> shapes_and_types;
op.input_resource_handle_shapes_and_types.push_back(&shapes_and_types);
op.input_resource_handle_shapes_and_types.push_back(nullptr);
shapes_and_types.emplace_back("[1,?,3]", DT_FLOAT);
INFER_OK(op, "?;?", "?;?");
shapes_and_types.emplace_back("[?,2]", DT_FLOAT);
INFER_OK(op, "?;?", "[?,1,?,3];[?,?,2]");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/data_flow_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/data_flow_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9d01d5a6-3afb-4951-8c70-dc57dad14950 | cpp | tensorflow/tensorflow | nn_grad | tensorflow/c/experimental/gradients/nn_grad.cc | tensorflow/c/experimental/gradients/nn_grad_test.cc | #include "tensorflow/c/experimental/gradients/nn_grad.h"
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/immediate_execution_context.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/c/experimental/ops/array_ops.h"
#include "tensorflow/c/experimental/ops/math_ops.h"
#include "tensorflow/c/experimental/ops/nn_ops.h"
#include "tensorflow/core/lib/llvm_rtti/llvm_rtti.h"
#include "tensorflow/core/platform/errors.h"
using std::vector;
using tensorflow::ops::BiasAddGrad;
using tensorflow::ops::ReluGrad;
namespace tensorflow {
namespace gradients {
namespace {
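// Gradient for Relu. Holds a reference on the forward output (the
// activations) for the lifetime of the gradient function, since ReluGrad
// needs them to mask the upstream gradient to the positions where the
// activation was positive.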
class ReluGradientFunction : public GradientFunction {
public:
explicit ReluGradientFunction(vector<AbstractTensorHandle*> f_outputs)
: forward_outputs_(f_outputs) {
for (auto output : forward_outputs_) {
if (output) {
output->Ref();
}
}
}
Status Compute(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> grad_outputs,
absl::Span<AbstractTensorHandle*> grad_inputs) override {
AbstractTensorHandle* upstream_grad = grad_outputs[0];
AbstractTensorHandle* activations = forward_outputs_[0];
std::string name = "relu_grad";
TF_RETURN_IF_ERROR(ReluGrad(ctx, upstream_grad, activations,
&grad_inputs[0], name.c_str()));
return absl::OkStatus();
}
~ReluGradientFunction() override {
for (auto output : forward_outputs_) {
if (output) {
output->Unref();
}
}
}
private:
vector<AbstractTensorHandle*> forward_outputs_;
};
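// Multiplies a vector against a matrix with broadcasting by expanding `vec`
// with a trailing axis of size 1 (ExpandDims with axis -1) before the
// elementwise Mul. Requires an immediate-execution context because the axis
// is materialized as a concrete int32 scalar tensor.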
Status BroadcastMul(AbstractContext* ctx, AbstractTensorHandle* vec,
AbstractTensorHandle* mat,
absl::Span<AbstractTensorHandle*> outputs) {
if (!isa<ImmediateExecutionContext>(ctx)) {
return errors::Unimplemented(
"BroadcastMul is not supported in tracing mode yet.");
}
auto imm_ctx = dyn_cast<ImmediateExecutionContext>(ctx);
AbstractTensorPtr minus_1(imm_ctx->CreateInt32Scalar(-1));
ImmediateTensorHandlePtr dim(imm_ctx->CreateLocalHandle(minus_1.get()));
AbstractTensorHandle* expand_dims_outputs;
TF_RETURN_IF_ERROR(
ops::ExpandDims(ctx, vec, dim.get(), &expand_dims_outputs, "ExpandDims"));
TF_RETURN_IF_ERROR(
ops::Mul(ctx, expand_dims_outputs, mat, &outputs[0], "Mul"));
expand_dims_outputs->Unref();
return absl::OkStatus();
}
class SparseSoftmaxCrossEntropyWithLogitsGradientFunction
: public GradientFunction {
public:
explicit SparseSoftmaxCrossEntropyWithLogitsGradientFunction(
vector<AbstractTensorHandle*> f_outputs)
: forward_outputs_(f_outputs) {}
Status Compute(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> grad_outputs,
absl::Span<AbstractTensorHandle*> grad_inputs) override {
TF_RETURN_IF_ERROR(BroadcastMul(
ctx, grad_outputs[0], forward_outputs_[1],
grad_inputs.subspan(0, 1)));
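    // The labels input is not differentiable; no gradient flows to it.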
grad_inputs[1] = nullptr;
return absl::OkStatus();
}
~SparseSoftmaxCrossEntropyWithLogitsGradientFunction() override {}
private:
vector<AbstractTensorHandle*> forward_outputs_;
};
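// Gradient for BiasAdd: the gradient w.r.t. the value input is the upstream
// gradient unchanged, and the gradient w.r.t. the bias is the upstream
// gradient reduced over every non-channel dimension, as determined by the
// data_format attribute recorded from the forward op.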
class BiasAddGradientFunction : public GradientFunction {
public:
explicit BiasAddGradientFunction(AttrBuilder f_attrs)
: forward_attrs_(f_attrs) {}
Status Compute(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> grad_outputs,
absl::Span<AbstractTensorHandle*> grad_inputs) override {
AbstractTensorHandle* upstream_grad = grad_outputs[0];
DCHECK(upstream_grad);
std::string data_format;
TF_RETURN_IF_ERROR(forward_attrs_.Get("data_format", &data_format));
grad_inputs[0] = upstream_grad;
grad_inputs[0]->Ref();
std::string name = "bias_add_grad";
TF_RETURN_IF_ERROR(BiasAddGrad(ctx, upstream_grad, &grad_inputs[1],
data_format.c_str(), name.c_str()));
return absl::OkStatus();
}
~BiasAddGradientFunction() override {}
private:
AttrBuilder forward_attrs_;
};
}
GradientFunction* ReluRegisterer(const ForwardOperation& op) {
return new ReluGradientFunction(op.outputs);
}
GradientFunction* SparseSoftmaxCrossEntropyWithLogitsRegisterer(
const ForwardOperation& op) {
return new SparseSoftmaxCrossEntropyWithLogitsGradientFunction(op.outputs);
}
GradientFunction* BiasAddRegisterer(const ForwardOperation& op) {
return new BiasAddGradientFunction(op.attrs);
}
}
} | #include "tensorflow/c/experimental/gradients/nn_grad.h"
#include "tensorflow/c/eager/c_api_test_util.h"
#include "tensorflow/c/eager/unified_api_testutil.h"
#include "tensorflow/c/experimental/gradients/grad_test_helper.h"
#include "tensorflow/c/experimental/gradients/tape/tape_context.h"
#include "tensorflow/c/experimental/ops/nn_ops.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/platform/tensor_float_32_utils.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace gradients {
namespace internal {
namespace {
using tensorflow::TF_StatusPtr;
Status ReluModel(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
return ops::Relu(ctx, inputs[0], &outputs[0], "Relu");
}
Status SparseSoftmaxCrossEntropyWithLogitsModel(
AbstractContext* ctx, absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
AbstractTensorHandle* loss;
AbstractTensorHandle* backprop;
TF_RETURN_IF_ERROR(ops::SparseSoftmaxCrossEntropyWithLogits(
ctx, inputs[0], inputs[1], &loss, &backprop,
"SparseSoftmaxCrossEntropyWithLogits"));
outputs[0] = loss;
backprop->Unref();
return absl::OkStatus();
}
Status BiasAddModel(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
return ops::BiasAdd(ctx, inputs[0], inputs[1], &outputs[0], "NHWC",
"BiasAdd");
}
class CppGradients
: public ::testing::TestWithParam<std::tuple<const char*, bool, bool>> {
protected:
void SetUp() override {
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
status_ = StatusFromTF_Status(status.get());
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
{
AbstractContext* ctx_raw = nullptr;
status_ =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
immediate_execution_ctx_.reset(ctx_raw);
}
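    // Disable TF32 so float32 numerical gradient checks keep full precision.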
enable_tensor_float_32_execution(false);
}
AbstractContextPtr immediate_execution_ctx_;
GradientRegistry registry_;
Status status_;
public:
bool UseMlir() const { return strcmp(std::get<0>(GetParam()), "mlir") == 0; }
bool UseFunction() const { return std::get<2>(GetParam()); }
};
TEST_P(CppGradients, TestReluGrad) {
status_ = registry_.Register("Relu", ReluRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
auto ReluGradModel = BuildGradModel(ReluModel, registry_);
float X_vals[] = {1.0f, 2.0f, 3.0f, -5.0f, -4.0f, -3.0f, 2.0f, 10.0f, -1.0f};
int64_t X_dims[] = {3, 3};
AbstractTensorHandlePtr X;
{
AbstractTensorHandle* X_raw;
status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
immediate_execution_ctx_.get(), X_vals, X_dims, 2, &X_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
X.reset(X_raw);
}
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
ReluModel, ReluGradModel, immediate_execution_ctx_.get(), {X.get()},
UseFunction()));
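  // Relu is not differentiable at 0; TensorFlow defines the gradient there
  // to be 0, which the scalar check below verifies.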
AbstractTensorHandlePtr Y;
{
AbstractTensorHandle* Y_raw;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 0.0f, &Y_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
Y.reset(Y_raw);
}
std::vector<AbstractTensorHandle*> outputs(1);
status_ = RunModel(ReluGradModel, immediate_execution_ctx_.get(), {Y.get()},
absl::MakeSpan(outputs), UseFunction());
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CheckTensorValue(outputs[0], {0.0f}, {},
0));
outputs[0]->Unref();
}
TEST_P(CppGradients, TestSparseSoftmaxCrossEntropyWithLogitsGrad) {
if (UseFunction()) {
GTEST_SKIP() << "Can't take gradient of "
"SparseSoftmaxCrossEntropyWithLogits in tracing mode.";
}
float X_vals[] = {1.0f, 2.0f, 3.0f, -5.0f, -4.0f, -3.0f, 2.0f, 0.0f, -1.0f};
int64_t X_dims[] = {3, 3};
AbstractTensorHandlePtr X;
{
AbstractTensorHandle* X_raw;
status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
immediate_execution_ctx_.get(), X_vals, X_dims, 2, &X_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
X.reset(X_raw);
}
int32_t Y_vals[] = {1, 0, 1};
int64_t Y_dims[] = {3};
AbstractTensorHandlePtr Y;
{
AbstractTensorHandle* Y_raw;
status_ = TestTensorHandleWithDims<int32_t, TF_INT32>(
immediate_execution_ctx_.get(), Y_vals, Y_dims, 1, &Y_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
Y.reset(Y_raw);
}
status_ = registry_.Register("SparseSoftmaxCrossEntropyWithLogits",
SparseSoftmaxCrossEntropyWithLogitsRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
SparseSoftmaxCrossEntropyWithLogitsModel,
BuildGradModel(SparseSoftmaxCrossEntropyWithLogitsModel, registry_),
immediate_execution_ctx_.get(), {X.get(), Y.get()}, UseFunction()));
}
TEST_P(CppGradients, TestBiasAddGrad) {
if (UseFunction() && UseMlir()) {
GTEST_SKIP() << "SetAttrString has not been implemented yet.\n";
}
float A_vals[] = {1.0f, 2.0f, 3.0f, 4.0f};
int64_t A_dims[] = {2, 2};
AbstractTensorHandlePtr A;
{
AbstractTensorHandle* A_raw;
status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
immediate_execution_ctx_.get(), A_vals, A_dims, 2, &A_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
A.reset(A_raw);
}
float Bias_vals[] = {2.0f, 3.0f};
int64_t Bias_dims[] = {2};
AbstractTensorHandlePtr Bias;
{
AbstractTensorHandle* Bias_raw;
status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
immediate_execution_ctx_.get(), Bias_vals, Bias_dims, 1, &Bias_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
Bias.reset(Bias_raw);
}
status_ = registry_.Register("BiasAdd", BiasAddRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
BiasAddModel, BuildGradModel(BiasAddModel, registry_),
immediate_execution_ctx_.get(), {A.get(), Bias.get()}, UseFunction()));
}
#ifdef PLATFORM_GOOGLE
INSTANTIATE_TEST_SUITE_P(
UnifiedCAPI, CppGradients,
::testing::Combine(::testing::Values("graphdef", "mlir"),
::testing::Values(false),
::testing::Values(true, false)));
#else
INSTANTIATE_TEST_SUITE_P(
UnifiedCAPI, CppGradients,
::testing::Combine(::testing::Values("graphdef", "mlir"),
::testing::Values(false),
::testing::Values(true, false)));
#endif
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/gradients/nn_grad.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/gradients/nn_grad_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f04a7bc0-66d1-4061-a1dd-c9111cff0a3a | cpp | tensorflow/tensorflow | tfprof_tensor | tensorflow/core/profiler/internal/tfprof_tensor.cc | tensorflow/core/profiler/internal/tfprof_tensor_test.cc | #include "tensorflow/core/profiler/internal/tfprof_tensor.h"
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
namespace tensorflow {
namespace tfprof {
void TFProfTensor::Display(string* formatted_str,
TFProfTensorProto* tfprof_tensor_pb) {
if (formatted_str) {
if (formatted_str_.length() >= kTFProfTenosrMaxDisplayLen) {
*formatted_str =
absl::StrCat(formatted_str_, "...omitted from display\n\n");
} else {
*formatted_str = formatted_str_;
}
}
if (tfprof_tensor_pb) {
tfprof_tensor_pb->MergeFrom(tfprof_tensor_pb_);
}
}
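// Flattens the tensor's values into a uniform vector (double, int64, or
// string depending on dtype) and recursively builds the nested
// TFProfTensorProto; unsupported dtypes are reported to stderr.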
void TFProfTensor::Build() {
tfprof_tensor_pb_.set_dtype(tensor_->dtype());
switch (tensor_->dtype()) {
case DataType::DT_FLOAT:
case DataType::DT_DOUBLE: {
std::vector<double> values_vec;
if (tensor_->dtype() == DataType::DT_FLOAT) {
GetValueVec<float, double>(&values_vec);
} else if (tensor_->dtype() == DataType::DT_DOUBLE) {
GetValueVec<double, double>(&values_vec);
}
BuildOutput<double>(0, 0, values_vec, &tfprof_tensor_pb_);
break;
}
case DataType::DT_INT32:
case DataType::DT_INT64: {
std::vector<int64_t> values_vec;
if (tensor_->dtype() == DataType::DT_INT32) {
GetValueVec<int32, int64_t>(&values_vec);
} else if (tensor_->dtype() == DataType::DT_INT64) {
GetValueVec<int64_t, int64_t>(&values_vec);
}
BuildOutput<int64_t>(0, 0, values_vec, &tfprof_tensor_pb_);
break;
}
case DataType::DT_STRING: {
std::vector<tstring> values_vec;
GetValueVec<tstring, tstring>(&values_vec);
BuildOutput<tstring>(0, 0, values_vec, &tfprof_tensor_pb_);
break;
}
default: {
absl::FPrintF(stderr, "Not Supported type %d\n", tensor_->dtype());
break;
}
}
}
}
} | #include <memory>
#include <utility>
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/internal/tfprof_stats.h"
#include "tensorflow/core/profiler/internal/tfprof_utils.h"
#include "tensorflow/core/profiler/tfprof_log.pb.h"
#include "tensorflow/core/profiler/tfprof_options.h"
#include "tensorflow/core/profiler/tfprof_output.pb.h"
namespace tensorflow {
namespace tfprof {
class TFProfTensorTest : public ::testing::Test {
protected:
TFProfTensorTest() {
string graph_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/graph.pbtxt");
std::unique_ptr<tensorflow::GraphDef> graph_pb(new tensorflow::GraphDef());
TF_CHECK_OK(
ReadProtoFile(Env::Default(), graph_path, graph_pb.get(), false));
std::unique_ptr<tensorflow::RunMetadata> run_meta_pb;
std::unique_ptr<OpLogProto> op_log_pb;
string ckpt_path = io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/ckpt");
TF_Status* status = TF_NewStatus();
std::unique_ptr<checkpoint::CheckpointReader> ckpt_reader(
new checkpoint::CheckpointReader(ckpt_path, status));
CHECK(TF_GetCode(status) == TF_OK);
TF_DeleteStatus(status);
tf_stats_ =
std::make_unique<TFStats>(std::move(graph_pb), std::move(run_meta_pb),
std::move(op_log_pb), std::move(ckpt_reader));
tf_stats_->BuildAllViews();
}
std::unique_ptr<TFStats> tf_stats_;
};
TEST_F(TFProfTensorTest, Basics) {
Options opts(3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, "name", {"VariableV2"},
{".*"}, {""}, {".*"}, {""}, false,
{"tensor_value"},
"", {});
const GraphNodeProto& root = tf_stats_->ShowGraphNode("scope", opts);
EXPECT_EQ(root.children(0).name(), "DW");
EXPECT_GT(root.children(0).tensor_value().value_double_size(), 10);
EXPECT_EQ(root.children(1).name(), "DW2");
EXPECT_GT(root.children(1).tensor_value().value_double_size(), 10);
EXPECT_EQ(root.children(2).name(), "ScalarW");
EXPECT_EQ(root.children(2).tensor_value().value_double_size(), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/internal/tfprof_tensor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/internal/tfprof_tensor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b0170513-7b26-44f8-8167-99ac87528589 | cpp | tensorflow/tensorflow | tfprof_stats | tensorflow/core/profiler/internal/tfprof_stats.cc | tensorflow/core/profiler/internal/tfprof_stats_test.cc | #include "tensorflow/core/profiler/internal/tfprof_stats.h"
#include <stdio.h>
#include <map>
#include <memory>
#include <utility>
#include <vector>
#include "absl/strings/numbers.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include "tensorflow/core/platform/regexp.h"
#include "tensorflow/core/profiler/internal/tfprof_timeline.h"
namespace tensorflow {
namespace tfprof {
namespace {
const char* const kProfilePrefix = "Profile:\n";
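// Synthesizes a NodeDef for an op that only appears in RunMetadata (not in
// the GraphDef). RPC-transfer and memcpy pseudo-nodes are skipped.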
bool CreateRunMetadataNode(const string& name, NodeDef* def) {
if (name == "RecvTensor" || name == "_SOURCE" ||
name.find("MEMCPY") != name.npos) {
return false;
}
def->set_name(name);
def->set_op(name);
return true;
}
}
TFStats::TFStats(std::unique_ptr<GraphDef> graph,
std::unique_ptr<RunMetadata> run_meta,
std::unique_ptr<OpLogProto> op_log,
std::unique_ptr<checkpoint::CheckpointReader> ckpt_reader)
: has_code_traces_(false),
miss_accelerator_stream_(false),
ckpt_reader_(std::move(ckpt_reader)) {
CHECK(graph) << "Must at least have GraphDef";
AddGraph(std::move(graph));
if (run_meta && run_meta->has_step_stats()) {
AddRunMeta(0, std::move(run_meta));
}
AddOpLogProto(std::move(op_log));
if (ckpt_reader_) {
for (const auto& v : ckpt_reader_->GetVariableToShapeMap()) {
auto node = nodes_map_.find(v.first);
if (node != nodes_map_.end()) {
node->second->AddOpType("_checkpoint_variables");
}
}
}
}
TFStats::TFStats(const string& filename,
std::unique_ptr<checkpoint::CheckpointReader> ckpt_reader)
: has_code_traces_(false),
miss_accelerator_stream_(false),
ckpt_reader_(std::move(ckpt_reader)) {
string str;
Status s = ReadFileToString(Env::Default(), filename, &str);
if (!s.ok()) {
absl::FPrintF(stderr, "Failed to read profile: %s", s.ToString());
return;
}
ProfileProto profile;
if (!profile.ParseFromString(str)) {
absl::FPrintF(stderr, "Failed to parse profile\n");
return;
}
for (const auto& entry : profile.id_to_string()) {
id_to_string_[entry.first] = entry.second;
}
for (const auto& node_pb : profile.nodes()) {
std::unique_ptr<TFGraphNode> node(
new TFGraphNode(node_pb.second, profile, &id_to_string_, &nodes_map_));
nodes_map_.insert(std::pair<string, std::unique_ptr<TFGraphNode>>(
node_pb.second.name(), std::move(node)));
}
has_code_traces_ = profile.has_trace();
for (int64_t s : profile.steps()) {
steps_.insert(s);
}
}
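// Lazily constructs the view backing `cmd` (kCmds: scope, graph, code, op),
// indexing every node in nodes_map_ exactly once per view.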
void TFStats::BuildView(const string& cmd) {
if (cmd == kCmds[0] && !scope_view_) {
scope_view_ = std::make_unique<TFScope>(ckpt_reader_.get());
for (auto it = nodes_map_.begin(); it != nodes_map_.end(); it++) {
scope_view_->AddNode(it->second.get());
}
scope_view_->Build();
}
if (cmd == kCmds[1] && !graph_view_) {
graph_view_ = std::make_unique<TFGraph>(ckpt_reader_.get());
for (auto it = nodes_map_.begin(); it != nodes_map_.end(); it++) {
graph_view_->AddNode(it->second.get());
}
graph_view_->Build();
}
if (cmd == kCmds[2] && !code_view_) {
code_view_ = std::make_unique<TFCode>();
for (auto it = nodes_map_.begin(); it != nodes_map_.end(); it++) {
code_view_->AddNode(it->second.get());
}
code_view_->Build();
}
if (cmd == kCmds[3] && !op_view_) {
op_view_ = std::make_unique<TFOp>();
for (auto it = nodes_map_.begin(); it != nodes_map_.end(); it++) {
op_view_->AddNode(it->second.get());
}
op_view_->Build();
}
}
void TFStats::BuildAllViews() {
std::vector<string> cmds_str(kCmds, kCmds + sizeof(kCmds) / sizeof(*kCmds));
for (const string& cmd : cmds_str) {
BuildView(cmd);
}
}
const GraphNodeProto& TFStats::ShowGraphNode(const string& cmd,
const Options& opts) const {
if (!Validate(opts)) {
return empty_graph_node_;
}
string prefix = MaybeReportMissingTrace();
prefix += QueryDoc(cmd, opts) + kProfilePrefix;
if (cmd == kCmds[0]) {
return scope_view_->Show(prefix, opts);
} else if (cmd == kCmds[1]) {
if (opts.step < 0 && opts.output_type == kOutput[0]) {
for (int64_t step : steps_) {
Options nopts = opts;
nopts.step = step;
graph_view_->Show(prefix, nopts);
}
return empty_graph_node_;
}
return graph_view_->Show(prefix, opts);
} else {
absl::FPrintF(stderr, "Unknown command: %s\n", cmd);
return empty_graph_node_;
}
}
const MultiGraphNodeProto& TFStats::ShowMultiGraphNode(
const string& cmd, const Options& opts) const {
if (!Validate(opts)) {
return empty_multi_graph_node_;
}
string prefix = MaybeReportMissingTrace();
prefix += QueryDoc(cmd, opts) + kProfilePrefix;
if (cmd == kCmds[2]) {
if (!has_code_traces()) {
absl::FPrintF(stderr, "No code trace information\n");
return empty_multi_graph_node_;
}
return code_view_->Show(prefix, opts);
} else if (cmd == kCmds[3]) {
return op_view_->Show(prefix, opts);
} else {
absl::FPrintF(stderr, "Unknown command: %s\n", cmd);
return empty_multi_graph_node_;
}
}
void TFStats::AddGraph(std::unique_ptr<GraphDef> graph) {
std::map<string, const NodeDef*> node_defs;
bool node_added = false;
for (const NodeDef& node : graph->node()) {
if (nodes_map_.find(node.name()) != nodes_map_.end()) {
continue;
}
node_added = true;
size_t num_nodes = nodes_map_.size();
nodes_map_[node.name()] =
std::make_unique<TFGraphNode>(&node, num_nodes, &nodes_map_);
node_defs[node.name()] = &node;
}
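  // Second pass: wire up inputs. A NodeDef input is either "name" or
  // "name:src_output", and control dependencies are prefixed with '^'.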
for (auto it = node_defs.begin(); it != node_defs.end(); it++) {
TFGraphNode* node = nodes_map_.at(it->first).get();
for (int i = 0; i < it->second->input_size(); ++i) {
string node_input = it->second->input(i);
int output_idx = 0;
auto prefix_pos = node_input.find(':');
if (prefix_pos != node_input.npos) {
std::vector<string> input_parts = absl::StrSplit(node_input, ':');
DCHECK(input_parts.size() == 2)
<< "Unknown NodeDef.input format: " << node_input;
node_input = input_parts[0];
DCHECK(absl::SimpleAtoi(input_parts[1], &output_idx))
<< "Failed to parse integer: " << output_idx;
}
if (node_input.substr(0, 1) == "^") {
node_input = node_input.substr(1);
}
node->AddInput(node_input, output_idx, i);
}
}
if (node_added) {
graph_view_.reset(nullptr);
scope_view_.reset(nullptr);
op_view_.reset(nullptr);
code_view_.reset(nullptr);
}
}
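// Merges an OpLogProto into the current profile: extends the string table
// and, for each matching node, records op types, float_ops estimates, and
// code traces (which also flips has_code_traces_).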
void TFStats::AddOpLogProto(std::unique_ptr<OpLogProto> op_log) {
if (!op_log) {
return;
}
for (const auto& entry : op_log->id_to_string()) {
if (id_to_string_.find(entry.first) == id_to_string_.end()) {
id_to_string_[entry.first] = entry.second;
}
}
for (const OpLogEntry& entry : op_log->log_entries()) {
auto node = nodes_map_.find(entry.name());
if (node == nodes_map_.end()) continue;
for (const string& type : entry.types()) {
node->second->AddOpType(type);
}
if (entry.float_ops()) {
node->second->AddFloatOps(entry.float_ops());
}
if (entry.has_code_def()) {
has_code_traces_ = true;
node->second->AddCode(entry.code_def(), &id_to_string_);
}
}
}
void TFStats::AddRunMeta(int64_t step, std::unique_ptr<RunMetadata> run_meta) {
if (!run_meta || !run_meta->has_step_stats()) {
absl::FPrintF(stderr, "Invalid RunMetadata for step %d\n", step);
return;
}
  steps_.insert(step);
bool has_gpu_scheduling = false;
bool has_gpu_stream = false;
for (const auto& dev_stat : run_meta->step_stats().dev_stats()) {
string dev = absl::AsciiStrToLower(dev_stat.device());
if (IsPlacedOnAccelerator(dev)) {
has_gpu_scheduling = true;
if (CountAsAcceleratorTime(dev)) {
has_gpu_stream = true;
}
}
for (const NodeExecStats& node_stat : dev_stat.node_stats()) {
string name = node_stat.node_name();
auto split_pos = node_stat.node_name().find(':');
if (split_pos != node_stat.node_name().npos) {
name = node_stat.node_name().substr(0, split_pos);
}
auto node = nodes_map_.find(name);
if (node == nodes_map_.end()) {
NodeDef def;
if (CreateRunMetadataNode(name, &def)) {
size_t num_nodes = nodes_map_.size();
nodes_map_[name] =
std::make_unique<TFGraphNode>(&def, num_nodes, &nodes_map_);
nodes_map_.at(name)->AddStepStat(step, dev_stat.device(), node_stat);
}
} else {
covered_nodes_.insert(node->second->id());
node->second->AddStepStat(step, dev_stat.device(), node_stat);
}
}
}
if (has_gpu_scheduling && !has_gpu_stream) {
miss_accelerator_stream_ = true;
}
}
string TFStats::MaybeReportMissingTrace() const {
string report = "";
if (miss_accelerator_stream_) {
report +=
"\n\nFound accelerator operation but misses accelerator "
"stream stats!\n\n"
"It's likely a gpu tracing issue rather than tf-profiler issue.\n"
"If you found your operation missing accelerator time, "
"consider to post to [email protected]!\n\n";
}
return report;
}
void TFStats::SerializeToString(string* content) {
ProfileProto profile;
for (const auto& entry : id_to_string_) {
(*profile.mutable_id_to_string())[entry.first] = entry.second;
}
for (auto it = nodes_map_.begin(); it != nodes_map_.end(); it++) {
if (it->second->id() < 0) {
continue;
}
(*profile.mutable_nodes())[it->second->id()].MergeFrom(
it->second->ToProto(nodes_map_));
}
profile.set_has_trace(has_code_traces_);
profile.set_miss_accelerator_stream(miss_accelerator_stream_);
for (int64_t s : steps_) {
profile.add_steps(s);
}
*content = profile.SerializeAsString();
}
void TFStats::WriteProfile(const string& filename) {
string content;
SerializeToString(&content);
Status s = WriteStringToFile(Env::Default(), filename, content);
if (!s.ok()) {
absl::FPrintF(stderr, "%s\n", s.ToString());
}
}
bool TFStats::Validate(const Options& opts) const {
if (opts.step >= 0 && steps_.find(opts.step) == steps_.end()) {
absl::FPrintF(stderr,
"Options -step=%d not found.\nAvailable steps: ", opts.step);
for (int64_t s : steps_) {
absl::FPrintF(stderr, "%d ", s);
}
absl::FPrintF(stderr, "\n");
return false;
}
return true;
}
void TFStats::AddNodeForTest(int64_t step, std::unique_ptr<TFGraphNode> node) {
steps_.insert(step);
nodes_map_[node->name()] = std::move(node);
}
}
} | #include "tensorflow/core/profiler/internal/tfprof_stats.h"
#include <memory>
#include <utility>
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/internal/tfprof_constants.h"
#include "tensorflow/core/profiler/internal/tfprof_utils.h"
#include "tensorflow/core/profiler/tfprof_log.pb.h"
#include "tensorflow/core/profiler/tfprof_options.h"
#include "tensorflow/core/profiler/tfprof_output.pb.h"
namespace tensorflow {
namespace tfprof {
class TFProfStatsTest : public ::testing::Test {
protected:
TFProfStatsTest() {
string graph_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/graph.pbtxt");
std::unique_ptr<tensorflow::GraphDef> graph_pb(new tensorflow::GraphDef());
TF_CHECK_OK(
ReadProtoFile(Env::Default(), graph_path, graph_pb.get(), false));
std::unique_ptr<tensorflow::RunMetadata> run_meta_pb(
new tensorflow::RunMetadata());
string run_meta_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/run_meta");
TF_CHECK_OK(
ReadProtoFile(Env::Default(), run_meta_path, run_meta_pb.get(), true));
std::unique_ptr<OpLogProto> op_log_pb(new OpLogProto());
string op_log_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/tfprof_log");
TF_CHECK_OK(ReadBinaryProto(Env::Default(), op_log_path, op_log_pb.get()));
string ckpt_path = io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/ckpt");
TF_Status* status = TF_NewStatus();
std::unique_ptr<checkpoint::CheckpointReader> ckpt_reader(
new checkpoint::CheckpointReader(ckpt_path, status));
CHECK(TF_GetCode(status) == TF_OK);
TF_DeleteStatus(status);
tf_stats_ =
std::make_unique<TFStats>(std::move(graph_pb), std::move(run_meta_pb),
std::move(op_log_pb), std::move(ckpt_reader));
tf_stats_->BuildAllViews();
}
string TestToFromProto(const string& cmd, const Options& opts) {
string profile_file = io::JoinPath(testing::TmpDir(), "profile");
tf_stats_->WriteProfile(profile_file);
TFStats new_stats(profile_file, nullptr);
new_stats.BuildAllViews();
return new_stats.ShowGraphNode(cmd, opts).DebugString();
}
std::unique_ptr<TFStats> tf_stats_;
};
TEST_F(TFProfStatsTest, CustomOpType) {
Options opts(3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, "name",
{kTrainableVarType},
{".*"}, {""}, {".*"}, {""}, false,
{"params", "bytes", "micros", "float_ops"}, "", {});
const GraphNodeProto& root = tf_stats_->ShowGraphNode("scope", opts);
GraphNodeProto expected;
CHECK(protobuf::TextFormat::ParseFromString(
"name: \"_TFProfRoot\"\ntotal_exec_micros: 13\ntotal_requested_bytes: "
"2560\ntotal_parameters: 451\nchildren {\n name: \"DW\"\n exec_micros: "
"2\n requested_bytes: 1280\n parameters: 162\n total_exec_micros: 2\n "
" total_requested_bytes: 1280\n total_parameters: 162\n devices: "
"\"/job:localhost/replica:0/task:0/gpu:0\"\n cpu_exec_micros: 2\n "
"total_cpu_exec_micros: 2\n run_count: 1\n total_run_count: 1\n "
"total_definition_count: 1\n peak_bytes: 1280\n residual_bytes: 1280\n "
" output_bytes: 1280\n total_peak_bytes: 1280\n total_residual_bytes: "
"1280\n total_output_bytes: 1280\n}\nchildren {\n name: \"DW2\"\n "
"exec_micros: 11\n requested_bytes: 1280\n parameters: 288\n "
"total_exec_micros: 11\n total_requested_bytes: 1280\n "
"total_parameters: 288\n devices: "
"\"/job:localhost/replica:0/task:0/gpu:0\"\n cpu_exec_micros: 11\n "
"total_cpu_exec_micros: 11\n run_count: 1\n total_run_count: 1\n "
"total_definition_count: 1\n peak_bytes: 1280\n residual_bytes: 1280\n "
" output_bytes: 1280\n total_peak_bytes: 1280\n total_residual_bytes: "
"1280\n total_output_bytes: 1280\n}\nchildren {\n name: \"ScalarW\"\n "
"parameters: 1\n total_parameters: 1\n total_definition_count: "
"1\n}\ntotal_cpu_exec_micros: 13\ntotal_run_count: "
"2\ntotal_definition_count: 3\ntotal_peak_bytes: "
"2560\ntotal_residual_bytes: 2560\ntotal_output_bytes: 2560\n",
&expected));
EXPECT_EQ(expected.DebugString(), root.DebugString());
EXPECT_EQ(root.DebugString(), TestToFromProto("scope", opts));
}
TEST_F(TFProfStatsTest, CheckPointOpType) {
Options opts(3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, "name",
{kCkptVarType},
{".*"}, {""}, {".*"}, {""}, false,
{"params", "bytes", "micros", "float_ops"}, "", {});
const GraphNodeProto& root = tf_stats_->ShowGraphNode("scope", opts);
GraphNodeProto expected;
CHECK(protobuf::TextFormat::ParseFromString(
"name: \"_TFProfRoot\"\ntotal_exec_micros: 13\ntotal_requested_bytes: "
"2560\ntotal_parameters: 451\nchildren {\n name: \"DW\"\n exec_micros: "
"2\n requested_bytes: 1280\n parameters: 162\n total_exec_micros: 2\n "
" total_requested_bytes: 1280\n total_parameters: 162\n devices: "
"\"/job:localhost/replica:0/task:0/gpu:0\"\n cpu_exec_micros: 2\n "
"total_cpu_exec_micros: 2\n run_count: 1\n total_run_count: 1\n "
"total_definition_count: 1\n peak_bytes: 1280\n residual_bytes: 1280\n "
" output_bytes: 1280\n total_peak_bytes: 1280\n total_residual_bytes: "
"1280\n total_output_bytes: 1280\n}\nchildren {\n name: \"DW2\"\n "
"exec_micros: 11\n requested_bytes: 1280\n parameters: 288\n "
"total_exec_micros: 11\n total_requested_bytes: 1280\n "
"total_parameters: 288\n devices: "
"\"/job:localhost/replica:0/task:0/gpu:0\"\n cpu_exec_micros: 11\n "
"total_cpu_exec_micros: 11\n run_count: 1\n total_run_count: 1\n "
"total_definition_count: 1\n peak_bytes: 1280\n residual_bytes: 1280\n "
" output_bytes: 1280\n total_peak_bytes: 1280\n total_residual_bytes: "
"1280\n total_output_bytes: 1280\n}\nchildren {\n name: \"ScalarW\"\n "
"parameters: 1\n total_parameters: 1\n total_definition_count: "
"1\n}\ntotal_cpu_exec_micros: 13\ntotal_run_count: "
"2\ntotal_definition_count: 3\ntotal_peak_bytes: "
"2560\ntotal_residual_bytes: 2560\ntotal_output_bytes: 2560\n",
&expected));
EXPECT_EQ(expected.DebugString(), root.DebugString());
EXPECT_EQ(root.DebugString(), TestToFromProto("scope", opts));
}
TEST_F(TFProfStatsTest, TestGraph) {
Options opts(100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, "name", {".*"},
{"DW/Initializer/random_normal/mul"},
{""}, {".*"}, {""}, false,
{"params", "bytes", "micros", "float_ops"}, "", {});
const GraphNodeProto& root = tf_stats_->ShowGraphNode("graph", opts);
GraphNodeProto expected;
CHECK(protobuf::TextFormat::ParseFromString(
"name: \"_TFProfRoot\"\ntotal_exec_micros: 4945\ntotal_requested_bytes: "
"30464\ntotal_parameters: 451\nchildren {\n name: "
"\"DW/Initializer/random_normal/mul\"\n children {\n name: "
"\"DW/Initializer/random_normal/RandomStandardNormal\"\n children {\n "
" name: \"DW/Initializer/random_normal/shape\"\n "
"total_definition_count: 1\n }\n input_shapes {\n key: 0\n "
" value {\n dim {\n size: 4\n }\n }\n "
"}\n total_definition_count: 2\n }\n children {\n name: "
"\"DW/Initializer/random_normal/stddev\"\n total_definition_count: "
"1\n }\n input_shapes {\n key: 0\n value {\n dim {\n "
"size: 3\n }\n dim {\n size: 3\n }\n dim {\n "
" size: 3\n }\n dim {\n size: 6\n }\n }\n "
"}\n input_shapes {\n key: 1\n value {\n dim {\n "
"size: 1\n }\n }\n }\n total_definition_count: "
"4\n}\ntotal_float_ops: 10440\ntotal_accelerator_exec_micros: "
"404\ntotal_cpu_exec_micros: 4541\ntotal_run_count: "
"6\ntotal_definition_count: 32\ntotal_peak_bytes: "
"25856\ntotal_residual_bytes: 3840\ntotal_output_bytes: 4864\n",
&expected));
EXPECT_EQ(expected.DebugString(), root.DebugString());
EXPECT_EQ(root.DebugString(), TestToFromProto("graph", opts));
}
TEST_F(TFProfStatsTest, TestFloatOps) {
Options opts(10, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, -1, "name", {".*"}, {".*"},
{""}, {".*"}, {""}, false, {"float_ops"}, "", {});
const GraphNodeProto& root = tf_stats_->ShowGraphNode("scope", opts);
GraphNodeProto expected;
CHECK(protobuf::TextFormat::ParseFromString(
"name: \"_TFProfRoot\"\ntotal_exec_micros: 4945\ntotal_requested_bytes: "
"30464\ntotal_parameters: 451\nchildren {\n name: \"Conv2D\"\n "
"exec_micros: 4292\n requested_bytes: 18176\n total_exec_micros: "
"4292\n total_requested_bytes: 18176\n devices: "
"\"/job:localhost/replica:0/task:0/gpu:0\"\n float_ops: 5832\n "
"total_float_ops: 5832\n input_shapes {\n key: 0\n value {\n "
"dim {\n size: 2\n }\n dim {\n size: 6\n "
"}\n dim {\n size: 6\n }\n dim {\n size: "
"3\n }\n }\n }\n input_shapes {\n key: 1\n value {\n "
" dim {\n size: 3\n }\n dim {\n size: 3\n "
"}\n dim {\n size: 3\n }\n dim {\n size: "
"6\n }\n }\n }\n accelerator_exec_micros: 226\n "
"cpu_exec_micros: 4066\n total_accelerator_exec_micros: 226\n "
"total_cpu_exec_micros: 4066\n run_count: 1\n total_run_count: 1\n "
"total_definition_count: 1\n peak_bytes: 14592\n residual_bytes: 768\n "
" output_bytes: 768\n total_peak_bytes: 14592\n total_residual_bytes: "
"768\n total_output_bytes: 768\n}\nchildren {\n name: \"Conv2D_1\"\n "
"exec_micros: 597\n requested_bytes: 9728\n total_exec_micros: 597\n "
"total_requested_bytes: 9728\n devices: "
"\"/job:localhost/replica:0/task:0/gpu:0\"\n float_ops: 4608\n "
"total_float_ops: 4608\n input_shapes {\n key: 0\n value {\n "
"dim {\n size: 2\n }\n dim {\n size: 3\n "
"}\n dim {\n size: 3\n }\n dim {\n size: "
"6\n }\n }\n }\n input_shapes {\n key: 1\n value {\n "
" dim {\n size: 2\n }\n dim {\n size: 2\n "
"}\n dim {\n size: 6\n }\n dim {\n size: "
"12\n }\n }\n }\n accelerator_exec_micros: 178\n "
"cpu_exec_micros: 419\n total_accelerator_exec_micros: 178\n "
"total_cpu_exec_micros: 419\n run_count: 1\n total_run_count: 1\n "
"total_definition_count: 1\n peak_bytes: 8704\n residual_bytes: 512\n "
"output_bytes: 512\n total_peak_bytes: 8704\n total_residual_bytes: "
"512\n total_output_bytes: 512\n}\ntotal_float_ops: "
"10440\ntotal_accelerator_exec_micros: 404\ntotal_cpu_exec_micros: "
"4541\ntotal_run_count: 6\ntotal_definition_count: 35\ntotal_peak_bytes: "
"25856\ntotal_residual_bytes: 3840\ntotal_output_bytes: 4864\n",
&expected));
EXPECT_EQ(expected.DebugString(), root.DebugString());
EXPECT_EQ(root.DebugString(), TestToFromProto("scope", opts));
}
TEST_F(TFProfStatsTest, TestAccountShownNameOnly) {
Options opts(100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, "name", {".*"}, {".*"},
{""}, {"Conv2D_1"},
{""}, true,
{"params"}, "", {});
const GraphNodeProto& root = tf_stats_->ShowGraphNode("scope", opts);
GraphNodeProto expected;
CHECK(protobuf::TextFormat::ParseFromString(
"name: \"_TFProfRoot\"\ntotal_exec_micros: 597\ntotal_requested_bytes: "
"9728\nchildren {\n name: \"Conv2D_1\"\n exec_micros: 597\n "
"requested_bytes: 9728\n total_exec_micros: 597\n "
"total_requested_bytes: 9728\n devices: "
"\"/job:localhost/replica:0/task:0/gpu:0\"\n float_ops: 4608\n "
"total_float_ops: 4608\n input_shapes {\n key: 0\n value {\n "
"dim {\n size: 2\n }\n dim {\n size: 3\n "
"}\n dim {\n size: 3\n }\n dim {\n size: "
"6\n }\n }\n }\n input_shapes {\n key: 1\n value {\n "
" dim {\n size: 2\n }\n dim {\n size: 2\n "
"}\n dim {\n size: 6\n }\n dim {\n size: "
"12\n }\n }\n }\n accelerator_exec_micros: 178\n "
"cpu_exec_micros: 419\n total_accelerator_exec_micros: 178\n "
"total_cpu_exec_micros: 419\n run_count: 1\n total_run_count: 1\n "
"total_definition_count: 1\n peak_bytes: 8704\n residual_bytes: 512\n "
"output_bytes: 512\n total_peak_bytes: 8704\n total_residual_bytes: "
"512\n total_output_bytes: 512\n}\ntotal_float_ops: "
"4608\ntotal_accelerator_exec_micros: 178\ntotal_cpu_exec_micros: "
"419\ntotal_run_count: 1\ntotal_definition_count: 2\ntotal_peak_bytes: "
"8704\ntotal_residual_bytes: 512\ntotal_output_bytes: 512\n",
&expected));
EXPECT_EQ(expected.DebugString(), root.DebugString());
EXPECT_EQ(root.DebugString(), TestToFromProto("scope", opts));
}
TEST_F(TFProfStatsTest, TestShowTensorValue) {
Options opts(10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, "name", {".*"}, {".*"},
{""}, {"DW"}, {""}, false,
{"tensor_value"},
"", {});
const GraphNodeProto& root = tf_stats_->ShowGraphNode("scope", opts);
GraphNodeProto expected;
CHECK(protobuf::TextFormat::ParseFromString(
"name: \"_TFProfRoot\"\ntotal_exec_micros: 4945\ntotal_requested_bytes: "
"30464\ntotal_parameters: 451\nchildren {\n name: \"DW\"\n "
"exec_micros: 2\n requested_bytes: 1280\n parameters: 162\n "
"total_exec_micros: 2\n total_requested_bytes: 1280\n "
"total_parameters: 162\n devices: "
"\"/job:localhost/replica:0/task:0/gpu:0\"\n tensor_value {\n dtype: "
"DT_FLOAT\n value_double: -0.000534315\n value_double: "
"-0.00089602\n value_double: -0.000417239\n value_double: "
"0.00041444\n value_double: 0.000780691\n value_double: "
"-0.000559057\n value_double: -0.000234623\n value_double: "
"0.00013393\n value_double: -0.00187574\n value_double: "
"0.000785666\n value_double: 0.000673294\n value_double: "
"0.000653368\n value_double: 0.000924489\n value_double: "
"-0.000318373\n value_double: -0.000385202\n value_double: "
"-7.92661e-05\n value_double: 2.70287e-05\n value_double: "
"0.00152302\n value_double: 8.04435e-05\n value_double: "
"-0.00058102\n value_double: 0.000244291\n value_double: "
"-0.000438045\n value_double: -0.000110199\n value_double: "
"0.000731663\n value_double: -0.0012326\n value_double: "
"0.00064065\n value_double: -0.00135203\n value_double: "
"-6.42784e-05\n value_double: -0.0011857\n value_double: "
"-0.000487383\n value_double: 3.41493e-05\n value_double: "
"-0.00158447\n value_double: 0.00168448\n value_double: "
"0.00160946\n value_double: -0.000600483\n value_double: "
"0.000650259\n value_double: -0.00109938\n value_double: "
"-0.000842166\n value_double: -0.0022673\n value_double: "
"-0.00101941\n value_double: -0.0011169\n value_double: "
"-0.0013557\n value_double: -1.46354e-05\n value_double: "
"-1.05487e-05\n value_double: -0.00092014\n value_double: "
"0.00272874\n value_double: 5.13942e-05\n value_double: "
"-0.00223472\n value_double: -0.000250875\n value_double: "
"-0.00180747\n value_double: -0.00234714\n value_double: "
"-0.00113523\n value_double: -0.00112635\n value_double: "
"-0.000843118\n value_double: -6.84256e-05\n value_double: "
"0.000243336\n value_double: 0.00119151\n value_double: "
"0.00131022\n value_double: 0.000768038\n value_double: "
"-8.90095e-05\n value_double: -0.000626427\n value_double: "
"-7.0617e-05\n value_double: -0.0021988\n value_double: "
"-0.00221544\n value_double: -0.000393118\n value_double: "
"0.000159464\n value_double: -0.000874746\n value_double: "
"-0.00131239\n value_double: -0.00135747\n value_double: "
"-0.00179753\n value_double: -0.00101005\n value_double: "
"-0.000107518\n value_double: -0.000616882\n value_double: "
"-0.000360923\n value_double: -0.00026896\n value_double: "
"-0.000142548\n value_double: 0.000577227\n value_double: "
"0.000536027\n value_double: 0.00126907\n value_double: "
"-0.00122712\n value_double: -3.60499e-05\n value_double: "
"0.000151026\n value_double: 0.00107658\n value_double: "
"0.00116475\n value_double: -0.00145312\n value_double: "
"0.000233326\n value_double: -0.00020198\n value_double: "
"0.00179029\n value_double: 0.00150048\n value_double: "
"-0.000884775\n value_double: 0.000409188\n value_double: "
"2.97176e-05\n value_double: -0.000506118\n value_double: "
"-2.33992e-05\n value_double: -0.00037212\n value_double: "
"0.000862773\n value_double: 0.00174046\n value_double: "
"-0.000240207\n value_double: 0.000663976\n value_double: "
"-0.00134747\n value_double: 0.00115585\n value_double: "
"0.000555869\n value_double: 0.00176722\n value_double: "
"-0.000518409\n value_double: 0.00101051\n value_double: "
"0.000129399\n value_double: -0.000916389\n value_double: "
"-0.00137693\n value_double: -0.00152412\n value_double: "
"7.32515e-05\n value_double: -0.000190811\n value_double: "
"-0.000158692\n value_double: -5.7791e-05\n value_double: "
"0.000671785\n value_double: -0.00152924\n value_double: "
"0.00117314\n value_double: -0.000384202\n value_double: "
"0.00176709\n value_double: -0.000181703\n value_double: "
"-0.000460994\n value_double: 0.000643716\n value_double: "
"4.76719e-05\n value_double: -0.00101037\n value_double: "
"0.00159621\n value_double: 0.00186758\n value_double: "
"0.00100001\n value_double: -0.00121831\n value_double: "
"0.00132231\n value_double: 0.0013511\n value_double: 0.00106659\n "
" value_double: 0.00018091\n value_double: 0.00155925\n "
"value_double: 4.26087e-05\n value_double: 0.000243264\n "
"value_double: -0.0017202\n value_double: -0.000218897\n "
"value_double: 0.00118693\n value_double: 0.00258909\n "
"value_double: 0.000641913\n value_double: -0.0013211\n "
"value_double: -0.00171943\n value_double: 0.00089151\n "
"value_double: -0.00114969\n value_double: -0.000196331\n "
"value_double: 0.00109994\n value_double: 0.000302616\n "
"value_double: 0.000675812\n value_double: 0.00112222\n "
"value_double: 0.000516456\n value_double: 0.00133357\n "
"value_double: 0.000298491\n value_double: 0.00145934\n "
"value_double: -0.00159102\n value_double: -0.000819061\n "
"value_double: 0.000120583\n value_double: 0.0006108\n "
"value_double: 0.00124132\n value_double: 0.000764859\n "
"value_double: 0.000374641\n value_double: -0.00149603\n "
"value_double: -0.000317367\n value_double: -0.000417829\n }\n "
"cpu_exec_micros: 2\n total_cpu_exec_micros: 2\n run_count: 1\n "
"total_run_count: 1\n total_definition_count: 10\n peak_bytes: 1280\n "
"residual_bytes: 1280\n output_bytes: 1280\n total_peak_bytes: 1280\n "
"total_residual_bytes: 1280\n total_output_bytes: "
"1280\n}\ntotal_float_ops: 10440\ntotal_accelerator_exec_micros: "
"404\ntotal_cpu_exec_micros: 4541\ntotal_run_count: "
"6\ntotal_definition_count: 35\ntotal_peak_bytes: "
"25856\ntotal_residual_bytes: 3840\ntotal_output_bytes: 4864\n",
&expected));
EXPECT_EQ(expected.DebugString(), root.DebugString());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/internal/tfprof_stats.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/internal/tfprof_stats_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6cec5175-feab-4687-a71f-6b5f3cebf0c2 | cpp | tensorflow/tensorflow | tfprof_timeline | tensorflow/core/profiler/internal/tfprof_timeline.cc | tensorflow/core/profiler/internal/tfprof_timeline_test.cc | #include "tensorflow/core/profiler/internal/tfprof_timeline.h"
#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/profiler/internal/tfprof_utils.h"
namespace tensorflow {
namespace tfprof {
namespace {
int kMaxDisplayedMemNode = 10;
std::string GetTimeDevName(const std::string& dev) {
if (dev.find("stream") != dev.npos) {
return absl::StrCat("Op execution threads: ", dev);
} else {
return absl::StrCat("Op scheduling threads: ", dev);
}
}
std::string GetMemoryLaneName(const std::string& dev) {
return absl::StrCat("mem usage on:", dev);
}
}
Json::Value ChromeTraceFormatter::CreateEvent(const string& ph,
const string& category,
const string& name, int64_t pid,
int64_t tid, int64_t ts) {
Json::Value event(Json::objectValue);
event["ph"] = Json::Value(ph);
event["cat"] = Json::Value(category);
event["name"] = Json::Value(name);
event["pid"] = Json::Int64(pid);
event["tid"] = Json::Int64(tid);
event["ts"] = Json::Int64(ts);
return event;
}
void ChromeTraceFormatter::EmitPID(const string& name, int64_t pid) {
Json::Value event(Json::objectValue);
event["name"] = Json::Value("process_name");
event["ph"] = Json::Value("M");
event["pid"] = Json::Int64(pid);
Json::Value args(Json::objectValue);
args["name"] = Json::Value(name);
event["args"] = args;
metadata_.push_back(event);
}
void ChromeTraceFormatter::EmitRegion(int64_t ts, int64_t duration, int64_t pid,
int64_t tid, const string& category,
const string& name, Json::Value args) {
Json::Value event = CreateEvent("X", category, name, pid, tid, ts);
event["dur"] = Json::Int64(duration);
event["args"] = std::move(args);
metadata_.push_back(event);
}
void ChromeTraceFormatter::EmitFlowStart(const string& name, int64_t ts,
int64_t pid, int64_t tid,
int64_t flow_id) {
Json::Value event = CreateEvent("s", "DataFlow", name, pid, tid, ts);
event["id"] = Json::Int64(flow_id);
events_.push_back(event);
}
void ChromeTraceFormatter::EmitFlowEnd(const string& name, int64_t ts,
int64_t pid, int64_t tid,
int64_t flow_id) {
Json::Value event = CreateEvent("t", "DataFlow", name, pid, tid, ts);
event["id"] = Json::Int64(flow_id);
events_.push_back(event);
}
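// Emits two counter series at `ts`: the allocator's total bytes in use, and
// (on pid + 1) a breakdown of the largest live tensor allocations, capped at
// kMaxDisplayedMemNode entries with the remainder reported as
// "Not Displayed".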
void ChromeTraceFormatter::EmitCounter(
const string& category, const string& name, int64_t pid, int64_t ts,
const string& device, int64_t bytes,
const std::map<int64_t, std::vector<string>>& tensor_mem) {
Json::Value event = CreateEvent("C", category, "Allocated Bytes", pid, 0, ts);
Json::Value args(Json::objectValue);
args["Allocator Bytes in Use"] = Json::Int64(bytes);
event["args"] = args;
events_.push_back(event);
Json::Value event2 =
CreateEvent("C", category, "Top Allocations", pid + 1, 0, ts);
Json::Value args2(Json::objectValue);
for (int i = 1; i < kMaxDisplayedMemNode; ++i) {
args2[absl::StrFormat("Top Allocation %02d", i)] = Json::Value("N/A");
}
int count = 0;
for (auto it = tensor_mem.rbegin(); it != tensor_mem.rend(); ++it) {
for (const string& t : it->second) {
if (bytes < it->first || count >= kMaxDisplayedMemNode) {
break;
}
args2[absl::StrFormat("Top Allocation %02d", count)] =
Json::Value(absl::StrCat(it->first / 1000000.0, " MB from ", t));
++count;
bytes -= it->first;
}
}
args2[std::string("Not Displayed")] =
Json::Value(absl::StrFormat("%.2f MB", bytes / 1000000.0));
event2["args"] = args2;
events_.push_back(event2);
}
string ChromeTraceFormatter::Format() {
Json::Value trace;
trace["traceEvents"] = Json::Value(Json::arrayValue);
for (const Json::Value& v : metadata_) {
trace["traceEvents"].append(v);
}
for (const Json::Value& v : events_) {
trace["traceEvents"].append(v);
}
Json::FastWriter writer;
string trace_str = writer.write(trace);
if (trace_str.length() > 200 * 1024 * 1024) {
absl::FPrintF(stderr,
"Trace file is over 200MB. Chrome might not be able to "
"display it. Consider to use filters (e.g. -min_micros "
"> 1000 or -op_type .*gpu:0.* to reduce the size.\n");
}
return trace_str;
}
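// Accumulates per-step allocation deltas for `node` on its canonical device:
// persistent accelerator memory is credited at time 0, per-tensor running
// totals are kept in tensor_allocs, and allocator bytes-in-use snapshots are
// recorded per timestamp.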
void MemoryTracker::TrackNode(int64_t step, const GraphNode* node) {
if (!node->Trackable(step)) {
return;
}
Device& dev = devices_[node->node->canonical_device()];
std::map<int64_t, int64_t> allocs;
for (const auto& alloc : node->node->allocations(step)) {
allocs[alloc.alloc_micros()] += alloc.alloc_bytes();
dev.tracked_allocations[alloc.alloc_micros()] += alloc.alloc_bytes();
}
dev.tracked_allocations[0] += node->node->accelerator_persistent_bytes();
allocs[0] += node->node->accelerator_persistent_bytes();
int64_t last = 0;
std::map<int64_t, int64_t>& aggregate_allocs =
dev.tensor_allocs[node->name()];
for (auto it = allocs.begin(); it != allocs.end(); ++it) {
last += it->second;
aggregate_allocs[it->first] = last;
}
for (const auto& bytes_in_use : node->node->allocator_bytes_in_use(step)) {
if (bytes_in_use.first <= 0) continue;
dev.allocations[bytes_in_use.first] = bytes_in_use.second;
}
}
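// Recursively walks the shown-node tree, creating one trace process per
// device on first sight and one TimeNode per (start, duration) kernel
// execution recorded for this step.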
void Timeline::AllocateTimeNodes(GraphNode* gnode) {
if (gnode->Trackable(step_)) {
TrackNode(gnode);
const TFGraphNode* node = gnode->node;
for (const auto& kernel_execs : node->op_execs(step_)) {
const string& device = kernel_execs.first;
if (process_.find(device) == process_.end()) {
int64_t pid = AllocatePID();
process_[device] = std::make_unique<Process>(device, pid);
chrome_formatter_.EmitPID(GetTimeDevName(device), pid);
}
Process* p = process_[device].get();
for (const auto& exec : kernel_execs.second) {
int64_t start_micros = exec.first;
int64_t exec_micros = exec.second;
if (tnodes_[device].find(start_micros) == tnodes_[device].end()) {
tnodes_[device][start_micros] =
std::make_unique<TimeNode>(p, gnode, start_micros, exec_micros);
}
}
}
}
for (GraphNode* n : gnode->show_children) {
AllocateTimeNodes(n);
}
}
void Timeline::GenerateGraphTimeline(const std::vector<GraphNode*>& gnodes) {
for (GraphNode* gnode : gnodes) {
AllocateTimeNodes(gnode);
}
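  // Link producers to consumers across devices: if an accounted, trackable
  // input ran on a different canonical device, record this node as a
  // successor of the producer's time node so a dataflow arrow is emitted.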
for (auto& process : tnodes_) {
if (!IsCanonicalDevice(process.first)) continue;
for (auto& tn : process.second) {
TimeNode* tnode = tn.second.get();
for (GraphNode* inp : tnode->node->children) {
if (!inp->account || !inp->Trackable(step_)) {
continue;
}
for (const auto& execs : inp->node->cpu_execs(step_)) {
if (!IsCanonicalDevice(execs.first)) continue;
if (process.first == execs.first) {
continue;
}
for (const auto& exec : execs.second) {
int64_t start_micros = exec.first;
auto cprocess = tnodes_.find(execs.first);
if (cprocess == tnodes_.end()) continue;
auto ctn = cprocess->second.find(start_micros);
if (ctn == cprocess->second.end()) continue;
ctn->second->next_tnodes.push_back(tnode);
}
}
}
}
}
AllocateLanes();
absl::FPrintF(stdout, "generating trace file.\n");
int64_t flow_id = 1;
for (const auto& process : alloc_nodes_) {
for (const auto& lane : process.second) {
for (const auto& node : lane.second) {
TimeNode* tnode = node.second;
Json::Value args(Json::objectValue);
args["name"] = Json::Value(tnode->name());
chrome_formatter_.EmitRegion(node.first, tnode->exec_micros,
process.first, lane.first, "Op",
tnode->name(), args);
for (TimeNode* next_tnode : node.second->next_tnodes) {
chrome_formatter_.EmitFlowStart(
tnode->name() + "_flow", tnode->start_micros + tnode->exec_micros,
process.first, lane.first, flow_id);
chrome_formatter_.EmitFlowEnd(
tnode->name() + "_flow", next_tnode->start_micros,
next_tnode->process->pid, next_tnode->tid, flow_id);
flow_id += 1;
}
}
}
}
for (const auto& dev : mem_tracker_.devices()) {
if (IsPlacedOnCPU(dev.first)) {
continue;
}
int64_t pid = AllocatePID();
chrome_formatter_.EmitPID(GetMemoryLaneName(dev.first), pid);
int64_t pid2 = AllocatePID();
chrome_formatter_.EmitPID(GetMemoryLaneName(dev.first) + " allocations",
pid2);
const MemoryTracker::Device& device = dev.second;
int64_t max_bytes_in_use = 0;
int64_t cur_bytes_in_use = 0;
int64_t last_point = 0;
for (const auto& alloc : device.allocations) {
cur_bytes_in_use = alloc.second;
max_bytes_in_use = std::max(max_bytes_in_use, cur_bytes_in_use);
int64_t ts = alloc.first;
if (ts - last_point < 100) continue;
last_point = ts;
std::map<int64_t, std::vector<string>> tensor_mem;
for (const auto& tensor_alloc_it : dev.second.tensor_allocs) {
const auto& tensor_alloc = tensor_alloc_it.second;
auto it = tensor_alloc.lower_bound(ts);
if (it != tensor_alloc.begin()) {
--it;
}
if (it->second > 0) {
tensor_mem[it->second].push_back(tensor_alloc_it.first);
}
}
chrome_formatter_.EmitCounter("Memory", "Memory Series", pid, ts,
dev.first, cur_bytes_in_use, tensor_mem);
}
if (IsPlacedOnAccelerator(dev.first)) {
absl::FPrintF(stdout, "%s peak memory: %.2f MB\n", dev.first,
max_bytes_in_use / 1000000.0);
}
}
OutputTimeline();
}
void Timeline::GenerateScopeTimeline(const ScopeNode* node) {
std::set<int64_t> visited_depth;
EmitTreeNode(node, 0, node->proto().total_exec_micros(), 0, &visited_depth);
OutputTimeline();
}
void Timeline::GenerateCodeTimeline(const CodeNode* node) {
std::set<int64_t> visited_depth;
EmitTreeNode(node, 0, node->proto().total_exec_micros(), 0, &visited_depth);
OutputTimeline();
}
void Timeline::OutputTimeline() {
std::string outfile = absl::StrFormat("%s_%d", outfile_, step());
Status s =
WriteStringToFile(Env::Default(), outfile, chrome_formatter_.Format());
if (!s.ok()) {
absl::FPrintF(stderr, "Failed to write timeline file: %s\nError: %s\n",
outfile, s.ToString());
return;
}
absl::FPrintF(stdout,
"\n******************************************************\n");
  absl::FPrintF(stdout,
                "Timeline file is written to %s.\n"
                "Open a Chrome browser, enter URL chrome://tracing and "
                "load the timeline file.",
                outfile);
absl::FPrintF(stdout,
"\n******************************************************\n");
fflush(stdout);
}
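// Greedy lane packing: within each process (device), an event goes into the
// first lane whose latest event ends at or before the new event's start;
// otherwise a new lane is opened. The lane index becomes the trace "tid".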
void Timeline::AllocateLanes() {
for (auto& process : tnodes_) {
Process* p = process_[process.first].get();
for (auto& tnode : process.second) {
int64_t start_time = tnode.second->start_micros;
int64_t end_time = tnode.second->start_micros + tnode.second->exec_micros;
int64_t l = -1;
for (int64_t i = 0, end = p->lanes.size(); i < end; ++i) {
const auto& lane = p->lanes[i];
l = i;
for (auto cur_it = lane.rbegin(); cur_it != lane.rend(); ++cur_it) {
if (cur_it->second > start_time) {
l = -1;
break;
}
if (start_time > cur_it->second) {
break;
}
}
if (l >= 0) {
break;
}
}
if (l < 0) {
l = p->lanes.size();
std::map<int64_t, int64_t> nlane;
nlane[start_time] = end_time;
p->lanes.push_back(nlane);
} else {
p->lanes[l][start_time] = end_time;
}
tnode.second->tid = l;
alloc_nodes_[p->pid][l][start_time] = tnode.second.get();
}
}
}
int64_t Timeline::AllocatePID() {
int64_t cur_pid = next_pid_;
next_pid_ += 1;
return cur_pid;
}
}
} | #include <memory>
#include <utility>
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/internal/tfprof_constants.h"
#include "tensorflow/core/profiler/internal/tfprof_stats.h"
#include "tensorflow/core/profiler/internal/tfprof_utils.h"
#include "tensorflow/core/profiler/tfprof_log.pb.h"
#include "tensorflow/core/profiler/tfprof_options.h"
#include "tensorflow/core/profiler/tfprof_output.pb.h"
namespace tensorflow {
namespace tfprof {
class TFProfTimelineTest : public ::testing::Test {
protected:
TFProfTimelineTest() {
string graph_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/graph.pbtxt");
std::unique_ptr<tensorflow::GraphDef> graph_pb(new tensorflow::GraphDef());
TF_CHECK_OK(
ReadProtoFile(Env::Default(), graph_path, graph_pb.get(), false));
std::unique_ptr<tensorflow::RunMetadata> run_meta_pb(
new tensorflow::RunMetadata());
string run_meta_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/run_meta");
TF_CHECK_OK(
ReadProtoFile(Env::Default(), run_meta_path, run_meta_pb.get(), true));
tf_stats_ = std::make_unique<TFStats>(
std::move(graph_pb), std::move(run_meta_pb), nullptr, nullptr);
tf_stats_->BuildAllViews();
}
std::unique_ptr<TFStats> tf_stats_;
};
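// The tests below render a timeline and compare Hash64 of the dumped file
// against a golden value, so any change to the emitted trace format
// surfaces as a hash mismatch.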
TEST_F(TFProfTimelineTest, GraphView) {
string dump_file = io::JoinPath(testing::TmpDir(), "dump");
Options opts(10000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "name",
{".*"},
{".*"}, {""}, {".*"}, {""}, false,
{"params", "bytes", "micros", "float_ops"}, "timeline",
{{"outfile", dump_file}});
tf_stats_->ShowGraphNode("graph", opts);
string dump_str;
TF_CHECK_OK(ReadFileToString(Env::Default(), dump_file + "_0", &dump_str));
EXPECT_EQ(16556121177519539380ull, Hash64(dump_str));
}
TEST_F(TFProfTimelineTest, ScopeView) {
string dump_file = io::JoinPath(testing::TmpDir(), "dump");
Options opts(5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "name",
{".*"},
{".*"}, {""}, {".*"}, {""}, false,
{"params", "bytes", "micros", "float_ops"}, "timeline",
{{"outfile", dump_file}});
tf_stats_->ShowGraphNode("scope", opts);
string dump_str;
TF_CHECK_OK(ReadFileToString(Env::Default(), dump_file + "_0", &dump_str));
EXPECT_EQ(17545174915963890413ull, Hash64(dump_str));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/internal/tfprof_timeline.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/internal/tfprof_timeline_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d5cba145-5a94-41c9-ab9e-482062e3c0d9 | cpp | tensorflow/tensorflow | tfprof_show | tensorflow/core/profiler/internal/tfprof_show.cc | tensorflow/core/profiler/internal/tfprof_show_test.cc | #include "tensorflow/core/profiler/internal/tfprof_show.h"
#include <memory>
#include <set>
#include <utility>
#include <vector>
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/regexp.h"
namespace tensorflow {
namespace tfprof {
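// Dispatches on the requested output type. Judging from the kOutput table in
// tfprof_options.h, index 0 is "timeline", 1 is "stdout", 2 is "file", and
// 3/4 are proto-style outputs that need no side effect here.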
const GraphNodeProto& TFShow::Show(const string& prefix, const Options& opts) {
if (opts.output_type == kOutput[0]) {
Timeline timeline(opts.step, opts.output_options.at(kTimelineOpts[0]));
return ShowInternal(opts, &timeline)->proto();
} else {
const ShowNode* ret = ShowInternal(opts, nullptr);
if (opts.output_type == kOutput[1]) {
absl::PrintF("%s", (prefix + ret->formatted_str));
fflush(stdout);
} else if (opts.output_type == kOutput[2]) {
Status s = WriteStringToFile(Env::Default(),
opts.output_options.at(kFileOpts[0]),
prefix + ret->formatted_str);
if (!s.ok()) {
absl::FPrintF(stderr, "%s\n", s.ToString());
}
} else if (opts.output_type == kOutput[3] ||
opts.output_type == kOutput[4]) {
} else {
absl::FPrintF(stderr, "Unknown output type: %s\n", opts.output_type);
}
return ret->proto();
}
}
bool TFShow::LookUpCheckPoint(const string& name,
std::unique_ptr<TFProfTensor>* tensor) {
if (name == kTFProfRoot || !ckpt_reader_ || !tensor) {
return false;
}
std::unique_ptr<Tensor> out_tensor;
TF_Status* status = TF_NewStatus();
ckpt_reader_->GetTensor(name, &out_tensor, status);
if (TF_GetCode(status) != TF_OK) {
absl::FPrintF(stderr, "%s\n", TF_Message(status));
TF_DeleteStatus(status);
return false;
}
*tensor = std::make_unique<TFProfTensor>(std::move(out_tensor));
TF_DeleteStatus(status);
return true;
}
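// A node is displayed only if it clears every numeric threshold in the
// options (bytes, micros, params, float_ops, occurrence, depth), matches at
// least one show_name_regex, and matches no hide_name_regex. The root node
// is always shown.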
bool TFShow::ShouldShow(const ShowNode* node, const Options& opts,
int depth) const {
if (node->name() == kTFProfRoot) return true;
if (node->proto().total_requested_bytes() < opts.min_bytes ||
node->proto().total_peak_bytes() < opts.min_peak_bytes ||
node->proto().total_residual_bytes() < opts.min_residual_bytes ||
node->proto().total_output_bytes() < opts.min_output_bytes ||
node->proto().total_exec_micros() < opts.min_micros ||
node->proto().total_accelerator_exec_micros() <
opts.min_accelerator_micros ||
node->proto().total_cpu_exec_micros() < opts.min_cpu_micros ||
node->proto().parameters() < opts.min_params ||
node->proto().float_ops() < opts.min_float_ops ||
node->proto().run_count() < opts.min_occurrence ||
depth > opts.max_depth || !ShouldShowIfExtra(node, opts, depth)) {
return false;
}
bool show = false;
if (opts.show_name_regexes.size() == 1 && opts.show_name_regexes[0] == ".*") {
show = true;
} else {
for (const string& regex : opts.show_name_regexes) {
if (RE2::FullMatch(node->name(), regex)) {
show = true;
break;
}
}
}
if (!show) return false;
for (const string& regex : opts.hide_name_regexes) {
if (RE2::FullMatch(node->name(), regex)) return false;
}
return true;
}
bool TFShow::ShouldTrim(const ShowNode* node,
const std::vector<string>& regexes) const {
for (const string& regex : regexes) {
if (RE2::FullMatch(node->name(), regex)) {
return true;
}
}
return false;
}
bool TFShow::ReAccount(ShowNode* node, const Options& opts) {
node->ReInit(opts.step);
if (opts.account_type_regexes.size() == 1 &&
opts.account_type_regexes[0] == ".*") {
return true;
}
for (const string& regex : opts.account_type_regexes) {
for (const string& type : node->node->op_types()) {
if (RE2::FullMatch(type, regex)) {
return true;
}
}
}
return false;
}
string TFShow::FormatNodeMemory(ShowNode* node, int64_t bytes,
int64_t total_bytes) const {
string memory = FormatMemory(total_bytes);
if (node->account) {
memory = FormatMemory(bytes) + "/" + memory;
} else {
memory = "--/" + memory;
}
return memory;
}
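// Formats one display line per node. Each selected metric is rendered as
// "accounted/total"; when the node is not accounted under the current
// account_type_regexes, the accounted half is printed as "--".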
string TFShow::FormatNode(ShowNode* node, const Options& opts) const {
std::vector<string> info;
if (opts.select.find(kShown[2]) != opts.select.end()) {
const string shape = FormatShapes(node->node->shape());
if (!shape.empty()) {
info.push_back(shape);
}
string params = FormatNumber(node->proto().total_parameters()) + " params";
if (node->account) {
params = FormatNumber(node->proto().parameters()) + "/" + params;
} else {
params = "--/" + params;
}
info.push_back(params);
}
if (opts.select.find(kShown[3]) != opts.select.end()) {
string fops = FormatNumber(node->proto().total_float_ops()) + " flops";
if (node->account) {
fops = FormatNumber(node->proto().float_ops()) + "/" + fops;
} else {
fops = "--/" + fops;
}
info.push_back(fops);
}
if (opts.select.find(kShown[0]) != opts.select.end()) {
info.push_back(FormatNodeMemory(node, node->proto().requested_bytes(),
node->proto().total_requested_bytes()));
}
if (opts.select.find(kShown[11]) != opts.select.end()) {
info.push_back(FormatNodeMemory(node, node->proto().peak_bytes(),
node->proto().total_peak_bytes()));
}
if (opts.select.find(kShown[12]) != opts.select.end()) {
info.push_back(FormatNodeMemory(node, node->proto().residual_bytes(),
node->proto().total_residual_bytes()));
}
if (opts.select.find(kShown[13]) != opts.select.end()) {
info.push_back(FormatNodeMemory(node, node->proto().output_bytes(),
node->proto().total_output_bytes()));
}
if (opts.select.find(kShown[1]) != opts.select.end()) {
info.push_back(FormatTotalExecTime(node, opts));
info.push_back(FormatAcceleratorExecTime(node, opts));
info.push_back(FormatCPUExecTime(node, opts));
}
if (opts.select.find(kShown[9]) != opts.select.end() &&
opts.select.find(kShown[1]) == opts.select.end()) {
info.push_back(FormatAcceleratorExecTime(node, opts));
}
if (opts.select.find(kShown[10]) != opts.select.end() &&
opts.select.find(kShown[1]) == opts.select.end()) {
info.push_back(FormatCPUExecTime(node, opts));
}
if (opts.select.find(kShown[5]) != opts.select.end()) {
if (node->proto().devices_size() > 0) {
info.push_back(absl::StrJoin(node->proto().devices(), "|"));
}
}
if (opts.select.find(kShown[6]) != opts.select.end()) {
const std::set<string>& op_types = node->node->op_types();
info.push_back(absl::StrJoin(op_types, "|"));
}
if (opts.select.find(kShown[7]) != opts.select.end()) {
string run = FormatNumber(node->proto().total_run_count());
if (node->account) {
run = FormatNumber(node->proto().run_count()) + "/" + run;
} else {
run = "--/" + run;
}
string definition = FormatNumber(node->proto().total_definition_count());
if (node->account) {
definition = "1/" + definition;
} else {
definition = "--/" + definition;
}
info.push_back(run + "|" + definition);
}
if (opts.select.find(kShown[8]) != opts.select.end()) {
std::vector<string> shape_vec;
for (const auto& s : node->node->input_shapes()) {
if (s.second.empty()) {
shape_vec.push_back(absl::StrFormat("%d:unknown", s.first));
} else {
shape_vec.push_back(
absl::StrFormat("%d:%s", s.first, absl::StrJoin(s.second, "x")));
}
}
info.push_back(absl::StrJoin(shape_vec, "|"));
}
return absl::StrFormat("%s (%s)", node->name(), absl::StrJoin(info, ", "));
}
string TFShow::FormatLegend(const Options& opts) const {
std::vector<string> legends;
if (opts.select.find(kShown[2]) != opts.select.end()) {
legends.push_back("# parameters");
}
if (opts.select.find(kShown[3]) != opts.select.end()) {
legends.push_back("# float_ops");
}
if (opts.select.find(kShown[0]) != opts.select.end()) {
legends.push_back("requested bytes");
}
if (opts.select.find(kShown[11]) != opts.select.end()) {
legends.push_back("peak bytes");
}
if (opts.select.find(kShown[12]) != opts.select.end()) {
legends.push_back("residual bytes");
}
if (opts.select.find(kShown[13]) != opts.select.end()) {
legends.push_back("output bytes");
}
if (opts.select.find(kShown[1]) != opts.select.end()) {
legends.push_back("total execution time");
legends.push_back("accelerator execution time");
legends.push_back("cpu execution time");
}
if (opts.select.find(kShown[9]) != opts.select.end() &&
opts.select.find(kShown[1]) == opts.select.end()) {
legends.push_back("accelerator execution time");
}
if (opts.select.find(kShown[10]) != opts.select.end() &&
opts.select.find(kShown[1]) == opts.select.end()) {
legends.push_back("cpu execution time");
}
if (opts.select.find(kShown[5]) != opts.select.end()) {
legends.push_back("assigned devices");
}
if (opts.select.find(kShown[6]) != opts.select.end()) {
legends.push_back("op types");
}
if (opts.select.find(kShown[7]) != opts.select.end()) {
legends.push_back("op count (run|defined)");
}
if (opts.select.find(kShown[8]) != opts.select.end()) {
legends.push_back("input shapes");
}
return absl::StrFormat("node name | %s\n", absl::StrJoin(legends, " | "));
}
}
} | #include <memory>
#include <utility>
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/internal/tfprof_constants.h"
#include "tensorflow/core/profiler/internal/tfprof_stats.h"
#include "tensorflow/core/profiler/internal/tfprof_utils.h"
#include "tensorflow/core/profiler/tfprof_log.pb.h"
#include "tensorflow/core/profiler/tfprof_options.h"
#include "tensorflow/core/profiler/tfprof_output.pb.h"
namespace tensorflow {
namespace tfprof {
string CheckAndRemoveDoc(const string& doc) {
auto pos = doc.find("Profile:");
CHECK(pos != doc.npos);
return doc.substr(pos + 9);
}
class TFProfShowTest : public ::testing::Test {
protected:
TFProfShowTest() {
string graph_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/graph.pbtxt");
std::unique_ptr<tensorflow::GraphDef> graph_pb(new tensorflow::GraphDef());
TF_CHECK_OK(
ReadProtoFile(Env::Default(), graph_path, graph_pb.get(), false));
std::unique_ptr<tensorflow::RunMetadata> run_meta_pb(
new tensorflow::RunMetadata());
string run_meta_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/run_meta");
TF_CHECK_OK(
ReadProtoFile(Env::Default(), run_meta_path, run_meta_pb.get(), true));
std::unique_ptr<OpLogProto> op_log_pb(new OpLogProto());
string op_log_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/tfprof_log");
TF_CHECK_OK(ReadBinaryProto(Env::Default(), op_log_path, op_log_pb.get()));
string ckpt_path = io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/ckpt");
TF_Status* status = TF_NewStatus();
std::unique_ptr<checkpoint::CheckpointReader> ckpt_reader(
new checkpoint::CheckpointReader(ckpt_path, status));
CHECK(TF_GetCode(status) == TF_OK);
TF_DeleteStatus(status);
tf_stats_ =
std::make_unique<TFStats>(std::move(graph_pb), std::move(run_meta_pb),
std::move(op_log_pb), std::move(ckpt_reader));
tf_stats_->BuildAllViews();
}
string TestToFromProto(const string& cmd, const Options& opts,
bool show_multi_node = false) {
string profile_file = io::JoinPath(testing::TmpDir(), "profile");
tf_stats_->WriteProfile(profile_file);
TFStats new_stats(profile_file, nullptr);
new_stats.BuildAllViews();
if (show_multi_node) {
new_stats.ShowMultiGraphNode(cmd, opts);
} else {
new_stats.ShowGraphNode(cmd, opts);
}
string dump_str;
TF_CHECK_OK(ReadFileToString(Env::Default(),
opts.output_options.at("outfile"), &dump_str));
return dump_str;
}
std::unique_ptr<TFStats> tf_stats_;
};
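// Note: Options is constructed positionally in the tests below. Judging from
// tfprof_options.h, the leading integers are max_depth plus the min_* display
// thresholds and the step, followed by order_by and the account/start/trim/
// show/hide regex lists; the parameter names are an assumption, not taken
// from this file.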
TEST_F(TFProfShowTest, DumpScopeMode) {
string dump_file = io::JoinPath(testing::TmpDir(), "dump");
Options opts(
5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, "name",
{"VariableV2"},
{".*"}, {""}, {".*"}, {""}, false,
{"params", "bytes", "peak_bytes", "residual_bytes", "output_bytes",
"micros", "accelerator_micros", "cpu_micros", "float_ops"},
"file", {{"outfile", dump_file}});
tf_stats_->ShowGraphNode("scope", opts);
string dump_str;
TF_CHECK_OK(ReadFileToString(Env::Default(), dump_file, &dump_str));
EXPECT_EQ(
"node name | # parameters | # float_ops | requested bytes | peak bytes | "
"residual bytes | output bytes | total execution time | accelerator "
"execution time | cpu execution time\n_TFProfRoot (--/451 params, --/0 "
"flops, --/2.56KB, --/2.56KB, --/2.56KB, --/2.56KB, --/13us, --/0us, "
"--/13us)\n DW (3x3x3x6, 162/162 params, 0/0 flops, 1.28KB/1.28KB, "
"1.28KB/1.28KB, 1.28KB/1.28KB, 1.28KB/1.28KB, 2us/2us, 0us/0us, "
"2us/2us)\n DW2 (2x2x6x12, 288/288 params, 0/0 flops, 1.28KB/1.28KB, "
"1.28KB/1.28KB, 1.28KB/1.28KB, 1.28KB/1.28KB, 11us/11us, 0us/0us, "
"11us/11us)\n ScalarW (1, 1/1 params, 0/0 flops, 0B/0B, 0B/0B, 0B/0B, "
"0B/0B, 0us/0us, 0us/0us, 0us/0us)\n",
CheckAndRemoveDoc(dump_str));
EXPECT_EQ(dump_str, TestToFromProto("scope", opts));
}
TEST_F(TFProfShowTest, DumpAcceleratorAndCPUMicros) {
string dump_file = io::JoinPath(testing::TmpDir(), "dump");
Options opts(5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, "cpu_micros",
{".*"},
{".*"}, {""}, {".*"}, {""}, false,
{"accelerator_micros", "cpu_micros"}, "file",
{{"outfile", dump_file}});
tf_stats_->ShowGraphNode("scope", opts);
string dump_str;
TF_CHECK_OK(ReadFileToString(Env::Default(), dump_file, &dump_str));
EXPECT_EQ(
"node name | accelerator execution time | cpu execution "
"time\n_TFProfRoot (--/404us, --/4.54ms)\n Conv2D (226us/226us, "
"4.07ms/4.07ms)\n Conv2D_1 (178us/178us, 419us/419us)\n "
"_retval_Conv2D_1_0_0 (0us/0us, 41us/41us)\n DW2 (0us/0us, 11us/11us)\n "
" DW2/Assign (0us/0us, 0us/0us)\n DW2/Initializer (0us/0us, "
"0us/0us)\n DW2/Initializer/random_normal (0us/0us, 0us/0us)\n "
" DW2/Initializer/random_normal/RandomStandardNormal (0us/0us, "
"0us/0us)\n DW2/Initializer/random_normal/mean (0us/0us, "
"0us/0us)\n DW2/Initializer/random_normal/mul (0us/0us, "
"0us/0us)\n DW2/Initializer/random_normal/shape (0us/0us, "
"0us/0us)\n DW2/Initializer/random_normal/stddev (0us/0us, "
"0us/0us)\n DW2/read (0us/0us, 0us/0us)\n DW (0us/0us, 2us/2us)\n "
"DW/Assign (0us/0us, 0us/0us)\n DW/Initializer (0us/0us, 0us/0us)\n "
" DW/Initializer/random_normal (0us/0us, 0us/0us)\n "
"DW/Initializer/random_normal/RandomStandardNormal (0us/0us, 0us/0us)\n "
" DW/Initializer/random_normal/mean (0us/0us, 0us/0us)\n "
"DW/Initializer/random_normal/mul (0us/0us, 0us/0us)\n "
"DW/Initializer/random_normal/shape (0us/0us, 0us/0us)\n "
"DW/Initializer/random_normal/stddev (0us/0us, 0us/0us)\n DW/read "
"(0us/0us, 0us/0us)\n zeros (0us/0us, 2us/2us)\n ScalarW (0us/0us, "
"0us/0us)\n ScalarW/Assign (0us/0us, 0us/0us)\n "
"ScalarW/Initializer (0us/0us, 0us/0us)\n "
"ScalarW/Initializer/random_normal (0us/0us, 0us/0us)\n "
"ScalarW/Initializer/random_normal/RandomStandardNormal (0us/0us, "
"0us/0us)\n ScalarW/Initializer/random_normal/mean (0us/0us, "
"0us/0us)\n ScalarW/Initializer/random_normal/mul (0us/0us, "
"0us/0us)\n ScalarW/Initializer/random_normal/shape (0us/0us, "
"0us/0us)\n ScalarW/Initializer/random_normal/stddev (0us/0us, "
"0us/0us)\n ScalarW/read (0us/0us, 0us/0us)\n init (0us/0us, "
"0us/0us)\n",
CheckAndRemoveDoc(dump_str));
EXPECT_EQ(dump_str, TestToFromProto("scope", opts));
}
TEST_F(TFProfShowTest, DumpOpMode) {
string dump_file = io::JoinPath(testing::TmpDir(), "dump");
Options opts(
5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, "params",
{".*"},
{".*"}, {""}, {".*"}, {""}, false,
{"params", "bytes", "micros", "float_ops", "occurrence", "input_shapes"},
"file", {{"outfile", dump_file}});
tf_stats_->ShowMultiGraphNode("op", opts);
string dump_str;
TF_CHECK_OK(ReadFileToString(Env::Default(), dump_file, &dump_str));
EXPECT_EQ(
"nodename|requestedbytes|totalexecutiontime|acceleratorexecutiontime|"
"cpuexecutiontime|#parameters|#float_ops|opoccurrence(run|defined)|"
"inputshapes\nVariableV22.56KB(100.00%,8.40%),13us(100.00%,0.26%),0us("
"100.00%,0.00%),13us(100.00%,0.29%),451params(100.00%,100.00%),0float_"
"ops(100.00%,0.00%),2|3\n\ninput_type:\t(run*2|defined*3)\texec_time:"
"13us\n\nAdd0B(0.00%,0.00%),0us(99.74%,0.00%),0us(100.00%,0.00%),0us(99."
"71%,0.00%),0params(0.00%,0.00%),0float_ops(100.00%,0.00%),0|3\n\ninput_"
"type:0:1,\t1:1\t(run*0|defined*1)\texec_time:0us\ninput_type:0:2x2x6x12,"
"\t1:1\t(run*0|defined*1)\texec_time:0us\ninput_type:0:3x3x3x6,\t1:1\t("
"run*0|defined*1)\texec_time:0us\n\nAssign0B(0.00%,0.00%),0us(99.74%,0."
"00%),0us(100.00%,0.00%),0us(99.71%,0.00%),0params(0.00%,0.00%),0float_"
"ops(100.00%,0.00%),0|3\n\ninput_type:0:1,\t1:1\t(run*0|defined*1)\texec_"
"time:0us\ninput_type:0:2x2x6x12,\t1:2x2x6x12\t(run*0|defined*1)\texec_"
"time:0us\ninput_type:0:3x3x3x6,\t1:3x3x3x6\t(run*0|defined*1)\texec_"
"time:0us\n\nConst0B(0.00%,0.00%),2us(99.74%,0.04%),0us(100.00%,0.00%),"
"2us(99.71%,0.04%),0params(0.00%,0.00%),0float_ops(100.00%,0.00%),1|"
"10\n\ninput_type:\t(run*1|defined*10)\texec_time:2us\n\nConv2D27.90KB("
"91.60%,91.60%),4.89ms(99.70%,98.87%),404us(100.00%,100.00%),4.49ms(99."
"67%,98.77%),0params(0.00%,0.00%),10.44kfloat_ops(100.00%,100.00%),2|"
"2\n\ninput_type:0:2x3x3x6,\t1:2x2x6x12\t(run*1|defined*1)\texec_time:"
"597us\ninput_type:0:2x6x6x3,\t1:3x3x3x6\t(run*1|defined*1)\texec_time:4."
"29ms\n\nIdentity0B(0.00%,0.00%),0us(0.83%,0.00%),0us(0.00%,0.00%),0us(0."
"90%,0.00%),0params(0.00%,0.00%),0float_ops(0.00%,0.00%),0|3\n\ninput_"
"type:0:1\t(run*0|defined*1)\texec_time:0us\ninput_type:0:2x2x6x12\t(run*"
"0|defined*1)\texec_time:0us\ninput_type:0:3x3x3x6\t(run*0|defined*1)"
"\texec_time:0us\n\n",
StringReplace(CheckAndRemoveDoc(dump_str), " ", ""));
EXPECT_EQ(dump_str, TestToFromProto("op", opts, true));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/internal/tfprof_show.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/internal/tfprof_show_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
67a22cb6-2ba5-40ca-af8d-032bd07077a4 | cpp | tensorflow/tensorflow | hardware_type_utils | tensorflow/core/profiler/utils/hardware_type_utils.cc | tensorflow/core/profiler/utils/hardware_type_utils_test.cc | #include "tensorflow/core/profiler/utils/hardware_type_utils.h"
#include <algorithm>
#include "absl/container/btree_map.h"
#include "absl/strings/match.h"
#include "xla/tsl/profiler/utils/math_utils.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/hardware_types.pb.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
namespace tensorflow {
namespace profiler {
namespace {
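// The tables below encode peak math throughput per SM per clock cycle for
// each CUDA compute capability, counting a fused multiply-add as two
// operations. Despite the *_tflops field names, the raw values are
// operations/SM/cycle; GetGpuFlopCapabilitiesPerSM scales them by the SM
// clock in GHz, yielding GFLOP/s per SM.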
const GpuFlopCapabilities kComputeCap_PerSM_PerCycle_9_0 = {
.cuda_core =
{
.fp64_tflops = 128,
.fp32_tflops = 256,
.bf16_tflops = 512,
.fp16_tflops = 512,
.int8_tops = 1024,
},
.tensor_core =
{
.fp64_tflops = 256,
.fp32_tflops = 2048,
.bf16_tflops = 4096,
.fp16_tflops = 4096,
.fp8_tflops = 8192,
.int8_tops = 8192,
},
.has_tensor_core_sparsity_support = true,
};
const GpuFlopCapabilities kComputeCap_PerSM_PerCycle_8_9 = {
.cuda_core =
{
.fp64_tflops = 128,
.fp32_tflops = 256,
.bf16_tflops = 256,
.fp16_tflops = 256,
.int8_tops = 512,
},
.tensor_core =
{
.fp32_tflops = 512,
.bf16_tflops = 1024,
.fp16_tflops = 1024,
.fp8_tflops = 2048,
.int8_tops = 2048,
.int4_tops = 4096,
},
.has_tensor_core_sparsity_support = true,
};
const GpuFlopCapabilities kComputeCap_PerSM_PerCycle_8_6 = {
.cuda_core =
{
.fp64_tflops = 128,
.fp32_tflops = 256,
.bf16_tflops = 256,
.fp16_tflops = 256,
.int8_tops = 512,
},
.tensor_core =
{
.fp32_tflops = 256,
.bf16_tflops = 512,
.fp16_tflops = 1024,
.int8_tops = 2048,
.int4_tops = 4096,
},
.has_tensor_core_sparsity_support = true,
};
const GpuFlopCapabilities kComputeCap_PerSM_PerCycle_8_0 = {
.cuda_core =
{
.fp64_tflops = 64,
.fp32_tflops = 128,
.bf16_tflops = 256,
.fp16_tflops = 512,
.int8_tops = 512,
},
.tensor_core =
{
.fp64_tflops = 128,
.fp32_tflops = 1024,
.bf16_tflops = 2048,
.fp16_tflops = 2048,
.int8_tops = 4096,
},
.has_tensor_core_sparsity_support = true,
};
const GpuFlopCapabilities kComputeCap_PerSM_PerCycle_7_5 = {
.cuda_core =
{
.fp64_tflops = 64,
.fp32_tflops = 128,
.fp16_tflops = 256,
.int8_tops = 512,
},
.tensor_core =
{
.fp16_tflops = 1024,
.int8_tops = 2048,
.int4_tops = 4096,
},
.has_tensor_core_sparsity_support = false,
};
const GpuFlopCapabilities kComputeCap_PerSM_PerCycle_7_0 = {
.cuda_core =
{
.fp64_tflops = 64,
.fp32_tflops = 128,
.bf16_tflops = 0.0,
.fp16_tflops = 256,
.int8_tops = 512,
},
.tensor_core =
{
.fp16_tflops = 1024,
},
.has_tensor_core_sparsity_support = false,
};
const GpuFlopCapabilities kComputeCap_PerSM_PerCycle_6_1 = {
.cuda_core =
{
.fp64_tflops = 8,
.fp32_tflops = 256,
.fp16_tflops = 4,
.int8_tops = 1024,
},
.tensor_core = {},
.has_tensor_core_sparsity_support = false,
};
const GpuFlopCapabilities kComputeCap_PerSM_PerCycle_6_0 = {
.cuda_core =
{
.fp64_tflops = 64,
.fp32_tflops = 128,
.fp16_tflops = 256,
.int8_tops = 512,
},
.tensor_core = {},
.has_tensor_core_sparsity_support = false,
};
const GpuFlopCapabilities kComputeCap_PerSM_PerCycle_5_0 = {
.cuda_core =
{
.fp64_tflops = 4,
.fp32_tflops = 256,
},
.tensor_core = {},
.has_tensor_core_sparsity_support = false,
};
const GpuFlopCapabilities kComputeCap_PerSM_PerCycle_3_0 = {
.cuda_core =
{
.fp64_tflops = 128,
.fp32_tflops = 384,
},
.tensor_core = {},
.has_tensor_core_sparsity_support = false,
};
const GpuFlopCapabilities kComputeCap_PerSM_PerCycle_2_0 = {
.cuda_core =
{
.fp64_tflops = 8,
.fp32_tflops = 64,
},
.tensor_core = {},
.has_tensor_core_sparsity_support = false,
};
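// Looks up flop capabilities by a normalized key (major * 1000 + minor * 10).
// With the descending-ordered table, lower_bound picks the highest known
// compute capability that does not exceed the requested one, matching the
// fallback warning logged below.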
GpuFlopCapabilities GetNvidiaFlopCapsPerSMPerCycle(int major_comp_cap,
int minor_comp_cap) {
  static const auto& kPerSMFlopCapsTable =
      *new absl::btree_map<int, GpuFlopCapabilities const*,
                           std::greater<int>>{
{9000, &kComputeCap_PerSM_PerCycle_9_0},
{8090, &kComputeCap_PerSM_PerCycle_8_9},
{8060, &kComputeCap_PerSM_PerCycle_8_6},
{8000, &kComputeCap_PerSM_PerCycle_8_0},
{7050, &kComputeCap_PerSM_PerCycle_7_5},
{7000, &kComputeCap_PerSM_PerCycle_7_0},
{6010, &kComputeCap_PerSM_PerCycle_6_1},
{6000, &kComputeCap_PerSM_PerCycle_6_0},
{5000, &kComputeCap_PerSM_PerCycle_5_0},
{3000, &kComputeCap_PerSM_PerCycle_3_0},
{2000, &kComputeCap_PerSM_PerCycle_2_0},
};
const int normalized_compute_cap =
major_comp_cap * 1000 + minor_comp_cap * 10;
GpuFlopCapabilities flops_cap{};
auto it = kPerSMFlopCapsTable.lower_bound(normalized_compute_cap);
if (it == kPerSMFlopCapsTable.end()) {
LOG(WARNING) << "GPU compute capability " << major_comp_cap << "."
<< minor_comp_cap << " is too old to support.";
} else {
flops_cap = *it->second;
if (it->first != normalized_compute_cap) {
LOG(WARNING) << "GPU compute capability " << major_comp_cap << "."
<< minor_comp_cap
<< " is not found. Use the highest compute cap known "
<< (it->first / 1000) << "." << ((it->first % 1000) / 10)
<< " instead.";
}
}
return flops_cap;
}
GpuFlopCapabilities GetGpuFlopCapabilitiesPerSM(
const DeviceCapabilities& device_cap) {
GpuFlopCapabilities flops_cap{};
if (device_cap.device_vendor() == kDeviceVendorNvidia) {
flops_cap =
GetNvidiaFlopCapsPerSMPerCycle(device_cap.compute_capability().major(),
device_cap.compute_capability().minor());
} else {
LOG(WARNING) << "Unsupported device vendor " << device_cap.device_vendor();
}
flops_cap.ScaleWith(device_cap.clock_rate_in_ghz());
return flops_cap;
}
}
double GetFlopMaxThroughputPerSM(const DeviceCapabilities& device_cap) {
GpuFlopCapabilities sm_flops = GetGpuFlopCapabilitiesPerSM(device_cap);
double result = std::max(
{sm_flops.cuda_core.fp32_tflops, sm_flops.cuda_core.fp16_tflops,
sm_flops.tensor_core.fp32_tflops, sm_flops.tensor_core.fp16_tflops});
VLOG(3) << "GetFlopMaxThroughputPerSM get result: " << result << " GFLOPs";
return result;
}
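// Shared-memory bandwidth per SM: 32 banks * 8 bytes per cycle (256 B/cycle)
// on compute capability 3.x and later, or 32 banks * 4 bytes at half rate
// (64 B/cycle) on Fermi, scaled by the SM clock in GHz and converted from
// GB/s to bytes/s.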
double GetSharedMemoryBandwidthPerSM(const DeviceCapabilities& device_cap) {
  double transaction_bytes_per_cycle =
      device_cap.compute_capability().major() <= 2 ? (32 * 4 / 2) : (32 * 8);
  double GiBPS = transaction_bytes_per_cycle * device_cap.clock_rate_in_ghz();
return tsl::profiler::GigaToUni(GiBPS);
}
absl::string_view GpuModelName(const DeviceCapabilities& device_cap) {
if (device_cap.device_vendor() == kDeviceVendorNvidia) {
switch (device_cap.compute_capability().major()) {
case 2:
return "Nvidia GPU (Fermi)";
case 3:
return "Nvidia GPU (Kepler)";
case 5:
return "Nvidia GPU (Maxwell)";
case 6:
return "Nvidia GPU (Pascal)";
case 7:
if (device_cap.compute_capability().minor() < 5) {
return "Nvidia GPU (Volta)";
} else {
return "Nvidia GPU (Turing)";
}
case 8:
if (device_cap.compute_capability().minor() < 9) {
return "Nvidia GPU (Ampere)";
} else {
return "Nvidia GPU (Ada Lovelace)";
}
case 9:
return "Nvidia GPU (Hopper)";
case 10:
return "Nvidia GPU (Blackwell)";
default:
return "Nvidia GPU";
}
} else if (device_cap.device_vendor() == kDeviceVendorAMD) {
switch (device_cap.compute_capability().major()) {
case 9:
return "AMD GPU - gfx-9XX series";
case 10:
return "AMD GPU - gfx-10XX series";
case 11:
return "AMD GPU - gfx-11XX series";
default:
return "AMD GPU";
}
} else {
LOG(ERROR) << "Unknown device vendor " << device_cap.device_vendor();
return "";
}
}
HardwareType ParseHardwareType(absl::string_view device_type) {
if (absl::StrContains(device_type, "GPU")) return HardwareType::GPU;
if (device_type == "CPU") return HardwareType::CPU_ONLY;
if (absl::StrContains(device_type, "TPU")) return HardwareType::TPU;
return HardwareType::UNKNOWN_HARDWARE;
}
bool HasDevice(HardwareType x) { return x > tensorflow::profiler::CPU_ONLY; }
}
} | #include "tensorflow/core/profiler/utils/hardware_type_utils.h"
#include "xla/tsl/profiler/utils/math_utils.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace profiler {
namespace {
TEST(HardwareTypeUtilsTest, H100PeakComputeTFlops) {
DeviceCapabilities device_cap;
device_cap.set_clock_rate_in_ghz(1.620);
device_cap.set_num_cores(114);
device_cap.set_memory_size_in_bytes(
tsl::profiler::GibiToGiga(tsl::profiler::GigaToUni(80)));
device_cap.set_memory_bandwidth(tsl::profiler::GigaToUni(2.04 * 1024));
device_cap.set_device_vendor("Nvidia");
device_cap.mutable_compute_capability()->set_major(9);
device_cap.mutable_compute_capability()->set_minor(0);
double peak_tflops =
GetFlopMaxThroughputPerSM(device_cap) * device_cap.num_cores() / 1000.0;
EXPECT_NEAR(peak_tflops, 756, 1.0);
}
TEST(HardwareTypeUtilsTest, A100PeakComputeTFlops) {
DeviceCapabilities device_cap;
device_cap.set_clock_rate_in_ghz(1.410);
device_cap.set_num_cores(108);
device_cap.set_memory_size_in_bytes(
tsl::profiler::GibiToGiga(tsl::profiler::GigaToUni(80)));
device_cap.set_memory_bandwidth(tsl::profiler::GigaToUni(2.04 * 1024));
device_cap.set_device_vendor("Nvidia");
device_cap.mutable_compute_capability()->set_major(8);
device_cap.mutable_compute_capability()->set_minor(0);
double peak_tflops =
GetFlopMaxThroughputPerSM(device_cap) * device_cap.num_cores() / 1000.0;
EXPECT_NEAR(peak_tflops, 312, 1.0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/hardware_type_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/hardware_type_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6dd790b3-9fd6-42c8-9057-21d992daec43 | cpp | tensorflow/tensorflow | derived_timeline | tensorflow/core/profiler/utils/derived_timeline.cc | tensorflow/core/profiler/utils/derived_timeline_test.cc | #include "tensorflow/core/profiler/utils/derived_timeline.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/convert/xla_op_utils.h"
#include "xla/tsl/profiler/utils/group_events.h"
#include "xla/tsl/profiler/utils/tf_op_utils.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/timespan.h"
#include "xla/tsl/profiler/utils/tpu_xplane_utils.h"
#include "xla/tsl/profiler/utils/trace_utils.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/util/stats_calculator.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/gpu_event_stats.h"
#include "tensorflow/core/profiler/utils/hlo_module_map.h"
#include "tensorflow/core/profiler/utils/hlo_proto_map.h"
#include "tensorflow/core/profiler/utils/host_offload_utils.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
#include "tensorflow/core/profiler/utils/trace_utils.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
using tsl::profiler::FindMutableTensorCorePlanes;
inline std::string HloModuleEventName(const GpuEventStats& stats) {
return stats.program_id ? tsl::profiler::HloModuleNameWithProgramId(
stats.hlo_module_name, *stats.program_id)
: std::string(stats.hlo_module_name);
}
inline std::string HloOpEventPrefix(const GpuEventStats& stats) {
return stats.program_id ? absl::StrCat(*stats.program_id, "/")
: absl::StrCat(stats.hlo_module_name, "/");
}
std::vector<XEventMetadata*> GetOrCreateHloOpEventsMetadata(
XPlaneBuilder& xplane, const GpuEventStats& stats, const Symbol symbol) {
DCHECK(stats.IsXlaOp());
std::vector<XEventMetadata*> hlo_op_events_metadata;
hlo_op_events_metadata.reserve(stats.hlo_op_names.size());
std::string hlo_op_event_prefix = HloOpEventPrefix(stats);
for (absl::string_view hlo_op_name : stats.hlo_op_names) {
XEventMetadata* hlo_op_event_metadata = xplane.GetOrCreateEventMetadata(
absl::StrCat(hlo_op_event_prefix, hlo_op_name));
if (hlo_op_event_metadata->display_name().empty()) {
hlo_op_event_metadata->set_display_name(std::string(hlo_op_name));
}
hlo_op_events_metadata.push_back(hlo_op_event_metadata);
if (!symbol.hlo_text.empty()) {
XStatsBuilder<XEventMetadata> event_stats(hlo_op_event_metadata, &xplane);
event_stats.SetOrAddStatValue(*xplane.GetOrCreateStatMetadata("hlo_text"),
symbol.hlo_text);
}
}
return hlo_op_events_metadata;
}
}
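// Derives events for one framework-op occurrence: for TensorFlow/JAX ops the
// name scopes (e.g. "a/b" for op "a/b/Mul") are expanded into nested events
// on the name-scope line, and the op itself is added to, or merged with an
// adjacent identical event on, the TF-op line.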
void ProcessTfOpEvent(absl::string_view tf_op_full_name,
tsl::profiler::Timespan event_span,
std::optional<int64_t> group_id,
XPlaneBuilder& plane_builder,
DerivedXLineBuilder& tf_name_scope_line_builder,
DerivedXLineBuilder& tf_op_line_builder) {
tsl::profiler::TfOp tf_op = tsl::profiler::ParseTfOpFullname(tf_op_full_name);
tsl::profiler::Category category = tf_op.category;
if (category == tsl::profiler::Category::kTensorFlow ||
category == tsl::profiler::Category::kJax) {
tf_name_scope_line_builder.ExpandOrAddEvents(
plane_builder.GetOrCreateEventsMetadata(
tsl::profiler::ParseTfNameScopes(tf_op)),
event_span, group_id);
}
XEventMetadata* tf_op_event_metadata =
plane_builder.GetOrCreateEventMetadata(tf_op_full_name);
if (tf_op_event_metadata->display_name().empty()) {
tf_op_event_metadata->set_display_name(tsl::profiler::TfOpEventName(tf_op));
}
tf_op_line_builder.ExpandOrAddEvent(*tf_op_event_metadata, event_span,
group_id);
}
DerivedXEventBuilder::DerivedXEventBuilder(XEventBuilder event,
std::optional<int64_t> group_id)
: event_(std::move(event)), group_id_(group_id) {}
bool DerivedXEventBuilder::ShouldExpand(const XEventMetadata& event_metadata,
std::optional<int64_t> group_id) const {
return event_.MetadataId() == event_metadata.id() && group_id_ == group_id;
}
void DerivedXEventBuilder::Expand(tsl::profiler::Timespan event_span) {
tsl::profiler::Timespan timespan = event_.GetTimespan();
DCHECK_LE(timespan.begin_ps(), event_span.begin_ps());
timespan.ExpandToInclude(event_span);
event_.SetTimespan(timespan);
}
DerivedXLineBuilder::DerivedXLineBuilder(
XPlaneBuilder* plane, int64_t line_id, absl::string_view name,
int64_t timestamp_ns, std::vector<DerivedXLineBuilder*> dependent_lines)
: group_id_stat_metadata_(
plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kGroupId))),
correlation_id_metadata_(plane->GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kCorrelationId))),
cuda_graph_id_metadata_(plane->GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kCudaGraphId))),
line_(plane->GetOrCreateLine(line_id)),
dependent_lines_(std::move(dependent_lines)) {
line_.SetName(name);
line_.SetTimestampNs(timestamp_ns);
}
void DerivedXLineBuilder::ExpandOrAddEvent(const XEventMetadata& event_metadata,
tsl::profiler::Timespan event_span,
std::optional<int64_t> group_id) {
ExpandOrAddLevelEvent(event_metadata, event_span, group_id,
0);
}
void DerivedXLineBuilder::ExpandOrAddEvents(
const std::vector<XEventMetadata*>& events_metadata_per_level,
tsl::profiler::Timespan event_span, std::optional<int64_t> group_id) {
if (events_metadata_per_level.empty()) return;
size_t current_nested_level = events_metadata_per_level.size();
for (size_t level = 0; level < current_nested_level; ++level) {
ExpandOrAddLevelEvent(*events_metadata_per_level[level], event_span,
group_id, level);
}
ResetLastEvents(current_nested_level);
}
void DerivedXLineBuilder::ExpandOrAddLevelEvent(
const XEventMetadata& event_metadata, tsl::profiler::Timespan event_span,
std::optional<int64_t> group_id, int level) {
auto& last_event = last_event_by_level_[level];
if (last_event && last_event->ShouldExpand(event_metadata, group_id)) {
last_event->Expand(event_span);
} else {
ResetLastEvents(level);
XEventBuilder event = line_.AddEvent(event_metadata);
event.SetTimespan(event_span);
if (group_id.has_value()) {
event.AddStatValue(*group_id_stat_metadata_, *group_id);
}
last_event.emplace(std::move(event), group_id);
}
}
void DerivedXLineBuilder::AddStatToLevelEvent(int level,
const XStatMetadata& metadata,
int64_t value) {
if (auto it = last_event_by_level_.find(level);
it != last_event_by_level_.end() && it->second.has_value()) {
it->second->SetOrAddStatValue(metadata, value);
}
}
void DerivedXLineBuilder::AddStatToLevelEvent(int level,
const XStatMetadata& metadata,
uint64_t value) {
if (auto it = last_event_by_level_.find(level);
it != last_event_by_level_.end() && it->second.has_value()) {
it->second->SetOrAddStatValue(metadata, value);
}
}
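// Trace viewer infers nesting from containment, so when several stacked
// events share an identical timespan, deeper events are shrunk by 1ns per
// level (bounded so every event keeps a positive duration) to force strict
// nesting.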
void DerivedXLineBuilder::AdjustDurationForTraceViewer(int level) {
if (level >= last_event_by_level_.size() || !last_event_by_level_[level])
return;
int max_level = level;
for (; max_level < last_event_by_level_.size(); ++max_level) {
if (!last_event_by_level_[max_level].has_value()) {
break;
}
}
--max_level;
if (max_level <= level) return;
auto& event_on_top_stack = *last_event_by_level_[max_level];
tsl::profiler::Timespan timespan = event_on_top_stack.GetTimespan();
int64_t max_shrink_ns = timespan.duration_ps() / 1000 - 1;
int64_t shrink_ns = 0;
std::optional<tsl::profiler::Timespan> last_level_timespan;
for (int i = level; i <= max_level; ++i) {
auto& current_event = *last_event_by_level_[i];
if (shrink_ns < max_shrink_ns &&
last_level_timespan == current_event.GetTimespan()) {
shrink_ns++;
}
last_level_timespan = current_event.GetTimespan();
if (shrink_ns) {
current_event.SetTimespan(tsl::profiler::Timespan::FromEndPoints(
last_level_timespan->begin_ps(),
last_level_timespan->end_ps() - 1000 * shrink_ns));
}
}
}
void DerivedXLineBuilder::ResetLastEvents(int level) {
AdjustDurationForTraceViewer(level);
for (int i = level, end = last_event_by_level_.size(); i < end; ++i) {
last_event_by_level_[i].reset();
}
if (level == 0) {
for (DerivedXLineBuilder* line : dependent_lines_) {
line->ResetLastEvents(0);
}
}
}
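// Builds the derived step line: every device event carrying a group_id stat
// expands (or creates) a step event named after that group id, and group
// metadata such as step names is then attached to the step events.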
void DeriveStepEventsFromGroups(
const tsl::profiler::GroupMetadataMap& group_metadata_map,
XPlane* device_trace) {
XPlaneVisitor plane_visitor =
tsl::profiler::CreateTfXPlaneVisitor(device_trace);
const XStatMetadata* group_id_stat_metadata =
plane_visitor.GetStatMetadataByType(StatType::kGroupId);
if (group_id_stat_metadata == nullptr) return;
XPlaneBuilder plane_builder(device_trace);
int64_t start_timestamp_ns = GetStartTimestampNs(*device_trace);
DerivedXLineBuilder steps(&plane_builder, kThreadIdStepInfo, kStepLineName,
start_timestamp_ns, {});
for (const XEventVisitor& event_visitor :
GetSortedEvents<XEventVisitor>(plane_visitor)) {
std::optional<XStatVisitor> group_id_stat =
event_visitor.GetStat(StatType::kGroupId, *group_id_stat_metadata);
if (group_id_stat.has_value()) {
int64_t group_id = group_id_stat->IntValue();
steps.ExpandOrAddEvent(
*plane_builder.GetOrCreateEventMetadata(absl::StrCat(group_id)),
event_visitor.GetTimespan(), group_id);
}
}
AddGroupMetadataToStepEvents(group_metadata_map, steps.Line());
}
void DeriveEventsFromAnnotations(const SymbolResolver& symbol_resolver,
XPlane* device_trace) {
XPlaneVisitor plane_visitor =
tsl::profiler::CreateTfXPlaneVisitor(device_trace);
XPlaneBuilder plane_builder(device_trace);
int64_t start_timestamp_ns = GetStartTimestampNs(*device_trace);
DerivedXLineBuilder tf_ops(&plane_builder, kThreadIdTfOp,
kTensorFlowOpLineName, start_timestamp_ns, {});
DerivedXLineBuilder tf_name_scope(&plane_builder, kThreadIdTfNameScope,
kTensorFlowNameScopeLineName,
start_timestamp_ns, {&tf_ops});
DerivedXLineBuilder hlo_ops(&plane_builder, kThreadIdHloOp, kXlaOpLineName,
start_timestamp_ns, {});
DerivedXLineBuilder hlo_modules(&plane_builder, kThreadIdHloModule,
kXlaModuleLineName, start_timestamp_ns,
{&tf_name_scope, &hlo_ops});
DerivedXLineBuilder source(&plane_builder, kThreadIdSource, kSourceLineName,
start_timestamp_ns, {});
for (const XEventVisitor& event :
GetSortedEvents<XEventVisitor>(plane_visitor)) {
GpuEventStats stats(&event);
if (!stats.IsKernel() && !stats.IsCudaGraphExecution()) continue;
tsl::profiler::Timespan event_span = event.GetTimespan();
if (!stats.hlo_module_name.empty()) {
hlo_modules.ExpandOrAddEvent(
*plane_builder.GetOrCreateEventMetadata(HloModuleEventName(stats)),
event_span, stats.group_id);
}
if (stats.IsXlaOp()) {
auto symbol = symbol_resolver(stats.program_id, stats.hlo_module_name,
stats.hlo_op_names.back());
auto hlo_events_metadata =
GetOrCreateHloOpEventsMetadata(plane_builder, stats, symbol);
hlo_ops.ExpandOrAddEvents(hlo_events_metadata, event_span,
stats.group_id);
if (stats.cuda_graph_id_for_inner_node.has_value() &&
*stats.cuda_graph_id_for_inner_node != 0) {
int level = static_cast<int>(hlo_events_metadata.size()) - 1;
if (level >= 0) {
hlo_ops.AddStatToLevelEvent(level, *hlo_ops.GetCudaGraphIdMetadata(),
*stats.cuda_graph_id_for_inner_node);
if (stats.correlation_id.has_value()) {
hlo_ops.AddStatToLevelEvent(level,
*hlo_ops.GetCorrelationIdMetadata(),
*stats.correlation_id);
}
}
}
if (!symbol.tf_op_name.empty()) {
ProcessTfOpEvent(symbol.tf_op_name,
event_span, stats.group_id, plane_builder,
tf_name_scope, tf_ops);
}
if (!symbol.source_info.empty()) {
source.ExpandOrAddEvent(
*plane_builder.GetOrCreateEventMetadata(symbol.source_info),
event_span, stats.group_id);
}
} else if (stats.IsTfOp()) {
ProcessTfOpEvent(stats.tf_op_fullname,
event_span, stats.group_id, plane_builder, tf_name_scope,
tf_ops);
}
}
RemoveEmptyLines(device_trace);
}
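// Derives a per-device "Launch Stats" line from host-side kernel launches.
// Launch events are aggregated per (device, group_id) into a combined
// timespan plus count/max/avg duration stats; CUDA driver API events (names
// starting with "cu") and derived lines are skipped.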
void DeriveEventsFromHostTrace(
const XPlane* host_trace,
const tsl::profiler::GroupMetadataMap& group_metadata_map,
std::vector<XPlane*> device_traces) {
struct GroupLaunchInfo {
tsl::profiler::Timespan timespan;
tsl::Stat<uint64_t> stat;
void AddEventTimespan(tsl::profiler::Timespan event_span) {
if (stat.count() == 0) {
timespan = event_span;
} else {
timespan.ExpandToInclude(event_span);
}
stat.UpdateStat(event_span.duration_ps());
}
};
using DeviceLaunchInfo =
absl::flat_hash_map<int64_t , GroupLaunchInfo>;
const int num_devices = device_traces.size();
std::vector<DeviceLaunchInfo> per_device_launch_info(num_devices);
XPlaneVisitor host_plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
host_plane.ForEachLine([&](const XLineVisitor& line) {
if (IsDerivedThreadId(line.Id())) return;
line.ForEachEvent([&](const XEventVisitor& event) {
if (absl::StartsWith(event.Name(), "cu")) return;
LaunchEventStats stats(&event);
if (stats.group_id.has_value() && stats.IsLaunch() &&
0 <= *stats.device_id && *stats.device_id < num_devices) {
GroupLaunchInfo& group_launch_info =
per_device_launch_info[*stats.device_id][*stats.group_id];
group_launch_info.AddEventTimespan(event.GetTimespan());
}
});
});
int64_t host_plane_start = GetStartTimestampNs(*host_trace);
for (int i = 0; i < num_devices; ++i) {
if (per_device_launch_info[i].empty()) continue;
int64_t device_plane_start = GetStartTimestampNs(*device_traces[i]);
XPlaneBuilder device_plane(device_traces[i]);
const XStatMetadata& group_id_stat_metadata =
*device_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kGroupId));
const XStatMetadata& num_launches_stat_metadata =
*device_plane.GetOrCreateStatMetadata("num_launches");
const XStatMetadata& max_launch_time_us_stat_metadata =
*device_plane.GetOrCreateStatMetadata("max_launch_time_us");
const XStatMetadata& avg_launch_time_us_stat_metadata =
*device_plane.GetOrCreateStatMetadata("avg_launch_time_us");
XLineBuilder launch_line =
device_plane.GetOrCreateLine(kThreadIdKernelLaunch);
launch_line.SetName(kKernelLaunchLineName);
launch_line.SetTimestampNs(std::min(device_plane_start, host_plane_start));
for (const auto& kv : per_device_launch_info[i]) {
int64_t group_id = kv.first;
const GroupLaunchInfo& group_info = kv.second;
if (const tsl::profiler::GroupMetadata* group_metadata =
gtl::FindOrNull(group_metadata_map, group_id)) {
XEventBuilder device_event =
launch_line.AddEvent(*device_plane.GetOrCreateEventMetadata(
absl::StrCat("Launch Stats for ", group_metadata->name)));
device_event.SetTimespan(group_info.timespan);
device_event.AddStatValue(group_id_stat_metadata, group_id);
device_event.AddStatValue(num_launches_stat_metadata,
group_info.stat.count());
device_event.AddStatValue(
max_launch_time_us_stat_metadata,
tsl::profiler::PicoToMicro(group_info.stat.max()));
device_event.AddStatValue(
avg_launch_time_us_stat_metadata,
tsl::profiler::PicoToMicro(group_info.stat.avg()));
}
}
}
}
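// Top-level entry point: builds an HloModuleMap from the protos embedded in
// the XSpace, uses it to resolve HLO ops back to framework op names and
// source info, derives step/op/module lines for every GPU plane, adds launch
// stats from the host plane, and derives stat-based lines for TensorCore
// planes.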
void GenerateDerivedTimeLines(
const tsl::profiler::GroupMetadataMap& group_metadata_map, XSpace* space) {
HloModuleMap hlo_module_map;
{
HloProtoMap hlo_proto_map;
hlo_proto_map.AddHloProtosFromXSpace(*space);
for (const auto& [program_id, hlo_proto] : hlo_proto_map) {
AddHloProto(hlo_module_map, program_id, *hlo_proto);
}
}
auto symbol_resolver = [&](absl::optional<uint64_t> program_id,
absl::string_view hlo_module,
absl::string_view hlo_op) -> Symbol {
Symbol output;
const auto* hlo_instruction =
GetHloInstruction(hlo_module_map, program_id, hlo_op);
if (hlo_instruction != nullptr) {
output.tf_op_name = hlo_instruction->op_full_name();
output.source_info = std::string(hlo_instruction->source_info());
}
return output;
};
std::vector<XPlane*> device_planes =
FindMutablePlanesWithPrefix(space, kGpuPlanePrefix);
for (XPlane* plane : device_planes) {
DeriveStepEventsFromGroups(group_metadata_map, plane);
DeriveEventsFromAnnotations(symbol_resolver, plane);
}
const XPlane* host_plane = FindPlaneWithName(*space, kHostThreadsPlaneName);
if (host_plane) {
DeriveEventsFromHostTrace(host_plane, group_metadata_map, device_planes);
}
for (XPlane* plane : FindMutableTensorCorePlanes(space)) {
DeriveLinesFromStats(plane);
SortXPlane(plane);
}
}
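// Derives TF-op, name-scope, source, and host-offload lines purely from
// event and metadata stats (used above for TensorCore planes, which carry no
// kernel annotations). Events marked async are skipped so that nesting on
// the derived lines stays well-formed.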
void DeriveLinesFromStats(XPlane* device_trace) {
XPlaneVisitor plane_visitor =
tsl::profiler::CreateTfXPlaneVisitor(device_trace);
XPlaneBuilder plane_builder(device_trace);
int64_t start_timestamp_ns = GetStartTimestampNs(*device_trace);
DerivedXLineBuilder tf_ops(
&plane_builder, tensorflow::profiler::kThreadIdTfOp,
tensorflow::profiler::kTensorFlowOpLineName, start_timestamp_ns, {});
DerivedXLineBuilder tf_name_scope(
&plane_builder, tensorflow::profiler::kThreadIdTfNameScope,
tensorflow::profiler::kTensorFlowNameScopeLineName, start_timestamp_ns,
{&tf_ops});
DerivedXLineBuilder source(
&plane_builder, tensorflow::profiler::kThreadIdSource,
tensorflow::profiler::kSourceLineName, start_timestamp_ns, {});
HostOffloadEventProcessor host_offload_event_processor(&plane_builder,
start_timestamp_ns);
for (const XEventVisitor& event :
GetSortedEvents<XEventVisitor>(plane_visitor, true)) {
tsl::profiler::Timespan event_span = event.GetTimespan();
std::optional<absl::string_view> tf_op_name;
std::optional<absl::string_view> source_info;
std::optional<uint64_t> group_id;
std::optional<uint64_t> is_async;
auto for_each_stat = [&](const XStatVisitor& stat) {
if (stat.Type() == StatType::kTfOp) {
tf_op_name = stat.StrOrRefValue();
} else if (stat.Type() == StatType::kGroupId) {
group_id = stat.IntOrUintValue();
} else if (stat.Type() == StatType::kSourceInfo) {
source_info = stat.StrOrRefValue();
} else if (stat.Type() == StatType::kIsAsync) {
is_async = stat.IntOrUintValue();
}
};
event.Metadata().ForEachStat(for_each_stat);
event.ForEachStat(for_each_stat);
if (is_async && *is_async) continue;
if (tf_op_name && !tf_op_name->empty()) {
ProcessTfOpEvent(*tf_op_name, event_span, group_id, plane_builder,
tf_name_scope, tf_ops);
}
if (source_info && !source_info->empty()) {
source.ExpandOrAddEvent(
*plane_builder.GetOrCreateEventMetadata(*source_info), event_span,
group_id);
}
if (host_offload_event_processor.IsHostOffloadOpName(event)) {
host_offload_event_processor.ProcessHostOffloadOpEvent(event, group_id);
}
}
RemoveEmptyLines(device_trace);
}
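// For host threads running XLA:CPU programs, derives per-thread module,
// name-scope, and TF-op lines into a scratch plane and then merges the
// result back into the host plane.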
void DeriveLinesForXlaCpuOps(XPlane* host_trace) {
if (host_trace == nullptr ||
!absl::StartsWith(host_trace->name(), kHostThreadsPlaneName))
return;
XPlaneVisitor visitor = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
XPlane destination_plane;
XPlaneBuilder plane_builder(&destination_plane);
int64_t line_id = tsl::profiler::kThreadIdHostXlaRegionStart;
visitor.ForEachLine([&](const XLineVisitor& line) {
int64_t start_timestamp_ns = line.TimestampNs();
DerivedXLineBuilder tf_ops(
&plane_builder, line_id++,
absl::StrCat(line.Name(), "-",
tensorflow::profiler::kTensorFlowOpLineName),
start_timestamp_ns, {});
DerivedXLineBuilder tf_name_scope(
&plane_builder, line_id++,
absl::StrCat(line.Name(), "-",
tensorflow::profiler::kTensorFlowNameScopeLineName),
start_timestamp_ns, {&tf_ops});
DerivedXLineBuilder xla_cpu_ops(
&plane_builder, line_id++,
absl::StrCat(line.Name(), "-", tsl::profiler::kXlaModuleLineName),
start_timestamp_ns, {});
line.ForEachEvent([&](const XEventVisitor& event) {
std::optional<std::string> hlo_module_name;
std::optional<std::string> framework_op_name;
event.ForEachStat([&](const XStatVisitor& stat) {
if (!stat.Type().has_value()) return;
switch (stat.Type().value()) {
case StatType::kHloModule:
hlo_module_name = stat.StrOrRefValue();
break;
case StatType::kTfOp:
framework_op_name = stat.StrOrRefValue();
break;
}
});
if (hlo_module_name.has_value()) {
xla_cpu_ops.ExpandOrAddEvent(
*plane_builder.GetOrCreateEventMetadata(*hlo_module_name),
event.GetTimespan(), std::nullopt);
if (framework_op_name.has_value()) {
ProcessTfOpEvent(*framework_op_name, event.GetTimespan(),
std::nullopt, plane_builder, tf_name_scope, tf_ops);
}
}
});
});
RemoveEmptyLines(&destination_plane);
MergePlanes(destination_plane, host_trace);
}
}
} | #include "tensorflow/core/profiler/utils/derived_timeline.h"
#include <cstdint>
#include <map>
#include <optional>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/group_events.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/trace_utils.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
namespace {
TEST(DerivedTimelineTest, EmptySpaceTest) {
XSpace space;
tsl::profiler::GroupMetadataMap group_metadata_map;
GenerateDerivedTimeLines(group_metadata_map, &space);
EXPECT_EQ(space.planes_size(), 0);
}
TEST(DerivedTimelineTest, HloModuleNameTest) {
const absl::string_view kHloModuleName = "hlo_module";
const absl::string_view kKernelDetails = "kernel_details";
XSpace space;
tsl::profiler::GroupMetadataMap group_metadata_map;
XPlane* plane = GetOrCreateGpuXPlane(&space, 0);
XPlaneBuilder plane_builder(plane);
auto line_builder = plane_builder.GetOrCreateLine(0);
CreateXEvent(&plane_builder, &line_builder, "op1", 0, 100,
{{StatType::kHloModule, kHloModuleName},
{StatType::kKernelDetails, kKernelDetails}});
CreateXEvent(&plane_builder, &line_builder, "op2", 200, 300,
{{StatType::kHloModule, kHloModuleName},
{StatType::kKernelDetails, kKernelDetails}});
GenerateDerivedTimeLines(group_metadata_map, &space);
XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(plane);
EXPECT_EQ(plane_visitor.NumLines(), 2);
plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) {
if (line_visitor.Id() == 0) return;
EXPECT_EQ(line_visitor.Id(), kThreadIdHloModule);
EXPECT_EQ(line_visitor.NumEvents(), 1);
line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) {
EXPECT_EQ(event_visitor.Name(), kHloModuleName);
});
});
}
TEST(DerivedTimelineTest, NoHloModuleNameTest) {
const absl::string_view kKernelDetails = "kernel_details";
const uint64_t kCudaGraphExecId = 1;
XSpace space;
tsl::profiler::GroupMetadataMap group_metadata_map;
XPlane& plane = *GetOrCreateGpuXPlane(&space, 0);
XPlaneBuilder plane_builder(&plane);
auto line_builder = plane_builder.GetOrCreateLine(0);
CreateXEvent(&plane_builder, &line_builder, "op1", 0, 100,
{{StatType::kKernelDetails, kKernelDetails}});
CreateXEvent(&plane_builder, &line_builder, "op2", 200, 300,
{{StatType::kKernelDetails, kKernelDetails}});
CreateXEvent(&plane_builder, &line_builder, "op3", 500, 100,
{{StatType::kCudaGraphExecId, kCudaGraphExecId}});
GenerateDerivedTimeLines(group_metadata_map, &space);
XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(&plane);
EXPECT_EQ(plane_visitor.NumLines(), 1);
plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) {
if (line_visitor.Id() == 0) return;
EXPECT_EQ(line_visitor.Id(), kThreadIdHloModule);
EXPECT_EQ(line_visitor.NumEvents(), 0);
});
}
TEST(DerivedTimelineTest, TfOpLineTest) {
const absl::string_view kTfOpName = "mul:Mul";
const absl::string_view kKernelDetails = "kernel_details";
const uint64_t kCudaGraphExecId = 1;
XSpace space;
tsl::profiler::GroupMetadataMap group_metadata_map;
XPlane* plane = GetOrCreateGpuXPlane(&space, 0);
XPlaneBuilder plane_builder(plane);
auto line_builder = plane_builder.GetOrCreateLine(0);
CreateXEvent(&plane_builder, &line_builder, "op1", 0, 100,
{{StatType::kTfOp, kTfOpName},
{StatType::kKernelDetails, kKernelDetails}});
CreateXEvent(&plane_builder, &line_builder, "op2", 200, 300,
{{StatType::kTfOp, kTfOpName},
{StatType::kKernelDetails, kKernelDetails}});
CreateXEvent(&plane_builder, &line_builder, "op3", 500, 100,
{{StatType::kTfOp, kTfOpName},
{StatType::kCudaGraphExecId, kCudaGraphExecId}});
GenerateDerivedTimeLines(group_metadata_map, &space);
XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(plane);
EXPECT_EQ(plane_visitor.NumLines(), 2);
plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) {
if (line_visitor.Id() == 0) return;
EXPECT_EQ(line_visitor.Id(), kThreadIdTfOp);
EXPECT_EQ(line_visitor.NumEvents(), 1);
line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) {
EXPECT_EQ(event_visitor.Name(), kTfOpName);
EXPECT_EQ(event_visitor.OffsetPs(), 0);
EXPECT_EQ(event_visitor.DurationPs(), 600);
});
});
}
TEST(DerivedTimelineTest, DependencyTest) {
constexpr int64_t kFirstGroupId = 0;
constexpr int64_t kSecondGroupId = 1;
const absl::string_view kTfOpName = "mul:Mul";
const absl::string_view kKernelDetails = "kernel_details";
XSpace space;
tsl::profiler::GroupMetadataMap group_metadata_map(
{{0, {"train 0"}}, {1, {"train 1"}}});
XPlane* plane = GetOrCreateGpuXPlane(&space, 0);
XPlaneBuilder plane_builder(plane);
auto line_builder = plane_builder.GetOrCreateLine(0);
CreateXEvent(&plane_builder, &line_builder, "op1", 0, 100,
{{StatType::kGroupId, kFirstGroupId},
{StatType::kTfOp, kTfOpName},
{StatType::kKernelDetails, kKernelDetails}});
CreateXEvent(&plane_builder, &line_builder, "op2", 200, 300,
{{StatType::kGroupId, kSecondGroupId},
{StatType::kTfOp, kTfOpName},
{StatType::kKernelDetails, kKernelDetails}});
GenerateDerivedTimeLines(group_metadata_map, &space);
XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(plane);
EXPECT_EQ(plane_visitor.NumLines(), 3);
plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) {
if (line_visitor.Id() == 0) return;
EXPECT_TRUE(line_visitor.Id() == kThreadIdStepInfo ||
line_visitor.Id() == kThreadIdTfOp);
EXPECT_EQ(line_visitor.NumEvents(), 2);
});
}
TEST(DerivedTimelineTest, TfOpNameScopeTest) {
const absl::string_view kTfOpName = "scope1/scope2/mul:Mul";
const absl::string_view kKernelDetails = "kernel_details";
XSpace space;
tsl::profiler::GroupMetadataMap group_metadata_map;
XPlane* plane = GetOrCreateGpuXPlane(&space, 0);
XPlaneBuilder plane_builder(plane);
auto line_builder = plane_builder.GetOrCreateLine(0);
CreateXEvent(&plane_builder, &line_builder, "op1", 0, 100,
{{StatType::kTfOp, kTfOpName},
{StatType::kKernelDetails, kKernelDetails}});
CreateXEvent(&plane_builder, &line_builder, "op2", 200, 300,
{{StatType::kTfOp, kTfOpName},
{StatType::kKernelDetails, kKernelDetails}});
GenerateDerivedTimeLines(group_metadata_map, &space);
XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(plane);
EXPECT_EQ(plane_visitor.NumLines(), 3);
plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) {
int64_t line_id = line_visitor.Id();
if (line_id == 0) {
return;
} else if (line_id == kThreadIdTfNameScope) {
EXPECT_EQ(line_visitor.NumEvents(), 2);
line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) {
EXPECT_EQ(event_visitor.OffsetPs(), 0);
EXPECT_EQ(event_visitor.DurationPs(), 500);
});
} else if (line_id == kThreadIdTfOp) {
EXPECT_EQ(line_visitor.NumEvents(), 1);
line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) {
EXPECT_EQ(event_visitor.Name(), kTfOpName);
EXPECT_EQ(event_visitor.OffsetPs(), 0);
EXPECT_EQ(event_visitor.DurationPs(), 500);
});
}
});
}
TEST(DerivedTimelineTest, TfOpNameScopeShrinkTest) {
{
XSpace space;
tsl::profiler::GroupMetadataMap group_metadata_map;
XPlane* plane = GetOrCreateGpuXPlane(&space, 0);
XPlaneBuilder plane_builder(plane);
auto line_builder = plane_builder.GetOrCreateLine(0);
CreateXEvent(&plane_builder, &line_builder, "op1", 0, 10000,
{{StatType::kTfOp, "a/b/c/Add:Add"},
{StatType::kKernelDetails, "blah"}});
CreateXEvent(
&plane_builder, &line_builder, "op2", 20000, 30000,
{{StatType::kTfOp, "a/d/Mul:Mul"}, {StatType::kKernelDetails, "blah"}});
GenerateDerivedTimeLines(group_metadata_map, &space);
XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(plane);
EXPECT_EQ(plane_visitor.NumLines(), 3);
plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) {
int64_t line_id = line_visitor.Id();
if (line_id == 0) {
return;
} else if (line_id == kThreadIdTfNameScope) {
EXPECT_EQ(line_visitor.NumEvents(), 4);
std::map<absl::string_view, uint64_t> durations;
line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) {
durations[event_visitor.Name()] = event_visitor.DurationPs();
});
EXPECT_EQ(durations["a"], 50000);
EXPECT_EQ(durations["b"], 10000);
EXPECT_EQ(durations["c"], 9000);
EXPECT_EQ(durations["d"], 30000);
}
});
}
{
XSpace space;
tsl::profiler::GroupMetadataMap group_metadata_map;
XPlane* plane = GetOrCreateGpuXPlane(&space, 0);
XPlaneBuilder plane_builder(plane);
auto line_builder = plane_builder.GetOrCreateLine(0);
CreateXEvent(&plane_builder, &line_builder, "op1", 0, 10000,
{{StatType::kTfOp, "a/b/c/d/e/Add:Add"},
{StatType::kKernelDetails, "blah"}});
CreateXEvent(&plane_builder, &line_builder, "op2", 10000, 2000,
{{StatType::kTfOp, "a/b/c/d/f/Sub:Sub"},
{StatType::kKernelDetails, "blah"}});
CreateXEvent(
&plane_builder, &line_builder, "op3", 20000, 30000,
{{StatType::kTfOp, "a/g/Mul:Mul"}, {StatType::kKernelDetails, "blah"}});
GenerateDerivedTimeLines(group_metadata_map, &space);
XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(plane);
EXPECT_EQ(plane_visitor.NumLines(), 3);
plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) {
int64_t line_id = line_visitor.Id();
if (line_id == 0) {
return;
} else if (line_id == kThreadIdTfNameScope) {
EXPECT_EQ(line_visitor.NumEvents(), 7);
std::map<absl::string_view, uint64_t> durations;
line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) {
durations[event_visitor.Name()] = event_visitor.DurationPs();
});
for (const auto& [name, duration] : durations) {
LOG(ERROR) << name << ": " << duration;
}
EXPECT_EQ(durations["a"], 50000);
EXPECT_EQ(durations["b"], 12000);
EXPECT_EQ(durations["c"], 11000);
EXPECT_EQ(durations["d"], 11000);
EXPECT_EQ(durations["e"], 10000);
EXPECT_EQ(durations["f"], 1000);
EXPECT_EQ(durations["g"], 30000);
}
});
}
}
TEST(DerivedTimelineTest, XloOpHasCudaGraphStats) {
constexpr absl::string_view kModuleName = "module";
constexpr absl::string_view kHloOpName = "op_level_2";
constexpr absl::string_view kKernelDetails = "kernel_details";
constexpr int64_t kGroupIdValue = 1;
constexpr int64_t kCorrelationIdValue = 10000;
const uint64_t kCudaGraphIdValue = 20;
XSpace space;
tsl::profiler::GroupMetadataMap group_metadata_map;
XPlane& plane = *GetOrCreateGpuXPlane(&space, 0);
XPlaneBuilder plane_builder(&plane);
auto line_builder = plane_builder.GetOrCreateLine(0);
CreateXEvent(&plane_builder, &line_builder, "op1", 0, 100,
{{StatType::kKernelDetails, kKernelDetails},
{StatType::kGroupId, kGroupIdValue},
{StatType::kHloModule, kModuleName},
{StatType::kHloOp, kHloOpName},
{StatType::kCorrelationId, kCorrelationIdValue},
{StatType::kCudaGraphId, kCudaGraphIdValue}});
CreateXEvent(&plane_builder, &line_builder, "op2", 200, 300,
{{StatType::kKernelDetails, kKernelDetails},
{StatType::kGroupId, kGroupIdValue},
{StatType::kHloModule, kModuleName},
{StatType::kHloOp, kHloOpName},
{StatType::kCorrelationId, kCorrelationIdValue},
{StatType::kCudaGraphId, kCudaGraphIdValue}});
GenerateDerivedTimeLines(group_metadata_map, &space);
size_t num_hlo_op_line = 0;
size_t num_events = 0;
std::optional<XStatVisitor> correlation_id;
std::optional<XStatVisitor> cuda_graph_id;
XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(&plane);
plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) {
if (line_visitor.Id() == kThreadIdHloOp) {
num_hlo_op_line++;
if (num_hlo_op_line == 1) {
num_events = line_visitor.NumEvents();
line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) {
correlation_id = event_visitor.GetStat(StatType::kCorrelationId);
cuda_graph_id = event_visitor.GetStat(StatType::kCudaGraphId);
});
}
}
});
EXPECT_EQ(num_hlo_op_line, 1);
EXPECT_EQ(num_events, 1);
ASSERT_TRUE(correlation_id.has_value());
EXPECT_EQ(correlation_id->IntValue(), kCorrelationIdValue);
ASSERT_TRUE(cuda_graph_id.has_value());
EXPECT_EQ(cuda_graph_id->UintValue(), kCudaGraphIdValue);
}
TEST(DerivedTimelineTest, DeriveLinesForXlaCpuOps) {
XPlane xplane;
XPlaneBuilder plane_builder(&xplane);
plane_builder.SetName(tsl::profiler::kHostThreadsPlaneName);
absl::string_view main_line_name = "main";
auto line_builder = plane_builder.GetOrCreateLine(0);
line_builder.SetName(main_line_name);
CreateXEvent(&plane_builder, &line_builder, "op1", 0, 100,
{{StatType::kHloModule, "Module1"}});
CreateXEvent(&plane_builder, &line_builder, "op2", 200, 400,
{{StatType::kHloModule, "Module2"}});
DeriveLinesForXlaCpuOps(&xplane);
XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(&xplane);
EXPECT_EQ(plane_visitor.NumLines(), 2);
plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) {
if (line_visitor.Name() == main_line_name) return;
line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) {
if (event_visitor.Name() == "Module1") {
EXPECT_EQ(event_visitor.DurationPs(), 100);
EXPECT_EQ(event_visitor.OffsetPs(), 0);
} else if (event_visitor.Name() == "Module2") {
EXPECT_EQ(event_visitor.DurationPs(), 400);
EXPECT_EQ(event_visitor.OffsetPs(), 200);
} else {
FAIL() << "Found Event " << event_visitor.Name();
}
});
});
}
}  // namespace
}  // namespace profiler
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/derived_timeline.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/derived_timeline_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
49a4e579-c145-4cc7-ada9-64353d57ef67 | cpp | tensorflow/tensorflow | xprof_gpu_cost_analysis | tensorflow/core/profiler/utils/xprof_gpu_cost_analysis.cc | tensorflow/core/profiler/utils/xprof_gpu_cost_analysis_test.cc | #include "tensorflow/core/profiler/utils/xprof_gpu_cost_analysis.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <vector>
#include "absl/status/status.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/hlo_cost_analysis.h"
namespace tensorflow {
namespace profiler {
namespace {
std::vector<uint32_t> GetInputBitwidths(const xla::HloInstruction& hlo) {
std::vector<uint32_t> input_bitwidths;
for (const auto& operand : hlo.operands()) {
switch (operand->shape().element_type()) {
case xla::PRIMITIVE_TYPE_INVALID:
case xla::TUPLE:
case xla::OPAQUE_TYPE:
case xla::TOKEN:
break;
default:
input_bitwidths.push_back(
xla::primitive_util::BitWidth(operand->shape().element_type()));
}
}
return input_bitwidths;
}
}  // namespace
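// Adjusts the modeled flop count for low-precision inputs: with 8-bit
// operands the device flop rate is treated as 2x, and with 4-bit operands as
// 4x. kDeviceFlopsAdjustment records the flops to subtract from the model
// count; e.g. an s8 GEMM with model flops F gets an adjustment of
// F - F/2 = F/2 (see the S8GemmAdjustment test below).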
absl::Status XProfGpuCostAnalysis::Postprocess(const xla::HloInstruction* hlo) {
if (hlo == nullptr) {
return absl::OkStatus();
}
uint32_t flop_rate_adjustment = 1;
float model_flops = current_properties_[kFlopsKey];
std::vector<uint32_t> input_bitwidths = GetInputBitwidths(*hlo);
if (!input_bitwidths.empty()) {
int max_input_bitwidth =
*std::max_element(input_bitwidths.begin(), input_bitwidths.end());
if (model_flops) {
switch (max_input_bitwidth) {
case 8:
flop_rate_adjustment = 2;
break;
case 4:
flop_rate_adjustment = 4;
break;
}
}
}
current_properties_[kDeviceFlopsAdjustment] =
model_flops - model_flops / flop_rate_adjustment;
return xla::gpu::GpuHloCostAnalysis::Postprocess(hlo);
}
std::unique_ptr<xla::HloCostAnalysis>
XProfGpuCostAnalysis::CreateNestedCostAnalysis() {
return std::make_unique<XProfGpuCostAnalysis>(options_);
}
int64_t XProfGpuCostAnalysis::GetDeviceFlopsAdjustment(
const xla::HloInstruction& hlo) {
return GetPropertyForHlo(hlo, kDeviceFlopsAdjustment, hlo_properties_);
}
}  // namespace profiler
} | #include "tensorflow/core/profiler/utils/xprof_gpu_cost_analysis.h"
#include <cstdint>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace profiler {
class XprofGpuHloCostAnalysisTest : public xla::HloTestBase {
xla::HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const xla::Shape& shape) {
constexpr int64_t kPointerSize = 8;
return xla::ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
public:
xla::HloCostAnalysis::Options options_{
ShapeSizeBytesFunction(),
{},
{},
true};
XProfGpuCostAnalysis analysis_{options_};
XprofGpuHloCostAnalysisTest() : xla::HloTestBase() {}
};
TEST_F(XprofGpuHloCostAnalysisTest, Fp16GemmNoAdjustment) {
absl::string_view hlo_string = R"(
HloModule r
ENTRY e {
arg0 = f16[65536,32800] parameter(0)
arg1 = f16[32800,32] parameter(1)
gemm = (f16[65536,32], s8[0]) custom-call(arg0, arg1),
custom_call_target="__cublas$gemm",
backend_config="{
\"gemm_backend_config\": {
\"alpha_real\":1,
\"beta\":0,
\"dot_dimension_numbers\":{
\"lhs_contracting_dimensions\":[\"1\"],
\"rhs_contracting_dimensions\":[\"0\"],
\"lhs_batch_dimensions\":[],
\"rhs_batch_dimensions\":[]
},
\"alpha_imag\":0,
\"precision_config\":{
\"operand_precision\":[\"DEFAULT\",\"DEFAULT\"]
},
\"epilogue\":\"DEFAULT\"
}
}"
ROOT get-tuple-element = f16[65536,32]
get-tuple-element((f16[65536,32], s8[0]) gemm), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
xla::HloComputation* comp = module->entry_computation();
const xla::HloInstruction* fp16gemm = comp->GetInstructionWithName("gemm");
int64_t gold_flops = 65536LL * 32800 * 32 * 2;
EXPECT_EQ(analysis_.flop_count(*fp16gemm), gold_flops);
EXPECT_EQ(analysis_.GetDeviceFlopsAdjustment(*fp16gemm), 0);
}
TEST_F(XprofGpuHloCostAnalysisTest, S8GemmAdjustment) {
absl::string_view hlo_string = R"(
HloModule r
ENTRY e {
arg0 = s8[65536,32800] parameter(0)
arg1 = s8[32800,32] parameter(1)
gemm = (s32[65536,32], s8[0]) custom-call(arg0, arg1),
custom_call_target="__cublas$gemm",
backend_config="{
\"gemm_backend_config\": {
\"alpha_real\":1,
\"beta\":0,
\"dot_dimension_numbers\":{
\"lhs_contracting_dimensions\":[\"1\"],
\"rhs_contracting_dimensions\":[\"0\"],
\"lhs_batch_dimensions\":[],
\"rhs_batch_dimensions\":[]
},
\"alpha_imag\":0,
\"precision_config\":{
\"operand_precision\":[\"DEFAULT\",\"DEFAULT\"]
},
\"epilogue\":\"DEFAULT\"
}
}"
ROOT get-tuple-element = s32[65536,32]
get-tuple-element((s32[65536,32], s8[0]) gemm), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
xla::HloComputation* comp = module->entry_computation();
const xla::HloInstruction* s8gemm = comp->GetInstructionWithName("gemm");
int64_t gold_flops = 65536LL * 32800 * 32 * 2;
EXPECT_EQ(analysis_.flop_count(*s8gemm), gold_flops);
EXPECT_EQ(analysis_.GetDeviceFlopsAdjustment(*s8gemm), gold_flops / 2);
}
}  // namespace profiler
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/xprof_gpu_cost_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/xprof_gpu_cost_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
944a5e29-2e4e-4401-9e0f-8585788d40e5 | cpp | tensorflow/tensorflow | kernel_stats_utils | tensorflow/core/profiler/utils/kernel_stats_utils.cc | tensorflow/core/profiler/utils/kernel_stats_utils_test.cc | #include "tensorflow/core/profiler/utils/kernel_stats_utils.h"
#include <algorithm>
#include <string>
#include <tuple>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/kernel_stats.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
const int kMaxNumOfKernels = 1000;
constexpr absl::string_view kTensorCoreKernelNamePatterns[] = {
"16816",
"c1688",
"conv1x1",
"conv2d_c1_k1",
"dgrad_1x1_stride_2x2",
"direct_group",
"first_layer_wgrad_kernel",
"h1688",
"h884",
"hmma",
"i16832",
"i8816",
"s884",
"s1688",
"xmma_gemm",
"xmma_implicit_gemm",
"xmma_sparse_conv",
"xmma_sparse_gemm",
"xmma_warp_specialized_implicit_gemm"};
}  // namespace
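// Parses a kernel-details string of space- or newline-separated "key:value"
// pairs into a KernelReport. Recognized keys are regs, static_shared,
// dynamic_shared, block, grid, and occ_pct; block and grid take
// comma-separated triples. An illustrative input:
//   "regs:32 static_shared:128 dynamic_shared:256 block:32,8,4 grid:3,2,1"
// Unrecognized tokens are ignored, and block/grid dims default to 1,1,1.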
void ParseKernelLaunchParams(absl::string_view xstat_kernel_details,
KernelReport* kernel) {
const std::vector<absl::string_view> params =
absl::StrSplit(xstat_kernel_details, absl::ByAnyChar(" \n"));
constexpr uint32 kNumDimensions = 3;
for (uint32 dim = 0; dim < kNumDimensions; ++dim) {
kernel->add_block_dim(1);
kernel->add_grid_dim(1);
}
for (const auto& param : params) {
const std::vector<absl::string_view> key_value = absl::StrSplit(param, ':');
if (key_value.size() != 2) {
continue;
}
absl::string_view key = key_value[0];
absl::string_view value_str = key_value[1];
uint32 value = 0;
double pct = 0.0;
if (key == "regs" && absl::SimpleAtoi(value_str, &value)) {
kernel->set_registers_per_thread(value);
} else if (key == "static_shared" && absl::SimpleAtoi(value_str, &value)) {
kernel->set_static_shmem_bytes(value);
} else if (key == "dynamic_shared" && absl::SimpleAtoi(value_str, &value)) {
kernel->set_dynamic_shmem_bytes(value);
} else if (key == "block") {
const std::vector<absl::string_view>& block =
absl::StrSplit(value_str, ',');
uint32 tmp[3];
if (block.size() == 3 && absl::SimpleAtoi(block[0], &tmp[0]) &&
absl::SimpleAtoi(block[1], &tmp[1]) &&
absl::SimpleAtoi(block[2], &tmp[2])) {
std::copy_n(tmp, 3, kernel->mutable_block_dim()->begin());
}
} else if (key == "grid") {
const std::vector<absl::string_view>& grid =
absl::StrSplit(value_str, ',');
uint32 tmp[3];
if (grid.size() == 3 && absl::SimpleAtoi(grid[0], &tmp[0]) &&
absl::SimpleAtoi(grid[1], &tmp[1]) &&
absl::SimpleAtoi(grid[2], &tmp[2])) {
std::copy_n(tmp, 3, kernel->mutable_grid_dim()->begin());
}
} else if (key == "occ_pct" && absl::SimpleAtod(value_str, &pct)) {
kernel->set_occupancy_pct(pct);
}
}
}
bool IsKernelUsingTensorCore(absl::string_view kernel_name) {
VLOG(1) << "kernel name: " << kernel_name;
for (absl::string_view pattern : kTensorCoreKernelNamePatterns) {
if (absl::StrContains(kernel_name, pattern)) {
return true;
}
}
return false;
}
bool IsOpTensorCoreEligible(absl::string_view tf_op_name) {
return false
|| absl::EndsWith(tf_op_name, "Conv2D")
|| absl::EndsWith(tf_op_name, "Conv2DBackpropFilter")
|| absl::EndsWith(tf_op_name, "Conv2DBackpropInput")
|| absl::EndsWith(tf_op_name, "Conv3D")
|| absl::EndsWith(tf_op_name, "DepthwiseConv2dNative")
|| absl::EndsWith(tf_op_name, "DepthwiseConv2dNativeBackpropFilter")
|| absl::EndsWith(tf_op_name, "DepthwiseConv2dNativeBackpropInput")
|| absl::StrContains(tf_op_name, "BatchMatMul")
|| absl::EndsWith(tf_op_name, "/MatMul")
|| absl::EndsWith(tf_op_name, "FusedMatMul")
|| absl::EndsWith(tf_op_name, "/CudnnRNN")
|| absl::StrContains(tf_op_name, "CudnnRNNV")
|| absl::StrContains(tf_op_name, "CudnnRNNForward")
|| absl::StrContains(tf_op_name, "CudnnRNNBackprop")
|| absl::EndsWith(tf_op_name, "XlaDot")
|| absl::EndsWith(tf_op_name, "XlaDotV2");
}
bool IsEinsumTensorCoreEligible(absl::string_view equation) {
if (equation.empty()) {
return false;
}
const std::vector<absl::string_view> input_output =
absl::StrSplit(equation, "->");
if (input_output.size() != 2) {
return false;
}
const std::vector<absl::string_view> lhs_rhs =
absl::StrSplit(input_output[0], ',');
return lhs_rhs.size() == 2;
}
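// The comparators below order and deduplicate KernelReports by every launch
// attribute (name, grid/block dims, registers, shared memory, tensor-core
// flags, op name) while deliberately excluding duration and occurrence
// statistics, so reports that differ only in timing aggregate together.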
bool KernelReportLessThanComparator::operator()(const KernelReport& lhs,
const KernelReport& rhs) const {
auto lhs_tuple = std::make_tuple(
lhs.name(),
lhs.grid_dim(0),
lhs.grid_dim(1),
lhs.grid_dim(2),
lhs.block_dim(0),
lhs.block_dim(1),
lhs.block_dim(2),
lhs.registers_per_thread(),
lhs.static_shmem_bytes(),
lhs.dynamic_shmem_bytes(),
lhs.is_kernel_using_tensor_core(),
lhs.is_op_tensor_core_eligible(),
lhs.op_name());
auto rhs_tuple = std::make_tuple(
rhs.name(),
rhs.grid_dim(0),
rhs.grid_dim(1),
rhs.grid_dim(2),
rhs.block_dim(0),
rhs.block_dim(1),
rhs.block_dim(2),
rhs.registers_per_thread(),
rhs.static_shmem_bytes(),
rhs.dynamic_shmem_bytes(),
rhs.is_kernel_using_tensor_core(),
rhs.is_op_tensor_core_eligible(),
rhs.op_name());
return lhs_tuple < rhs_tuple;
}
bool KernelReportEqualToComparator::operator()(const KernelReport& lhs,
const KernelReport& rhs) const {
return (
lhs.is_kernel_using_tensor_core() == rhs.is_kernel_using_tensor_core() &&
lhs.is_op_tensor_core_eligible() == rhs.is_op_tensor_core_eligible() &&
lhs.block_dim(0) == rhs.block_dim(0) &&
lhs.block_dim(1) == rhs.block_dim(1) &&
lhs.block_dim(2) == rhs.block_dim(2) &&
lhs.grid_dim(0) == rhs.grid_dim(0) &&
lhs.grid_dim(1) == rhs.grid_dim(1) &&
lhs.grid_dim(2) == rhs.grid_dim(2) &&
lhs.registers_per_thread() == rhs.registers_per_thread() &&
lhs.static_shmem_bytes() == rhs.static_shmem_bytes() &&
lhs.dynamic_shmem_bytes() == rhs.dynamic_shmem_bytes() &&
lhs.name() == rhs.name() &&
lhs.op_name() == rhs.op_name());
}
void SortAndKeepTopKDurationKernelReportsInDb(KernelStatsDb* kernel_stats_db) {
auto comp = [](const KernelReport& lhs, const KernelReport& rhs) {
return lhs.total_duration_ns() > rhs.total_duration_ns() ||
(lhs.total_duration_ns() == rhs.total_duration_ns() &&
KernelReportLessThanComparator()(lhs, rhs));
};
if (kernel_stats_db->reports_size() > kMaxNumOfKernels) {
std::partial_sort(
kernel_stats_db->mutable_reports()->begin(),
kernel_stats_db->mutable_reports()->begin() + kMaxNumOfKernels,
kernel_stats_db->mutable_reports()->end(), comp);
kernel_stats_db->mutable_reports()->erase(
kernel_stats_db->mutable_reports()->begin() + kMaxNumOfKernels,
kernel_stats_db->mutable_reports()->end());
} else {
std::sort(kernel_stats_db->mutable_reports()->begin(),
kernel_stats_db->mutable_reports()->end(), comp);
}
}
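// Copies at most kMaxNumOfKernels reports from `reports` into `dst`, ordered
// by descending total duration (ties broken by
// KernelReportLessThanComparator), and materializes the aggregated
// KernelReportValue statistics onto each copied report.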
void CopyTopKDurationKernelReportsToDb(const KernelReportMap& reports,
KernelStatsDb* dst) {
std::vector<std::pair<const KernelReport*, const KernelReportValue*>>
kernels_to_sort;
kernels_to_sort.reserve(reports.size());
for (const auto& report_value : reports) {
kernels_to_sort.push_back(
std::make_pair(&report_value.first, &report_value.second));
}
auto comp =
[](const std::pair<const KernelReport*, const KernelReportValue*>& lhs,
const std::pair<const KernelReport*, const KernelReportValue*>& rhs) {
return lhs.second->total_duration_ns > rhs.second->total_duration_ns ||
(lhs.second->total_duration_ns ==
rhs.second->total_duration_ns &&
KernelReportLessThanComparator()(*lhs.first, *rhs.first));
};
if (kernels_to_sort.size() > kMaxNumOfKernels) {
absl::c_partial_sort(kernels_to_sort,
kernels_to_sort.begin() + kMaxNumOfKernels, comp);
} else {
absl::c_sort(kernels_to_sort, comp);
}
int copy_size =
std::min(kMaxNumOfKernels, static_cast<int>(kernels_to_sort.size()));
for (int i = 0; i < copy_size; i++) {
KernelReport* report = dst->add_reports();
*report = *kernels_to_sort[i].first;
const KernelReportValue& kernel_value = *kernels_to_sort[i].second;
report->set_occurrences(kernel_value.occurrences);
report->set_min_duration_ns(kernel_value.min_duration_ns);
report->set_max_duration_ns(kernel_value.max_duration_ns);
report->set_total_duration_ns(kernel_value.total_duration_ns);
}
}
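// Merges `value` into the entry for `kernel`: total durations and
// occurrences are summed, while min/max durations take the elementwise
// min/max. A fresh entry (occurrences == 0) is simply overwritten, so the
// result is independent of insertion order (see
// TestInsertOrUpdateKernelReport in the unit test).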
void InsertOrUpdateKernelReport(const KernelReport& kernel,
const KernelReportValue& value,
KernelReportMap* dst) {
KernelReportValue& element = (*dst)[kernel];
if (element.occurrences == 0) {
element = value;
} else {
element.total_duration_ns += value.total_duration_ns;
element.min_duration_ns =
std::min(element.min_duration_ns, value.min_duration_ns);
element.max_duration_ns =
std::max(element.max_duration_ns, value.max_duration_ns);
element.occurrences += value.occurrences;
}
}
void MergeKernelReports(const KernelReportMap& reports, KernelReportMap* dst) {
for (auto& kernel_value : reports) {
InsertOrUpdateKernelReport(kernel_value.first, kernel_value.second, dst);
}
}
KernelStatsByOpName GroupKernelReportsByOpName(
const KernelStatsDb& kernel_stats_db) {
KernelStatsByOpName op_level_kernel_stats;
for (const KernelReport& kernel_report : kernel_stats_db.reports()) {
auto ret = op_level_kernel_stats.emplace(kernel_report.op_name(),
OpLevelKernelStats());
if (ret.second) {
OpLevelKernelStats& stats = ret.first->second;
stats.is_op_tensor_core_eligible =
kernel_report.is_op_tensor_core_eligible();
stats.total_duration_ns += kernel_report.total_duration_ns();
if (kernel_report.is_kernel_using_tensor_core()) {
stats.tensor_core_duration_ns += kernel_report.total_duration_ns();
}
} else {
OpLevelKernelStats& stats = ret.first->second;
DCHECK_EQ(stats.is_op_tensor_core_eligible,
kernel_report.is_op_tensor_core_eligible());
stats.total_duration_ns += kernel_report.total_duration_ns();
if (kernel_report.is_kernel_using_tensor_core()) {
stats.tensor_core_duration_ns += kernel_report.total_duration_ns();
}
}
}
return op_level_kernel_stats;
}
}  // namespace profiler
} | #include "tensorflow/core/profiler/utils/kernel_stats_utils.h"
#include <gmock/gmock.h>
#include "xla/backends/profiler/gpu/cupti_collector.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/protobuf/kernel_stats.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::testing::FieldsAre;
TEST(KernelStatsUtilsTest, TestGroupKernelReportsByOpName) {
KernelStatsDb kernel_stats_db;
KernelReport* kernel_report_1 = kernel_stats_db.add_reports();
kernel_report_1->set_name("op1_kernel1");
kernel_report_1->set_op_name("op1");
kernel_report_1->set_total_duration_ns(1000);
kernel_report_1->set_is_kernel_using_tensor_core(true);
kernel_report_1->set_is_op_tensor_core_eligible(true);
KernelReport* kernel_report_2 = kernel_stats_db.add_reports();
kernel_report_2->set_name("op1_kernel2");
kernel_report_2->set_op_name("op1");
kernel_report_2->set_total_duration_ns(1000);
kernel_report_2->set_is_kernel_using_tensor_core(false);
kernel_report_2->set_is_op_tensor_core_eligible(true);
KernelReport* kernel_report_3 = kernel_stats_db.add_reports();
kernel_report_3->set_name("op2_kernel1");
kernel_report_3->set_op_name("op2");
kernel_report_3->set_total_duration_ns(100);
kernel_report_3->set_is_kernel_using_tensor_core(false);
kernel_report_3->set_is_op_tensor_core_eligible(false);
KernelStatsByOpName kernel_stats_by_op_name =
GroupKernelReportsByOpName(kernel_stats_db);
ASSERT_EQ(kernel_stats_by_op_name.size(), 2);
auto iter1 = kernel_stats_by_op_name.find("op1");
auto iter2 = kernel_stats_by_op_name.find("op2");
ASSERT_NE(iter1, kernel_stats_by_op_name.end());
ASSERT_NE(iter2, kernel_stats_by_op_name.end());
const OpLevelKernelStats& op1_stats = iter1->second;
const OpLevelKernelStats& op2_stats = iter2->second;
EXPECT_EQ(op1_stats.is_op_tensor_core_eligible, true);
EXPECT_EQ(op1_stats.total_duration_ns, 2000);
EXPECT_EQ(op1_stats.tensor_core_duration_ns, 1000);
EXPECT_EQ(op2_stats.is_op_tensor_core_eligible, false);
EXPECT_EQ(op2_stats.total_duration_ns, 100);
EXPECT_EQ(op2_stats.tensor_core_duration_ns, 0);
}
TEST(KernelStatsUtilsTest, KernelDetailsXStatParser) {
xla::profiler::KernelDetails kernel_info;
kernel_info.registers_per_thread = 10;
kernel_info.static_shared_memory_usage = 128;
kernel_info.dynamic_shared_memory_usage = 256;
kernel_info.block_x = 32;
kernel_info.block_y = 8;
kernel_info.block_z = 4;
kernel_info.grid_x = 3;
kernel_info.grid_y = 2;
kernel_info.grid_z = 1;
const double occupancy_pct = 50.0;
std::string xstat_kernel_details = ToXStat(kernel_info, occupancy_pct);
KernelReport kernel;
ParseKernelLaunchParams(xstat_kernel_details, &kernel);
EXPECT_EQ(kernel.registers_per_thread(), 10);
EXPECT_EQ(kernel.static_shmem_bytes(), 128);
EXPECT_EQ(kernel.dynamic_shmem_bytes(), 256);
EXPECT_EQ(kernel.block_dim()[0], 32);
EXPECT_EQ(kernel.block_dim()[1], 8);
EXPECT_EQ(kernel.block_dim()[2], 4);
EXPECT_EQ(kernel.grid_dim()[0], 3);
EXPECT_EQ(kernel.grid_dim()[1], 2);
EXPECT_EQ(kernel.grid_dim()[2], 1);
}
TEST(KernelStatsUtilsTest, KernelDetailsTokenizer) {
KernelReport kernel;
absl::string_view kernel_details_0 = "odd grid:3,2,1";
ParseKernelLaunchParams(kernel_details_0, &kernel);
EXPECT_EQ(kernel.grid_dim()[0], 3);
EXPECT_EQ(kernel.grid_dim()[1], 2);
EXPECT_EQ(kernel.grid_dim()[2], 1);
absl::string_view kernel_details_1 = "block:6,5,4 odd ";
ParseKernelLaunchParams(kernel_details_1, &kernel);
EXPECT_EQ(kernel.block_dim()[0], 6);
EXPECT_EQ(kernel.block_dim()[1], 5);
EXPECT_EQ(kernel.block_dim()[2], 4);
absl::string_view kernel_details_2 = "block:1,2,3 odd grid:4,5,6";
ParseKernelLaunchParams(kernel_details_2, &kernel);
EXPECT_EQ(kernel.block_dim()[0], 1);
EXPECT_EQ(kernel.block_dim()[1], 2);
EXPECT_EQ(kernel.block_dim()[2], 3);
EXPECT_EQ(kernel.grid_dim()[0], 4);
EXPECT_EQ(kernel.grid_dim()[1], 5);
EXPECT_EQ(kernel.grid_dim()[2], 6);
absl::string_view kernel_details_3 = "static_shared:7 dynamic_shared:8";
ParseKernelLaunchParams(kernel_details_3, &kernel);
EXPECT_EQ(kernel.static_shmem_bytes(), 7);
EXPECT_EQ(kernel.dynamic_shmem_bytes(), 8);
}
TEST(KernelStatsUtilsTest, TestInsertOrUpdateKernelReport) {
KernelReport kr;
kr.set_name("op1_kernel1");
kr.set_op_name("op1");
kr.add_block_dim(32);
kr.add_block_dim(8);
kr.add_block_dim(4);
kr.add_grid_dim(3);
kr.add_grid_dim(2);
kr.add_grid_dim(1);
KernelReportValue krv1;
krv1.total_duration_ns = 1700;
krv1.min_duration_ns = 500;
krv1.max_duration_ns = 1200;
krv1.occurrences = 2;
KernelReportValue krv2;
krv2.total_duration_ns = 900;
krv2.min_duration_ns = 900;
krv2.max_duration_ns = 900;
krv2.occurrences = 1;
KernelReportMap dst1;
InsertOrUpdateKernelReport(kr, krv1, &dst1);
InsertOrUpdateKernelReport(kr, krv2, &dst1);
EXPECT_THAT(dst1[kr], FieldsAre(2600, 500, 1200, 3));
KernelReportMap dst2;
InsertOrUpdateKernelReport(kr, krv2, &dst2);
InsertOrUpdateKernelReport(kr, krv1, &dst2);
EXPECT_THAT(dst2[kr], FieldsAre(2600, 500, 1200, 3));
}
}  // namespace
}  // namespace profiler
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/kernel_stats_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/kernel_stats_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a9e152ae-637d-49e9-93dd-dd7bc78b2bd5 | cpp | tensorflow/tensorflow | step_intersection | tensorflow/core/profiler/utils/step_intersection.cc | tensorflow/core/profiler/utils/step_intersection_test.cc | #include "tensorflow/core/profiler/utils/step_intersection.h"
#include "xla/tsl/profiler/utils/timespan.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace profiler {
namespace {
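// Returns the timespan covered by one step across all cores: from the
// earliest begin_ps to the latest end_ps over step_info_per_core.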
tsl::profiler::Timespan StepTimespan(const PerCoreStepInfo& percore_stepinfo) {
uint64 min_ps = kuint64max;
uint64 max_ps = 0;
for (const auto& core_stepinfo : percore_stepinfo.step_info_per_core()) {
const auto& stepinfo = core_stepinfo.second;
uint64 begin_ps = stepinfo.begin_ps();
uint64 end_ps = begin_ps + stepinfo.duration_ps();
min_ps = std::min(min_ps, begin_ps);
max_ps = std::max(max_ps, end_ps);
}
return (min_ps < max_ps)
? tsl::profiler::Timespan::FromEndPoints(min_ps, max_ps)
: tsl::profiler::Timespan();
}
tsl::profiler::Timespan AllStepsTimespan(const StepDatabaseResult& step_db) {
uint64 min_ps = kuint64max;
uint64 max_ps = 0;
for (const auto& step : step_db.step_sequence()) {
tsl::profiler::Timespan timespan = StepTimespan(step);
uint64 begin_ps = timespan.begin_ps();
uint64 end_ps = timespan.end_ps();
min_ps = std::min(min_ps, begin_ps);
max_ps = std::max(max_ps, end_ps);
}
return (min_ps < max_ps)
? tsl::profiler::Timespan::FromEndPoints(min_ps, max_ps)
: tsl::profiler::Timespan();
}
struct AlignmentInfo {
StepsAlignment alignment;
double similarity;
};
double StepSimilarity(const PerCoreStepInfo& subordinate_step,
const PerCoreStepInfo& chief_step) {
tsl::profiler::Timespan subordinate_timespan = StepTimespan(subordinate_step);
tsl::profiler::Timespan chief_timespan = StepTimespan(chief_step);
return chief_timespan.OverlappedDurationPs(subordinate_timespan);
}
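// Aligns the subordinate host's step sequence against the chief host's by
// pinning subordinate_anchor to chief_anchor, extending the aligned window
// as far as both sequences allow on each side, and scoring the alignment as
// the summed overlapped duration of the paired steps.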
AlignmentInfo ComputeAlignmentInfo(const StepDatabaseResult& subordinate,
uint32 subordinate_anchor,
const StepDatabaseResult& chief,
uint32 chief_anchor) {
uint32 pre_anchor_steps = std::min(subordinate_anchor, chief_anchor);
uint32 post_anchor_steps =
std::min(subordinate.step_sequence_size() - subordinate_anchor,
chief.step_sequence_size() - chief_anchor);
uint32 alignment_steps = pre_anchor_steps + post_anchor_steps;
double similarity = 0;
uint32 begin_subordinate_idx = subordinate_anchor - pre_anchor_steps;
uint32 begin_chief_idx = chief_anchor - pre_anchor_steps;
for (uint32 i = 0; i < alignment_steps; i++) {
similarity +=
StepSimilarity(subordinate.step_sequence(begin_subordinate_idx + i),
chief.step_sequence(begin_chief_idx + i));
}
StepsAlignment alignment = {begin_subordinate_idx, begin_chief_idx,
alignment_steps};
return {alignment, similarity};
}
StepsAlignment FindStepsAlignment(const StepDatabaseResult& subordinate,
const StepDatabaseResult& chief) {
double max_similarity = -1;
StepsAlignment alignment = {0, 0, 0};
if (subordinate.step_sequence_size() == 0 || chief.step_sequence_size() == 0)
return alignment;
for (auto c = 0; c < chief.step_sequence_size(); c++) {
AlignmentInfo info =
ComputeAlignmentInfo(subordinate, 0, chief, c);
if (info.similarity <= max_similarity) continue;
max_similarity = info.similarity;
alignment = info.alignment;
}
for (auto s = 1; s < subordinate.step_sequence_size(); s++) {
AlignmentInfo info =
ComputeAlignmentInfo(subordinate, s, chief, 0);
if (info.similarity <= max_similarity) continue;
max_similarity = info.similarity;
alignment = info.alignment;
}
return alignment;
}
std::string StringStepsAlignment(const StepsAlignment& alignment) {
return absl::StrCat(
"[begin_subordinate_idx: ", alignment.begin_subordinate_idx,
", begin_chief_idx: ", alignment.begin_chief_idx,
", num_steps: ", alignment.num_steps, "]");
}
std::string StringDstStepNumbers(const std::vector<uint32>& step_numbers) {
std::string str;
absl::StrAppend(&str, "[");
for (auto i = 0; i < step_numbers.size(); i++) {
if (i > 0) absl::StrAppend(&str, ", ");
absl::StrAppend(&str, step_numbers[i]);
}
absl::StrAppend(&str, "]");
return str;
}
std::string StringSrcToDstIndexMap(uint32 src_first_step_idx,
uint32 num_steps) {
std::string str;
absl::StrAppend(&str, "[");
for (auto i = 0; i < num_steps; i++) {
if (i > 0) absl::StrAppend(&str, ", ");
absl::StrAppend(&str, src_first_step_idx + i, ":", i);
}
absl::StrAppend(&str, "]");
return str;
}
}  // namespace
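// Picks as chief the host whose steps span the shortest overall time, aligns
// every other host's step sequence against the chief's, and keeps the range
// of chief steps [max begin, min end) common to all hosts. If the alignments
// do not overlap (max begin exceeds min end) the intersection is marked
// empty; if the common range exceeds max_steps, the excess steps are dropped
// and counted in steps_dropped_.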
StepIntersection::StepIntersection(
uint32 max_steps,
const absl::flat_hash_map<uint32, const StepDatabaseResult*>&
perhost_stepdb) {
empty_intersect_ = false;
chief_host_id_ = kuint32max;
uint64 min_duration_ps = kuint64max;
const StepDatabaseResult* chief_step_db = nullptr;
for (const auto& hostid_stepdb : perhost_stepdb) {
auto host_id = hostid_stepdb.first;
const auto& step_db = hostid_stepdb.second;
tsl::profiler::Timespan timespan = AllStepsTimespan(*step_db);
if (timespan.duration_ps() < min_duration_ps) {
chief_host_id_ = host_id;
chief_step_db = step_db;
min_duration_ps = timespan.duration_ps();
}
}
if (chief_host_id_ == kuint32max) {
steps_dropped_ = 0;
begin_chief_idx_ = 0;
end_chief_idx_ = 0;
return;
}
uint32 max_begin_chief_idx = 0;
uint32 min_end_chief_idx = kuint32max;
for (const auto& hostid_stepdb : perhost_stepdb) {
auto host_id = hostid_stepdb.first;
const auto& step_db = hostid_stepdb.second;
if (host_id == chief_host_id_) {
perhost_alignment_[host_id] = {
0, 0,
static_cast<uint32>(step_db->step_sequence_size())};
} else {
perhost_alignment_[host_id] =
FindStepsAlignment(*step_db, *chief_step_db);
}
uint32 host_begin_chief_idx = perhost_alignment_[host_id].begin_chief_idx;
max_begin_chief_idx = std::max(max_begin_chief_idx, host_begin_chief_idx);
uint32 host_end_chief_idx = perhost_alignment_[host_id].begin_chief_idx +
perhost_alignment_[host_id].num_steps;
min_end_chief_idx = std::min(min_end_chief_idx, host_end_chief_idx);
}
if (max_begin_chief_idx > min_end_chief_idx) {
steps_dropped_ = 0;
begin_chief_idx_ = 0;
end_chief_idx_ = 0;
empty_intersect_ = true;
return;
}
begin_chief_idx_ = max_begin_chief_idx;
uint32 num_steps = min_end_chief_idx - max_begin_chief_idx;
if (num_steps > max_steps) {
steps_dropped_ = num_steps - max_steps;
end_chief_idx_ = max_begin_chief_idx + max_steps;
} else {
steps_dropped_ = 0;
end_chief_idx_ = min_end_chief_idx;
}
}
std::vector<uint32> StepIntersection::DstStepNumbers() const {
std::vector<uint32> result;
result.reserve(NumSteps());
for (uint32 i = 0; i < NumSteps(); i++) {
result.push_back(i);
}
return result;
}
uint32 StepIntersection::FirstStepIndex(uint32 host_id) const {
const auto* alignment = gtl::FindOrNull(perhost_alignment_, host_id);
if (alignment == nullptr) return 0;
DCHECK(alignment->begin_chief_idx <= begin_chief_idx_);
uint32 shift = begin_chief_idx_ - alignment->begin_chief_idx;
uint32 begin_subordinate_idx = alignment->begin_subordinate_idx + shift;
return begin_subordinate_idx;
}
std::string StepIntersection::DebugString() const {
std::string str;
absl::StrAppend(&str, "chief host id_: ", chief_host_id_, "\n");
absl::StrAppend(&str, "begin_chief_idx_: ", begin_chief_idx_,
", num_steps: ", NumSteps(), "\n");
absl::StrAppend(
&str, "DstStepNumbers(): ", StringDstStepNumbers(DstStepNumbers()), "\n");
std::vector<uint32> host_ids;
host_ids.reserve(perhost_alignment_.size());
for (const auto& hostid_alignment : perhost_alignment_) {
auto host_id = hostid_alignment.first;
host_ids.push_back(host_id);
}
absl::c_sort(host_ids);
absl::StrAppend(&str, "perhost_alignment:\n");
for (const auto host_id : host_ids) {
const auto* ptr = gtl::FindOrNull(perhost_alignment_, host_id);
if (ptr == nullptr) continue;
absl::StrAppend(&str, "host: ", host_id,
", step-alignment: ", StringStepsAlignment(*ptr), "\n");
}
absl::StrAppend(&str, "SrcToDstIndexMap():\n");
for (const auto host_id : host_ids) {
absl::StrAppend(&str, "host: ", host_id, ", src-to-dst-index-map: ",
StringSrcToDstIndexMap(FirstStepIndex(host_id), NumSteps()),
"\n");
}
return str;
}
}  // namespace profiler
} | #include "tensorflow/core/profiler/utils/step_intersection.h"
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace profiler {
namespace {
using PerHostStepDb =
absl::flat_hash_map<uint32 , StepDatabaseResult>;
constexpr uint64 kStepDurationPs = 2000000000;
constexpr uint32 kNumStepsPerHost = 10;
constexpr uint64 kStepGapPs = 0;
constexpr uint32 kNumCoresPerHost = 8;
PerCoreStepInfo CreateOneTestStep(uint32 host_id, uint32 num_steps,
uint32 step_idx, uint64 step_begin_ps) {
PerCoreStepInfo result;
uint32 step_num =
step_idx * host_id;
result.set_step_num(step_num);
StepInfoResult info;
info.set_step_num(step_num);
if (host_id == 0 && step_idx == (num_steps - 1)) {
info.set_duration_ps(kStepDurationPs - 1);
} else {
info.set_duration_ps(kStepDurationPs);
}
info.set_begin_ps(step_begin_ps);
for (uint32 core_id = 0; core_id < kNumCoresPerHost; core_id++) {
(*result.mutable_step_info_per_core())[core_id] = info;
}
return result;
}
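// Builds one synthetic step whose StepInfoResult is replicated across
// kNumCoresPerHost cores. Host 0's final step is made 1 ps shorter,
// presumably so host 0 always has the smallest overall timespan and is
// deterministically selected as the chief.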
PerHostStepDb CreateTestSteps(uint32 num_hosts, uint64 shift_ps) {
PerHostStepDb result;
uint64 first_step_begin_ps = 0;
for (uint32 host_id = 0; host_id < num_hosts; host_id++) {
StepDatabaseResult step_db;
uint64 step_begin_ps = first_step_begin_ps;
for (uint32 step_idx = 0; step_idx < kNumStepsPerHost; step_idx++) {
*step_db.add_step_sequence() =
CreateOneTestStep(host_id, kNumStepsPerHost, step_idx, step_begin_ps);
step_begin_ps += (kStepDurationPs + kStepGapPs);
}
result[host_id] = step_db;
first_step_begin_ps += shift_ps;
}
return result;
}
PerHostStepDb CreateEmptyIntersectTestSteps() {
PerHostStepDb result;
uint64 step_begin_ps;
uint32 host_id;
host_id = 0;
step_begin_ps = 0;
uint64 host_0_num_steps = 10;
StepDatabaseResult step_db_0;
for (uint32 step_idx = 0; step_idx < host_0_num_steps; step_idx++) {
*step_db_0.add_step_sequence() =
CreateOneTestStep(host_id, host_0_num_steps, step_idx, step_begin_ps);
step_begin_ps += (kStepDurationPs + kStepGapPs);
}
result[host_id] = step_db_0;
host_id = 1;
step_begin_ps = (host_0_num_steps - 2) * (kStepDurationPs + kStepGapPs);
uint64 host_1_num_steps = 5;
StepDatabaseResult step_db_1;
for (uint32 step_idx = 0; step_idx < host_1_num_steps; step_idx++) {
*step_db_1.add_step_sequence() =
CreateOneTestStep(host_id, host_1_num_steps, step_idx, step_begin_ps);
step_begin_ps += (kStepDurationPs + kStepGapPs);
}
result[host_id] = step_db_1;
host_id = 2;
step_begin_ps = (host_0_num_steps + host_1_num_steps - 4) *
(kStepDurationPs + kStepGapPs);
uint64 host_2_num_steps = 10;
StepDatabaseResult step_db_2;
for (uint32 step_idx = 0; step_idx < host_2_num_steps; step_idx++) {
*step_db_2.add_step_sequence() =
CreateOneTestStep(host_id, host_2_num_steps, step_idx, step_begin_ps);
step_begin_ps += (kStepDurationPs + kStepGapPs);
}
result[host_id] = step_db_2;
return result;
}
PerHostStepDb CreateNoStep(uint32 num_hosts) {
PerHostStepDb result;
for (uint32 host_id = 0; host_id < num_hosts; host_id++) {
StepDatabaseResult step_db;
result[host_id] = step_db;
}
return result;
}
absl::flat_hash_map<uint32 , const StepDatabaseResult*> Convert(
const PerHostStepDb& perhost_stepdb) {
absl::flat_hash_map<uint32 , const StepDatabaseResult*> result;
for (const auto& hostid_stepdb : perhost_stepdb) {
auto host_id = hostid_stepdb.first;
const auto& step_db = hostid_stepdb.second;
result[host_id] = &step_db;
}
return result;
}
TEST(StepIntersectionTest, EachHostShiftedBy1StepDuration) {
uint32 num_hosts = 4;
uint64 shift_ps = kStepDurationPs;
PerHostStepDb perhost_stepdb = CreateTestSteps(num_hosts, shift_ps);
StepIntersection intersection =
StepIntersection(kNumStepsPerHost, Convert(perhost_stepdb));
EXPECT_EQ(intersection.StepsDropped(), 0);
uint32 dst_num_steps = kNumStepsPerHost - num_hosts + 1;
EXPECT_EQ(intersection.NumSteps(), dst_num_steps);
uint32 src_first_step_index = intersection.FirstStepIndex(0);
EXPECT_EQ(src_first_step_index, num_hosts - 1);
std::vector<uint32> dst_step_numbers = intersection.DstStepNumbers();
for (uint32 i = 0; i < dst_num_steps; i++) {
EXPECT_EQ(dst_step_numbers[i], i);
}
}
TEST(StepIntersectionTest, ExactlyNoShift) {
uint32 num_hosts = 4;
uint64 shift_ps = 0;
PerHostStepDb perhost_stepdb = CreateTestSteps(num_hosts, shift_ps);
StepIntersection intersection =
StepIntersection(kNumStepsPerHost, Convert(perhost_stepdb));
EXPECT_EQ(intersection.StepsDropped(), 0);
uint32 dst_num_steps = kNumStepsPerHost;
EXPECT_EQ(intersection.NumSteps(), dst_num_steps);
std::vector<uint32> dst_step_numbers = intersection.DstStepNumbers();
for (uint32 i = 0; i < dst_num_steps; i++) {
EXPECT_EQ(dst_step_numbers[i], i);
}
for (uint32 host_id = 0; host_id < num_hosts; host_id++) {
uint32 src_first_step_index = intersection.FirstStepIndex(host_id);
EXPECT_EQ(src_first_step_index, 0);
}
}
TEST(StepIntersectionTest, EachHostShiftedByJustABit) {
uint32 num_hosts = 4;
uint64 shift_ps = 100;
PerHostStepDb perhost_stepdb = CreateTestSteps(num_hosts, shift_ps);
StepIntersection intersection =
StepIntersection(kNumStepsPerHost, Convert(perhost_stepdb));
EXPECT_EQ(intersection.StepsDropped(), 0);
uint32 dst_num_steps = kNumStepsPerHost;
EXPECT_EQ(intersection.NumSteps(), dst_num_steps);
std::vector<uint32> dst_step_numbers = intersection.DstStepNumbers();
for (uint32 i = 0; i < dst_num_steps; i++) {
EXPECT_EQ(dst_step_numbers[i], i);
}
for (uint32 host_id = 0; host_id < num_hosts; host_id++) {
uint32 src_first_step_index = intersection.FirstStepIndex(host_id);
EXPECT_EQ(src_first_step_index, 0);
}
}
TEST(StepIntersectionTest, SingleHost) {
uint32 num_hosts = 1;
uint64 shift_ps = 0;
PerHostStepDb perhost_stepdb = CreateTestSteps(num_hosts, shift_ps);
StepIntersection intersection =
StepIntersection(kNumStepsPerHost, Convert(perhost_stepdb));
EXPECT_EQ(intersection.StepsDropped(), 0);
uint32 dst_num_steps = kNumStepsPerHost;
EXPECT_EQ(intersection.NumSteps(), dst_num_steps);
std::vector<uint32> dst_step_numbers = intersection.DstStepNumbers();
for (uint32 i = 0; i < dst_num_steps; i++) {
EXPECT_EQ(dst_step_numbers[i], i);
}
for (uint32 host_id = 0; host_id < num_hosts; host_id++) {
uint32 src_first_step_index = intersection.FirstStepIndex(host_id);
EXPECT_EQ(src_first_step_index, 0);
}
}
TEST(StepIntersectionTest, WithMaxSteps) {
uint32 num_hosts = 4;
uint64 shift_ps = 0;
uint32 max_steps = 3;
PerHostStepDb perhost_stepdb = CreateTestSteps(num_hosts, shift_ps);
StepIntersection intersection =
StepIntersection(max_steps, Convert(perhost_stepdb));
EXPECT_EQ(intersection.StepsDropped(), kNumStepsPerHost - max_steps);
EXPECT_EQ(intersection.NumSteps(), max_steps);
}
TEST(StepIntersectionTest, NoStep) {
uint32 num_hosts = 4;
uint32 max_steps = 100;
PerHostStepDb perhost_stepdb = CreateNoStep(num_hosts);
StepIntersection intersection =
StepIntersection(max_steps, Convert(perhost_stepdb));
EXPECT_EQ(intersection.NumSteps(), 0);
EXPECT_FALSE(intersection.EmptyIntersect());
}
TEST(StepIntersectionTest, EmptyIntersection) {
uint32 max_steps = 100;
PerHostStepDb perhost_stepdb = CreateEmptyIntersectTestSteps();
StepIntersection intersection =
StepIntersection(max_steps, Convert(perhost_stepdb));
EXPECT_EQ(intersection.StepsDropped(), 0);
EXPECT_EQ(intersection.NumSteps(), 0);
EXPECT_TRUE(intersection.EmptyIntersect());
}
}  // namespace
}  // namespace profiler
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/step_intersection.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/step_intersection_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e652fd5d-582c-4120-9c12-4a2f60eea061 | cpp | tensorflow/tensorflow | op_metrics_db_utils | tensorflow/core/profiler/utils/op_metrics_db_utils.cc | tensorflow/core/profiler/utils/op_metrics_db_utils_test.cc | #include "tensorflow/core/profiler/utils/op_metrics_db_utils.h"
#include <algorithm>
#include <cstdint>
#include <limits>
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "xla/tsl/profiler/utils/tf_op_utils.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
namespace tensorflow {
namespace profiler {
const absl::string_view kIdle = "IDLE";
const uint32_t kSparseCoreIndexStart = 1000000;
namespace {
constexpr uint64_t kRootSymbolId = 0;
using tsl::profiler::StatType;
using tsl::profiler::XEventMetadataVisitor;
using tsl::profiler::XStatVisitor;
class DeviceTfOpMetricsDbBuilder : public OpMetricsDbBuilder {
public:
explicit DeviceTfOpMetricsDbBuilder(OpMetricsDb* db)
: OpMetricsDbBuilder(db) {}
void UpdateTfOpMetricsWithDeviceOpMetrics(absl::string_view tf_op_name,
absl::string_view tf_op_type,
const OpMetrics& device_op_metrics,
uint64_t fingerprint) {
OpMetrics* tf_op_metrics = OpMetricsDbBuilder::LookupOrInsertNewOpMetrics(
0, tf_op_name, fingerprint);
if (tf_op_metrics->category().empty()) {
tf_op_metrics->set_category(tf_op_type == tsl::profiler::kUnknownOp
? "Unknown"
: std::string(tf_op_type));
}
tf_op_metrics->set_is_eager(device_op_metrics.is_eager());
tf_op_metrics->set_occurrences(std::max(tf_op_metrics->occurrences(),
device_op_metrics.occurrences()));
tf_op_metrics->set_time_ps(tf_op_metrics->time_ps() +
device_op_metrics.time_ps());
tf_op_metrics->set_self_time_ps(tf_op_metrics->self_time_ps() +
device_op_metrics.self_time_ps());
tf_op_metrics->set_flops(tf_op_metrics->flops() +
device_op_metrics.flops());
tf_op_metrics->set_bytes_accessed(tf_op_metrics->bytes_accessed() +
device_op_metrics.bytes_accessed());
}
};
struct OpKey {
std::optional<uint64_t> program_id;
std::optional<uint64_t> symbol_id;
};
OpKey GetOpKeyFromHloEventMetadata(
const XEventMetadataVisitor& hlo_event_metadata) {
OpKey op_key;
hlo_event_metadata.ForEachStat([&](const XStatVisitor& stat) {
if (stat.Type().has_value()) {
switch (static_cast<StatType>(*stat.Type())) {
case StatType::kProgramId:
op_key.program_id = stat.IntOrUintValue();
break;
case StatType::kSymbolId:
op_key.symbol_id = stat.IntOrUintValue();
break;
default:
break;
}
}
});
return op_key;
}
void SetOpMetadataFromHloEventMetadata(
const XEventMetadataVisitor& hlo_event_metadata, OpMetrics* op_metrics) {
if (hlo_event_metadata.HasDisplayName()) {
op_metrics->set_name(std::string(hlo_event_metadata.DisplayName()));
op_metrics->set_long_name(std::string(hlo_event_metadata.Name()));
} else {
op_metrics->set_name(std::string(hlo_event_metadata.Name()));
}
hlo_event_metadata.ForEachStat([&](const XStatVisitor& stat) {
if (stat.Type().has_value()) {
switch (static_cast<StatType>(*stat.Type())) {
case StatType::kProgramId:
op_metrics->set_hlo_module_id(stat.IntOrUintValue());
break;
case StatType::kHloCategory:
op_metrics->set_category(std::string(stat.StrOrRefValue()));
break;
case StatType::kTfOp:
op_metrics->set_provenance(std::string(stat.StrOrRefValue()));
break;
case StatType::kFlops:
op_metrics->set_flops(stat.IntOrUintValue());
break;
case StatType::kBytesAccessed:
op_metrics->set_bytes_accessed(stat.IntOrUintValue());
break;
case StatType::kMemoryAccessBreakdown: {
tensorflow::profiler::MemoryAccessBreakdown breakdown;
const auto& value = stat.BytesValue();
if (breakdown.ParseFromArray(value.data(), value.size())) {
*op_metrics->mutable_memory_accessed_breakdown() =
breakdown.memory_accessed();
}
break;
}
case StatType::kDeduplicatedName:
op_metrics->set_deduplicated_name(std::string(stat.StrOrRefValue()));
break;
default:
break;
}
}
});
hlo_event_metadata.ForEachChild(
[&](const XEventMetadataVisitor& child_hlo_event_metadata) {
OpMetrics* child = op_metrics->mutable_children()->add_metrics_db();
child->set_occurrences(1);
SetOpMetadataFromHloEventMetadata(child_hlo_event_metadata, child);
});
}
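// Accumulates one HLO event into `op_metrics`: the first event also copies
// the event metadata and initializes the counters, while subsequent events
// add to occurrences, total/self time, and DMA-stall time, and keep the
// minimum of the per-event minimum durations.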
void SetOpMetricsFromHloEvent(const tsl::profiler::XEventVisitor& hlo_event,
OpMetrics* op_metrics) {
uint64_t duration_ps = hlo_event.DurationPs();
uint64_t min_duration_ps = duration_ps;
uint64_t self_duration_ps = duration_ps;
uint64_t dma_stall_ps = 0;
hlo_event.ForEachStat([&](const XStatVisitor& stat) {
if (!stat.Type()) return;
switch (static_cast<StatType>(*stat.Type())) {
case StatType::kMinDurationPs:
min_duration_ps = stat.IntValue();
break;
case StatType::kSelfDurationPs:
self_duration_ps = stat.IntValue();
break;
case StatType::kDmaStallDurationPs:
dma_stall_ps = stat.IntValue();
break;
default:
break;
}
});
if (op_metrics->occurrences() == 0) {
SetOpMetadataFromHloEventMetadata(hlo_event.Metadata(), op_metrics);
op_metrics->set_occurrences(hlo_event.NumOccurrences());
op_metrics->set_time_ps(duration_ps);
op_metrics->set_min_time_ps(min_duration_ps);
op_metrics->set_self_time_ps(self_duration_ps);
op_metrics->set_dma_stall_ps(dma_stall_ps);
} else {
op_metrics->set_occurrences(op_metrics->occurrences() +
hlo_event.NumOccurrences());
op_metrics->set_time_ps(op_metrics->time_ps() + duration_ps);
op_metrics->set_min_time_ps(
std::min<uint64_t>(op_metrics->min_time_ps(), min_duration_ps));
op_metrics->set_self_time_ps(op_metrics->self_time_ps() + self_duration_ps);
op_metrics->set_dma_stall_ps(op_metrics->dma_stall_ps() + dma_stall_ps);
}
}
void AdjustFlopsAndBytesAccessed(OpMetrics& op_metrics) {
op_metrics.set_flops(op_metrics.flops() * op_metrics.occurrences());
op_metrics.set_bytes_accessed(op_metrics.bytes_accessed() *
op_metrics.occurrences());
for (auto& memory_access : *op_metrics.mutable_memory_accessed_breakdown()) {
memory_access.set_bytes_accessed(memory_access.bytes_accessed() *
op_metrics.occurrences());
}
}
}  // namespace
OpMetricsDbBuilder::OpMetricsDbBuilder(OpMetricsDb* db) : db_(db) {
DCHECK_NE(db_, nullptr);
DCHECK_EQ(db_->metrics_db_size(), 0);
}
OpMetrics* OpMetricsDbBuilder::LookupOrInsertNewOpMetrics(
uint64 hlo_module_id, absl::string_view name, uint64_t fingerprint) {
OpMetrics*& op_metrics = op_metrics_map_[hlo_module_id][name];
if (op_metrics == nullptr) {
op_metrics = db_->add_metrics_db();
op_metrics->set_hlo_module_id(hlo_module_id);
op_metrics->set_fingerprint(fingerprint);
op_metrics->set_name(name.data(), name.size());
}
return op_metrics;
}
void XEventsOpMetricsDbBuilder::AddOpMetric(
const tsl::profiler::XEventVisitor& event) {
OpKey key = GetOpKeyFromHloEventMetadata(event.Metadata());
std::optional<XStatVisitor> stat = event.GetStat(StatType::kStepIdleTimePs);
if (stat.has_value()) {
uint64_t idle_time_ps = stat->IntOrUintValue();
OpMetrics op_metrics;
op_metrics.set_self_time_ps(event.DurationPs() - idle_time_ps);
op_metrics.set_name("sparse_core_busy_ops");
op_metrics.set_category("sparse_core_busy_ops");
constexpr uint64_t kMaxProgramId = std::numeric_limits<uint64_t>::max();
constexpr uint64_t kMaxSymbolId = std::numeric_limits<uint64_t>::max();
flat_op_metric_[kMaxProgramId][kMaxSymbolId] = op_metrics;
SetOpMetricsFromHloEvent(event, &op_metrics);
}
if (!key.program_id.has_value() || !key.symbol_id.has_value()) return;
OpMetricBySymbol& op_metric_by_symbol =
flat_op_metric_[key.program_id.value()];
if (key.symbol_id != kRootSymbolId) {
OpMetrics& op_metrics = op_metric_by_symbol[key.symbol_id.value()];
SetOpMetricsFromHloEvent(event, &op_metrics);
}
}
OpMetricsDb XEventsOpMetricsDbBuilder::Finalize(uint64_t total_time_ps) {
OpMetricsDb db = Finalize();
SetTotalTimePs(db, total_time_ps);
AddIdleOp(db);
return db;
}
OpMetricsDb XEventsOpMetricsDbBuilder::Finalize() {
OpMetricsDb db;
uint64_t total_op_time_ps = 0;
for (auto& [program_id, op_metric_by_symbol] : flat_op_metric_) {
for (auto& [symbol_id, op_metrics] : op_metric_by_symbol) {
AdjustFlopsAndBytesAccessed(op_metrics);
total_op_time_ps += op_metrics.self_time_ps();
db.add_metrics_db()->Swap(&op_metrics);
}
}
db.set_total_op_time_ps(total_op_time_ps);
return db;
}
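// The idle time ratio is 1 - total_op_time_ps / total_time_ps. SafeDivide
// yields 0 for a zero denominator, so a default-constructed database reports
// a ratio of 1.0 (see the IdleTimeRatio unit test).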
double IdleTimeRatio(const OpMetricsDb& db) {
return 1.0 -
tsl::profiler::SafeDivide(db.total_op_time_ps(), db.total_time_ps());
}
uint64 IdleTimePs(const OpMetricsDb& db) {
DCHECK_GE(db.total_time_ps(), db.total_op_time_ps());
return db.total_time_ps() - db.total_op_time_ps();
}
void SetIdleOp(uint64_t idle_time_ps, OpMetrics& metrics) {
metrics.set_name(std::string(kIdle));
metrics.set_category(std::string(kIdle));
metrics.set_occurrences(0);
metrics.set_time_ps(idle_time_ps);
metrics.set_self_time_ps(idle_time_ps);
}
void AddIdleOp(OpMetricsDb& db) {
uint64 idle_time_ps = IdleTimePs(db);
SetIdleOp(idle_time_ps, *db.add_metrics_db());
}
std::optional<double> HostInfeedEnqueueRatio(const OpMetricsDb& db) {
if (db.total_host_infeed_enq_start_timestamp_ps_diff() > 0) {
return tsl::profiler::SafeDivide(
db.total_host_infeed_enq_duration_ps(),
db.total_host_infeed_enq_start_timestamp_ps_diff());
}
return std::nullopt;
}
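// Re-keys a device-op metrics database by TF op: each device op is credited
// to the TF op parsed from its provenance, or to an "Unknown"-category entry
// when provenance is empty. IDLE time is carried over only when with_idle is
// true, which also determines whether total_time_ps includes idle time.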
OpMetricsDb CreateTfMetricsDbFromDeviceOpMetricsDb(
const OpMetricsDb& device_op_metrics_db, bool with_idle) {
OpMetricsDb tf_op_metrics_db;
DeviceTfOpMetricsDbBuilder builder(&tf_op_metrics_db);
for (const auto& device_op_metrics : device_op_metrics_db.metrics_db()) {
if (IsIdleOp(device_op_metrics)) {
if (with_idle) {
builder.UpdateTfOpMetricsWithDeviceOpMetrics(
kIdle, kIdle, device_op_metrics, device_op_metrics.fingerprint());
}
} else if (device_op_metrics.provenance().empty()) {
builder.UpdateTfOpMetricsWithDeviceOpMetrics(
device_op_metrics.name(), tsl::profiler::kUnknownOp,
device_op_metrics, device_op_metrics.fingerprint());
} else {
tsl::profiler::TfOp tf_op =
tsl::profiler::ParseTfOpFullname(device_op_metrics.provenance());
builder.UpdateTfOpMetricsWithDeviceOpMetrics(
tf_op.name, tf_op.type, device_op_metrics,
device_op_metrics.fingerprint());
}
}
tf_op_metrics_db.set_total_op_time_ps(
device_op_metrics_db.total_op_time_ps());
tf_op_metrics_db.set_total_time_ps(
with_idle ? device_op_metrics_db.total_time_ps()
: device_op_metrics_db.total_op_time_ps());
return tf_op_metrics_db;
}
}  // namespace profiler
} | #include "tensorflow/core/profiler/utils/op_metrics_db_utils.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
constexpr double kMaxError = 1E-10;
TEST(OpMetricsDbTest, IdleTimeRatio) {
OpMetricsDb metrics_db_0;
metrics_db_0.set_total_time_ps(100000000);
metrics_db_0.set_total_op_time_ps(60000000);
EXPECT_NEAR(0.4, IdleTimeRatio(metrics_db_0), kMaxError);
OpMetricsDb metrics_db_1;
metrics_db_1.set_total_time_ps(200000000);
metrics_db_1.set_total_op_time_ps(150000000);
EXPECT_NEAR(0.25, IdleTimeRatio(metrics_db_1), kMaxError);
OpMetricsDb metrics_db_2;
  metrics_db_2.set_total_time_ps(0);
  metrics_db_2.set_total_op_time_ps(0);
EXPECT_NEAR(1.0, IdleTimeRatio(metrics_db_2), kMaxError);
}
}  // namespace
}  // namespace profiler
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/op_metrics_db_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/op_metrics_db_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
da8d2664-ab11-4fc3-b0f0-3d232f73b516 | cpp | tensorflow/tensorflow | op_stats_to_pod_stats | tensorflow/core/profiler/convert/op_stats_to_pod_stats.cc | tensorflow/core/profiler/convert/op_stats_to_pod_stats_test.cc | #include "tensorflow/core/profiler/convert/op_stats_to_pod_stats.h"
#include <algorithm>
#include <utility>
#include <vector>
#include "google/protobuf/any.pb.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/utils/diagnostics.h"
#include "tensorflow/core/profiler/utils/event_span.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
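// Builds a PodStatsRecord for one step on one core from its
// GenericStepBreakdown, picking the largest breakdown bucket as bottleneck.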
PodStatsRecord CreatePodStatsRecord(absl::string_view host_name,
const StepInfoResult& step_info) {
PodStatsRecord record;
GenericStepBreakdown generic;
bool success = step_info.step_breakdown().UnpackTo(&generic);
DCHECK(success);
record.set_host_name(string(host_name));
record.set_step_num(step_info.step_num());
record.set_total_duration_us(
tsl::profiler::PicoToMicro(step_info.duration_ps()));
auto& step_breakdown_map = *record.mutable_step_breakdown_us();
std::vector<std::pair<uint64, absl::string_view>> metrics;
auto add_event = [&](GenericEventType type,
std::initializer_list<EventType> event_list) {
uint64 ps = 0;
for (const auto& event_type : event_list) {
ps += gtl::FindWithDefault(generic.type_ps(), event_type, 0);
}
step_breakdown_map[type] = tsl::profiler::PicoToMicro(ps);
metrics.emplace_back(ps, GetGenericEventTypeStr(type));
};
add_event(kDeviceCompute, {DEVICE_COMPUTE_32, DEVICE_COMPUTE_16});
add_event(kDeviceToDevice, {DEVICE_TO_DEVICE, DEVICE_WAIT_DEVICE});
add_event(kDeviceCollectives, {DEVICE_COLLECTIVES});
add_event(kHostCompute, {HOST_COMPUTE});
add_event(kHostPrepare, {HOST_PREPARE});
add_event(kInput, {HOST_WAIT_INPUT, HOST_TO_DEVICE, DEVICE_WAIT_HOST});
add_event(kOutput, {DEVICE_TO_HOST});
add_event(kCompile, {HOST_COMPILE});
add_event(kAllOthers, {UNKNOWN_TIME});
std::sort(metrics.begin(), metrics.end());
record.set_bottleneck(metrics.back().second.data(),
metrics.back().second.size());
return record;
}
}
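// Converts per-core step info in |op_stats| into a PodStatsDatabase, skipping
// cores that are missing from core_id_to_details.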
PodStatsDatabase ConvertOpStatsToPodStats(const OpStats& op_stats) {
PodStatsDatabase pod_stats_db;
const auto& core_id_map = op_stats.core_id_to_details();
for (int i = GenericEventType::kFirstGenericEventType;
i <= GenericEventType::kLastGenericEventType; i++) {
auto& event = *pod_stats_db.add_step_breakdown_events();
event.set_id(i);
absl::string_view type_str =
GetGenericEventTypeStr(static_cast<GenericEventType>(i));
event.set_name(type_str.data(), type_str.size());
}
for (const auto& step_sequence : op_stats.step_db().step_sequence()) {
for (const auto& entry : step_sequence.step_info_per_core()) {
if (!core_id_map.contains(entry.first)) {
LOG(WARNING) << "core_id_map does not contain " << entry.first;
continue;
}
const CoreDetails& details = core_id_map.at(entry.first);
*pod_stats_db.add_pod_stats_record() =
CreatePodStatsRecord(details.hostname(), entry.second);
}
}
PopulateStepDiagnostics(op_stats, pod_stats_db.mutable_diagnostics());
return pod_stats_db;
}
}
} | #include "tensorflow/core/profiler/convert/op_stats_to_pod_stats.h"
#include "google/protobuf/any.pb.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/diagnostics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/utils/diagnostics.h"
#include "tensorflow/core/profiler/utils/event_span.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
const double kMaxError = 1e-6;
constexpr int kStepNum = 2;
constexpr int kCoreId = 1001;
constexpr int kStepTimePs = 1000;
constexpr int kHostComputePs = 50;
constexpr int kHostCompilePs = 50;
constexpr int kHostToHostPs = 50;
constexpr int kHostToDevicePs = 50;
constexpr int kHostPreparePs = 50;
constexpr int kDeviceCollectivePs = 350;
constexpr int kHostWaitInputPs = 50;
constexpr int kDeviceToDevicePs = 50;
constexpr int kDeviceToHostPs = 50;
constexpr int kDeviceCompute32Ps = 50;
constexpr int kDeviceCompute16Ps = 50;
constexpr int kDeviceWaitDevicePs = 50;
constexpr int kDeviceWaitHostPs = 50;
constexpr int kUnknownTimePs = 50;
static constexpr char kHostname[] = "host:123";
void CreateOpStats(OpStats* op_stats) {
PerCoreStepInfo* info = op_stats->mutable_step_db()->add_step_sequence();
info->set_step_num(kStepNum);
StepInfoResult& step_info = (*info->mutable_step_info_per_core())[kCoreId];
step_info.set_step_num(kStepNum);
step_info.set_duration_ps(kStepTimePs);
GenericStepBreakdown breakdown;
auto& type_ps = *breakdown.mutable_type_ps();
type_ps[HOST_COMPUTE] = kHostComputePs;
type_ps[HOST_COMPILE] = kHostCompilePs;
type_ps[HOST_TO_HOST] = kHostToHostPs;
type_ps[HOST_TO_DEVICE] = kHostToDevicePs;
type_ps[HOST_PREPARE] = kHostPreparePs;
type_ps[DEVICE_COLLECTIVES] = kDeviceCollectivePs;
type_ps[HOST_WAIT_INPUT] = kHostWaitInputPs;
type_ps[DEVICE_TO_DEVICE] = kDeviceToDevicePs;
type_ps[DEVICE_TO_HOST] = kDeviceToHostPs;
type_ps[DEVICE_COMPUTE_32] = kDeviceCompute32Ps;
type_ps[DEVICE_COMPUTE_16] = kDeviceCompute16Ps;
type_ps[DEVICE_WAIT_DEVICE] = kDeviceWaitDevicePs;
type_ps[DEVICE_WAIT_HOST] = kDeviceWaitHostPs;
type_ps[UNKNOWN_TIME] = kUnknownTimePs;
step_info.mutable_step_breakdown()->PackFrom(breakdown);
CoreDetails& details = (*op_stats->mutable_core_id_to_details())[kCoreId];
details.set_hostname(kHostname);
}
TEST(OpStatsToPodStats, GpuPodStats) {
OpStats op_stats;
CreateOpStats(&op_stats);
PodStatsDatabase pod_stats_db = ConvertOpStatsToPodStats(op_stats);
EXPECT_EQ(1, pod_stats_db.pod_stats_record_size());
const PodStatsRecord& record = pod_stats_db.pod_stats_record(0);
EXPECT_EQ(kStepNum, record.step_num());
EXPECT_EQ(kHostname, record.host_name());
EXPECT_NEAR(tsl::profiler::PicoToMicro(kStepTimePs),
record.total_duration_us(), kMaxError);
const auto& breakdown = record.step_breakdown_us();
EXPECT_NEAR(
tsl::profiler::PicoToMicro(kDeviceCompute32Ps + kDeviceCompute16Ps),
breakdown.at(kDeviceCompute), kMaxError);
EXPECT_NEAR(
tsl::profiler::PicoToMicro(kDeviceToDevicePs + kDeviceWaitDevicePs),
breakdown.at(kDeviceToDevice), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kDeviceCollectivePs),
breakdown.at(kDeviceCollectives), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostComputePs),
breakdown.at(kHostCompute), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostPreparePs),
breakdown.at(kHostPrepare), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostWaitInputPs + kHostToDevicePs +
kDeviceWaitHostPs),
breakdown.at(kInput), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kDeviceToHostPs),
breakdown.at(kOutput), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostCompilePs),
breakdown.at(kCompile), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kUnknownTimePs),
breakdown.at(kAllOthers), kMaxError);
EXPECT_EQ(GetGenericEventTypeStr(kDeviceCollectives), record.bottleneck());
}
TEST(OpStatsToPodStats, Diagnostics) {
OpStats op_stats;
op_stats.mutable_step_db()->set_use_incomplete_step(true);
PodStatsDatabase pod_stats_db = ConvertOpStatsToPodStats(op_stats);
EXPECT_EQ(1, pod_stats_db.diagnostics().warnings_size());
EXPECT_EQ(kErrorIncompleteStep, pod_stats_db.diagnostics().warnings(0));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/op_stats_to_pod_stats.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/op_stats_to_pod_stats_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
26f2d42c-ee60-46c1-bd75-a32f45db3a9b | cpp | tensorflow/tensorflow | hlo_proto_to_graph_view | tensorflow/core/profiler/convert/hlo_proto_to_graph_view.cc | tensorflow/core/profiler/convert/hlo_proto_to_graph_view_test.cc | #include "tensorflow/core/profiler/convert/hlo_proto_to_graph_view.h"
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_opcode.h"
#ifdef PLATFORM_GOOGLE
#include "third_party/json/src/json.hpp"
#include "tensorflow/compiler/mlir/lite/experimental/google/tooling/google/direct_hlo_to_json_graph_convert.h"
#endif
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_graph_dumper.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/profiler/convert/tool_options.h"
#include "tensorflow/core/profiler/utils/hlo_module_utils.h"
#include "tensorflow/core/profiler/utils/hlo_proto_to_module.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::tensorflow::StatusOr;
using ::tensorflow::errors::InvalidArgument;
using ::xla::HloComputation;
using ::xla::HloInstruction;
using ::xla::HloModule;
using ::xla::HloPrintOptions;
using ::xla::HloProto;
using ::xla::HloRenderOptions;
using ::xla::RenderedGraphFormat;
constexpr char kCenterNodeKey[] = "centerNode";
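// Infeed/outfeed configs can embed large serialized protos that bloat the
// Graphviz output, so clear them before rendering.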
void CleanUpHloModuleForGraphviz(HloModule* hlo_module) {
for (HloComputation* computation : hlo_module->computations()) {
for (HloInstruction* inst : computation->instructions()) {
if (inst->opcode() == xla::HloOpcode::kInfeed) {
inst->set_infeed_config("");
} else if (inst->opcode() == xla::HloOpcode::kOutfeed) {
inst->set_outfeed_config("");
}
}
}
}
std::string GetLayerId(absl::string_view namespace_name) {
return absl::StrCat(namespace_name, "___group___");
}
#ifdef PLATFORM_GOOGLE
void AddCenterNodeMetadata(nlohmann::json& graph_json, std::string id,
absl::string_view name, absl::string_view opcode) {
nlohmann::json centerGroupNodeAttributes;
centerGroupNodeAttributes["name"] = name;
centerGroupNodeAttributes["id"] = id;
if (!opcode.empty()) {
centerGroupNodeAttributes["opcode"] = opcode;
}
graph_json[0]["subgraphs"][0]["groupNodeAttributes"][kCenterNodeKey] =
centerGroupNodeAttributes;
}
#endif
void AddGraphMetadata(std::string& graph_json_str,
const HloInstruction& instr) {
#ifdef PLATFORM_GOOGLE
nlohmann::json graph_json = nlohmann::json::parse(graph_json_str);
auto id =
instr.opcode() == xla::HloOpcode::kFusion
? GetLayerId(absl::StrCat(instr.parent()->name(), "/", instr.name()))
: absl::StrCat(instr.unique_id());
AddCenterNodeMetadata(graph_json, id, instr.name(),
HloOpcodeString(instr.opcode()));
graph_json_str = graph_json.dump();
#endif
}
void AddGraphMetadata(std::string& graph_json_str, const HloComputation& comp) {
#ifdef PLATFORM_GOOGLE
nlohmann::json graph_json = nlohmann::json::parse(graph_json_str);
AddCenterNodeMetadata(graph_json, GetLayerId(comp.name()), comp.name(), "");
graph_json_str = graph_json.dump();
#endif
}
absl::StatusOr<std::string> PlotMe(std::unique_ptr<HloModule> module,
const std::string& node_name,
int graph_width) {
if (node_name.empty()) {
return InvalidArgument("node_name should not be empty");
}
const HloInstruction* instr = FindInstruction(*module, node_name);
const HloComputation* comp = FindComputation(*module, node_name);
if (!instr && !comp) {
return InvalidArgument(
absl::StrCat("Couldn't find HloInstruction or HloComputation named ",
node_name, "."));
}
absl::StatusOr<std::string> graph_handle;
std::string graph_json_str;
#ifdef PLATFORM_GOOGLE
if (comp) {
graph_handle = tooling::visualization_client::HloGraphAdapter(*comp);
} else {
graph_handle =
tooling::visualization_client::HloGraphAdapter(*instr, graph_width);
}
#endif
if (graph_handle.ok()) {
VLOG(1) << graph_handle.value();
graph_json_str = graph_handle.value();
if (comp) {
AddGraphMetadata(graph_json_str, *comp);
} else {
AddGraphMetadata(graph_json_str, *instr);
}
return graph_json_str;
} else {
LOG(ERROR) << "Unable to render graph: " << graph_handle.status();
}
return graph_handle;
}
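// Renders a computation, or the neighborhood around an instruction, in the
// requested format (URL, HTML, or DOT).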
absl::StatusOr<std::string> Plot(std::unique_ptr<HloModule> module,
const std::string& node_name, int graph_width,
const HloRenderOptions& render_options,
const RenderedGraphFormat& format) {
if (node_name.empty()) {
return InvalidArgument("node_name should not be empty");
}
const HloInstruction* instr = FindInstruction(*module, node_name);
const HloComputation* comp = FindComputation(*module, node_name);
if (!instr && !comp) {
return InvalidArgument(
absl::StrCat("Couldn't find HloInstruction or HloComputation named ",
node_name, "."));
}
absl::StatusOr<std::string> graph_handle;
CleanUpHloModuleForGraphviz(module.get());
if (comp) {
graph_handle =
RenderGraphView(*comp, "", comp->parent()->config().debug_options(),
format, render_options);
} else {
graph_handle = RenderGraphNeighborhoodAround(*instr, graph_width, format,
render_options);
}
if (graph_handle.ok()) {
VLOG(1) << graph_handle.value();
} else {
LOG(ERROR) << "Unable to render graph: " << graph_handle.status();
}
return graph_handle;
}
static constexpr char kGraphTypeName[] = "graph";
static constexpr char kShortTxtTypeName[] = "short_txt";
static constexpr char kLongTxtTypeName[] = "long_txt";
static constexpr char kDefaultFormatString[] = "url";
static constexpr int kDefaultWidth = 3;
static constexpr int kDefaultShowMetadata = 0;
static constexpr int kDefaultMergeFusion = 0;
}
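// Returns a JSON object mapping each color bucket to the comma-separated HLO
// opcode names that the graph viewer renders in that color.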
absl::StatusOr<std::string> GetNodeStyles() {
std::vector<xla::HloOpcode> async_op_codes = {xla::HloOpcode::kAsyncStart,
xla::HloOpcode::kAsyncUpdate,
xla::HloOpcode::kAsyncDone};
std::vector<xla::HloOpcode> brown_op_codes = {
xla::HloOpcode::kAllGather,
xla::HloOpcode::kAllGatherStart,
xla::HloOpcode::kAllGatherDone,
xla::HloOpcode::kAllReduce,
xla::HloOpcode::kReduceScatter,
xla::HloOpcode::kAllReduceStart,
xla::HloOpcode::kAllReduceDone,
xla::HloOpcode::kAllToAll,
xla::HloOpcode::kCollectiveBroadcast,
xla::HloOpcode::kCollectivePermute,
xla::HloOpcode::kCollectivePermuteStart,
xla::HloOpcode::kCollectivePermuteDone,
xla::HloOpcode::kInfeed,
xla::HloOpcode::kOutfeed,
xla::HloOpcode::kPartitionId,
xla::HloOpcode::kRecv,
xla::HloOpcode::kRecvDone,
xla::HloOpcode::kSend,
xla::HloOpcode::kSendDone,
xla::HloOpcode::kReplicaId};
std::vector<xla::HloOpcode> dark_blue_op_codes = {
xla::HloOpcode::kConvolution, xla::HloOpcode::kDot, xla::HloOpcode::kFft,
xla::HloOpcode::kTriangularSolve, xla::HloOpcode::kCholesky};
std::vector<xla::HloOpcode> dark_green_op_codes = {
xla::HloOpcode::kCall, xla::HloOpcode::kConditional,
xla::HloOpcode::kCustomCall, xla::HloOpcode::kWhile};
std::vector<xla::HloOpcode> gray_op_codes = {
xla::HloOpcode::kDomain, xla::HloOpcode::kFusion, xla::HloOpcode::kMap,
xla::HloOpcode::kGetDimensionSize, xla::HloOpcode::kSetDimensionSize};
std::vector<xla::HloOpcode> green_op_codes = {
xla::HloOpcode::kConcatenate, xla::HloOpcode::kDynamicSlice,
xla::HloOpcode::kReshape, xla::HloOpcode::kDynamicReshape,
xla::HloOpcode::kReverse, xla::HloOpcode::kTranspose,
xla::HloOpcode::kCopy, xla::HloOpcode::kCopyStart,
xla::HloOpcode::kCopyDone};
std::vector<xla::HloOpcode> orange_op_codes = {xla::HloOpcode::kParameter};
std::vector<xla::HloOpcode> purple_op_codes = {
xla::HloOpcode::kBatchNormGrad, xla::HloOpcode::kBatchNormInference,
xla::HloOpcode::kBatchNormTraining, xla::HloOpcode::kReduce,
xla::HloOpcode::kReduceWindow, xla::HloOpcode::kScatter,
xla::HloOpcode::kSelectAndScatter, xla::HloOpcode::kGather};
std::vector<xla::HloOpcode> yellow_op_codes = {
xla::HloOpcode::kBroadcast, xla::HloOpcode::kDynamicUpdateSlice};
auto OpCodesToNames =
[&](std::vector<xla::HloOpcode> op_codes) -> std::string {
std::string op_names;
for (const auto& op_code : op_codes) {
if (!op_names.empty()) {
op_names += ",";
}
op_names += std::string(xla::HloOpcodeString(op_code));
}
return op_names;
};
return absl::StrReplaceAll(
R"json({
"kBlue": "$asyncOpNames",
"kBrown": "$brownOpNames",
"kDarkBlue": "$darkBlueOpNames",
"kDarkGreen": "$darkGreenOpNames",
"kGray": "$grayOpNames",
"kGreen": "$greenOpNames",
"kOrange": "$orangeOpNames",
"kPurple": "$purpleOpNames",
"kYellow": "$yellowOpNames"
})json",
{
{"$asyncOpNames", OpCodesToNames(async_op_codes)},
{"$brownOpNames", OpCodesToNames(brown_op_codes)},
{"$darkBlueOpNames", OpCodesToNames(dark_blue_op_codes)},
{"$darkGreenOpNames", OpCodesToNames(dark_green_op_codes)},
{"$grayOpNames", OpCodesToNames(gray_op_codes)},
{"$greenOpNames", OpCodesToNames(green_op_codes)},
{"$orangeOpNames", OpCodesToNames(orange_op_codes)},
{"$purpleOpNames", OpCodesToNames(purple_op_codes)},
{"$yellowOpNames", OpCodesToNames(yellow_op_codes)},
});
}
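// Parses tool options into GraphViewerParams: "graph" requests a rendered
// graph; "short_txt"/"long_txt" request textual module dumps.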
absl::StatusOr<GraphViewerParams> ParseGraphViewerParams(
const ToolOptions& options) {
GraphViewerParams params;
std::optional<std::string> type = GetParam<std::string>(options, "type");
if (!type.has_value()) {
return errors::InvalidArgument("Graph viewer must provide a type option.");
}
if (type == kGraphTypeName) {
params.type = type.value();
if (std::optional<std::string> node_name =
GetParam<std::string>(options, "node_name")) {
params.node_name = node_name.value();
}
params.graph_width =
GetParamWithDefault<int>(options, "graph_width", kDefaultWidth);
params.render_options.show_backend_config = GetParamWithDefault<int>(
options, "show_metadata", kDefaultShowMetadata);
params.render_options.show_fusion_subcomputations =
!GetParamWithDefault<int>(options, "merge_fusion", kDefaultMergeFusion);
params.format = GetRenderFormat(GetParamWithDefault<std::string>(
options, "format", kDefaultFormatString));
return params;
}
if (type == kShortTxtTypeName || type == kLongTxtTypeName) {
params.type = type.value();
params.verbose = (type == kLongTxtTypeName);
params.show_metadata =
GetParamWithDefault(options, "show_metadata", kDefaultShowMetadata);
return params;
}
return errors::InvalidArgument("Unknown graph viewer type option: ",
type.value());
}
xla::RenderedGraphFormat GetRenderFormat(const std::string& format_string) {
if (format_string == "html") {
return xla::RenderedGraphFormat::kHtml;
} else if (format_string == "dot") {
return xla::RenderedGraphFormat::kDot;
} else if (format_string == "url") {
return xla::RenderedGraphFormat::kUrl;
} else {
LOG(ERROR) << "Invalid graph format argument: " << format_string
<< ", fallback to default url";
return xla::RenderedGraphFormat::kUrl;
}
}
absl::StatusOr<std::string> ConvertHloProtoToGraph(
const HloProto& hlo_proto, const std::string& node_name, int graph_width,
const HloRenderOptions& render_options, const RenderedGraphFormat& format) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> hlo_module,
ConvertHloProtoToModule(hlo_proto));
return Plot(std::move(hlo_module), node_name, graph_width, render_options,
format);
}
absl::StatusOr<std::string> ConvertHloProtoToMeGraph(
const HloProto& hlo_proto, const std::string& node_name, int graph_width) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> hlo_module,
ConvertHloProtoToModule(hlo_proto));
return PlotMe(std::move(hlo_module), node_name, graph_width);
}
absl::StatusOr<std::string> ConvertHloProtoToStringView(
const HloProto& hlo_proto, bool verbose, bool metadata) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> hlo_module,
ConvertHloProtoToModule(hlo_proto));
HloPrintOptions options;
if (!verbose) {
options = HloPrintOptions::ShortParsable();
}
options.set_print_large_constants(verbose);
options.set_print_metadata(metadata);
return hlo_module->ToString(options);
}
std::function<absl::StatusOr<std::string>(absl::string_view)>* url_renderer =
nullptr;
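// Only the URL format has a precondition: a URL renderer must be registered.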
absl::Status CheckPrecondition(xla::RenderedGraphFormat format) {
if (format == xla::RenderedGraphFormat::kUrl && url_renderer == nullptr) {
return absl::FailedPreconditionError(
"Can't render as URL; no URL renderer was registered.");
}
return absl::OkStatus();
}
absl::StatusOr<std::string> RenderGraphView(
const xla::HloComputation& computation, absl::string_view label,
const xla::DebugOptions& debug_options, xla::RenderedGraphFormat format,
xla::HloRenderOptions hlo_render_options) {
auto precheck_status = CheckPrecondition(format);
if (!precheck_status.ok()) {
return precheck_status;
}
auto rendered_dot =
xla::RenderGraph(computation, label, debug_options,
RenderedGraphFormat::kDot, hlo_render_options);
if (!rendered_dot.ok()) {
return rendered_dot.status();
}
return WrapDotInFormat(rendered_dot.value(), format);
}
absl::StatusOr<std::string> RenderGraphNeighborhoodAround(
const xla::HloInstruction& node, int radius,
xla::RenderedGraphFormat format, xla::HloRenderOptions hlo_render_options,
const absl::flat_hash_set<const xla::HloInstruction*>& boundary) {
auto precheck_status = CheckPrecondition(format);
if (!precheck_status.ok()) {
return precheck_status;
}
auto rendered_dot = xla::RenderNeighborhoodAround(
node, radius, RenderedGraphFormat::kDot, hlo_render_options, boundary);
if (!rendered_dot.ok()) {
return rendered_dot.status();
}
return WrapDotInFormat(rendered_dot.value(), format);
}
absl::StatusOr<std::string> WrapDotInFormat(std::string dot,
xla::RenderedGraphFormat format) {
switch (format) {
case xla::RenderedGraphFormat::kUrl:
if (url_renderer == nullptr) {
return absl::InternalError("url_renderer is null");
}
return (*url_renderer)(dot);
case xla::RenderedGraphFormat::kHtml:
return WrapDotInHtml(dot);
case xla::RenderedGraphFormat::kDot:
return std::string(dot);
}
}
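// Wraps the DOT source in a self-contained HTML page that renders it
// client-side (via hpcc-js/wasm) with pan/zoom and save controls.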
std::string WrapDotInHtml(std::string dot) {
return absl::StrReplaceAll(R"html(
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<style type="text/css">
body {
height: 100vh;
margin: 0;
}
#graph-container {height:95vh;width:100%;padding:10px;display:block;}
#graph-container svg { height: 100% !important; width: 100% !important;}
.node, .cluster {cursor:pointer;}
.cluster:hover, .node:hover {outline: solid 3px black;}
</style>
</head>
<body>
<script src="https:
integrity="sha384-LigJPbR3TOfU/Xbb+PjiN1dGJYPweLk7kiGnaMgmxnUmKWaCFKbb5tH6iLlyVhPZ"
crossorigin="anonymous"></script>
<script src="https:
<div id="graph-container"></div>
<script>
const cssregex = new RegExp('stylesheet=<([^]*)\n>\n', 'gm');
const hpccWasm = window["@hpcc-js/wasm"];
const data = `$DOT`;
const results = cssregex.exec(data);
let dot_data = data;
let css_data = '';
if (results !== null) {
css_data = results[1].replace(/\s*data:.*\s*,/,'');
css_data = unescape(css_data);
dot_data = data.replace(cssregex, '');
}
var render_start = performance.now()
function add_controls(svg) {
var htmlblob = new Blob([document.documentElement.innerHTML],
{type: 'text/html'});
var savehtml = document.createElement('a');
savehtml.setAttribute('href', URL.createObjectURL(htmlblob));
savehtml.setAttribute('download', 'graph.html');
savehtml.innerHTML = " [Save HTML+SVG] ";
document.body.append(savehtml);
var svgblob = new Blob([svg.outerHTML], {type: 'image/svg'});
var savesvg = document.createElement('a');
savesvg.setAttribute('href', URL.createObjectURL(svgblob));
savesvg.setAttribute('download', 'graph.svg');
savesvg.innerHTML = " [Save SVG] ";
document.body.append(savesvg);
var dotblob = new Blob([data], {type: 'text/dot'});
var savedot = document.createElement('a');
savedot.setAttribute('href', URL.createObjectURL(dotblob));
savedot.setAttribute('download', 'graph.dot');
savedot.innerHTML = " [Save DOT] ";
document.body.append(savedot);
var render_end = performance.now();
var render_note = document.createElement('div')
render_note.innerHTML = 'Rendering took '
+ (render_end - render_start).toFixed(2) + "ms."
document.body.append(render_note);
}
const render_callback = svg => {
const container = document.getElementById('graph-container')
container.innerHTML = `${svg}<style>${css_data}</style>`;
const panZoom = svgPanZoom(container.children[0], {
zoomEnabled: true,
controlIconsEnabled: true,
maxZoom: 200,
minZoom: 0,
});
add_controls(svg);
};
hpccWasm.graphviz.layout(dot_data, "svg", "dot").then(render_callback);
</script>
</body>
</html>
)html",
{
{"$DOT", dot},
});
}
void RegisterGraphvizURLRenderer(
std::function<absl::StatusOr<std::string>(absl::string_view)> renderer) {
if (url_renderer != nullptr) {
LOG(WARNING) << "Multiple calls to RegisterGraphToURLRenderer. Last call "
"wins, but because order of initialization in C++ is "
"nondeterministic, this may not be what you want.";
}
delete url_renderer;
url_renderer =
new std::function<absl::StatusOr<std::string>(absl::string_view)>(
std::move(renderer));
}
}
} | #include "tensorflow/core/profiler/convert/hlo_proto_to_graph_view.h"
#include <string>
#include <variant>
#include "xla/service/hlo_graph_dumper.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/convert/tool_options.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
TEST(GraphViewerParamsTest, GraphType) {
ToolOptions options1;
options1["type"] = "graph";
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params1,
ParseGraphViewerParams(options1));
EXPECT_EQ(params1.type, "graph");
EXPECT_EQ(params1.node_name, "");
EXPECT_EQ(params1.graph_width, 3);
EXPECT_EQ(params1.render_options.show_backend_config, false);
EXPECT_EQ(params1.render_options.show_fusion_subcomputations, true);
EXPECT_EQ(params1.format, xla::RenderedGraphFormat::kUrl);
ToolOptions options2;
options2["type"] = "graph";
options2["node_name"] = "fusion.111";
options2["graph_width"] = 10;
options2["show_metadata"] = 1;
options2["merge_fusion"] = 1;
options2["format"] = "html";
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params2,
ParseGraphViewerParams(options2));
EXPECT_EQ(params2.type, "graph");
EXPECT_EQ(params2.node_name, "fusion.111");
EXPECT_EQ(params2.graph_width, 10);
EXPECT_EQ(params2.render_options.show_backend_config, true);
EXPECT_EQ(params2.render_options.show_fusion_subcomputations, false);
EXPECT_EQ(params2.format, xla::RenderedGraphFormat::kHtml);
}
TEST(GraphViewerParamsTest, ShortTxtType) {
ToolOptions options1;
options1["type"] = "short_txt";
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params1,
ParseGraphViewerParams(options1));
EXPECT_EQ(params1.type, "short_txt");
EXPECT_EQ(params1.verbose, false);
EXPECT_EQ(params1.show_metadata, false);
ToolOptions options2;
options2["type"] = "short_txt";
options2["show_metadata"] = 1;
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params2,
ParseGraphViewerParams(options2));
EXPECT_EQ(params2.type, "short_txt");
EXPECT_EQ(params2.verbose, false);
EXPECT_EQ(params2.show_metadata, true);
}
TEST(GraphViewerParamsTest, LongTxtType) {
ToolOptions options1;
options1["type"] = "long_txt";
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params1,
ParseGraphViewerParams(options1));
EXPECT_EQ(params1.type, "long_txt");
EXPECT_EQ(params1.verbose, true);
EXPECT_EQ(params1.show_metadata, false);
ToolOptions options2;
options2["type"] = "long_txt";
options2["show_metadata"] = 1;
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params2,
ParseGraphViewerParams(options2));
EXPECT_EQ(params2.type, "long_txt");
EXPECT_EQ(params2.verbose, true);
EXPECT_EQ(params2.show_metadata, true);
}
TEST(GraphViewerParamsTest, OtherTypes) {
ToolOptions options1;
EXPECT_THAT(ParseGraphViewerParams(options1),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Graph viewer must provide a type option")));
ToolOptions options2;
options2["type"] = "abcd";
EXPECT_THAT(ParseGraphViewerParams(options2),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Unknown graph viewer type option: abcd")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/hlo_proto_to_graph_view.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/hlo_proto_to_graph_view_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4318bf3e-f2f8-4868-a881-8c5e2a50ffeb | cpp | tensorflow/tensorflow | repository | tensorflow/core/profiler/convert/repository.cc | tensorflow/core/profiler/convert/repository_test.cc | #include "tensorflow/core/profiler/convert/repository.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "xla/tsl/profiler/utils/file_system_utils.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tsl/platform/errors.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
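// Extracts the host name from an XSpace file path, e.g.
// "log/plugins/profile/host0.xplane.pb" -> "host0".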
std::string GetHostnameByPath(absl::string_view xspace_path) {
std::string_view file_name = tensorflow::io::Basename(xspace_path);
absl::ConsumeSuffix(&file_name, ".xplane.pb");
return std::string(file_name);
}
}
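// Validates that each preloaded XSpace (if any) matches the hostname embedded
// in the corresponding file path before taking ownership of the inputs.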
absl::StatusOr<SessionSnapshot> SessionSnapshot::Create(
std::vector<std::string> xspace_paths,
std::optional<std::vector<std::unique_ptr<XSpace>>> xspaces) {
if (xspace_paths.empty()) {
return errors::InvalidArgument("Can not find XSpace path.");
}
if (xspaces.has_value()) {
if (xspaces->size() != xspace_paths.size()) {
return errors::InvalidArgument(
"The size of the XSpace paths: ", xspace_paths.size(),
" is not equal ",
"to the size of the XSpace proto: ", xspaces->size());
}
for (size_t i = 0; i < xspace_paths.size(); ++i) {
auto host_name = GetHostnameByPath(xspace_paths.at(i));
if (xspaces->at(i)->hostnames_size() > 0 && !host_name.empty()) {
if (!absl::StrContains(host_name, xspaces->at(i)->hostnames(0))) {
return errors::InvalidArgument(
"The hostname of xspace path and preloaded xpace don't match at "
"index: ",
i, ". \nThe host name of xpace path is ", host_name,
" but the host name of preloaded xpace is ",
xspaces->at(i)->hostnames(0), ".");
}
}
}
}
return SessionSnapshot(std::move(xspace_paths), std::move(xspaces));
}
absl::StatusOr<std::unique_ptr<XSpace>> SessionSnapshot::GetXSpace(
size_t index) const {
if (index >= xspace_paths_.size()) {
return errors::InvalidArgument("Can not get the ", index,
"th XSpace. The total number of XSpace is ",
xspace_paths_.size());
}
if (xspaces_.has_value()) {
if (xspaces_->at(index) == nullptr) {
return errors::Internal("The XSpace at index ", index,
" has already been consumed or is missing.");
}
return std::move(xspaces_->at(index));
}
auto xspace_from_file = std::make_unique<XSpace>();
TF_RETURN_IF_ERROR(tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
xspace_paths_.at(index),
xspace_from_file.get()));
return xspace_from_file;
}
absl::StatusOr<std::unique_ptr<XSpace>> SessionSnapshot::GetXSpaceByName(
absl::string_view name) const {
if (auto it = hostname_map_.find(name); it != hostname_map_.end()) {
return GetXSpace(it->second);
}
return errors::InvalidArgument("Can not find the XSpace by name: ", name,
". The total number of XSpace is ",
xspace_paths_.size());
}
std::string SessionSnapshot::GetHostname(size_t index) const {
return GetHostnameByPath(xspace_paths_.at(index));
}
std::optional<std::string> SessionSnapshot::GetFilePath(
absl::string_view toolname, absl::string_view hostname) const {
if (!has_accessible_run_dir_) return std::nullopt;
std::string file_name = "";
if (toolname == "trace_viewer@")
file_name = absl::StrCat(hostname, ".", "SSTABLE");
if (!file_name.empty())
return tensorflow::io::JoinPath(session_run_dir_, file_name);
return std::nullopt;
}
absl::StatusOr<std::string> SessionSnapshot::GetHostDataFileName(
const StoredDataType data_type, const std::string host) const {
for (const auto& format : *kHostDataSuffixes) {
if (data_type == format.first) return absl::StrCat(host, format.second);
}
return absl::InternalError(&"Unknown StoredDataType: "[data_type]);
}
absl::StatusOr<std::optional<std::string>> SessionSnapshot::GetHostDataFilePath(
const StoredDataType data_type, const std::string host) const {
std::vector<std::string> results;
TF_RETURN_IF_ERROR(::tsl::Env::Default()->GetChildren(
std::string(GetSessionRunDir()), &results));
TF_ASSIGN_OR_RETURN(std::string filename,
GetHostDataFileName(data_type, host));
for (const std::string& path : results) {
if (absl::EndsWith(path, filename)) {
return ::tsl::profiler::ProfilerJoinPath(GetSessionRunDir(), filename);
}
}
return std::nullopt;
}
absl::StatusOr<std::pair<bool, std::string>> SessionSnapshot::HasCacheFile(
const StoredDataType data_type) const {
std::optional<std::string> filepath;
TF_ASSIGN_OR_RETURN(filepath,
GetHostDataFilePath(data_type, kNoHostIdentifier));
if (filepath) {
return std::pair<bool, std::string>(true, std::string());
}
TF_ASSIGN_OR_RETURN(filepath,
GetHostDataFilePath(data_type, kAllHostsIdentifier));
if (filepath) {
return std::pair<bool, std::string>(true, filepath.value());
}
return std::pair<bool, std::string>(false, std::string());
}
}
} | #include "tensorflow/core/profiler/convert/repository.h"
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/platform/errors.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::testing::Eq;
TEST(Repository, GetHostName) {
auto session_snapshot_or =
SessionSnapshot::Create({"log/plugins/profile/hostname0.xplane.pb",
"log/plugins/profile/hostname1.xplane.pb"},
std::nullopt);
TF_CHECK_OK(session_snapshot_or.status());
EXPECT_THAT(session_snapshot_or.value().GetHostname(0), Eq("hostname0"));
EXPECT_THAT(session_snapshot_or.value().GetHostname(1), Eq("hostname1"));
EXPECT_TRUE(session_snapshot_or.value().HasAccessibleRunDir());
}
TEST(Repository, GetHostNameWithPeriods) {
auto session_snapshot_or =
SessionSnapshot::Create({"log/plugins/profile/127.0.0.1_6009.xplane.pb"},
std::nullopt);
TF_CHECK_OK(session_snapshot_or.status());
EXPECT_THAT(session_snapshot_or.value().GetHostname(0), Eq("127.0.0.1_6009"));
EXPECT_TRUE(session_snapshot_or.value().HasAccessibleRunDir());
}
TEST(Repository, GetSpaceByHostName) {
std::vector<std::unique_ptr<XSpace>> xspaces;
auto space1 = std::make_unique<XSpace>();
*(space1->add_hostnames()) = "hostname1";
xspaces.push_back(std::move(space1));
auto space0 = std::make_unique<XSpace>();
*(space0->add_hostnames()) = "hostname0";
xspaces.push_back(std::move(space0));
auto session_snapshot_or =
SessionSnapshot::Create({"log/plugins/profile/hostname1.xplane.pb",
"log/plugins/profile/hostname0.xplane.pb"},
std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
auto xspace0_or = session_snapshot_or.value().GetXSpaceByName("hostname0");
TF_CHECK_OK(xspace0_or.status());
auto xspace1_or = session_snapshot_or.value().GetXSpaceByName("hostname1");
EXPECT_FALSE(session_snapshot_or.value().HasAccessibleRunDir());
TF_CHECK_OK(xspace1_or.status());
EXPECT_THAT(xspace0_or.value()->hostnames(0), Eq("hostname0"));
EXPECT_THAT(xspace1_or.value()->hostnames(0), Eq("hostname1"));
}
TEST(Repository, GetSSTableFile) {
auto session_snapshot_or =
SessionSnapshot::Create({"log/plugins/profile/hostname0.xplane.pb"},
std::nullopt);
TF_CHECK_OK(session_snapshot_or.status());
auto sstable_path =
session_snapshot_or.value().GetFilePath("trace_viewer@", "hostname0");
auto not_found_path =
session_snapshot_or.value().GetFilePath("memory_viewer", "hostname0");
EXPECT_THAT(sstable_path, Eq("log/plugins/profile/hostname0.SSTABLE"));
EXPECT_THAT(not_found_path, Eq(std::nullopt));
}
TEST(Repository, GetSSTableFileWithXSpace) {
std::vector<std::unique_ptr<XSpace>> xspaces;
auto space0 = std::make_unique<XSpace>();
*(space0->add_hostnames()) = "hostname0";
xspaces.push_back(std::move(space0));
auto session_snapshot_or = SessionSnapshot::Create(
{"log/plugins/profile/hostname0.xplane.pb"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
auto file_path_init_by_xspace =
session_snapshot_or.value().GetFilePath("trace_viewer@", "hostname0");
EXPECT_THAT(file_path_init_by_xspace, Eq(std::nullopt));
}
TEST(Repository, MismatchedXSpaceAndPath) {
std::vector<std::unique_ptr<XSpace>> xspaces;
auto space1 = std::make_unique<XSpace>();
*(space1->add_hostnames()) = "hostname1";
xspaces.push_back(std::move(space1));
auto space0 = std::make_unique<XSpace>();
*(space0->add_hostnames()) = "hostname0";
xspaces.push_back(std::move(space0));
auto session_snapshot_or =
SessionSnapshot::Create({"log/plugins/profile/hostname0.xplane.pb",
"log/plugins/profile/hostname1.xplane.pb"},
std::move(xspaces));
auto error =
R"(The hostname of xspace path and preloaded xpace don't match at index: 0.
The host name of xpace path is hostname0 but the host name of preloaded xpace is hostname1.)";
EXPECT_THAT(session_snapshot_or.status(), Eq(errors::InvalidArgument(error)));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/repository.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/repository_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c819c062-ecf7-46e5-a83c-b400d4ef4882 | cpp | tensorflow/tensorflow | xplane_to_memory_profile | tensorflow/core/profiler/convert/xplane_to_memory_profile.cc | tensorflow/core/profiler/convert/xplane_to_memory_profile_test.cc | #include "tensorflow/core/profiler/convert/xplane_to_memory_profile.h"
#include <algorithm>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/profiler/protobuf/memory_profile.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
namespace {
constexpr int64_t kInvalidStepId = -1;
using IndexMetaPair =
std::pair<int64_t /*snapshot_index*/, const MemoryActivityMetadata*>;
bool IsMemoryAllocation(int64_t event_type) {
return event_type == HostEventType::kMemoryAllocation;
}
bool IsMemoryDeallocation(int64_t event_type) {
return event_type == HostEventType::kMemoryDeallocation;
}
void UpdateProfileSummary(const MemoryAggregationStats& stats,
int64_t time_offset_ps,
MemoryProfileSummary* summary) {
summary->set_peak_bytes_usage_lifetime(stats.peak_bytes_in_use());
MemoryAggregationStats* peak_stats = summary->mutable_peak_stats();
if (stats.stack_reserved_bytes() + stats.heap_allocated_bytes() >=
peak_stats->peak_bytes_in_use()) {
*peak_stats = stats;
peak_stats->set_peak_bytes_in_use(stats.stack_reserved_bytes() +
stats.heap_allocated_bytes());
summary->set_peak_stats_time_ps(time_offset_ps);
summary->set_memory_capacity(stats.stack_reserved_bytes() +
stats.heap_allocated_bytes() +
stats.free_memory_bytes());
}
}
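// Scans allocation/deallocation events on the host plane and buckets them
// into per-allocator snapshots, updating each allocator's profile summary.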
MemoryProfile GenerateMemoryProfile(const XPlane* host_trace) {
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
MemoryProfile memory_profile;
plane.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
int64_t event_type =
event.Type().value_or(HostEventType::kUnknownHostEventType);
if (!(IsMemoryAllocation(event_type) ||
IsMemoryDeallocation(event_type))) {
return;
}
MemoryAggregationStats stats;
MemoryActivityMetadata metadata;
if (IsMemoryAllocation(event_type)) {
metadata.set_memory_activity(ALLOCATION);
} else if (IsMemoryDeallocation(event_type)) {
metadata.set_memory_activity(DEALLOCATION);
}
metadata.set_step_id(kInvalidStepId);
std::string memory_id;
event.ForEachStat([&](const XStatVisitor& stat) {
if (!stat.Type().has_value()) return;
switch (stat.Type().value()) {
case StatType::kIndexOnHost:
case StatType::kDeviceOrdinal:
memory_id = absl::StrCat(stat.IntValue());
break;
case StatType::kAllocatorName:
memory_id = std::string(stat.StrOrRefValue());
break;
case StatType::kBytesReserved:
stats.set_stack_reserved_bytes(stat.IntValue());
break;
case StatType::kBytesAllocated:
stats.set_heap_allocated_bytes(stat.IntValue());
break;
case StatType::kBytesAvailable:
stats.set_free_memory_bytes(stat.IntValue());
break;
case StatType::kFragmentation:
stats.set_fragmentation(stat.DoubleValue());
break;
case StatType::kPeakBytesInUse:
stats.set_peak_bytes_in_use(stat.IntValue());
break;
case StatType::kRequestedBytes:
metadata.set_requested_bytes(stat.IntValue());
break;
case StatType::kAllocationBytes:
metadata.set_allocation_bytes(stat.IntValue());
break;
case StatType::kAddress:
metadata.set_address(stat.IntValue());
break;
case StatType::kTfOp:
metadata.set_tf_op_name(std::string(stat.StrOrRefValue()));
break;
case StatType::kGroupId:
metadata.set_step_id(stat.IntValue());
break;
case StatType::kRegionType:
metadata.set_region_type(std::string(stat.StrOrRefValue()));
break;
case StatType::kDataType:
metadata.set_data_type(tensorflow::DataTypeString(
static_cast<tensorflow::DataType>(stat.IntValue())));
break;
case StatType::kTensorShapes:
metadata.set_tensor_shape(std::string(stat.StrOrRefValue()));
break;
}
});
MemoryProfileSummary* summary =
(*memory_profile.mutable_memory_profile_per_allocator())[memory_id]
.mutable_profile_summary();
UpdateProfileSummary(stats, event.OffsetPs(), summary);
MemoryProfileSnapshot* snapshot =
(*memory_profile.mutable_memory_profile_per_allocator())[memory_id]
.add_memory_profile_snapshots();
snapshot->set_time_offset_ps(event.OffsetPs());
*snapshot->mutable_aggregation_stats() = std::move(stats);
*snapshot->mutable_activity_metadata() = std::move(metadata);
});
});
return memory_profile;
}
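// Replaces kInvalidStepId (events that were never grouped) with one past the
// last valid step id seen in the snapshot sequence.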
void UpdateStepId(PerAllocatorMemoryProfile* memory_profile) {
int64_t last_valid_step_id = -1;
for (auto& snapshot : *memory_profile->mutable_memory_profile_snapshots()) {
DCHECK(snapshot.has_activity_metadata());
if (snapshot.mutable_activity_metadata()->step_id() == kInvalidStepId) {
snapshot.mutable_activity_metadata()->set_step_id(last_valid_step_id + 1);
} else {
last_valid_step_id = snapshot.mutable_activity_metadata()->step_id();
}
}
}
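// Pairs each deallocation with its allocation by address and copies the op
// name, region type, data type, and tensor shape onto the deallocation.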
void UpdateDeallocation(PerAllocatorMemoryProfile* memory_profile) {
absl::flat_hash_map<uint64 /*address*/, const MemoryActivityMetadata*>
addr_metadata_map;
for (auto& snapshot : *memory_profile->mutable_memory_profile_snapshots()) {
uint64 address = snapshot.activity_metadata().address();
if (snapshot.activity_metadata().memory_activity() == DEALLOCATION) {
if (addr_metadata_map.contains(address)) {
const MemoryActivityMetadata* alloc_meta = addr_metadata_map[address];
snapshot.mutable_activity_metadata()->set_tf_op_name(
alloc_meta->tf_op_name());
snapshot.mutable_activity_metadata()->set_region_type(
alloc_meta->region_type());
snapshot.mutable_activity_metadata()->set_data_type(
alloc_meta->data_type());
snapshot.mutable_activity_metadata()->set_tensor_shape(
alloc_meta->tensor_shape());
addr_metadata_map.erase(address);
} else {
VLOG(2)
<< "Can't find matching memory allocation for this deallocation: "
<< snapshot.DebugString();
}
} else if (!addr_metadata_map.contains(address)) {
addr_metadata_map[address] = &snapshot.activity_metadata();
} else {
VLOG(2) << "There are two allocations recorded for the same address: "
<< address
<< ". The later allocation event is: " << snapshot.DebugString();
}
}
VLOG(2) << "Number of allocations that cannot find matching dealloctions: "
<< addr_metadata_map.size();
}
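// Returns the step id of the last snapshot whose in-use bytes (heap + stack)
// equal the profile-level peak.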
int64_t GetPeakMemoryStep(int64_t peak_bytes_profile,
const PerAllocatorMemoryProfile* memory_profile) {
int64_t peak_bytes_profile_step_id = 0;
for (const auto& snapshot : memory_profile->memory_profile_snapshots()) {
if (peak_bytes_profile ==
snapshot.aggregation_stats().heap_allocated_bytes() +
snapshot.aggregation_stats().stack_reserved_bytes()) {
DCHECK(snapshot.has_activity_metadata());
peak_bytes_profile_step_id = snapshot.activity_metadata().step_id();
}
}
return peak_bytes_profile_step_id;
}
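// Orders allocations largest-first (by allocation then requested bytes),
// breaking ties by op name, region type, data type, and tensor shape.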
struct MetadataComparator {
bool operator()(const IndexMetaPair& a, const IndexMetaPair& b) const {
const MemoryActivityMetadata* a_meta = a.second;
const MemoryActivityMetadata* b_meta = b.second;
DCHECK_NE(a_meta, nullptr);
DCHECK_NE(b_meta, nullptr);
auto lhs =
std::make_tuple(-a_meta->allocation_bytes(), -a_meta->requested_bytes(),
a_meta->tf_op_name(), a_meta->region_type(),
a_meta->data_type(), a_meta->tensor_shape());
auto rhs =
std::make_tuple(-b_meta->allocation_bytes(), -b_meta->requested_bytes(),
b_meta->tf_op_name(), b_meta->region_type(),
b_meta->data_type(), b_meta->tensor_shape());
return lhs < rhs;
}
};
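// Adds synthetic entries for unmapped preallocated memory and for stack
// reservations; they are referenced through negative snapshot indices.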
void InsertSpecialAllocations(int64_t unmapped_allocation_bytes,
int64_t step_id,
PerAllocatorMemoryProfile* memory_profile,
std::vector<IndexMetaPair>* active_allocs) {
int index = 0;
if (unmapped_allocation_bytes > 0) {
MemoryActivityMetadata* special_allocation =
memory_profile->add_special_allocations();
special_allocation->set_memory_activity(ALLOCATION);
special_allocation->set_requested_bytes(unmapped_allocation_bytes);
special_allocation->set_allocation_bytes(unmapped_allocation_bytes);
special_allocation->set_address(0);
special_allocation->set_tf_op_name("unused preallocated device memory");
special_allocation->set_step_id(step_id);
special_allocation->set_region_type("persist/dynamic");
special_allocation->set_data_type(
tensorflow::DataTypeString(static_cast<tensorflow::DataType>(0)));
special_allocation->set_tensor_shape("unknown");
active_allocs->push_back({--index, special_allocation});
}
int64_t stack_bytes =
memory_profile->profile_summary().peak_stats().stack_reserved_bytes();
if (stack_bytes > 0) {
MemoryActivityMetadata* special_allocation =
memory_profile->add_special_allocations();
special_allocation->set_memory_activity(ALLOCATION);
special_allocation->set_requested_bytes(stack_bytes);
special_allocation->set_allocation_bytes(stack_bytes);
special_allocation->set_address(0);
special_allocation->set_tf_op_name("stack");
special_allocation->set_step_id(step_id);
special_allocation->set_region_type("stack");
special_allocation->set_data_type(
tensorflow::DataTypeString(static_cast<tensorflow::DataType>(0)));
special_allocation->set_tensor_shape("unknown");
active_allocs->push_back({--index, special_allocation});
}
}
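// Two allocations are duplicates (for aggregation) when all metadata fields
// match.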
bool operator==(const IndexMetaPair& a, const IndexMetaPair& b) {
const MemoryActivityMetadata* a_meta = a.second;
const MemoryActivityMetadata* b_meta = b.second;
return a_meta->allocation_bytes() == b_meta->allocation_bytes() &&
a_meta->requested_bytes() == b_meta->requested_bytes() &&
a_meta->tf_op_name() == b_meta->tf_op_name() &&
a_meta->region_type() == b_meta->region_type() &&
a_meta->data_type() == b_meta->data_type() &&
a_meta->tensor_shape() == b_meta->tensor_shape();
}
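// Reconstructs the allocations still live at the peak-memory time within the
// peak step, then records them deduplicated and sorted by size.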
void ProcessActiveAllocations(int64_t peak_bytes_profile_step_id,
PerAllocatorMemoryProfile* memory_profile) {
int64_t unmapped_allocation_bytes =
memory_profile->profile_summary().peak_stats().heap_allocated_bytes();
int64_t unmapped_deallocation_bytes = 0;
absl::flat_hash_map<int64_t /*address*/, IndexMetaPair> active_alloc_map;
for (int i = 0; i < memory_profile->memory_profile_snapshots_size(); i++) {
const auto& snapshot = memory_profile->memory_profile_snapshots().at(i);
DCHECK(snapshot.has_activity_metadata());
const MemoryActivityMetadata& metadata = snapshot.activity_metadata();
if (snapshot.time_offset_ps() >
memory_profile->profile_summary().peak_stats_time_ps())
break;
if (metadata.step_id() != peak_bytes_profile_step_id) continue;
if (metadata.memory_activity() == ALLOCATION) {
active_alloc_map[metadata.address()] = {i, &metadata};
unmapped_allocation_bytes -= metadata.allocation_bytes();
} else {
DCHECK_EQ(metadata.memory_activity(), DEALLOCATION);
if (active_alloc_map.contains(metadata.address())) {
active_alloc_map.erase(metadata.address());
} else {
unmapped_deallocation_bytes += metadata.allocation_bytes();
}
unmapped_allocation_bytes += metadata.allocation_bytes();
}
}
unmapped_allocation_bytes -= unmapped_deallocation_bytes;
VLOG(2) << "unmapped_allocation_bytes=" << unmapped_allocation_bytes
<< ", unmapped_deallocation_bytes=" << unmapped_deallocation_bytes;
std::vector<IndexMetaPair> active_allocs;
for (const auto& address_and_index_meta : active_alloc_map) {
active_allocs.push_back(address_and_index_meta.second);
}
InsertSpecialAllocations(unmapped_allocation_bytes,
peak_bytes_profile_step_id, memory_profile,
&active_allocs);
std::sort(active_allocs.begin(), active_allocs.end(), MetadataComparator());
for (int i = 0, end = active_allocs.size(); i < end; i++) {
ActiveAllocation* allocation = memory_profile->add_active_allocations();
allocation->set_snapshot_index(active_allocs[i].first);
if (active_allocs[i].first < 0) {
allocation->set_special_index(-active_allocs[i].first - 1);
} else {
allocation->set_special_index(-1);
}
allocation->set_num_occurrences(1);
const int last_alloc = active_allocs.size() - 1;
while (i < last_alloc && active_allocs[i] == active_allocs[i + 1]) {
allocation->set_num_occurrences(allocation->num_occurrences() + 1);
i++;
}
}
VLOG(2) << "Distinctive active allocation count="
<< memory_profile->active_allocations_size();
}
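// Keeps only the snapshots referenced by active allocations and rewrites the
// snapshot indices to point into the compacted list.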
void SaveActiveAllocationSnapshots(
protobuf::RepeatedPtrField<MemoryProfileSnapshot>* snapshots,
protobuf::RepeatedPtrField<ActiveAllocation>* active_allocations) {
std::vector<MemoryProfileSnapshot*> samples;
for (const auto& allocation : *active_allocations) {
auto orig_index = allocation.snapshot_index();
if (orig_index < 0) continue;
samples.push_back(&(*snapshots)[orig_index]);
}
int new_index = 0;
for (auto& allocation : *active_allocations) {
int64_t origin_index = allocation.snapshot_index();
if (origin_index < 0) continue;
allocation.set_snapshot_index(new_index);
new_index++;
}
protobuf::RepeatedPtrField<MemoryProfileSnapshot> new_snapshots;
new_snapshots.Reserve(samples.size());
for (const auto& sample : samples) {
*new_snapshots.Add() = std::move(*sample);
}
*snapshots = std::move(new_snapshots);
}
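// Downsamples the timeline to at most |max_num_snapshots| points, keeping the
// peak-usage snapshot within each box-filter window.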
void SampleMemoryProfileTimeline(int64_t max_num_snapshots,
PerAllocatorMemoryProfile* memory_profile) {
const protobuf::RepeatedPtrField<MemoryProfileSnapshot>& original_snapshots =
memory_profile->memory_profile_snapshots();
protobuf::RepeatedPtrField<MemoryProfileSnapshot>* timeline_snapshots =
memory_profile->mutable_sampled_timeline_snapshots();
int64_t snapshot_count = original_snapshots.size();
if (snapshot_count > max_num_snapshots) {
auto max_box_filter = [&](int filter_width, int count, int start) {
for (int i = 0; i < count; i++) {
const MemoryProfileSnapshot* max_snapshot =
&original_snapshots[start + filter_width * i];
int64_t max_bytes =
max_snapshot->aggregation_stats().heap_allocated_bytes() +
max_snapshot->aggregation_stats().stack_reserved_bytes();
for (int index = start + filter_width * i + 1;
index < start + filter_width * (i + 1); index++) {
int64_t bytes = original_snapshots[index]
.aggregation_stats()
.heap_allocated_bytes() +
original_snapshots[index]
.aggregation_stats()
.stack_reserved_bytes();
if (bytes > max_bytes) {
max_snapshot = &original_snapshots[index];
max_bytes = bytes;
}
}
*timeline_snapshots->Add() = *max_snapshot;
}
};
int width = snapshot_count / max_num_snapshots;
int count1 = max_num_snapshots * (width + 1) - snapshot_count;
int count2 = max_num_snapshots - count1;
max_box_filter(width, count1, 0);
max_box_filter(width + 1, count2, width * count1);
} else {
*timeline_snapshots = original_snapshots;
}
}
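// Post-processing pipeline: sort snapshots by time, fix step ids, pair
// deallocations, sample the timeline, and compute active allocations at peak.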
void ProcessMemoryProfileProto(int64_t max_num_snapshots,
MemoryProfile* memory_profile) {
memory_profile->set_num_hosts(1);
for (const auto& id_and_allocator_profile :
memory_profile->memory_profile_per_allocator()) {
if (!id_and_allocator_profile.second.memory_profile_snapshots().empty()) {
memory_profile->add_memory_ids(id_and_allocator_profile.first);
}
}
absl::c_sort(*memory_profile->mutable_memory_ids());
for (auto& id_and_allocator_profile :
*memory_profile->mutable_memory_profile_per_allocator()) {
PerAllocatorMemoryProfile* allocator_memory_profile =
&id_and_allocator_profile.second;
protobuf::RepeatedPtrField<MemoryProfileSnapshot>* snapshots =
allocator_memory_profile->mutable_memory_profile_snapshots();
absl::c_sort(*snapshots, [](const MemoryProfileSnapshot& a,
const MemoryProfileSnapshot& b) {
return a.time_offset_ps() < b.time_offset_ps();
});
UpdateStepId(allocator_memory_profile);
UpdateDeallocation(allocator_memory_profile);
SampleMemoryProfileTimeline(max_num_snapshots, allocator_memory_profile);
int64_t peak_step_id =
GetPeakMemoryStep(allocator_memory_profile->profile_summary()
.peak_stats()
.peak_bytes_in_use(),
allocator_memory_profile);
ProcessActiveAllocations(peak_step_id, allocator_memory_profile);
SaveActiveAllocationSnapshots(
snapshots, allocator_memory_profile->mutable_active_allocations());
}
}
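// Serializes |proto_output| to JSON, always emitting primitive fields so
// defaults appear explicitly in the output.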
template <typename Proto>
Status ConvertProtoToJson(const Proto& proto_output, std::string* json_output) {
protobuf::util::JsonPrintOptions json_options;
json_options.always_print_primitive_fields = true;
auto status = protobuf::util::MessageToJsonString(proto_output, json_output,
json_options);
if (!status.ok()) {
auto error_msg = status.message();
return errors::Internal(
"Could not convert proto to JSON string: ",
absl::string_view(error_msg.data(), error_msg.length()));
}
return absl::OkStatus();
}
}
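// Builds the memory profile from the host trace and post-processes it.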
MemoryProfile ConvertXPlaneToMemoryProfile(const XPlane& host_plane,
int64_t max_num_snapshots) {
MemoryProfile memory_profile = GenerateMemoryProfile(&host_plane);
ProcessMemoryProfileProto(max_num_snapshots, &memory_profile);
memory_profile.set_version(1);
return memory_profile;
}
Status ConvertXSpaceToMemoryProfileJson(const XSpace& xspace,
std::string* json_output) {
if (const XPlane* host_plane =
FindPlaneWithName(xspace, kHostThreadsPlaneName)) {
MemoryProfile memory_profile = ConvertXPlaneToMemoryProfile(*host_plane);
TF_RETURN_IF_ERROR(ConvertProtoToJson(memory_profile, json_output));
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_memory_profile.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/group_events.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/memory_profile.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
TEST(ConvertXPlaneToMemoryProfile, OneAllocatorMultiActivitiesTest) {
XSpace space;
XPlane* host_plane = GetOrCreateHostXPlane(&space);
XPlaneBuilder host_plane_builder(host_plane);
host_plane_builder.ReserveLines(1);
auto tf_executor_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &tf_executor_thread, "MemoryAllocation",
40000, 1000,
{{StatType::kBytesReserved, int64_t{2000}},
{StatType::kBytesAllocated, int64_t{3000}},
{StatType::kBytesAvailable, int64_t{5000}},
{StatType::kPeakBytesInUse, int64_t{8500}},
{StatType::kRequestedBytes, int64_t{200}},
{StatType::kAllocationBytes, int64_t{256}},
{StatType::kAddress, int64_t{222333}},
{StatType::kStepId, int64_t{-93746}},
{StatType::kDataType, int64_t{1}},
{StatType::kAllocatorName, "GPU_0_bfc"},
{StatType::kTfOp, "foo/bar"},
{StatType::kRegionType, "output"},
{StatType::kTensorShapes, "[3, 3, 512, 512]"}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "MemoryDeallocation",
50000, 1000,
{{StatType::kBytesReserved, int64_t{2000}},
{StatType::kBytesAllocated, int64_t{2744}},
{StatType::kBytesAvailable, int64_t{5256}},
{StatType::kPeakBytesInUse, int64_t{8500}},
{StatType::kRequestedBytes, int64_t{200}},
{StatType::kAllocationBytes, int64_t{256}},
{StatType::kAddress, int64_t{222333}},
{StatType::kStepId, int64_t{0}},
{StatType::kDataType, int64_t{0}},
{StatType::kAllocatorName, "GPU_0_bfc"},
{StatType::kRegionType, ""},
{StatType::kTensorShapes, ""}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "MemoryAllocation",
70000, 1000,
{{StatType::kBytesReserved, int64_t{2000}},
{StatType::kBytesAllocated, int64_t{5000}},
{StatType::kBytesAvailable, int64_t{3000}},
{StatType::kPeakBytesInUse, int64_t{9500}},
{StatType::kRequestedBytes, int64_t{300}},
{StatType::kAllocationBytes, int64_t{300}},
{StatType::kAddress, int64_t{345678}},
{StatType::kStepId, int64_t{-93746}},
{StatType::kDataType, int64_t{9}},
{StatType::kAllocatorName, "GPU_0_bfc"},
{StatType::kTfOp, "mul_grad/Sum"},
{StatType::kRegionType, "temp"},
{StatType::kTensorShapes, "[1, 2]"}});
tsl::profiler::GroupTfEvents(&space);
MemoryProfile memory_profile = ConvertXPlaneToMemoryProfile(*host_plane);
EXPECT_EQ(memory_profile.memory_profile_per_allocator().size(), 1);
EXPECT_EQ(memory_profile.num_hosts(), 1);
EXPECT_EQ(memory_profile.memory_ids_size(), 1);
EXPECT_EQ(memory_profile.memory_profile_per_allocator().begin()->first,
"GPU_0_bfc");
EXPECT_EQ(memory_profile.version(), 1);
const auto& allocator_memory_profile =
memory_profile.memory_profile_per_allocator().begin()->second;
EXPECT_EQ(
allocator_memory_profile.profile_summary().peak_bytes_usage_lifetime(),
9500);
EXPECT_EQ(allocator_memory_profile.profile_summary()
.peak_stats()
.peak_bytes_in_use(),
7000);
EXPECT_EQ(allocator_memory_profile.profile_summary().peak_stats_time_ps(),
70000);
EXPECT_EQ(allocator_memory_profile.sampled_timeline_snapshots_size(), 3);
EXPECT_EQ(allocator_memory_profile.memory_profile_snapshots_size(), 1);
EXPECT_EQ(allocator_memory_profile.memory_profile_snapshots()
.at(0)
.activity_metadata()
.tf_op_name(),
"mul_grad/Sum");
EXPECT_EQ(allocator_memory_profile.active_allocations_size(), 3);
EXPECT_EQ(
allocator_memory_profile.active_allocations().at(2).snapshot_index(), 0);
EXPECT_EQ(allocator_memory_profile.special_allocations_size(), 2);
EXPECT_EQ(allocator_memory_profile.special_allocations().at(1).tf_op_name(),
"stack");
EXPECT_EQ(
allocator_memory_profile.special_allocations().at(1).allocation_bytes(),
2000);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_memory_profile.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_memory_profile_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
038207ec-d0ae-40ec-acc6-a8a7e593e33f | cpp | tensorflow/tensorflow | xplane_to_kernel_stats_db | tensorflow/core/profiler/convert/xplane_to_kernel_stats_db.cc | tensorflow/core/profiler/convert/xplane_to_kernel_stats_db_test.cc | #include "tensorflow/core/profiler/convert/xplane_to_kernel_stats_db.h"
#include <functional>
#include <ostream>
#include <string>
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/tf_op_utils.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/kernel_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/gpu_event_stats.h"
#include "tensorflow/core/profiler/utils/kernel_stats_utils.h"
#include "tensorflow/core/profiler/utils/trace_utils.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
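// Builds one KernelReport per kernel event on the device trace and merges
// them into `reports`; `on_kernel_fn`, when set, lets the caller annotate
// each report from the event's GpuEventStats before insertion.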
void ConvertDeviceTraceXPlaneToKernelReports(
const XPlane& device_trace,
const std::function<void(const GpuEventStats&, KernelReport*)>&
on_kernel_fn,
KernelReportMap* reports) {
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(&device_trace);
plane.ForEachLine([&](const XLineVisitor& line) {
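// Derived lines hold synthesized annotation events rather than real kernel
// launches; skip them so kernels are not double counted.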
if (IsDerivedThreadId(line.Id())) {
return;
}
line.ForEachEvent([&](const XEventVisitor& event) {
if (event.DurationNs() == 0) return;
KernelReport kernel;
GpuEventStats stats(&event);
if (!stats.IsKernel()) return;
kernel.set_name(std::string(event.Name()));
kernel.set_is_kernel_using_tensor_core(
IsKernelUsingTensorCore(event.Name()));
kernel.set_total_duration_ns(event.DurationNs());
kernel.set_min_duration_ns(event.DurationNs());
kernel.set_max_duration_ns(event.DurationNs());
ParseKernelLaunchParams(stats.kernel_details, &kernel);
if (stats.IsTfOp()) {
tsl::profiler::TfOp tf_op =
tsl::profiler::ParseTfOpFullname(stats.tf_op_fullname);
kernel.set_op_name(std::string(tf_op.name));
bool tensor_core_eligible =
IsEinsumTensorCoreEligible(stats.equation) ||
IsOpTensorCoreEligible(kernel.op_name());
if (!tensor_core_eligible && kernel.is_kernel_using_tensor_core()) {
VLOG(1) << "Detected new Op using TensorCores: " << kernel.op_name()
<< std::endl;
tensor_core_eligible = true;
}
kernel.set_is_op_tensor_core_eligible(tensor_core_eligible);
}
if (on_kernel_fn) {
on_kernel_fn(stats, &kernel);
}
KernelReportValue value;
value.total_duration_ns = event.DurationNs();
value.min_duration_ns = event.DurationNs();
value.max_duration_ns = event.DurationNs();
value.occurrences = 1;
InsertOrUpdateKernelReport(kernel, value, reports);
});
});
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_kernel_stats_db.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/kernel_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/kernel_stats_utils.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
TEST(ConvertXplaneToKernelStats, MultiKernels) {
XSpace space;
XPlane* device_trace = space.add_planes();
XPlaneBuilder device_trace_builder(device_trace);
device_trace_builder.GetOrCreateLine(0);
XLineBuilder line_builder = device_trace_builder.GetOrCreateLine(0);
CreateXEvent(&device_trace_builder, &line_builder, "kernel_name_shortest",
10000, 1000,
{{StatType::kTfOp, "mul_786"},
{StatType::kKernelDetails, R"MULTI(regs:16
static_shared:0
dynamic_shared:0
grid:1,1,1
block:1,1,1
occ_pct:50.0)MULTI"},
{StatType::kEquation, ""}});
CreateXEvent(&device_trace_builder, &line_builder, "kernel_name_middle",
20000, 2000,
{{StatType::kTfOp, "Conv2D"},
{StatType::kKernelDetails, R"MULTI(regs:32
static_shared:0
dynamic_shared:16384
grid:2,1,1
block:32,1,1
occ_pct:13.0)MULTI"},
{StatType::kEquation, ""}});
CreateXEvent(&device_trace_builder, &line_builder,
"volta_fp16_s884gemm_fp16_128x128_ldg8_f2f_tn",
30000, 3000,
{{StatType::kTfOp, "Einsum_80"},
{StatType::kKernelDetails, R"MULTI(regs:32
static_shared:0
dynamic_shared:16384
grid:3,1,1
block:64,1,1
occ_pct:25.0)MULTI"},
{StatType::kEquation, ""}});
KernelReportMap reports;
ConvertDeviceTraceXPlaneToKernelReports(*device_trace, {}, &reports);
KernelStatsDb kernel_stats;
CopyTopKDurationKernelReportsToDb(reports, &kernel_stats);
EXPECT_EQ(kernel_stats.reports_size(), 3);
{
const auto& kernel = kernel_stats.reports().at(2);
EXPECT_EQ(kernel.name(), "kernel_name_shortest");
EXPECT_EQ(kernel.registers_per_thread(), 16);
EXPECT_EQ(kernel.static_shmem_bytes(), 0);
EXPECT_EQ(kernel.dynamic_shmem_bytes(), 0);
EXPECT_EQ(kernel.grid_dim().at(0), 1);
EXPECT_EQ(kernel.grid_dim().at(1), 1);
EXPECT_EQ(kernel.grid_dim().at(2), 1);
EXPECT_EQ(kernel.block_dim().at(0), 1);
EXPECT_EQ(kernel.block_dim().at(1), 1);
EXPECT_EQ(kernel.block_dim().at(2), 1);
EXPECT_EQ(kernel.total_duration_ns(), 1);
EXPECT_FALSE(kernel.is_kernel_using_tensor_core());
EXPECT_FALSE(kernel.is_op_tensor_core_eligible());
EXPECT_EQ(kernel.op_name(), "mul_786");
}
{
const auto& kernel = kernel_stats.reports().at(1);
EXPECT_EQ(kernel.name(), "kernel_name_middle");
EXPECT_EQ(kernel.registers_per_thread(), 32);
EXPECT_EQ(kernel.static_shmem_bytes(), 0);
EXPECT_EQ(kernel.dynamic_shmem_bytes(), 16384);
EXPECT_EQ(kernel.grid_dim().at(0), 2);
EXPECT_EQ(kernel.grid_dim().at(1), 1);
EXPECT_EQ(kernel.grid_dim().at(2), 1);
EXPECT_EQ(kernel.block_dim().at(0), 32);
EXPECT_EQ(kernel.block_dim().at(1), 1);
EXPECT_EQ(kernel.block_dim().at(2), 1);
EXPECT_EQ(kernel.total_duration_ns(), 2);
EXPECT_FALSE(kernel.is_kernel_using_tensor_core());
EXPECT_TRUE(kernel.is_op_tensor_core_eligible());
EXPECT_EQ(kernel.op_name(), "Conv2D");
}
{
const auto& kernel = kernel_stats.reports().at(0);
EXPECT_EQ(kernel.name(), "volta_fp16_s884gemm_fp16_128x128_ldg8_f2f_tn");
EXPECT_EQ(kernel.registers_per_thread(), 32);
EXPECT_EQ(kernel.static_shmem_bytes(), 0);
EXPECT_EQ(kernel.dynamic_shmem_bytes(), 16384);
EXPECT_EQ(kernel.grid_dim().at(0), 3);
EXPECT_EQ(kernel.grid_dim().at(1), 1);
EXPECT_EQ(kernel.grid_dim().at(2), 1);
EXPECT_EQ(kernel.block_dim().at(0), 64);
EXPECT_EQ(kernel.block_dim().at(1), 1);
EXPECT_EQ(kernel.block_dim().at(2), 1);
EXPECT_EQ(kernel.total_duration_ns(), 3);
EXPECT_TRUE(kernel.is_kernel_using_tensor_core());
EXPECT_TRUE(kernel.is_op_tensor_core_eligible());
EXPECT_EQ(kernel.op_name(), "Einsum_80");
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_kernel_stats_db.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_kernel_stats_db_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cf68becf-c51f-4d43-90f6-818835c3a4aa | cpp | tensorflow/tensorflow | hlo_proto_to_memory_visualization_utils | tensorflow/core/profiler/convert/hlo_proto_to_memory_visualization_utils.cc | tensorflow/core/profiler/convert/hlo_proto_to_memory_visualization_utils_test.cc | #include "tensorflow/core/profiler/convert/hlo_proto_to_memory_visualization_utils.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <list>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/layout_util.h"
#include "xla/service/hlo.pb.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/profiler/protobuf/memory_viewer_preprocess.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::xla::BufferAllocationProto;
using ::xla::HeapSimulatorTrace;
using ::xla::HloInstructionProto;
using ::xla::HloProto;
using ::xla::LayoutUtil;
using ::xla::LogicalBufferProto;
using ::xla::Shape;
using ::xla::ShapeUtil;
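// Returns the sub-shape selected by `shape_index` within `shape_proto`.
// Only the last index entry is consulted; an empty or out-of-range index
// falls back to the full shape.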
Shape ResolveShapeIndex(const xla::ShapeProto& shape_proto,
absl::Span<const int64_t> shape_index) {
if (shape_index.empty()) return Shape(shape_proto);
int64_t i = shape_index.back();
if (i >= shape_proto.tuple_shapes_size()) {
return Shape(shape_proto);
}
return Shape(shape_proto.tuple_shapes(i));
}
std::string ShapeDescription(const Shape& shape) {
return ShapeUtil::HumanStringWithLayout(shape);
}
int64_t ShapeUnpaddedSize(Shape shape) {
LayoutUtil::SetToDefaultLayout(&shape);
return ShapeUtil::ByteSizeOf(shape, sizeof(void*));
}
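// Wraps a BufferAllocationProto with lifetime classification (indefinite vs.
// heap-simulated), a display category/description, and the id of the heap
// simulator trace that covers this allocation, once known.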
class BufferAllocationStruct {
public:
explicit BufferAllocationStruct(const BufferAllocationProto& proto)
: buffer_allocation_(proto) {}
bool IsIndefinite() const {
return buffer_allocation_.is_thread_local() ||
buffer_allocation_.is_entry_computation_parameter() ||
buffer_allocation_.is_constant() ||
buffer_allocation_.maybe_live_out();
}
const BufferAllocationProto& proto() const { return buffer_allocation_; }
size_t size() const { return buffer_allocation_.size(); }
int64_t color() const { return buffer_allocation_.color(); }
int64_t index() const { return buffer_allocation_.index(); }
std::optional<int64_t> heap_simulator_trace_id() const {
return heap_simulator_trace_id_;
}
void set_heap_simulator_trace_id(int64_t id) {
heap_simulator_trace_id_ = id;
}
std::string category() const {
if (buffer_allocation_.is_entry_computation_parameter()) {
return "Parameter";
} else if (buffer_allocation_.maybe_live_out()) {
return "Output";
} else if (buffer_allocation_.is_thread_local()) {
return "Thread-local";
} else if (buffer_allocation_.is_constant()) {
return "Constant";
} else {
return "Temporary";
}
}
std::string description() const {
return absl::StrFormat(
"buffer_allocation_id:%d\nsize:%d\nbuffer_counts:%d\n",
buffer_allocation_.index(), size(), buffer_allocation_.assigned_size());
}
private:
const BufferAllocationProto& buffer_allocation_;
std::optional<int64_t> heap_simulator_trace_id_;
};
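// Ties a LogicalBufferProto to its allocation, defining HLO instruction,
// byte offset, and (after simulation) live span. SHARE_WITH events are
// modeled by ref-counting against a canonical buffer.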
struct LogicalBufferStruct {
LogicalBufferStruct(const LogicalBufferProto& p,
const BufferAllocationStruct& b,
const ::xla::HloInstructionProto& i, uint64_t offset)
: proto(p),
buffer_allocation(b),
hlo_instruction(i),
offset(offset),
shape(ResolveShapeIndex(hlo_instruction.shape(),
proto.defined_at().shape_index())) {}
absl::string_view instruction_name() const { return hlo_instruction.name(); }
int64_t color() const { return proto.color(); }
size_t size() const { return proto.size(); }
size_t unpadded_size() const { return ShapeUnpaddedSize(shape); }
int64_t inc() {
if (canonical_buffer) return canonical_buffer->inc();
return ++ref_count;
}
int64_t dec() {
if (canonical_buffer) return canonical_buffer->dec();
return --ref_count;
}
int64_t share_with(LogicalBufferStruct* buffer) {
canonical_buffer = buffer;
return canonical_buffer->inc();
}
LogicalBufferStruct* get_canonical_buffer() {
return canonical_buffer ? canonical_buffer->get_canonical_buffer() : this;
}
std::string GetInstructionNameWithShapeIndex() const {
if (proto.defined_at().shape_index().empty()) {
return std::string(instruction_name());
} else {
return absl::StrCat(instruction_name(), "{",
absl::StrJoin(proto.defined_at().shape_index(), ","),
"}");
}
}
std::string description() const {
return absl::StrFormat(
"buffer_id:%d\nhlo_op:%s\nshape:%s\nsize:%d\nunpadded_size:%d\n"
"offset:%d\nspan:(%lld,%lld)",
proto.id(), instruction_name(), ShapeDescription(shape), size(),
unpadded_size(), offset, span ? span->first : -1,
span ? span->second : -1);
}
const LogicalBufferProto& proto;
const BufferAllocationStruct& buffer_allocation;
const ::xla::HloInstructionProto& hlo_instruction;
uint64_t offset;
std::optional<std::pair<uint64_t, uint64_t>> span;
xla::Shape shape;
int64_t ref_count = 0;
LogicalBufferStruct* canonical_buffer = nullptr;
};
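// Indexes the HloProto's buffer assignment: logical buffers and buffer
// allocations by id, plus the mapping from memory color to the heap
// simulator trace that should be replayed for it.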
class HloProtoBufferWrapper {
public:
explicit HloProtoBufferWrapper(const ::xla::HloProto& hlo_proto)
: hlo_proto_(hlo_proto) {
Init();
}
int64_t GetHeapSimulatorTraceId(const int64_t memory_color) const {
int64_t id = GetHeapSimulatorTraceIdFromBufferAllocationIndex(memory_color);
if (id != -1) {
return id;
}
return GetHeapSimulatorTraceIdFromEvents(memory_color);
}
const ::xla::HloProto& GetHloProto() const { return hlo_proto_; }
std::vector<const BufferAllocationStruct*> GetBufferAllocations(
int64_t memory_color) const {
std::vector<const BufferAllocationStruct*> buffer_allocations;
for (const auto& iter : id_to_buffer_allocation_) {
if (iter.second->proto().color() != memory_color) continue;
buffer_allocations.push_back(iter.second.get());
}
return buffer_allocations;
}
LogicalBufferStruct* GetLogicalBuffer(int64_t logical_buffer_id) const {
if (!id_to_logical_buffer_.contains(logical_buffer_id)) {
LOG(DFATAL) << "logical_buffer_id " << logical_buffer_id << "not found.";
return nullptr;
}
return id_to_logical_buffer_.at(logical_buffer_id).get();
}
std::vector<const LogicalBufferStruct*> LogicalBuffersWithIndefiniteLifetime(
int64_t memory_color) const {
std::vector<const LogicalBufferStruct*> indefinite_logical_buffers;
for (const auto& buffer_assignment : GetBufferAllocations(memory_color)) {
if (!buffer_assignment->IsIndefinite()) continue;
if (buffer_assignment->proto().is_thread_local()) continue;
const LogicalBufferStruct* best_logical_buffer = nullptr;
size_t best_size = 0;
for (const auto& assigned : buffer_assignment->proto().assigned()) {
const LogicalBufferStruct* logical_buffer_struct =
GetLogicalBuffer(assigned.logical_buffer_id());
if (logical_buffer_struct == nullptr) continue;
if (logical_buffer_struct->size() > best_size) {
best_size = logical_buffer_struct->size();
best_logical_buffer = logical_buffer_struct;
}
}
if (best_logical_buffer) {
indefinite_logical_buffers.push_back(best_logical_buffer);
}
}
return indefinite_logical_buffers;
}
private:
void Init() {
absl::flat_hash_map<absl::string_view, const ::xla::HloInstructionProto*>
name_to_hlo;
absl::flat_hash_map<uint64_t, const ::xla::HloInstructionProto*>
unique_id_to_hlo;
for (const auto& computation : hlo_proto_.hlo_module().computations()) {
for (const auto& instruction : computation.instructions()) {
name_to_hlo[instruction.name()] = &instruction;
unique_id_to_hlo[instruction.id()] = &instruction;
}
}
absl::flat_hash_map<int64_t, const LogicalBufferProto*>
id_to_logical_buffer_proto;
for (const auto& logical_buffer :
hlo_proto_.buffer_assignment().logical_buffers()) {
id_to_logical_buffer_proto[logical_buffer.id()] = &logical_buffer;
}
for (const auto& buffer_allocation :
hlo_proto_.buffer_assignment().buffer_allocations()) {
auto& buffer_allocation_s =
id_to_buffer_allocation_[buffer_allocation.index()];
buffer_allocation_s =
std::make_unique<BufferAllocationStruct>(buffer_allocation);
for (const auto& assigned : buffer_allocation.assigned()) {
const auto id = assigned.logical_buffer_id();
if (!id_to_logical_buffer_proto.contains(id)) {
LOG(DFATAL) << "logical_buffer_id " << id << " not found.";
continue;
}
const auto* logical_buffer = id_to_logical_buffer_proto.at(id);
int64_t inst_id = logical_buffer->defined_at().instruction_id();
if (!unique_id_to_hlo.contains(inst_id)) {
LOG(DFATAL) << "instruction_id " << inst_id << " not found.";
continue;
}
const auto* instruction = unique_id_to_hlo.at(inst_id);
id_to_logical_buffer_[id] = std::make_unique<LogicalBufferStruct>(
*logical_buffer, *buffer_allocation_s, *instruction,
assigned.offset());
}
}
const auto& heap_simulator_traces =
hlo_proto_.buffer_assignment().heap_simulator_traces();
for (int64_t i = 0; i < heap_simulator_traces.size(); i++) {
if (heap_simulator_traces[i].events().empty()) continue;
int logical_buffer_id = heap_simulator_traces[i].events(0).buffer_id();
if (!id_to_logical_buffer_.contains(logical_buffer_id)) continue;
auto* logical_buffer = id_to_logical_buffer_[logical_buffer_id].get();
auto buffer_allocation_index = logical_buffer->buffer_allocation.index();
id_to_buffer_allocation_[buffer_allocation_index]
->set_heap_simulator_trace_id(i);
}
}
int64_t GetHeapSimulatorTraceIdFromEvents(const int64_t memory_color) const {
int64_t best_index = -1;
int64_t best_event_count = 0;
for (int64_t i = 0;
i < hlo_proto_.buffer_assignment().heap_simulator_traces_size(); i++) {
const auto& heap_simulator_trace =
hlo_proto_.buffer_assignment().heap_simulator_traces(i);
int64_t event_count = 0;
for (const auto& event : heap_simulator_trace.events()) {
if (!id_to_logical_buffer_.contains(event.buffer_id())) {
LOG(DFATAL) << "buffer_id " << event.buffer_id() << "not found.";
continue;
}
const auto& logical_buffer =
id_to_logical_buffer_.at(event.buffer_id());
if (logical_buffer->color() == memory_color) {
event_count++;
}
}
if (event_count > best_event_count) {
best_index = i;
best_event_count = event_count;
}
}
return best_index;
}
int64_t GetHeapSimulatorTraceIdFromBufferAllocationIndex(
const int64_t memory_color) const {
auto buffer_allocations = GetBufferAllocations(memory_color);
for (const auto* buffer_allocation : buffer_allocations) {
if (buffer_allocation->IsIndefinite()) continue;
if (buffer_allocation->heap_simulator_trace_id()) {
return *buffer_allocation->heap_simulator_trace_id();
}
}
return -1;
}
const ::xla::HloProto& hlo_proto_;
absl::flat_hash_map<int64_t, std::unique_ptr<LogicalBufferStruct>>
id_to_logical_buffer_;
absl::flat_hash_map<int64_t, std::unique_ptr<BufferAllocationStruct>>
id_to_buffer_allocation_;
};
double BytesToMiB(int64_t bytes) {
return static_cast<double>(bytes) / (1ULL << 20);
}
HeapObject MakeHeapObjectCommon(std::string label, int32_t color,
int64_t logical_buffer_id,
int64_t logical_buffer_size_bytes,
int64_t unpadded_shape_bytes) {
HeapObject result;
result.set_numbered(color);
result.set_label(std::move(label));
result.set_logical_buffer_id(logical_buffer_id);
result.set_logical_buffer_size_mib(BytesToMiB(logical_buffer_size_bytes));
result.set_unpadded_shape_mib(BytesToMiB(unpadded_shape_bytes));
return result;
}
HeapObject MakeHeapObject(const LogicalBufferStruct& logical_buffer,
int32_t color) {
const HloInstructionProto& hlo_instruction = logical_buffer.hlo_instruction;
std::string shape_string = ShapeDescription(logical_buffer.shape);
std::string label =
absl::StrFormat("%s: %s # %s", logical_buffer.instruction_name(),
shape_string, hlo_instruction.metadata().op_name());
HeapObject result = MakeHeapObjectCommon(
std::move(label), color, logical_buffer.proto.id(), logical_buffer.size(),
logical_buffer.unpadded_size());
result.set_instruction_name(
logical_buffer.GetInstructionNameWithShapeIndex());
result.set_group_name(logical_buffer.buffer_allocation.category());
result.set_tf_op_name(hlo_instruction.metadata().op_name());
result.set_shape_string(shape_string);
result.set_op_code(hlo_instruction.opcode());
return result;
}
BufferSpan MakeBufferSpan(int32 start, int32 limit) {
BufferSpan result;
result.set_start(start);
result.set_limit(limit);
return result;
}
void Convert(const xla::BufferAllocationProto_Assigned& assigned,
const HloProtoBufferWrapper& wrapper, LogicalBuffer* result) {
result->set_id(assigned.logical_buffer_id());
result->set_size_mib(BytesToMiB(assigned.size()));
const LogicalBufferStruct* logical_buffer =
wrapper.GetLogicalBuffer(assigned.logical_buffer_id());
if (logical_buffer == nullptr) return;
result->set_hlo_name(std::string(logical_buffer->instruction_name()));
result->mutable_shape_index()->CopyFrom(
logical_buffer->proto.defined_at().shape_index());
result->set_shape(ShapeDescription(logical_buffer->shape));
}
bool IsReusable(const BufferAllocationProto& buffer_allocation) {
return !buffer_allocation.is_thread_local() && !buffer_allocation.is_tuple();
}
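// Converts a BufferAllocationProto into the viewer's BufferAllocation,
// recording display attributes, its logical buffers, and a common shape
// when every logical buffer agrees on one.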
void Convert(const BufferAllocationProto& proto,
const HloProtoBufferWrapper& wrapper, BufferAllocation* result) {
result->set_id(proto.index());
result->set_size_mib(BytesToMiB(proto.size()));
if (proto.is_entry_computation_parameter()) {
result->add_attributes("entry computation parameter");
}
if (proto.maybe_live_out()) {
result->add_attributes("may-be live out");
}
if (IsReusable(proto)) {
result->add_attributes("reusable");
}
for (const auto& assigned : proto.assigned()) {
Convert(assigned, wrapper, result->add_logical_buffers());
}
if (!result->logical_buffers().empty()) {
std::string common_shape = result->logical_buffers(0).shape();
for (int64_t i = 1; i < result->logical_buffers_size(); ++i) {
if (result->logical_buffers(i).shape() != common_shape) {
common_shape = "";
break;
}
}
if (!common_shape.empty()) {
result->set_common_shape(common_shape);
}
}
}
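// Tallies entry-parameter, non-reusable, maybe-live-out, indefinite, and
// total allocation bytes for `memory_color`, recording them in MiB on the
// result along with each indefinite allocation.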
void NoteSpecialAllocations(const HloProtoBufferWrapper& wrapper,
int64_t memory_color, int64_t small_buffer_size,
PreprocessResult* result) {
int64_t entry_parameters_bytes = 0;
int64_t non_reusable_bytes = 0;
int64_t maybe_live_out_bytes = 0;
int64_t total_buffer_allocation_bytes = 0;
int64_t indefinite_buffer_allocation_bytes = 0;
for (const auto* buffer_allocation_struct :
wrapper.GetBufferAllocations(memory_color)) {
const auto& buffer_allocation = buffer_allocation_struct->proto();
if (buffer_allocation.is_entry_computation_parameter()) {
entry_parameters_bytes += buffer_allocation.size();
}
if (!IsReusable(buffer_allocation)) {
non_reusable_bytes += buffer_allocation.size();
}
if (buffer_allocation.maybe_live_out()) {
if (buffer_allocation.size() > small_buffer_size) {
VLOG(1) << "Maybe live out buffer allocation: "
<< buffer_allocation.size()
<< " bytes :: " << buffer_allocation.ShortDebugString();
}
maybe_live_out_bytes += buffer_allocation.size();
}
if (buffer_allocation_struct->IsIndefinite()) {
indefinite_buffer_allocation_bytes += buffer_allocation.size();
Convert(buffer_allocation, wrapper, result->add_indefinite_lifetimes());
}
total_buffer_allocation_bytes += buffer_allocation.size();
}
result->set_entry_computation_parameters_mib(
BytesToMiB(entry_parameters_bytes));
result->set_non_reusable_mib(BytesToMiB(non_reusable_bytes));
result->set_maybe_live_out_mib(BytesToMiB(maybe_live_out_bytes));
result->set_total_buffer_allocation_mib(
BytesToMiB(total_buffer_allocation_bytes));
result->set_indefinite_buffer_allocation_mib(
BytesToMiB(indefinite_buffer_allocation_bytes));
}
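// Running statistics for a heap simulator replay: current and peak heap
// sizes (padded and unpadded), their timelines, and the set of logical
// buffers live at the moment of peak usage.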
struct HeapSimulatorStats {
explicit HeapSimulatorStats(const HloProtoBufferWrapper& wrapper)
: wrapper(wrapper) {}
void SetSimulatorTraceEventSize(int64_t size) {
simulator_trace_event_size = size;
}
void UpdateOnSimulatorEvent(const HeapSimulatorTrace::Event& event) {
heap_size_bytes_timeline.push_back(heap_size_bytes);
unpadded_heap_size_bytes_timeline.push_back(unpadded_heap_size_bytes);
const LogicalBufferStruct* logical_buffer =
wrapper.GetLogicalBuffer(event.buffer_id());
if (logical_buffer == nullptr) return;
seen_logical_buffers.insert(logical_buffer);
seen_buffer_allocations.insert(&logical_buffer->buffer_allocation.proto());
}
void IncreaseMemoryUsage(LogicalBufferStruct* canonical_logical_buffer,
bool init_buffer_span) {
logical_buffers.push_back(canonical_logical_buffer->proto.id());
heap_size_bytes += canonical_logical_buffer->size();
unpadded_heap_size_bytes += canonical_logical_buffer->unpadded_size();
int64_t prior_peak_heap_size_bytes = peak_heap_size_bytes;
peak_heap_size_bytes = std::max(peak_heap_size_bytes, heap_size_bytes);
if (prior_peak_heap_size_bytes != peak_heap_size_bytes) {
peak_heap_size_position = heap_size_bytes_timeline.size() - 1;
peak_unpadded_heap_size_bytes = unpadded_heap_size_bytes;
VLOG(1) << absl::StrFormat("New peak heap size on %d :: %d bytes",
peak_heap_size_position, peak_heap_size_bytes);
peak_logical_buffers = logical_buffers;
}
if (init_buffer_span) {
canonical_logical_buffer->span.emplace(
heap_size_bytes_timeline.size() - 1, simulator_trace_event_size - 1);
}
}
Status DecreaseMemoryUsage(LogicalBufferStruct* canonical_logical_buffer) {
int64_t canonical_buffer_id = canonical_logical_buffer->proto.id();
logical_buffers.remove(canonical_buffer_id);
heap_size_bytes -= canonical_logical_buffer->size();
if (heap_size_bytes < 0) {
return errors::InvalidArgument(absl::StrCat(
"Heap size should be non-negative, but get: ", heap_size_bytes));
}
unpadded_heap_size_bytes -= canonical_logical_buffer->unpadded_size();
if (canonical_logical_buffer->span) {
canonical_logical_buffer->span->second =
heap_size_bytes_timeline.size() - 1;
}
return absl::OkStatus();
}
Status FinalizeMemoryUsage() {
heap_size_bytes_timeline.push_back(heap_size_bytes);
unpadded_heap_size_bytes_timeline.push_back(unpadded_heap_size_bytes);
if (seen_buffer_allocations.size() != 1) {
return errors::InvalidArgument(
absl::StrCat("All heap simulation should work out of a single buffer "
"allocation, actual seen_buffer_allocations.size():",
seen_buffer_allocations.size()));
}
VLOG(1) << "Found " << peak_logical_buffers.size()
<< " logical buffers alive at point of peak heap usage.";
VLOG(1) << "Peak logical buffers: ["
<< absl::StrJoin(peak_logical_buffers, ", ") << "]";
return absl::OkStatus();
}
int64_t heap_size_bytes = 0;
int64_t unpadded_heap_size_bytes = 0;
int64_t peak_heap_size_bytes = 0;
int64_t peak_unpadded_heap_size_bytes = 0;
std::list<int64_t> logical_buffers;
std::list<int64_t> peak_logical_buffers;
std::vector<int64_t> heap_size_bytes_timeline;
std::vector<int64_t> unpadded_heap_size_bytes_timeline;
int64_t peak_heap_size_position = 0;
absl::flat_hash_set<const LogicalBufferStruct*> seen_logical_buffers;
absl::flat_hash_set<const BufferAllocationProto*> seen_buffer_allocations;
const HloProtoBufferWrapper& wrapper;
int64_t simulator_trace_event_size;
};
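// Replays the heap simulator trace chosen for `memory_color`, applying
// ALLOC/FREE/SHARE_WITH events to `stats`. A missing trace is not an error;
// a double free or an unknown event kind is.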
Status ProcessHeapSimulatorTrace(const HloProtoBufferWrapper& wrapper,
const int64_t memory_color,
HeapSimulatorStats* stats) {
int64_t heap_simulator_trace_id =
wrapper.GetHeapSimulatorTraceId(memory_color);
if (heap_simulator_trace_id < 0 ||
heap_simulator_trace_id >= wrapper.GetHloProto()
.buffer_assignment()
.heap_simulator_traces_size()) {
return absl::OkStatus();
}
const auto& trace =
wrapper.GetHloProto().buffer_assignment().heap_simulator_traces(
heap_simulator_trace_id);
stats->SetSimulatorTraceEventSize(trace.events_size());
for (const auto& event : trace.events()) {
stats->UpdateOnSimulatorEvent(event);
LogicalBufferStruct* logical_buffer =
wrapper.GetLogicalBuffer(event.buffer_id());
if (logical_buffer == nullptr) {
continue;
}
if (event.kind() == HeapSimulatorTrace::Event::ALLOC) {
logical_buffer->inc();
stats->IncreaseMemoryUsage(logical_buffer,
true);
} else if (event.kind() == HeapSimulatorTrace::Event::FREE) {
auto ref_count = logical_buffer->dec();
if (ref_count < 0) {
return errors::InvalidArgument(absl::StrCat(
"Buffer ", logical_buffer->proto.id(), "is freed multiple times."));
}
if (ref_count == 0) {
auto& canonical_buffer = *logical_buffer->get_canonical_buffer();
TF_RETURN_IF_ERROR(stats->DecreaseMemoryUsage(&canonical_buffer));
}
} else if (event.kind() == HeapSimulatorTrace::Event::SHARE_WITH) {
int64_t canonical_buffer_id = event.share_with_canonical_id();
LogicalBufferStruct* canonical_buffer =
wrapper.GetLogicalBuffer(canonical_buffer_id);
if (canonical_buffer == nullptr) {
continue;
}
auto ref_count = logical_buffer->share_with(canonical_buffer);
if (ref_count == 1) {
stats->IncreaseMemoryUsage(canonical_buffer,
false);
}
} else {
return errors::InvalidArgument(
absl::StrCat("Unhandled event kind: ", event.kind()));
}
}
TF_RETURN_IF_ERROR(stats->FinalizeMemoryUsage());
return absl::OkStatus();
}
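// Heap objects alive at peak usage; buffers smaller than `small_buffer_size`
// are folded into one aggregate "small" entry rather than listed individually.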
struct PeakUsageSnapshot {
PeakUsageSnapshot(const HloProtoBufferWrapper& wrapper,
const HeapSimulatorStats& simulator_stats,
int64_t small_buffer_size)
: wrapper(wrapper),
simulator_stats(simulator_stats),
small_buffer_size(small_buffer_size) {}
void AddHeapObject(const LogicalBufferStruct& logical_buffer) {
if (logical_buffer.size() < small_buffer_size) {
total_small_buffer_size_bytes += logical_buffer.size();
} else {
max_heap_objects.push_back(MakeHeapObject(logical_buffer, colorno++));
}
}
void FinalizeBufferUsage() {
for (const int64_t logical_buffer_id :
simulator_stats.peak_logical_buffers) {
const LogicalBufferStruct* logical_buffer =
wrapper.GetLogicalBuffer(logical_buffer_id);
if (logical_buffer == nullptr) return;
AddHeapObject(*logical_buffer);
}
if (total_small_buffer_size_bytes != 0) {
max_heap_objects.push_back(MakeHeapObjectCommon(
absl::StrFormat("small (<%d bytes)", small_buffer_size), colorno++,
-1, total_small_buffer_size_bytes,
0));
}
}
std::vector<HeapObject> max_heap_objects;
int64_t indefinite_memory_usage_bytes = 0;
int64_t total_small_buffer_size_bytes = 0;
int32_t colorno = 0;
const HloProtoBufferWrapper& wrapper;
const HeapSimulatorStats& simulator_stats;
const int64_t small_buffer_size;
};
void CreatePeakUsageSnapshot(const HloProtoBufferWrapper& wrapper,
int64_t memory_color,
PeakUsageSnapshot* peak_snapshot) {
for (const auto* logical_buffer :
wrapper.LogicalBuffersWithIndefiniteLifetime(memory_color)) {
const auto& buffer_allocation = logical_buffer->buffer_allocation;
peak_snapshot->indefinite_memory_usage_bytes += buffer_allocation.size();
peak_snapshot->AddHeapObject(*logical_buffer);
}
peak_snapshot->FinalizeBufferUsage();
}
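// Emits the allocation timeline as a Graphviz graph: one rectangle per
// buffer allocation and one per logical-buffer live span, scaled to a fixed
// canvas and colored from the palettes below.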
void ConvertAllocationTimeline(const HloProtoBufferWrapper& wrapper,
const HeapSimulatorStats& simulator_stats,
const int64_t memory_color,
PreprocessResult* result) {
const char* lb_colors[] = {
"antiquewhite3",
"aqua",
"aquamarine",
"bisque",
"blanchedalmond",
"blue",
"blueviolet",
"brown",
"burlywood",
"cadetblue",
"chartreuse",
"chocolate",
"coral",
"cornflowerblue",
"crimson",
"cyan",
"darkblue",
"darkcyan",
"darkgoldenrod",
"darkgray",
"darkgreen",
"darkkhaki",
"darkmagenta",
"darkolivegreen",
"darkorange",
"darkorchid",
"darkred",
"darksalmon",
"darkseagreen",
"darkslateblue",
"darkslategray",
"darkturquoise",
"darkviolet",
"deeppink",
"deepskyblue",
"dimgray",
"dodgerblue",
"firebrick",
"floralwhite",
"forestgreen",
"fuchsia",
"gainsboro",
"gold",
"goldenrod",
"green",
"greenyellow",
"goldenrod",
"greenyellow",
"honeydew",
"hotpink",
"indianred",
"indigo",
"ivory3",
"khaki",
"lavender",
"lavenderblush",
"lawngreen",
"lemonchiffon",
"lightblue",
"lightcoral",
"lightcyan",
"lightpink",
"limegreen",
"lightsalmon",
"lightseagreen",
"lightskyblue",
"lime",
"magenta",
"maroon",
"mediumaquamarine",
"mediumblue",
"mediumorchid",
"mediumpurple",
"midnightblue",
"mediumvioletred",
"mistyrose",
"moccasin",
"olive",
"orange",
"orangered",
"orchid",
"palegoldenrod",
"palegreen",
"paleturquoise",
"palevioletred",
"papayawhip",
"peachpuff",
"peachpuff",
"pink",
"plum",
"powderblue",
"purple",
"rebeccapurple",
"red",
"rosybrown",
"royalblue",
"salmon",
"sandybrown",
"seagreen",
"seashell",
"sienna",
"skyblue",
"tan",
"teal",
"turquoise",
"tomato",
"violet",
"violetred",
"yellow",
};
struct RenderOptions {
size_t graph_width = 2048;
size_t graph_height = 2048;
} render_options;
const char* ba_colors[] = {
"azure",
"beige",
"cornsilk",
};
int num_lb_colors = sizeof(lb_colors) / sizeof(lb_colors[0]);
int num_ba_colors = sizeof(ba_colors) / sizeof(ba_colors[0]);
std::vector<size_t> buffer_allocation_offsets;
size_t total_y_size = 0;
size_t total_x_size = 0;
std::vector<std::string> rects;
auto buffer_allocations = wrapper.GetBufferAllocations(memory_color);
const auto& heap_simulator_traces =
wrapper.GetHloProto().buffer_assignment().heap_simulator_traces();
for (const auto& buffer_allocation : buffer_allocations) {
if (buffer_allocation->IsIndefinite()) continue;
auto heap_simulator_trace_id = buffer_allocation->heap_simulator_trace_id();
if (!heap_simulator_trace_id) continue;
buffer_allocation_offsets.push_back(total_y_size);
total_y_size += buffer_allocation->size();
if (*heap_simulator_trace_id >= heap_simulator_traces.size()) {
LOG(DFATAL) << "heap_simulator_trace_id " << *heap_simulator_trace_id
<< " out of bounds.";
continue;
}
total_x_size = std::max<size_t>(
total_x_size,
heap_simulator_traces.at(*heap_simulator_trace_id).events_size());
}
if (!total_y_size || !total_x_size) return;
double scale_x =
static_cast<double>(render_options.graph_width) / total_x_size;
double scale_y =
static_cast<double>(render_options.graph_height) / total_y_size;
int node_id = 0;
auto add_rect = [&](size_t x, size_t y, size_t width, size_t height,
const string& description, const char* color) {
size_t center_x = x + (width >> 1);
size_t center_y = y + (height >> 1);
int pos_x = center_x * scale_x;
int pos_y = center_y * scale_y;
int rect_w = width * scale_x;
int rect_h = height * scale_y;
if (height * scale_y < 0.5) return;
rect_h = std::max(rect_h, 1);
std::string rect = absl::StrFormat(
R"("%d" [tooltip="%s", pos="%d,%d!", width="%d!", height="%d!", color=%s];)",
node_id++, description, pos_x, pos_y, rect_w, rect_h, color);
rects.push_back(rect);
};
int buffer_id = 0;
for (const auto& buffer_allocation : buffer_allocations) {
if (buffer_allocation->IsIndefinite()) continue;
auto buffer_allocation_offset = buffer_allocation_offsets[buffer_id++];
add_rect(0, buffer_allocation_offset, total_x_size,
buffer_allocation->size(), buffer_allocation->description(),
ba_colors[buffer_id % num_ba_colors]);
for (const auto& assigned : buffer_allocation->proto().assigned()) {
const LogicalBufferStruct* logical_buffer =
wrapper.GetLogicalBuffer(assigned.logical_buffer_id());
if (logical_buffer == nullptr) continue;
if (!logical_buffer->span || logical_buffer->canonical_buffer) continue;
size_t width = logical_buffer->span->second - logical_buffer->span->first;
size_t height = buffer_allocation_offset + logical_buffer->size();
add_rect(logical_buffer->span->first, logical_buffer->offset, width,
height, logical_buffer->description(),
lb_colors[node_id % num_lb_colors]);
}
}
VLOG(1) << "rects:" << rects.size();
result->set_allocation_timeline(
absl::StrFormat("graph G {\n node [shape=box,style=filled];\n %s\n}",
absl::StrJoin(rects, "\n")));
}
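// Assembles the final PreprocessResult: peak-heap contents in program and
// by-size order, heap-size timelines offset by indefinite usage, per-buffer
// spans, special allocations, and the allocation timeline.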
void GeneratePreprocessResult(const HloProtoBufferWrapper& wrapper,
const HeapSimulatorStats& simulator_stats,
const PeakUsageSnapshot& peak_snapshot,
const int64_t memory_color,
PreprocessResult* result) {
result->set_module_name(wrapper.GetHloProto().hlo_module().name());
result->set_entry_computation_name(
wrapper.GetHloProto().hlo_module().entry_computation_name());
std::vector<const HeapObject*> max_heap_by_size;
max_heap_by_size.reserve(peak_snapshot.max_heap_objects.size());
for (const auto& object : peak_snapshot.max_heap_objects) {
max_heap_by_size.push_back(&object);
}
std::sort(max_heap_by_size.begin(), max_heap_by_size.end(),
[](const HeapObject* a, const HeapObject* b) {
return a->logical_buffer_size_mib() >
b->logical_buffer_size_mib();
});
std::vector<int> max_heap_to_by_size;
max_heap_to_by_size.reserve(max_heap_by_size.size());
for (const auto& object : peak_snapshot.max_heap_objects) {
auto it =
std::find(max_heap_by_size.begin(), max_heap_by_size.end(), &object);
int index = std::distance(max_heap_by_size.begin(), it);
max_heap_to_by_size.push_back(index);
}
std::vector<int> by_size_to_max_heap;
for (const auto* object : max_heap_by_size) {
int index = object - &peak_snapshot.max_heap_objects[0];
by_size_to_max_heap.push_back(index);
}
*result->mutable_max_heap() = {peak_snapshot.max_heap_objects.begin(),
peak_snapshot.max_heap_objects.end()};
result->mutable_max_heap_by_size()->Reserve(max_heap_by_size.size());
for (const HeapObject* o : max_heap_by_size) {
*result->add_max_heap_by_size() = *o;
}
*result->mutable_max_heap_to_by_size() = {max_heap_to_by_size.begin(),
max_heap_to_by_size.end()};
*result->mutable_by_size_to_max_heap() = {by_size_to_max_heap.begin(),
by_size_to_max_heap.end()};
size_t timeline_size = simulator_stats.heap_size_bytes_timeline.size();
double add_mib = BytesToMiB(peak_snapshot.indefinite_memory_usage_bytes);
result->mutable_heap_sizes()->Reserve(timeline_size);
result->mutable_unpadded_heap_sizes()->Reserve(timeline_size);
for (size_t i = 0; i < timeline_size; i++) {
result->add_heap_sizes(
BytesToMiB(simulator_stats.heap_size_bytes_timeline[i]) + add_mib);
result->add_unpadded_heap_sizes(
BytesToMiB(simulator_stats.unpadded_heap_size_bytes_timeline[i]) +
add_mib);
}
result->set_peak_heap_mib(BytesToMiB(simulator_stats.peak_heap_size_bytes) +
add_mib);
result->set_peak_unpadded_heap_mib(
BytesToMiB(simulator_stats.peak_unpadded_heap_size_bytes) + add_mib);
result->set_peak_heap_size_position(simulator_stats.peak_heap_size_position);
for (const auto* logical_buffer : simulator_stats.seen_logical_buffers) {
if (!logical_buffer->span) continue;
(*result->mutable_logical_buffer_spans())[logical_buffer->proto.id()] =
MakeBufferSpan(logical_buffer->span->first,
logical_buffer->span->second);
}
NoteSpecialAllocations(wrapper, memory_color, peak_snapshot.small_buffer_size,
result);
ConvertAllocationTimeline(wrapper, simulator_stats, memory_color, result);
}
}
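// Public entry point: replays the heap simulation for `memory_color`,
// snapshots peak usage, and packages both into a PreprocessResult.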
absl::StatusOr<PreprocessResult> ConvertHloProtoToPreprocessResult(
const HloProto& hlo_proto, int64_t small_buffer_size,
int64_t memory_color) {
HloProtoBufferWrapper wrapper(hlo_proto);
HeapSimulatorStats simulator_stats(wrapper);
auto status =
ProcessHeapSimulatorTrace(wrapper, memory_color, &simulator_stats);
if (!status.ok()) {
return absl::InvalidArgumentError(absl::StrCat(
"Failed to process heap simulator trace: ", status.message()));
}
PeakUsageSnapshot peak_snapshot(wrapper, simulator_stats, small_buffer_size);
CreatePeakUsageSnapshot(wrapper, memory_color, &peak_snapshot);
PreprocessResult result;
GeneratePreprocessResult(wrapper, simulator_stats, peak_snapshot,
memory_color, &result);
return result;
}
}
} | #include "tensorflow/core/profiler/convert/hlo_proto_to_memory_visualization_utils.h"
#include <string>
#include <gmock/gmock.h>
#include "absl/strings/str_format.h"
#include "xla/service/hlo.pb.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/protobuf/memory_viewer_preprocess.pb.h"
#include "tensorflow/core/util/proto/proto_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
static constexpr char kHLOBase[] = R"pb(
hlo_module {
name: "test_module"
entry_computation_name: "test_computation"
computations {
name: "test_computation"
instructions {
name: "fusion.1"
id: 0
shape { tuple_shapes { element_type: U64 } }
}
instructions {
name: "fusion.2"
id: 1
shape { tuple_shapes { element_type: U64 } }
}
}
}
buffer_assignment {
buffer_allocations {
index: 0
size: 1048576
color: 0
assigned { logical_buffer_id: 1 offset: 0 size: 524288 }
assigned { logical_buffer_id: 2 offset: 524288 size: 524288 }
}
logical_buffers {
id: 1
size: 524288
color: 0
defined_at { instruction_id: 0 shape_index: 0 }
}
logical_buffers {
id: 2
size: 524288
color: 0
defined_at { instruction_id: 1 shape_index: 0 }
}
heap_simulator_traces { %s }
}
)pb";
TEST(MemoryViewerTest, TestHeapSimulatorTraceShareWith_1) {
static constexpr char kHeapSimulatorTrace[] = R"pb(
events { kind: ALLOC buffer_id: 1 }
events { kind: SHARE_WITH buffer_id: 2 share_with_canonical_id: 1 }
events { kind: FREE buffer_id: 1 }
events { kind: FREE buffer_id: 2 }
)pb";
std::string hlo_string = absl::StrFormat(kHLOBase, kHeapSimulatorTrace);
xla::HloProto hlo_proto;
ASSERT_TRUE(
proto_utils::ParseTextFormatFromString(hlo_string, &hlo_proto).ok());
TF_ASSERT_OK_AND_ASSIGN(
PreprocessResult preprocess_result,
ConvertHloProtoToPreprocessResult(hlo_proto, 0));
EXPECT_EQ(preprocess_result.peak_heap_mib(), 0.5);
}
TEST(MemoryViewerTest, TestHeapSimulatorTraceShareWith_2) {
static constexpr char kHeapSimulatorTrace[] = R"pb(
events { kind: ALLOC buffer_id: 1 }
events { kind: FREE buffer_id: 1 }
events { kind: SHARE_WITH buffer_id: 2 share_with_canonical_id: 1 }
events { kind: FREE buffer_id: 2 }
)pb";
std::string hlo_string = absl::StrFormat(kHLOBase, kHeapSimulatorTrace);
xla::HloProto hlo_proto;
ASSERT_TRUE(
proto_utils::ParseTextFormatFromString(hlo_string, &hlo_proto).ok());
TF_ASSERT_OK_AND_ASSIGN(
PreprocessResult preprocess_result,
ConvertHloProtoToPreprocessResult(hlo_proto, 0));
EXPECT_EQ(preprocess_result.peak_heap_mib(), 0.5);
EXPECT_FALSE(preprocess_result.allocation_timeline().empty());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/hlo_proto_to_memory_visualization_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/hlo_proto_to_memory_visualization_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1624f2bd-0f8b-4ecc-937f-e20454f99463 | cpp | tensorflow/tensorflow | xplane_to_op_metrics_db | tensorflow/core/profiler/convert/xplane_to_op_metrics_db.cc | tensorflow/core/profiler/convert/xplane_to_op_metrics_db_test.cc | #include "tensorflow/core/profiler/convert/xplane_to_op_metrics_db.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "xla/tsl/profiler/utils/tf_op_utils.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/timespan.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/convert/op_metrics_db_combiner.h"
#include "tensorflow/core/profiler/convert/op_stack.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/cost_utils.h"
#include "tensorflow/core/profiler/utils/op_metrics_db_utils.h"
#include "tensorflow/core/profiler/utils/op_utils.h"
#include "tensorflow/core/profiler/utils/trace_utils.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
namespace {
constexpr uint64_t kRootSymbolId = 0;
enum TfActivityType { kTfOpBegin, kTfOpEnd };
struct TfActivity {
uint64 timestamp_ps;
uint32 tf_op_id;
TfActivityType activity_type;
tsl::profiler::TfOp tf_op;
bool is_eager;
};
struct TfOpInfo {
explicit TfOpInfo(uint64 ts) : start_timestamp_ps(ts) {}
uint64 start_timestamp_ps;
uint64 children_duration_ps = 0;
};
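// Pushes begins onto the op stack; on end, records the op's total and self
// time in the metrics builder and credits its duration to the parent's
// children_duration_ps. Infeed enqueue ops are additionally tracked.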
void ProcessOneTfActivity(const TfActivity& activity,
OpStack<TfOpInfo>* tf_op_stack,
TfMetricsDbData* tf_metrics_data) {
uint32 tf_op_id = activity.tf_op_id;
switch (activity.activity_type) {
case kTfOpBegin: {
tf_op_stack->Push(tf_op_id,
std::make_unique<TfOpInfo>(activity.timestamp_ps));
break;
}
case kTfOpEnd: {
std::unique_ptr<TfOpInfo> info = tf_op_stack->Pop(tf_op_id);
if (info == nullptr) {
VLOG(1) << "No begin event found for TF activity id=" << tf_op_id
<< " name=" << activity.tf_op.name
<< " type=" << activity.tf_op.type;
break;
}
tsl::profiler::Timespan tf_op_span = tsl::profiler::PicoSpan(
info->start_timestamp_ps, activity.timestamp_ps);
tf_metrics_data->tf_metrics_db_builder.EnterOp(
activity.tf_op.name, activity.tf_op.type, activity.is_eager,
tf_op_span.duration_ps(), info->children_duration_ps);
TfOpInfo* parent_info = tf_op_stack->Top();
if (parent_info != nullptr) {
parent_info->children_duration_ps += tf_op_span.duration_ps();
}
if (tsl::profiler::IsInfeedEnqueueOp(activity.tf_op.type)) {
tf_metrics_data->tf_metrics_db_builder.EnterHostInfeedEnqueue(
tf_op_span);
}
break;
}
}
}
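// Sorts activities by timestamp and replays them through an op stack; total
// wall time is the span from the first to the last activity.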
void ProcessTfActivities(std::vector<TfActivity>* tf_activities,
TfMetricsDbData* tf_metrics_db_data) {
if (tf_activities->empty()) return;
absl::c_stable_sort(*tf_activities,
[](const TfActivity& a, const TfActivity& b) {
return a.timestamp_ps < b.timestamp_ps;
});
OpStack<TfOpInfo> tf_op_stack;
for (const auto& tf_activity : *tf_activities) {
ProcessOneTfActivity(tf_activity, &tf_op_stack, tf_metrics_db_data);
}
SetTotalTimePs(
tf_metrics_db_data->tf_metrics_db,
tf_activities->back().timestamp_ps - tf_activities->front().timestamp_ps);
}
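// Expands each TF-op event on a non-derived line into begin/end activity
// pairs, resolving the op from event metadata or from a kTfOp stat.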
void CollectTfActivities(
const XLineVisitor& line,
const absl::flat_hash_map<int64_t, tsl::profiler::TfOp>& tf_ops,
std::vector<TfActivity>* tf_activities) {
uint32 tf_op_id = 0;
if (IsDerivedThreadId(line.Id())) return;
tf_activities->reserve(line.NumEvents() * 2);
line.ForEachEvent(
[&tf_ops, &tf_op_id, &tf_activities](const XEventVisitor& event) {
const tsl::profiler::TfOp* tf_op = gtl::FindOrNull(tf_ops, event.Id());
if (tf_op != nullptr) {
++tf_op_id;
bool is_eager = false;
if (std::optional<XStatVisitor> stat =
event.GetStat(StatType::kIsEager)) {
is_eager = stat->IntValue();
}
tsl::profiler::Timespan span = event.GetTimespan();
tf_activities->push_back(
{span.begin_ps(), tf_op_id, kTfOpBegin, *tf_op, is_eager});
tf_activities->push_back(
{span.end_ps(), tf_op_id, kTfOpEnd, *tf_op, is_eager});
}
if (auto tf_op_stat = event.GetStat(StatType::kTfOp);
tf_op_stat.has_value()) {
++tf_op_id;
tsl::profiler::TfOp tf_op =
tsl::profiler::ParseTfOpFullname(tf_op_stat->StrOrRefValue());
tsl::profiler::Timespan span = event.GetTimespan();
tf_activities->push_back(
{span.begin_ps(), tf_op_id, kTfOpBegin, tf_op, false});
tf_activities->push_back(
{span.end_ps(), tf_op_id, kTfOpEnd, tf_op, false});
}
});
}
}
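// Collects event metadata whose names parse as TF ops, keyed by metadata id.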
absl::flat_hash_map<int64_t, tsl::profiler::TfOp>
CollectTfOpsFromHostThreadsXPlane(const XPlane& host_trace) {
absl::flat_hash_map<int64_t, tsl::profiler::TfOp> tf_ops;
for (const auto& id_metadata : host_trace.event_metadata()) {
const XEventMetadata& metadata = id_metadata.second;
tsl::profiler::TfOp tf_op =
tsl::profiler::ParseTfOpFullname(metadata.name());
if (tf_op.category != tsl::profiler::Category::kUnknown) {
tf_ops.try_emplace(metadata.id(), tf_op);
}
}
return tf_ops;
}
TfMetricsDbData ConvertHostThreadsXLineToTfMetricsDbData(
const XLineVisitor& line,
const absl::flat_hash_map<int64_t, tsl::profiler::TfOp>& tf_ops) {
TfMetricsDbData tf_metrics_db_data;
std::vector<TfActivity> tf_activities;
CollectTfActivities(line, tf_ops, &tf_activities);
ProcessTfActivities(&tf_activities, &tf_metrics_db_data);
return tf_metrics_db_data;
}
void ConsumeTfMetricsDbData(TfMetricsDbData src, OpMetricsDbCombiner* dst) {
AddIdleOp(src.tf_metrics_db);
dst->Combine(src.tf_metrics_db, false);
src.tf_metrics_db.Clear();
}
OpMetricsDb ConvertHostThreadsXPlaneToOpMetricsDb(const XPlane& host_trace) {
absl::flat_hash_map<int64_t, tsl::profiler::TfOp> tf_ops =
CollectTfOpsFromHostThreadsXPlane(host_trace);
OpMetricsDb result;
OpMetricsDbCombiner combiner(&result);
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(&host_trace);
plane.ForEachLine([&tf_ops, &combiner](const XLineVisitor& line) {
ConsumeTfMetricsDbData(
ConvertHostThreadsXLineToTfMetricsDbData(line, tf_ops), &combiner);
});
return result;
}
OpMetricsDb ConvertTpuDeviceTraceXPlaneToOpMetricsDb(
const XPlane& device_trace) {
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(&device_trace);
XEventsOpMetricsDbBuilder builder;
plane.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent(
[&](const XEventVisitor& event) { builder.AddOpMetric(event); });
});
return builder.Finalize(
plane.GetStat(StatType::kTotalProfileDurationPs)->IntOrUintValue());
}
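// Builds a device OpMetricsDb from non-derived trace lines, attributing each
// event to its TF op, estimating roofline flops/bytes for recognized TF ops,
// and closing with the total time span and an IDLE entry.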
OpMetricsDb ConvertDeviceTraceXPlaneToOpMetricsDb(const XPlane& device_trace) {
OpMetricsDb result;
DeviceOpMetricsDbBuilder device_op_metrics_db_builder(&result);
int64_t first_op_offset_ps = kint64max;
int64_t last_op_offset_ps = 0;
TfOpRoofLineCostEstimator op_level_cost_estimator;
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(&device_trace);
plane.ForEachLine([&](const XLineVisitor& line) {
if (IsDerivedThreadId(line.Id())) return;
line.ForEachEvent([&](const XEventVisitor& event) {
first_op_offset_ps = std::min(first_op_offset_ps, event.OffsetPs());
last_op_offset_ps = std::max(last_op_offset_ps, event.EndOffsetPs());
absl::string_view tf_op_full_name;
bool is_eager = false;
int64_t program_id = 0;
absl::string_view deduplicated_name = "";
event.ForEachStat([&](const XStatVisitor& stat) {
if (stat.Type() == StatType::kTfOp) {
tf_op_full_name = stat.StrOrRefValue();
} else if (stat.Type() == StatType::kIsEager) {
is_eager = stat.IntValue();
} else if (stat.Type() == StatType::kProgramId) {
program_id = stat.IntOrUintValue();
} else if (stat.Type() == StatType::kDeduplicatedName) {
deduplicated_name = stat.StrOrRefValue();
}
});
if (tf_op_full_name.empty()) return;
tsl::profiler::TfOp tf_op =
tsl::profiler::ParseTfOpFullname(tf_op_full_name);
TfOpRoofLineCostEstimator::OpRoofLineStats costs;
if (tf_op.category != tsl::profiler::Category::kUnknown) {
costs = op_level_cost_estimator.Predict(event);
}
device_op_metrics_db_builder.EnterOp(
program_id,
absl::StrCat(tf_op.name, "/", event.Name()),
tf_op.type,
tf_op_full_name, deduplicated_name, is_eager,
1, event.DurationPs(),
0, costs.flops, costs.bytes_accessed);
});
});
SetTotalTimePs(
result, last_op_offset_ps ? last_op_offset_ps - first_op_offset_ps : 0);
AddIdleOp(result);
return result;
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_op_metrics_db.h"
#include <cstdint>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
#include "tensorflow/core/profiler/utils/op_metrics_db_utils.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
#if defined(PLATFORM_GOOGLE)
using ::testing::EqualsProto;
#endif
void AddTensorFlowTpuOpEvent(std::string&& name, std::string&& tf_op_fullname,
int64_t start_timestamp_ns, int64_t duration_ns,
std::string&& hlo_category, uint64 flops,
uint64 bytes_accessed, int64_t occurences,
int64_t self_duration, int64_t program_id,
int64_t symbol_id, XPlaneBuilder* plane,
XLineBuilder* line) {
XEventBuilder event = line->AddEvent(*plane->GetOrCreateEventMetadata(name));
event.SetTimestampNs(start_timestamp_ns);
event.SetDurationNs(duration_ns);
event.SetNumOccurrences(occurences);
XStatsBuilder<XEventMetadata> event_metadata(
plane->GetOrCreateEventMetadata(name), plane);
event_metadata.AddStatValue(
*plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kTfOp)),
tf_op_fullname);
event_metadata.AddStatValue(
*plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kHloCategory)),
hlo_category);
event_metadata.AddStatValue(
*plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kFlops)), flops);
event_metadata.AddStatValue(
*plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kSymbolId)),
symbol_id);
event_metadata.AddStatValue(
*plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kProgramId)),
program_id);
}
void AddTensorFlowOpEvent(std::string&& tf_op_fullname,
int64_t start_timestamp_ns, int64_t duration_ns,
bool on_device, absl::string_view kernel_name,
XPlaneBuilder* plane, XLineBuilder* line) {
absl::string_view name = on_device ? kernel_name : tf_op_fullname;
XEventBuilder event = line->AddEvent(*plane->GetOrCreateEventMetadata(name));
event.SetTimestampNs(start_timestamp_ns);
event.SetDurationNs(duration_ns);
if (!on_device) return;
event.AddStatValue(
*plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kTfOp)),
*plane->GetOrCreateStatMetadata(std::move(tf_op_fullname)));
}
void AddXlaCpuOpEvent(std::string&& hlo_op_name, std::string&& tf_op,
int64_t start_timestamp_ns, int64_t duration_ns,
XPlaneBuilder* plane, XLineBuilder* line) {
XEventBuilder event =
line->AddEvent(*plane->GetOrCreateEventMetadata(hlo_op_name));
event.SetTimestampNs(start_timestamp_ns);
event.SetDurationNs(duration_ns);
event.ParseAndAddStatValue(
*plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kTfOp)), tf_op);
}
TEST(ConvertXPlaneToOpMetricsDb, HostOpMetricsDb) {
static constexpr char kTfOp1[] = "TfOp1";
static constexpr char kTfOp2[] = "TfOp2";
constexpr int64_t kTfOp1StartNs = 100000;
constexpr int64_t kTfOp1DurationNs = 8000;
constexpr int64_t kTfOp2StartNs = 110000;
constexpr int64_t kTfOp2DurationNs = 10000;
XSpace xspace;
XPlane* xplane = GetOrCreateHostXPlane(&xspace);
XPlaneBuilder host_plane(xplane);
XLineBuilder thread1 = host_plane.GetOrCreateLine(10);
AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kTfOp1StartNs,
kTfOp1DurationNs, false,
"", &host_plane, &thread1);
XLineBuilder thread2 = host_plane.GetOrCreateLine(20);
AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kTfOp1StartNs,
kTfOp1DurationNs, false,
"", &host_plane, &thread2);
AddTensorFlowOpEvent(absl::StrCat(kTfOp2, ":", kTfOp2), kTfOp2StartNs,
kTfOp2DurationNs, false,
"", &host_plane, &thread2);
OpMetricsDb op_metrics = ConvertHostThreadsXPlaneToOpMetricsDb(*xplane);
EXPECT_EQ(3, op_metrics.metrics_db_size());
uint64 total_op_duration =
tsl::profiler::NanoToPico(kTfOp1DurationNs * 2 + kTfOp2DurationNs);
EXPECT_EQ(total_op_duration, op_metrics.total_op_time_ps());
uint64 total_duration = tsl::profiler::NanoToPico(
kTfOp2StartNs - kTfOp1StartNs + kTfOp2DurationNs + kTfOp1DurationNs);
EXPECT_EQ(total_duration, op_metrics.total_time_ps());
const OpMetrics& op_1 = op_metrics.metrics_db().at(0);
EXPECT_EQ(kTfOp1, op_1.name());
EXPECT_EQ(kTfOp1, op_1.category());
EXPECT_EQ(2, op_1.occurrences());
EXPECT_EQ(tsl::profiler::NanoToPico(kTfOp1DurationNs) * 2, op_1.time_ps());
const OpMetrics& idle = op_metrics.metrics_db().at(1);
EXPECT_EQ(kIdle, idle.name());
EXPECT_EQ(kIdle, idle.category());
EXPECT_EQ(tsl::profiler::NanoToPico(2000), idle.time_ps());
const OpMetrics& op_2 = op_metrics.metrics_db().at(2);
EXPECT_EQ(kTfOp2, op_2.name());
EXPECT_EQ(kTfOp2, op_2.category());
EXPECT_EQ(1, op_2.occurrences());
EXPECT_EQ(tsl::profiler::NanoToPico(kTfOp2DurationNs), op_2.time_ps());
}
TEST(ConvertXPlaneToOpMetricsDb, DeviceOpMetricsDb) {
static constexpr char kTfOp1[] = "TfOp1";
static constexpr char kTfOp2[] = "TfOp2";
static constexpr char kKernel1[] = "kernel1";
static constexpr char kKernel2[] = "kernel2";
static constexpr char kKernel3[] = "kernel3";
constexpr int64_t kKernel1StartNs = 100000;
constexpr int64_t kKernel1DurationNs = 8000;
constexpr int64_t kKernel2StartNs = 110000;
constexpr int64_t kKernel2DurationNs = 10000;
constexpr int64_t kKernel3StartNs = 120000;
constexpr int64_t kKernel3DurationNs = 10000;
XSpace xspace;
XPlane* xplane = GetOrCreateGpuXPlane(&xspace, 0);
XPlaneBuilder device_plane(xplane);
XLineBuilder stream1 = device_plane.GetOrCreateLine(10);
  AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kKernel1StartNs,
                       kKernel1DurationNs, /*on_device=*/true, kKernel1,
                       &device_plane, &stream1);
  AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kKernel2StartNs,
                       kKernel2DurationNs, /*on_device=*/true, kKernel2,
                       &device_plane, &stream1);
  XLineBuilder stream2 = device_plane.GetOrCreateLine(20);
  AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kKernel1StartNs,
                       kKernel1DurationNs, /*on_device=*/true, kKernel1,
                       &device_plane, &stream2);
  AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kKernel2StartNs,
                       kKernel2DurationNs, /*on_device=*/true, kKernel2,
                       &device_plane, &stream2);
  AddTensorFlowOpEvent(absl::StrCat(kTfOp2, ":", kTfOp2), kKernel3StartNs,
                       kKernel3DurationNs, /*on_device=*/true, kKernel3,
                       &device_plane, &stream2);
OpMetricsDb op_metrics = ConvertDeviceTraceXPlaneToOpMetricsDb(*xplane);
EXPECT_EQ(4, op_metrics.metrics_db_size());
uint64 total_op_duration = tsl::profiler::NanoToPico(
kKernel1DurationNs * 2 + kKernel2DurationNs * 2 + kKernel3DurationNs);
EXPECT_EQ(total_op_duration, op_metrics.total_op_time_ps());
uint64 total_duration = tsl::profiler::NanoToPico(
kKernel3StartNs + kKernel3DurationNs - kKernel1StartNs);
EXPECT_EQ(std::max(total_duration, total_op_duration),
op_metrics.total_time_ps());
const OpMetrics& op_1 = op_metrics.metrics_db().at(0);
EXPECT_EQ(absl::StrCat(kTfOp1, "/", kKernel1), op_1.name());
EXPECT_EQ(kTfOp1, op_1.category());
EXPECT_EQ(2, op_1.occurrences());
EXPECT_EQ(tsl::profiler::NanoToPico(kKernel1DurationNs) * 2, op_1.time_ps());
const OpMetrics& op_2 = op_metrics.metrics_db().at(1);
EXPECT_EQ(absl::StrCat(kTfOp1, "/", kKernel2), op_2.name());
EXPECT_EQ(kTfOp1, op_2.category());
EXPECT_EQ(2, op_2.occurrences());
EXPECT_EQ(tsl::profiler::NanoToPico(kKernel2DurationNs) * 2, op_2.time_ps());
const OpMetrics& op_3 = op_metrics.metrics_db().at(2);
EXPECT_EQ(absl::StrCat(kTfOp2, "/", kKernel3), op_3.name());
EXPECT_EQ(kTfOp2, op_3.category());
EXPECT_EQ(1, op_3.occurrences());
EXPECT_EQ(tsl::profiler::NanoToPico(kKernel3DurationNs), op_3.time_ps());
const OpMetrics& idle = op_metrics.metrics_db().at(3);
EXPECT_EQ(kIdle, idle.name());
EXPECT_EQ(kIdle, idle.category());
EXPECT_EQ(tsl::profiler::NanoToPico(0), idle.time_ps());
}
TEST(ConvertXPlaneToOpMetricsDb, TpuDeviceOpMetricsDb) {
XSpace xspace;
  XPlane* xplane =
      GetOrCreateTpuXPlane(&xspace, /*device_ordinal=*/0, "TPU V4",
                           /*peak_tera_flops_per_second=*/0,
                           /*peak_hbm_bw_gigabytes_per_second=*/0);
XPlaneBuilder device_plane(xplane);
device_plane.AddStatValue(
*device_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kTotalProfileDurationPs)),
1000);
XLineBuilder stream1 = device_plane.GetOrCreateLine(10);
AddTensorFlowTpuOpEvent("MatMul", "while:MatMul", 0, 10, "MatMul", 34, 45, 2,
5, 1, 1, &device_plane, &stream1);
OpMetricsDb op_metrics = ConvertTpuDeviceTraceXPlaneToOpMetricsDb(*xplane);
#if defined(PLATFORM_GOOGLE)
EXPECT_THAT(op_metrics,
EqualsProto(R"pb(metrics_db {
hlo_module_id: 1
self_time_ps: 10000
flops: 68
occurrences: 2
name: "MatMul"
time_ps: 10000
category: "MatMul"
provenance: "while:MatMul"
min_time_ps: 10000
}
metrics_db { name: "IDLE" category: "IDLE" }
total_time_ps: 10000
total_op_time_ps: 10000
)pb"));
#endif
}
TEST(ConvertXPlaneToOpMetricsDb, HostXPlaneWithXlaOps) {
XPlane xplane;
XPlaneBuilder plane(&xplane);
XLineBuilder line = plane.GetOrCreateLine(10);
AddXlaCpuOpEvent("xla_op", "tf_op", 100000, 8000, &plane, &line);
AddXlaCpuOpEvent("xla_op2", "tf_op2", 110000, 10000, &plane, &line);
OpMetricsDb op_metrics = ConvertHostThreadsXPlaneToOpMetricsDb(xplane);
#if defined(PLATFORM_GOOGLE)
EXPECT_THAT(op_metrics, EqualsProto(R"pb(metrics_db {
self_time_ps: 8000000
occurrences: 1
name: "tf_op"
time_ps: 8000000
}
metrics_db {
self_time_ps: 10000000
occurrences: 1
name: "tf_op2"
time_ps: 10000000
}
metrics_db {
self_time_ps: 2000000
name: "IDLE"
time_ps: 2000000
category: "IDLE"
}
total_time_ps: 20000000
total_op_time_ps: 18000000
precision_stats {}
)pb"));
#endif
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_op_metrics_db.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_op_metrics_db_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
404138cb-ac73-4e58-893c-69a8a5dcda55 | cpp | tensorflow/tensorflow | dcn_utils | tensorflow/core/profiler/convert/dcn_utils.cc | tensorflow/core/profiler/convert/dcn_utils_test.cc | #include "tensorflow/core/profiler/convert/dcn_utils.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
namespace {
using tsl::profiler::MicroToNano;
using tsl::profiler::StatType;
using tsl::profiler::XEventVisitor;
using tsl::profiler::XStatVisitor;
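// Builds a DcnMessage from the stats attached to a DCN XEvent. The duration
// stat also fixes the message timestamps: start = event timestamp - duration,
// end = event timestamp.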
DcnMessage CreateDcnMessageFromStats(const XEventVisitor& event_visitor) {
DcnMessage dcn_message;
event_visitor.ForEachStat([&](const XStatVisitor& stat) {
if (!stat.Type()) return;
switch (static_cast<StatType>(*stat.Type())) {
case StatType::kDcnLabel: {
dcn_message.collective_name = stat.ToString();
break;
}
case StatType::kDcnSourceSliceId: {
dcn_message.slice_src = stat.IntValue();
break;
}
case StatType::kDcnSourcePerSliceDeviceId: {
dcn_message.tpu_src = stat.IntValue();
break;
}
case StatType::kDcnDestinationSliceId: {
dcn_message.slice_dst = stat.IntValue();
break;
}
case StatType::kDcnDestinationPerSliceDeviceId: {
dcn_message.tpu_dst = stat.IntValue();
break;
}
case StatType::kDcnChunk: {
dcn_message.chunk_id = stat.IntValue();
break;
}
case StatType::kDcnLoopIndex: {
dcn_message.loop_index_id = stat.IntValue();
break;
}
case StatType::kPayloadSizeBytes: {
dcn_message.size_bytes = stat.IntValue();
break;
}
case StatType::kDuration: {
dcn_message.duration_us = stat.IntOrUintValue();
dcn_message.start_timestamp_ns =
event_visitor.TimestampNs() - MicroToNano(dcn_message.duration_us);
dcn_message.end_timestamp_ns = event_visitor.TimestampNs();
break;
}
default:
break;
}
});
return dcn_message;
}
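// Classifies a message: missing required stats -> bad key, zero duration ->
// clock skew, same source and destination slice -> loopback, otherwise valid.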
void SetMessageValidity(DcnMessage& dcn_message) {
if (dcn_message.collective_name.empty() || dcn_message.slice_src == -1 ||
dcn_message.tpu_src == -1 || dcn_message.slice_dst == -1 ||
dcn_message.tpu_dst == -1 || dcn_message.size_bytes == -1) {
dcn_message.validity_info = DCN_MESSAGE_INVALID_BAD_KEY;
} else if (dcn_message.duration_us == 0) {
dcn_message.validity_info = DCN_MESSAGE_INVALID_CLOCK_SKEW;
} else if (dcn_message.slice_src == dcn_message.slice_dst) {
dcn_message.validity_info = DCN_MESSAGE_VALID_LOOPBACK;
} else {
dcn_message.validity_info = DCN_MESSAGE_VALID;
}
}
}
DcnMessage GetDcnMessageFromXEvent(const XEventVisitor& event_visitor) {
DcnMessage dcn_message = CreateDcnMessageFromStats(event_visitor);
SetMessageValidity(dcn_message);
return dcn_message;
}
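// DCN events are recognized purely by the "MegaScale:" prefix of the event
// name.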
bool IsDcnEvent(const tsl::profiler::XEventVisitor& event) {
return absl::StartsWith(event.Name(), "MegaScale:");
}
}
} | #include "tensorflow/core/profiler/convert/dcn_utils.h"
#include <cstdint>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/xplane_builder.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
namespace tensorflow {
namespace profiler {
namespace {
using tsl::profiler::kMegaScaleDcnReceive;
using tsl::profiler::XEventBuilder;
using tsl::profiler::XEventVisitor;
using tsl::profiler::XLineBuilder;
using tsl::profiler::XPlaneBuilder;
using tsl::profiler::XPlaneVisitor;
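// Builds an XPlane containing a single event that carries all the DCN stats
// the tests below rely on.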
void PopulateXPlane(XPlane &xplane, absl::string_view event_name, int offset,
absl::string_view label, int64_t source_slice_id,
int64_t source_per_slice_device_id,
int64_t destination_slice_id,
int64_t destination_per_slice_device_id, int64_t chunk,
int64_t loop_index, int64_t payload_size,
int64_t duration) {
XPlaneBuilder xplane_builder(&xplane);
XEventMetadata *event_metadata = xplane_builder.GetOrCreateEventMetadata(1);
event_metadata->set_name(std::string(event_name));
XLineBuilder xline_builder = xplane_builder.GetOrCreateLine(0);
XEventBuilder event_builder = xline_builder.AddEvent(*event_metadata);
event_builder.SetOffsetNs(offset);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_label"), label);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"),
source_slice_id);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"),
source_per_slice_device_id);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"),
destination_slice_id);
event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
"dcn_destination_per_slice_device_id"),
destination_per_slice_device_id);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), chunk);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), loop_index);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("duration_us"), duration);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"),
payload_size);
}
TEST(DcnUtilsTest, IsDcnEvent) {
XPlane xplane;
PopulateXPlane(xplane, kMegaScaleDcnReceive, 0, "test", 0, 0, 0, 0, 0, 0, 0,
0);
XLine line = xplane.lines()[0];
XPlaneVisitor xplane_visitor = tsl::profiler::CreateTfXPlaneVisitor(&xplane);
XEventVisitor visitor(&xplane_visitor, &line, &line.events()[0]);
EXPECT_TRUE(IsDcnEvent(visitor));
}
TEST(DcnUtilsTest, IsNotDcnEvent) {
XPlane xplane;
PopulateXPlane(xplane, "test", 0, "test", 0, 0, 0, 0, 0, 0, 0, 0);
XLine line = xplane.lines()[0];
XPlaneVisitor xplane_visitor = tsl::profiler::CreateTfXPlaneVisitor(&xplane);
XEventVisitor visitor(&xplane_visitor, &line, &line.events()[0]);
EXPECT_FALSE(IsDcnEvent(visitor));
}
TEST(DcnUtilsTest, GetDcnMessageFromXEvent) {
XPlane xplane;
PopulateXPlane(xplane, kMegaScaleDcnReceive, 100000, "all-reduce.273_312", 2,
3, 1, 3, 0, 24, 32768, 50);
XPlaneVisitor xplane_visitor = tsl::profiler::CreateTfXPlaneVisitor(&xplane);
XEventVisitor visitor(&xplane_visitor, &xplane.lines()[0],
&xplane.lines()[0].events()[0]);
  EXPECT_THAT(GetDcnMessageFromXEvent(visitor),
              testing::FieldsAre(
                  /*collective_name=*/"all-reduce.273_312",
                  /*slice_src=*/2, /*tpu_src=*/3, /*slice_dst=*/1,
                  /*tpu_dst=*/3,
                  /*start_timestamp_ns=*/50000, /*end_timestamp_ns=*/100000,
                  /*duration_us=*/50,
                  /*size_bytes=*/32768, /*chunk_id=*/0, /*loop_index_id=*/24,
                  DCN_MESSAGE_VALID));
}
TEST(DcnUtilsTest, GetDcnMessageFromXEventLoopBack) {
XPlane xplane;
PopulateXPlane(xplane, kMegaScaleDcnReceive, 5000000, "all-gather.1234", 2, 3,
2, 1, 4, 40, 1000, 1000);
XPlaneVisitor xplane_visitor = tsl::profiler::CreateTfXPlaneVisitor(&xplane);
XEventVisitor visitor(&xplane_visitor, &xplane.lines()[0],
&xplane.lines()[0].events()[0]);
  EXPECT_THAT(GetDcnMessageFromXEvent(visitor),
              testing::FieldsAre(
                  /*collective_name=*/"all-gather.1234",
                  /*slice_src=*/2, /*tpu_src=*/3, /*slice_dst=*/2,
                  /*tpu_dst=*/1,
                  /*start_timestamp_ns=*/4000000, /*end_timestamp_ns=*/5000000,
                  /*duration_us=*/1000,
                  /*size_bytes=*/1000, /*chunk_id=*/4, /*loop_index_id=*/40,
                  DCN_MESSAGE_VALID_LOOPBACK));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/dcn_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/dcn_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
96adaac4-6e20-4641-ba4a-c5b9cc5e9ff3 | cpp | tensorflow/tensorflow | dcn_analysis | tensorflow/core/profiler/convert/dcn_analysis.cc | tensorflow/core/profiler/convert/dcn_analysis_test.cc | #include "tensorflow/core/profiler/convert/dcn_analysis.h"
#include <algorithm>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/math_utils.h"
#include "xla/tsl/profiler/utils/tpu_xplane_utils.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/convert/dcn_utils.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
namespace tensorflow {
namespace profiler {
using tsl::profiler::kMaxCollectivesToDisplay;
using tsl::profiler::kMegaScaleDcnReceive;
using tsl::profiler::LineIdType;
using tsl::profiler::MicroToNano;
void DcnBurstManager::ResetBurstState() {
active_burst_messages_ = 0;
straggler_idx_ = 0;
active_burst_.num_messages = 0;
active_burst_.max_overlapping_messages = 0;
active_burst_.start_timestamp_ns = 0;
active_burst_.end_timestamp_ns = 0;
active_burst_.burst_size_bytes = 0;
}
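// Walks the timestamp-ordered event map, adding message_diff (+1 at message
// start, -1 at message end) to a running count. A burst opens when the count
// leaves zero and closes when it returns to zero; messages ending inside a
// burst are recorded as potential stragglers in a fixed-size ring buffer.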
void DcnBurstManager::CreateBursts(const TimestampMap& tm_events) {
ResetBurstState();
for (const auto& tm_event : tm_events) {
if (active_burst_messages_ < 0) {
LOG_FIRST_N(WARNING, 10)
<< "Negative messages in burst, bursts will be incorrect.";
}
if (active_burst_messages_ == 0) {
active_burst_.start_timestamp_ns = tm_event.first;
}
active_burst_messages_ += tm_event.second->message_diff;
if (tm_event.second->message_diff > 0) {
active_burst_.num_messages += tm_event.second->message_diff;
active_burst_.burst_size_bytes += tm_event.second->size_diff;
} else {
Straggler straggler = {tm_event.second->duration_ns,
tm_event.second->timestamp_ns,
tm_event.second->size_diff * (-1),
tm_event.second->src_slice_id};
active_burst_.stragglers[straggler_idx_] = straggler;
straggler_idx_ = (straggler_idx_ + 1) % kMaxStragglersPerBurst;
}
active_burst_.max_overlapping_messages =
std::max(active_burst_.max_overlapping_messages,
static_cast<uint64_t>(active_burst_messages_));
if (active_burst_messages_ == 0) {
active_burst_.end_timestamp_ns = tm_event.first;
total_latency_ +=
(active_burst_.end_timestamp_ns - active_burst_.start_timestamp_ns);
bursts_.emplace_back(std::move(active_burst_));
ResetBurstState();
}
}
}
DcnEventsProcessor::DcnEventsProcessor(uint32_t num_tpu_tensor_cores,
bool is_megacore)
: num_tpu_tensor_cores_(num_tpu_tensor_cores), is_megacore_(is_megacore) {
registered_dcn_messages_.push_back(kMegaScaleDcnReceive);
tpu_collective_ts_map_.resize(num_tpu_tensor_cores_);
tpu_collective_bursts_.resize(num_tpu_tensor_cores_);
}
void DcnEventsProcessor::SetupMessageInfo(const XPlaneVisitor& plane) {
plane.ForEachEventMetadata([&](const XEventMetadataVisitor& event_metadata) {
if (std::find(registered_dcn_messages_.begin(),
registered_dcn_messages_.end(),
event_metadata.Name()) != registered_dcn_messages_.end()) {
megascale_msg_[event_metadata.Name()] = event_metadata.Id();
}
});
}
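// Maps a destination TPU id to a tensor-core index. With megacore, two
// tensor cores share one device, so the id wraps at half the core count and
// lands on even indices.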
uint32_t DcnEventsProcessor::FindTpuIdx(int tpu) {
uint32_t num_tpus = num_tpu_tensor_cores_;
if (is_megacore_) {
num_tpus /= 2;
}
uint32_t tpu_idx = tpu % num_tpus;
if (is_megacore_) {
tpu_idx = tpu_idx * 2;
}
return tpu_idx;
}
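// Creates the paired start/end TimestampEvents for one message: the start
// carries +1 message and +size, the end carries -1/-size plus the message
// duration. Both are inserted into the host-wide map and into the
// destination TPU's per-collective map.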
void DcnEventsProcessor::GenerateTimestampEvents(
const DcnMessage& dcn_message) {
std::shared_ptr<TimestampEvent> start_event(
new TimestampEvent{dcn_message.start_timestamp_ns, 0, 1,
dcn_message.size_bytes, dcn_message.slice_src});
std::shared_ptr<TimestampEvent> end_event(new TimestampEvent{
dcn_message.end_timestamp_ns,
static_cast<uint64_t>(MicroToNano(dcn_message.duration_us)), -1,
-1 * dcn_message.size_bytes, dcn_message.slice_src});
std::pair<uint64_t, std::shared_ptr<TimestampEvent>> start_event_entry =
std::make_pair(dcn_message.start_timestamp_ns, start_event);
std::pair<uint64_t, std::shared_ptr<TimestampEvent>> end_event_entry =
std::make_pair(dcn_message.end_timestamp_ns, end_event);
host_ts_map_.insert(start_event_entry);
host_ts_map_.insert(end_event_entry);
const std::string& collective_name = dcn_message.collective_name;
uint32_t tpu_idx = FindTpuIdx(dcn_message.tpu_dst);
auto& m = tpu_collective_ts_map_[tpu_idx][collective_name];
m.insert(start_event_entry);
m.insert(end_event_entry);
}
void DcnEventsProcessor::PrintTimestampEvents() {
for (const auto& host_ts : host_ts_map_) {
LOG(INFO) << host_ts.first << ": " << host_ts.second->timestamp_ns << " "
<< host_ts.second->duration_ns << " "
<< host_ts.second->message_diff << " "
<< host_ts.second->size_diff << " "
<< host_ts.second->src_slice_id;
}
for (uint32_t tpu_idx = 0; tpu_idx < num_tpu_tensor_cores_; tpu_idx++) {
LOG(INFO) << "TPU: " << tpu_idx;
for (const auto& col_id : tpu_collective_ts_map_[tpu_idx]) {
LOG(INFO) << col_id.first;
for (const auto& tpu_col_ts :
tpu_collective_ts_map_[tpu_idx][col_id.first]) {
LOG(INFO) << tpu_col_ts.first << ": " << tpu_col_ts.second->timestamp_ns
<< " " << tpu_col_ts.second->duration_ns << " "
<< tpu_col_ts.second->message_diff << " "
<< tpu_col_ts.second->size_diff << " "
<< tpu_col_ts.second->src_slice_id;
}
}
}
}
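// Expects `latencies` sorted in descending order. Stops qualifying once a
// collective falls under 5% of the total host burst latency, or under 20%
// after half of the display slots are used, or when the slots run out.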
uint32_t DcnEventsProcessor::NumCollectivesQualified(
const std::vector<uint64_t>& latencies) {
uint32_t num_collectives_qualified = 0;
uint32_t max_collectives = kMaxCollectivesToDisplay - 1;
for (const auto& lat : latencies) {
if (lat < host_dcn_bursts_.TotalLatency() * 0.05) {
return num_collectives_qualified;
} else if (lat < host_dcn_bursts_.TotalLatency() * 0.2 &&
num_collectives_qualified >= (max_collectives / 2)) {
return num_collectives_qualified;
} else if (num_collectives_qualified >= max_collectives) {
return num_collectives_qualified;
} else {
num_collectives_qualified++;
}
}
return latencies.size();
}
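// Marks the highest-latency collectives on each TPU for display, leaving one
// of the kMaxCollectivesToDisplay slots for the "Remaining collectives" line.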
void DcnEventsProcessor::QualifyCollectives() {
for (auto tpu_idx = 0; tpu_idx < num_tpu_tensor_cores_; tpu_idx++) {
std::vector<uint64_t> latency_to_order;
latency_to_order.reserve(tpu_collective_bursts_[tpu_idx].size());
for (const auto& col_info : tpu_collective_bursts_[tpu_idx]) {
latency_to_order.emplace_back(col_info.second.TotalLatency());
}
std::sort(latency_to_order.begin(), latency_to_order.end(),
std::greater<uint64_t>());
uint32_t num_collectives_qualified =
NumCollectivesQualified(latency_to_order);
if (num_collectives_qualified > 0) {
uint32_t min_latency_to_qualify =
latency_to_order[num_collectives_qualified - 1];
uint32_t col_num = 0;
for (auto& col_info : tpu_collective_bursts_[tpu_idx]) {
if (col_info.second.TotalLatency() >= min_latency_to_qualify) {
col_info.second.SetToDisplay(true);
if (++col_num == kMaxCollectivesToDisplay - 1) break;
}
}
}
}
}
void DcnEventsProcessor::GenerateBursts() {
host_dcn_bursts_.CreateBursts(host_ts_map_);
host_dcn_bursts_.SetToDisplay(true);
for (auto tpu_idx = 0; tpu_idx < num_tpu_tensor_cores_; tpu_idx++) {
for (const auto& col_info : tpu_collective_ts_map_[tpu_idx]) {
tpu_collective_bursts_[tpu_idx][col_info.first].CreateBursts(
tpu_collective_ts_map_[tpu_idx][col_info.first]);
}
}
QualifyCollectives();
}
void DcnEventsProcessor::ProcessReceiveMessages(const XPlaneVisitor& plane) {
plane.ForEachLine([&](const XLineVisitor& line) {
uint32_t recv_msg_id = megascale_msg_[kMegaScaleDcnReceive];
line.ForEachEvent([&](const XEventVisitor& event) {
if (event.Id() == recv_msg_id) {
DcnMessage dcn_message = GetDcnMessageFromXEvent(event);
if (dcn_message.validity_info == DCN_MESSAGE_VALID) {
GenerateTimestampEvents(dcn_message);
}
received_messages_.emplace_back(std::move(dcn_message));
}
});
});
GenerateBursts();
}
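// Computes the burst's mean bandwidth in bytes/ns (numerically equal to
// GB/s) and buckets it as Low/Med/High BW. For per-TPU bursts the host
// limits are divided by the tensor-core count (halved again for megacore).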
absl::string_view DcnEventsProcessor::GetBwInfo(bool is_per_tpu,
const DcnBurst& burst,
float& burst_mean_bw,
float& burst_bw_utilization) {
absl::string_view bw_level;
uint32_t bw_divider = 1;
burst_mean_bw = static_cast<float>(burst.burst_size_bytes) /
(burst.end_timestamp_ns - burst.start_timestamp_ns);
if (is_per_tpu) {
bw_divider = num_tpu_tensor_cores_;
if (is_megacore_) {
bw_divider /= 2;
}
}
if (burst_mean_bw < kLimitLowHostDcnBw / bw_divider) {
bw_level = "Low BW";
} else if (burst_mean_bw < kLimitMedHostDcnBw / bw_divider) {
bw_level = "Med BW";
} else {
bw_level = "High BW";
}
burst_bw_utilization = burst_mean_bw / (kMaxHostDcnBw / bw_divider);
return bw_level;
}
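// Emits one event per host burst on a "DCN Host Bandwidth" line, using the
// bandwidth bucket as the event name and attaching bandwidth, utilization,
// and message statistics.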
void DcnEventsProcessor::AddHostDcnTrafficToXPlane(XPlane* host_xplane) {
if (!host_dcn_bursts_.ToDisplay()) return;
XPlaneBuilder plane_builder(host_xplane);
XLineBuilder line =
plane_builder.GetOrCreateLine(LineIdType::kDcnHostTraffic);
line.SetNameIfEmpty("DCN Host Bandwidth");
line.SetTimestampNs(0);
XStatMetadata* bw_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Bandwidth (GBytes/sec)");
XStatMetadata* bw_util_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Bandwidth Utilization");
XStatMetadata* num_msg_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Total Messages");
XStatMetadata* max_overlap_msg_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Max Overlapping Messages");
XStatMetadata* avg_msg_size_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Average Message Size (Bytes)");
for (const auto& host_burst : host_dcn_bursts_.GetBursts()) {
float burst_mean_bw, bw_utilization;
absl::string_view bw_level =
GetBwInfo(false, host_burst, burst_mean_bw, bw_utilization);
XEventMetadata* event_metadata =
plane_builder.GetOrCreateEventMetadata(bw_level);
XEventBuilder event = line.AddEvent(*event_metadata);
event.SetOffsetNs(host_burst.start_timestamp_ns);
event.SetDurationNs(host_burst.end_timestamp_ns -
host_burst.start_timestamp_ns);
event.ParseAndAddStatValue(*bw_stat_metadata,
std::to_string(burst_mean_bw));
event.ParseAndAddStatValue(*bw_util_stat_metadata,
std::to_string(bw_utilization));
event.AddStatValue(*num_msg_stat_metadata, host_burst.num_messages);
event.AddStatValue(*max_overlap_msg_stat_metadata,
host_burst.max_overlapping_messages);
uint32_t avg_message_size =
host_burst.burst_size_bytes / host_burst.num_messages;
event.AddStatValue(*avg_msg_size_stat_metadata, avg_message_size);
}
}
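// Collectives that did not qualify for their own line share a single
// "Remaining collectives" line; each straggler is drawn as a fixed 10 us
// marker event ending at its receive timestamp.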
void DcnEventsProcessor::AddUnqualifiedCollectivesToXPlane(
XPlaneBuilder& plane_builder, uint32_t tpu_idx) {
XLineBuilder line =
plane_builder.GetOrCreateLine(LineIdType::kDcnCollectiveTrafficMax);
line.SetNameIfEmpty("Remaining collectives");
line.SetTimestampNs(0);
for (const auto& col_item : tpu_collective_bursts_[tpu_idx]) {
if (col_item.second.ToDisplay()) continue;
for (const auto& col_burst : col_item.second.GetBursts()) {
XEventMetadata* straggler_event_metadata =
plane_builder.GetOrCreateEventMetadata(col_item.first);
uint32_t stragglers_processed = 0;
XStatMetadata* straggler_src_slice_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Source slice");
XStatMetadata* straggler_duration_ns_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Duration ns");
XStatMetadata* straggler_send_time_ns_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Send timestamp ns");
XStatMetadata* straggler_recv_time_ns_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Recv timestamp ns");
for (const auto& straggler : col_burst.stragglers) {
XEventBuilder straggler_event =
line.AddEvent(*straggler_event_metadata);
straggler_event.SetOffsetNs(straggler.end_timestamp_ns - 10000);
straggler_event.SetDurationNs(10000);
straggler_event.AddStatValue(*straggler_src_slice_stat_metadata,
straggler.src_slice_id);
straggler_event.AddStatValue(*straggler_duration_ns_stat_metadata,
straggler.duration_ns);
straggler_event.AddStatValue(
*straggler_send_time_ns_stat_metadata,
straggler.end_timestamp_ns - straggler.duration_ns);
straggler_event.AddStatValue(*straggler_recv_time_ns_stat_metadata,
straggler.end_timestamp_ns);
if (++stragglers_processed >= col_burst.num_messages) break;
}
}
}
}
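// Gives each qualified collective its own line: bursts become bandwidth
// events, the straggler that closes a burst gets its own event, and all
// stragglers are also summarized in a text stat on the burst event.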
void DcnEventsProcessor::AddQualifiedCollectivesToXPlane(
XPlaneBuilder& plane_builder, uint32_t tpu_idx) {
uint32_t total_collectives = 0;
for (const auto& col_item : tpu_collective_bursts_[tpu_idx]) {
if (!col_item.second.ToDisplay()) continue;
const std::string& col_name = col_item.first;
XLineBuilder line = plane_builder.GetOrCreateLine(
LineIdType::kDcnCollectiveTraffic + total_collectives++);
line.SetNameIfEmpty(col_name);
line.SetTimestampNs(0);
XStatMetadata* bw_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Bandwidth (GBytes/sec)");
XStatMetadata* bw_util_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Bandwidth Utilization");
XStatMetadata* num_msg_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Total Messages");
XStatMetadata* max_overlap_msg_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Max Overlapping Messages");
XStatMetadata* avg_msg_size_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Average Message Size (Bytes)");
XStatMetadata* straggler_details_metadata =
plane_builder.GetOrCreateStatMetadata("Straggler info:");
XStatMetadata* straggler_src_slice_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Source slice");
XStatMetadata* straggler_duration_ns_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Duration ns");
XStatMetadata* straggler_send_time_ns_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Send timestamp ns");
XStatMetadata* straggler_recv_time_ns_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Recv timestamp ns");
for (const auto& col_burst : col_item.second.GetBursts()) {
float burst_mean_bw, bw_utilization;
absl::string_view bw_level =
GetBwInfo(true, col_burst, burst_mean_bw, bw_utilization);
XEventMetadata* event_metadata =
plane_builder.GetOrCreateEventMetadata(bw_level);
XEventBuilder event = line.AddEvent(*event_metadata);
event.SetOffsetNs(col_burst.start_timestamp_ns);
event.SetDurationNs(col_burst.end_timestamp_ns -
col_burst.start_timestamp_ns);
event.ParseAndAddStatValue(*bw_stat_metadata,
std::to_string(burst_mean_bw));
event.ParseAndAddStatValue(*bw_util_stat_metadata,
std::to_string(bw_utilization));
event.AddStatValue(*num_msg_stat_metadata, col_burst.num_messages);
event.AddStatValue(*max_overlap_msg_stat_metadata,
col_burst.max_overlapping_messages);
event.AddStatValue(*avg_msg_size_stat_metadata,
col_burst.burst_size_bytes / col_burst.num_messages);
XEventMetadata* straggler_event_metadata =
plane_builder.GetOrCreateEventMetadata("Straggler");
uint32_t stragglers_processed = 0;
std::string straggler_details = "Stragglers:\n";
for (const auto& straggler : col_burst.stragglers) {
if (straggler.end_timestamp_ns == col_burst.end_timestamp_ns) {
XEventBuilder straggler_event =
line.AddEvent(*straggler_event_metadata);
straggler_event.SetOffsetNs(straggler.end_timestamp_ns -
straggler.duration_ns);
straggler_event.SetDurationNs(straggler.duration_ns);
straggler_event.AddStatValue(*straggler_src_slice_stat_metadata,
straggler.src_slice_id);
straggler_event.AddStatValue(*straggler_duration_ns_stat_metadata,
straggler.duration_ns);
straggler_event.AddStatValue(
*straggler_send_time_ns_stat_metadata,
straggler.end_timestamp_ns - straggler.duration_ns);
straggler_event.AddStatValue(*straggler_recv_time_ns_stat_metadata,
straggler.end_timestamp_ns);
}
straggler_details +=
" Src slice: " + std::to_string(straggler.src_slice_id) +
" -- Duration (ns): " + std::to_string(straggler.duration_ns) +
" -- [Send Timestamp, Recv Timestamp]: [" +
std::to_string(straggler.end_timestamp_ns - straggler.duration_ns) +
", " + std::to_string(straggler.end_timestamp_ns) + "]\n";
if (++stragglers_processed >= col_burst.num_messages) break;
}
event.AddStatValue(*straggler_details_metadata, straggler_details);
}
}
}
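// Entry point for device planes: resolves the tensor-core index from the
// plane name and skips planes that are not TensorCore planes.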
void DcnEventsProcessor::AddTpuCollectiveDcnTrafficToXPlane(
XPlane* device_xplane) {
XPlaneBuilder plane_builder(device_xplane);
auto tpu = tsl::profiler::GetTensorCoreId(plane_builder.Name());
if (!tpu.has_value()) return;
uint32_t tpu_idx = FindTpuIdx(tpu.value());
AddQualifiedCollectivesToXPlane(plane_builder, tpu_idx);
AddUnqualifiedCollectivesToXPlane(plane_builder, tpu_idx);
}
}
} | #include "tensorflow/core/profiler/convert/dcn_analysis.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/convert/dcn_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
using tensorflow::profiler::DCN_MESSAGE_INVALID_BAD_KEY;
using tensorflow::profiler::DCN_MESSAGE_INVALID_CLOCK_SKEW;
using tensorflow::profiler::DCN_MESSAGE_VALID;
using tensorflow::profiler::DCN_MESSAGE_VALID_LOOPBACK;
using tensorflow::profiler::XEventBuilder;
using tensorflow::profiler::XEventMetadata;
using tensorflow::profiler::XLineBuilder;
using tensorflow::profiler::XPlane;
using tensorflow::profiler::XPlaneBuilder;
using tensorflow::profiler::XPlaneVisitor;
using tensorflow::profiler::XSpace;
using ::testing::FieldsAre;
using tsl::profiler::kMegaScaleDcnReceive;
using tsl::profiler::kMegaScaleDcnSend;
TEST(DcnAnalysis, SetupMessageInfoTest) {
XSpace space;
XPlane *host_trace = space.add_planes();
XPlaneBuilder host_trace_builder(host_trace);
XEventMetadata *event_metadata_1 =
host_trace_builder.GetOrCreateEventMetadata(1);
event_metadata_1->set_name(std::string(kMegaScaleDcnReceive));
XEventMetadata *event_metadata_2 =
host_trace_builder.GetOrCreateEventMetadata(2);
event_metadata_2->set_name(std::string(kMegaScaleDcnSend));
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
  DcnEventsProcessor dcn_events_processor(/*num_tpu_tensor_cores=*/4,
                                          /*is_megacore=*/false);
dcn_events_processor.SetupMessageInfo(plane);
ASSERT_FALSE(dcn_events_processor.HasDcnMessages(kMegaScaleDcnSend));
ASSERT_TRUE(dcn_events_processor.HasDcnMessages(kMegaScaleDcnReceive));
ASSERT_FALSE(dcn_events_processor.HasDcnMessages("Another Message"));
ASSERT_EQ(dcn_events_processor.MegaScaleMessageId(kMegaScaleDcnReceive), 1);
ASSERT_EQ(dcn_events_processor.MegaScaleMessageId(kMegaScaleDcnSend),
std::nullopt);
}
TEST(DcnAnalysis, CreateMessageTestValidMessages) {
XSpace space;
XPlane *host_trace = space.add_planes();
XPlaneBuilder xplane_builder(host_trace);
XEventMetadata *event_metadata_1 = xplane_builder.GetOrCreateEventMetadata(1);
event_metadata_1->set_name(std::string(kMegaScaleDcnReceive));
XLineBuilder xline_builder_0 = xplane_builder.GetOrCreateLine(0);
XLineBuilder xline_builder_1 = xplane_builder.GetOrCreateLine(1);
XEventBuilder event_builder = xline_builder_0.AddEvent(*event_metadata_1);
event_builder.SetOffsetNs(100000);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_label"),
"all-reduce.273_312");
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 2);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"),
3);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 1);
event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
"dcn_destination_per_slice_device_id"),
3);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), 0);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), 24);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("duration_us"), 50);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 32768);
event_builder = xline_builder_0.AddEvent(*event_metadata_1);
event_builder.SetOffsetNs(175000);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_label"),
"super-collective.1234");
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 112);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"),
1);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 34);
event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
"dcn_destination_per_slice_device_id"),
2);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), 4);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), 0);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("duration_us"), 50);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 1);
event_builder = xline_builder_1.AddEvent(*event_metadata_1);
event_builder.SetOffsetNs(150000);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_label"), "super-collective");
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 9);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"),
3);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 0);
event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
"dcn_destination_per_slice_device_id"),
0);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("duration_us"), 75);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 10);
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
DcnEventsProcessor dcn_events_processor(4, false);
dcn_events_processor.SetupMessageInfo(plane);
dcn_events_processor.ProcessReceiveMessages(plane);
ASSERT_EQ(dcn_events_processor.NumReceivedMessages(), 3);
  EXPECT_THAT(dcn_events_processor.GetMessage(0),
              FieldsAre(/*collective_name=*/"all-reduce.273_312",
                        /*slice_src=*/2, /*tpu_src=*/3,
                        /*slice_dst=*/1, /*tpu_dst=*/3,
                        /*start_timestamp_ns=*/50000,
                        /*end_timestamp_ns=*/100000, /*duration_us=*/50,
                        /*size_bytes=*/32768, /*chunk_id=*/0,
                        /*loop_index_id=*/24, DCN_MESSAGE_VALID));
  EXPECT_THAT(dcn_events_processor.GetMessage(1),
              FieldsAre(/*collective_name=*/"super-collective.1234",
                        /*slice_src=*/112, /*tpu_src=*/1,
                        /*slice_dst=*/34, /*tpu_dst=*/2,
                        /*start_timestamp_ns=*/125000,
                        /*end_timestamp_ns=*/175000, /*duration_us=*/50,
                        /*size_bytes=*/1, /*chunk_id=*/4,
                        /*loop_index_id=*/0, DCN_MESSAGE_VALID));
  EXPECT_THAT(dcn_events_processor.GetMessage(2),
              FieldsAre(/*collective_name=*/"super-collective",
                        /*slice_src=*/9, /*tpu_src=*/3,
                        /*slice_dst=*/0, /*tpu_dst=*/0,
                        /*start_timestamp_ns=*/75000,
                        /*end_timestamp_ns=*/150000, /*duration_us=*/75,
                        /*size_bytes=*/10, /*chunk_id=*/-1,
                        /*loop_index_id=*/-1, DCN_MESSAGE_VALID));
TimestampMap host_ts_map = dcn_events_processor.HostTsMap();
ASSERT_EQ(host_ts_map.size(), 6);
for (const auto &ts_map_item : host_ts_map) {
ASSERT_EQ(ts_map_item.first, ts_map_item.second->timestamp_ns);
if (ts_map_item.first == 50000) {
ASSERT_EQ(ts_map_item.second->duration_ns, 0);
ASSERT_EQ(ts_map_item.second->message_diff, 1);
ASSERT_EQ(ts_map_item.second->size_diff, 32768);
} else if (ts_map_item.first == 125000) {
ASSERT_EQ(ts_map_item.second->duration_ns, 0);
ASSERT_EQ(ts_map_item.second->message_diff, 1);
ASSERT_EQ(ts_map_item.second->size_diff, 1);
} else if (ts_map_item.first == 75000) {
ASSERT_EQ(ts_map_item.second->duration_ns, 0);
ASSERT_EQ(ts_map_item.second->message_diff, 1);
ASSERT_EQ(ts_map_item.second->size_diff, 10);
} else if (ts_map_item.first == 100000) {
ASSERT_EQ(ts_map_item.second->duration_ns, 50000);
ASSERT_EQ(ts_map_item.second->message_diff, -1);
ASSERT_EQ(ts_map_item.second->size_diff, -32768);
} else if (ts_map_item.first == 175000) {
ASSERT_EQ(ts_map_item.second->duration_ns, 50000);
ASSERT_EQ(ts_map_item.second->message_diff, -1);
ASSERT_EQ(ts_map_item.second->size_diff, -1);
} else if (ts_map_item.first == 150000) {
ASSERT_EQ(ts_map_item.second->duration_ns, 75000);
ASSERT_EQ(ts_map_item.second->message_diff, -1);
ASSERT_EQ(ts_map_item.second->size_diff, -10);
} else {
FAIL() << "Unexpected timestamp entry.";
}
}
const std::vector<DcnBurst> &host_bursts =
dcn_events_processor.GetHostBursts();
ASSERT_EQ(host_bursts.size(), 1);
ASSERT_EQ(host_bursts[0].num_messages, 3);
ASSERT_EQ(host_bursts[0].start_timestamp_ns, 50000);
ASSERT_EQ(host_bursts[0].end_timestamp_ns, 175000);
ASSERT_EQ(host_bursts[0].burst_size_bytes, 32779);
ASSERT_EQ(host_bursts[0].max_overlapping_messages, 2);
}
TEST(DcnAnalysis, CreateLoopBackMessageTest) {
XSpace space;
XPlane *host_trace = space.add_planes();
XPlaneBuilder xplane_builder(host_trace);
XEventMetadata *event_metadata_1 = xplane_builder.GetOrCreateEventMetadata(1);
event_metadata_1->set_name(std::string(kMegaScaleDcnReceive));
XLineBuilder xline_builder = xplane_builder.GetOrCreateLine(0);
XEventBuilder event_builder = xline_builder.AddEvent(*event_metadata_1);
event_builder.SetOffsetNs(5000000);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_label"), "all-gather.1234");
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 2);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"),
3);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 2);
event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
"dcn_destination_per_slice_device_id"),
1);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), 4);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), 40);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("duration_us"), 1000);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 1000);
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
DcnEventsProcessor dcn_events_processor(4, false);
dcn_events_processor.SetupMessageInfo(plane);
dcn_events_processor.ProcessReceiveMessages(plane);
ASSERT_EQ(dcn_events_processor.NumReceivedMessages(), 1);
  EXPECT_THAT(dcn_events_processor.GetMessage(0),
              FieldsAre(/*collective_name=*/"all-gather.1234",
                        /*slice_src=*/2, /*tpu_src=*/3,
                        /*slice_dst=*/2, /*tpu_dst=*/1,
                        /*start_timestamp_ns=*/4000000,
                        /*end_timestamp_ns=*/5000000, /*duration_us=*/1000,
                        /*size_bytes=*/1000, /*chunk_id=*/4,
                        /*loop_index_id=*/40, DCN_MESSAGE_VALID_LOOPBACK));
}
TEST(DcnAnalysis, CreateZeroDurationMessageTest) {
XSpace space;
XPlane *host_trace = space.add_planes();
XPlaneBuilder xplane_builder(host_trace);
XEventMetadata *event_metadata_1 = xplane_builder.GetOrCreateEventMetadata(1);
event_metadata_1->set_name(std::string(kMegaScaleDcnReceive));
XLineBuilder xline_builder = xplane_builder.GetOrCreateLine(0);
XEventBuilder event_builder = xline_builder.AddEvent(*event_metadata_1);
event_builder.SetOffsetNs(20000);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_label"),
"all-reduce.273_312");
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 2);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"),
3);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 1);
event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
"dcn_destination_per_slice_device_id"),
1);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), 0);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), 25);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("duration_us"), 0);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 512);
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
DcnEventsProcessor dcn_events_processor(4, false);
dcn_events_processor.SetupMessageInfo(plane);
dcn_events_processor.ProcessReceiveMessages(plane);
  EXPECT_THAT(dcn_events_processor.GetMessage(0),
              FieldsAre(/*collective_name=*/"all-reduce.273_312",
                        /*slice_src=*/2, /*tpu_src=*/3,
                        /*slice_dst=*/1, /*tpu_dst=*/1,
                        /*start_timestamp_ns=*/20000,
                        /*end_timestamp_ns=*/20000, /*duration_us=*/0,
                        /*size_bytes=*/512, /*chunk_id=*/0,
                        /*loop_index_id=*/25,
                        DCN_MESSAGE_INVALID_CLOCK_SKEW));
}
TEST(DcnAnalysis, CreateMissingKeyTest) {
XSpace space;
XPlane *host_trace = space.add_planes();
XPlaneBuilder xplane_builder(host_trace);
XEventMetadata *event_metadata_1 = xplane_builder.GetOrCreateEventMetadata(1);
event_metadata_1->set_name(std::string(kMegaScaleDcnReceive));
XLineBuilder xline_builder = xplane_builder.GetOrCreateLine(0);
XEventBuilder event_builder = xline_builder.AddEvent(*event_metadata_1);
event_builder.SetOffsetNs(50000);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("duration_us"), 10);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 100);
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
DcnEventsProcessor dcn_events_processor(4, false);
dcn_events_processor.SetupMessageInfo(plane);
dcn_events_processor.ProcessReceiveMessages(plane);
  EXPECT_THAT(dcn_events_processor.GetMessage(0),
              FieldsAre(/*collective_name=*/"",
                        /*slice_src=*/-1, /*tpu_src=*/-1,
                        /*slice_dst=*/-1, /*tpu_dst=*/-1,
                        /*start_timestamp_ns=*/40000,
                        /*end_timestamp_ns=*/50000, /*duration_us=*/10,
                        /*size_bytes=*/100, /*chunk_id=*/-1,
                        /*loop_index_id=*/-1, DCN_MESSAGE_INVALID_BAD_KEY));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/dcn_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/dcn_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
732385c8-9ae6-42da-980c-ef56b8243f9a | cpp | tensorflow/tensorflow | xplane_to_op_stats | tensorflow/core/profiler/convert/xplane_to_op_stats.cc | tensorflow/core/profiler/convert/xplane_to_op_stats_test.cc | #include "tensorflow/core/profiler/convert/xplane_to_op_stats.h"
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/math_utils.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/tpu_xplane_utils.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_utils.h"
#include "tensorflow/core/profiler/convert/op_metrics_db_combiner.h"
#include "tensorflow/core/profiler/convert/step_events_to_steps_db.h"
#include "tensorflow/core/profiler/convert/xplane_to_kernel_stats_db.h"
#include "tensorflow/core/profiler/convert/xplane_to_op_metrics_db.h"
#include "tensorflow/core/profiler/convert/xplane_to_step_events.h"
#include "tensorflow/core/profiler/protobuf/diagnostics.pb.h"
#include "tensorflow/core/profiler/protobuf/hardware_types.pb.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/protobuf/tf_function.pb.h"
#include "tensorflow/core/profiler/utils/device_caps_utils.h"
#include "tensorflow/core/profiler/utils/event_span.h"
#include "tensorflow/core/profiler/utils/hardware_type_utils.h"
#include "tensorflow/core/profiler/utils/hlo_proto_map.h"
#include "tensorflow/core/profiler/utils/kernel_stats_utils.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
using tsl::profiler::FindPlanesWithPrefix;
using tsl::profiler::FindTensorCorePlanes;
std::string Hostname(const XSpace& space) {
if (space.hostnames().empty()) return "localhost";
DCHECK_EQ(space.hostnames_size(), 1);
const std::string& hostname = space.hostnames(0);
return hostname;
}
}
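// The ridge point is peak GFLOP/s divided by peak HBM bandwidth in GB/s,
// i.e. the operational intensity (FLOP/byte) at which a roofline model turns
// from memory-bound to compute-bound.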
PerfEnv MakePerfEnv(double peak_tera_flops_per_second,
std::vector<double> peak_bws) {
PerfEnv result;
result.set_peak_tera_flops_per_second(peak_tera_flops_per_second);
for (const auto bw : peak_bws) {
result.add_peak_bws_giga_bytes_per_second(bw);
}
result.set_ridge_point(tsl::profiler::TeraToGiga(peak_tera_flops_per_second) /
peak_bws[MemBwType::MEM_BW_TYPE_HBM_RW]);
return result;
}
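// GPUs derive peak FLOPS and bandwidths from DeviceCapabilities; TPU planes
// instead carry DevCapPeak* stats on the plane, which default to 0 when
// absent.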
PerfEnv GetPerfEnvFromXPlane(const XPlane& device_plane) {
DeviceCapabilities cap = GetDeviceCaps(device_plane);
if (!absl::StartsWith(device_plane.name(), kTpuPlanePrefix)) {
double peak_tera_flops_per_second =
cap.num_cores() *
tsl::profiler::GigaToTera(GetFlopMaxThroughputPerSM(cap));
double hbm_bw_giga_bytes_per_second =
tsl::profiler::UniToGiga(cap.memory_bandwidth());
double shm_giga_bytes_per_second =
cap.num_cores() *
tsl::profiler::UniToGiga(GetSharedMemoryBandwidthPerSM(cap));
return MakePerfEnv(peak_tera_flops_per_second,
{hbm_bw_giga_bytes_per_second,
shm_giga_bytes_per_second,
shm_giga_bytes_per_second});
} else {
XPlaneVisitor visitor = tsl::profiler::CreateTfXPlaneVisitor(&device_plane);
auto peak_tera_flops_per_second =
visitor.GetStat(StatType::kDevCapPeakTeraflopsPerSecond);
auto peak_tera_flops_per_second_val =
peak_tera_flops_per_second.has_value()
? peak_tera_flops_per_second->DoubleValue()
: 0.0;
auto peak_hbm_bw_giga_bytes_per_second =
visitor.GetStat(StatType::kDevCapPeakHbmBwGigabytesPerSecond);
auto peak_hbm_bw_giga_bytes_per_second_val =
peak_hbm_bw_giga_bytes_per_second.has_value()
? peak_hbm_bw_giga_bytes_per_second->DoubleValue()
: 0.0;
auto peak_sram_rd_bw_giga_bytes_per_second =
visitor.GetStat(StatType::kDevCapPeakSramRdBwGigabytesPerSecond);
auto peak_sram_rd_bw_giga_bytes_per_second_val =
peak_sram_rd_bw_giga_bytes_per_second.has_value()
? peak_sram_rd_bw_giga_bytes_per_second->DoubleValue()
: 0.0;
auto peak_sram_wr_bw_giga_bytes_per_second =
visitor.GetStat(StatType::kDevCapPeakSramWrBwGigabytesPerSecond);
auto peak_sram_wr_bw_giga_bytes_per_second_val =
peak_sram_wr_bw_giga_bytes_per_second.has_value()
? peak_sram_wr_bw_giga_bytes_per_second->DoubleValue()
: 0.0;
return MakePerfEnv(peak_tera_flops_per_second_val,
{peak_hbm_bw_giga_bytes_per_second_val,
peak_sram_rd_bw_giga_bytes_per_second_val,
peak_sram_wr_bw_giga_bytes_per_second_val});
}
}
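// Fills in a single-host run environment: device type and core count come
// from GPU device caps, from the TPU plane's device-type stat, or fall back
// to "CPU" with zero device cores.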
void SetRunEnvironment(const XSpace& space, RunEnvironment* env) {
env->set_host_count(1);
env->set_task_count(1);
env->mutable_hostnames()->insert({Hostname(space), true});
std::vector<const XPlane*> gpu_planes =
FindPlanesWithPrefix(space, kGpuPlanePrefix);
if (!gpu_planes.empty()) {
absl::string_view gpu_model =
GpuModelName(GetDeviceCaps(*gpu_planes.front()));
if (!gpu_model.empty()) {
env->set_device_type(std::string(gpu_model));
} else {
env->set_device_type("GPU");
}
env->set_device_core_count(gpu_planes.size());
} else if (std::vector<const XPlane*> tpu_planes =
FindTensorCorePlanes(space);
!tpu_planes.empty()) {
XPlaneVisitor visitor =
tsl::profiler::CreateTfXPlaneVisitor(tpu_planes.at(0));
auto xstat = visitor.GetStat(StatType::kDeviceTypeString);
if (xstat.has_value()) {
env->set_device_type(std::string(xstat->StrOrRefValue()));
}
env->set_device_core_count(tpu_planes.size());
} else {
env->set_device_type("CPU");
env->set_device_core_count(0);
}
}
void PropagateXSpaceDiagnosticsToOpStats(const XSpace& space,
OpStats* op_stats) {
if (!space.errors().empty()) {
absl::flat_hash_set<std::string> unique_errors;
unique_errors.insert(space.errors().begin(), space.errors().end());
*op_stats->mutable_diagnostics()->mutable_errors() = {unique_errors.begin(),
unique_errors.end()};
}
if (!space.warnings().empty()) {
absl::flat_hash_set<std::string> unique_warnings;
unique_warnings.insert(space.warnings().begin(), space.warnings().end());
*op_stats->mutable_diagnostics()->mutable_warnings() = {
unique_warnings.begin(), unique_warnings.end()};
}
}
void SetProgramIdToNameMap(const HloProtoMap& hlo_proto_map,
tensorflow::profiler::OpStats& op_stats) {
auto& program_id_to_name_map = *op_stats.mutable_program_id_to_name_map();
for (const auto& [program_id, hlo_proto] : hlo_proto_map) {
program_id_to_name_map[program_id] = hlo_proto->hlo_module().name();
}
}
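// Main conversion entry point. TPU planes are aggregated before building the
// device op metrics db, and their step events are intersect-combined across
// planes, whereas GPU step events are union-combined and then de-overlapped.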
OpStats ConvertXSpaceToOpStats(const XSpace& space,
const OpStatsOptions& options) {
OpStats op_stats;
StepEvents step_events;
PropagateXSpaceDiagnosticsToOpStats(space, &op_stats);
OpMetricsDbCombiner op_metrics_db_combiner(
op_stats.mutable_device_op_metrics_db());
SetRunEnvironment(space, op_stats.mutable_run_environment());
KernelReportMap reports;
std::vector<const XPlane*> device_planes =
FindPlanesWithPrefix(space, kTpuPlanePrefix);
const bool is_gpu = device_planes.empty();
if (is_gpu) {
device_planes = FindPlanesWithPrefix(space, kGpuPlanePrefix);
}
const bool is_tpu = !is_gpu;
for (const XPlane* device_trace : device_planes) {
XPlane aggregated_xplane;
bool use_aggregated_xplane = false;
if (options.generate_op_metrics_db) {
if (!op_stats.has_perf_env()) {
*op_stats.mutable_perf_env() = GetPerfEnvFromXPlane(*device_trace);
}
if (!is_tpu) {
OpMetricsDb device_op_metrics_db =
ConvertDeviceTraceXPlaneToOpMetricsDb(*device_trace);
op_metrics_db_combiner.Combine(device_op_metrics_db);
} else {
AggregateXPlane(*device_trace, aggregated_xplane);
use_aggregated_xplane = true;
OpMetricsDb device_op_metrics_db =
ConvertTpuDeviceTraceXPlaneToOpMetricsDb(aggregated_xplane);
op_metrics_db_combiner.Combine(device_op_metrics_db);
}
}
if (options.generate_step_db) {
StepEvents device_step_events = ConvertDeviceTraceXPlaneToStepEvents(
use_aggregated_xplane ? aggregated_xplane : *device_trace);
if (is_tpu) {
IntersectCombineStepEvents(device_step_events, &step_events);
} else {
UnionCombineStepEvents(device_step_events, &step_events);
}
}
if (options.generate_kernel_stats_db) {
      ConvertDeviceTraceXPlaneToKernelReports(*device_trace,
                                              /*on_kernel_fn=*/{}, &reports);
}
}
if (options.generate_kernel_stats_db) {
CopyTopKDurationKernelReportsToDb(reports,
op_stats.mutable_kernel_stats_db());
}
bool has_device = !device_planes.empty();
const XPlane* host_plane = FindPlaneWithName(space, kHostThreadsPlaneName);
if (host_plane) {
if (options.generate_op_metrics_db) {
*op_stats.mutable_host_op_metrics_db() =
ConvertHostThreadsXPlaneToOpMetricsDb(*host_plane);
}
if (options.generate_step_db && !has_device) {
StepEvents host_step_events =
        ConvertHostThreadsXPlaneToStepEvents(*host_plane,
                                             /*device_step_events=*/nullptr);
UnionCombineStepEvents(host_step_events, &step_events);
}
XPlaneVisitor visitor = tsl::profiler::CreateTfXPlaneVisitor(host_plane);
auto stat = visitor.GetStat(StatType::kMatrixUnitUtilizationPercent);
if (stat.has_value()) {
op_stats.mutable_performance_counter_result()
->set_matrix_unit_utilization_percent(stat->DoubleValue());
}
}
if (options.generate_step_db) {
if (is_tpu) {
    *op_stats.mutable_step_db() = ConvertStepEventsToStepDb(
        has_device, /*maybe_drop_incomplete_steps=*/false, step_events);
*op_stats.mutable_device_op_metrics_db()->mutable_precision_stats() =
ComputePrecisionStats(step_events);
} else {
StepEvents nonoverlapped_step_events =
ToNonOverlappedStepEvents(step_events);
*op_stats.mutable_step_db() = ConvertStepEventsToStepDb(
has_device, options.maybe_drop_incomplete_steps,
nonoverlapped_step_events);
*op_stats.mutable_device_op_metrics_db()->mutable_precision_stats() =
ComputePrecisionStats(nonoverlapped_step_events);
}
}
if (!is_tpu) {
CoreDetails& details =
(*op_stats.mutable_core_id_to_details())[kDefaultGpuLocalCoreId];
details.set_hostname(Hostname(space));
}
HloProtoMap hlo_proto_map;
hlo_proto_map.AddHloProtosFromXSpace(space);
SetProgramIdToNameMap(hlo_proto_map, op_stats);
return op_stats;
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_op_stats.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "xla/tsl/profiler/utils/group_events.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/convert/multi_xplanes_to_op_stats.h"
#include "tensorflow/core/profiler/convert/repository.h"
#include "tensorflow/core/profiler/convert/step_events_to_steps_db.h"
#include "tensorflow/core/profiler/protobuf/diagnostics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/protobuf/tf_function.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
#include "tsl/platform/status.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::testing::Property;
using ::testing::UnorderedElementsAre;
TEST(ConvertXPlaneToOpStats, GpuPerfEnv) {
auto space = std::make_unique<XSpace>();
constexpr double kMaxError = 0.01;
constexpr int kClockRateKHz = 1530000;
constexpr int kCoreCount = 80;
constexpr uint64 kMemoryBandwidthBytesPerSecond =
uint64{900} * 1000 * 1000 * 1000;
constexpr int kComputeCapMajor = 7;
constexpr int kComputeCapMinor = 0;
  XPlaneBuilder device_plane(
      GetOrCreateGpuXPlane(space.get(), /*device_ordinal=*/0));
device_plane.AddStatValue(*device_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kDevVendor)),
kDeviceVendorNvidia);
device_plane.AddStatValue(*device_plane.GetOrCreateStatMetadata("clock_rate"),
kClockRateKHz);
device_plane.AddStatValue(*device_plane.GetOrCreateStatMetadata("core_count"),
kCoreCount);
device_plane.AddStatValue(
*device_plane.GetOrCreateStatMetadata("memory_bandwidth"),
kMemoryBandwidthBytesPerSecond);
device_plane.AddStatValue(
*device_plane.GetOrCreateStatMetadata("compute_cap_major"),
kComputeCapMajor);
device_plane.AddStatValue(
*device_plane.GetOrCreateStatMetadata("compute_cap_minor"),
kComputeCapMinor);
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(space));
auto session_snapshot_or =
SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStatsOptions options;
options.generate_op_metrics_db = true;
OpStats op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
options, &op_stats));
const PerfEnv& perf_env = op_stats.perf_env();
EXPECT_NEAR(125.34, perf_env.peak_tera_flops_per_second(), kMaxError);
EXPECT_NEAR(
900,
perf_env.peak_bws_giga_bytes_per_second(MemBwType::MEM_BW_TYPE_HBM_RW),
kMaxError);
EXPECT_NEAR(139.26, perf_env.ridge_point(), kMaxError);
}
TEST(ConvertXPlaneToOpStats, GpuRunEnvironment) {
auto space = std::make_unique<XSpace>();
  XPlaneBuilder device_plane1(
      GetOrCreateGpuXPlane(space.get(), /*device_ordinal=*/0));
device_plane1.AddStatValue(*device_plane1.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kDevVendor)),
kDeviceVendorNvidia);
  XPlaneBuilder device_plane2(
      GetOrCreateGpuXPlane(space.get(), /*device_ordinal=*/1));
device_plane2.AddStatValue(*device_plane2.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kDevVendor)),
kDeviceVendorNvidia);
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(space));
auto session_snapshot_or =
SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStats op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(
session_snapshot_or.value(), OpStatsOptions(), &op_stats));
const RunEnvironment& run_env = op_stats.run_environment();
EXPECT_EQ("Nvidia GPU", run_env.device_type());
EXPECT_EQ(1, run_env.host_count());
EXPECT_EQ(1, run_env.task_count());
EXPECT_EQ(2, run_env.device_core_count());
}
TEST(ConvertXPlaneToOpStats, CpuOnlyStepDbTest) {
constexpr int64_t kStepNum = 123;
constexpr int64_t kStepId = 0;
auto space = std::make_unique<XSpace>();
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(space.get()));
host_plane_builder.ReserveLines(2);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kTraceContext,
0, 100, {{StatType::kStepNum, kStepNum}});
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
10, 90,
{{StatType::kStepId, kStepId},
{StatType::kProducerType, int64_t{1}},
{StatType::kProducerId, kStepId}});
auto tf_executor_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &tf_executor_thread,
HostEventType::kExecutorStateProcess, 20, 80,
{{StatType::kStepId, kStepId},
{StatType::kConsumerType, int64_t{1}},
{StatType::kConsumerId, kStepId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 30, 70);
OpStatsOptions options;
options.generate_op_metrics_db = true;
options.generate_step_db = true;
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(space));
auto session_snapshot_or =
SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStats op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
options, &op_stats));
const StepDatabaseResult& step_db = op_stats.step_db();
EXPECT_EQ(step_db.step_sequence_size(), 1);
}
TEST(ConvertXPlaneToOpStats, GpuStepDbTest) {
constexpr int64_t kStepNum = 123;
constexpr int64_t kStepId = 0;
constexpr int64_t kCorrelationId = 100;
auto space = std::make_unique<XSpace>();
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(space.get()));
host_plane_builder.ReserveLines(2);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kTraceContext,
0, 100, {{StatType::kStepNum, kStepNum}});
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
10, 90,
{{StatType::kStepId, kStepId},
{StatType::kProducerType, int64_t{1}},
{StatType::kProducerId, kStepId}});
auto tf_executor_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &tf_executor_thread,
HostEventType::kExecutorStateProcess, 20, 20,
{{StatType::kStepId, kStepId},
{StatType::kConsumerType, int64_t{1}},
{StatType::kConsumerId, kStepId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 30, 10,
{{StatType::kCorrelationId, kCorrelationId}});
  XPlaneBuilder device_plane_builder(
      GetOrCreateGpuXPlane(space.get(), /*device_ordinal=*/0));
device_plane_builder.ReserveLines(1);
auto stream = device_plane_builder.GetOrCreateLine(0);
CreateXEvent(&device_plane_builder, &stream, "matmul", 50, 40,
{{StatType::kCorrelationId, kCorrelationId}});
OpStatsOptions options;
options.generate_op_metrics_db = true;
options.generate_step_db = true;
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(space));
auto session_snapshot_or =
SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStats op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
options, &op_stats));
const StepDatabaseResult& step_db = op_stats.step_db();
EXPECT_EQ(step_db.step_sequence_size(), 0);
PrecisionStats precision_stats =
op_stats.device_op_metrics_db().precision_stats();
EXPECT_EQ(precision_stats.compute_16bit_ps(), 0);
EXPECT_EQ(precision_stats.compute_32bit_ps(), 40);
}
TEST(ConvertXPlaneToOpStats, PropagateAndDedupErrors) {
XSpace space;
static constexpr char kError[] = "host: error";
*space.add_errors() = kError;
*space.add_errors() = kError;
OpStats op_stats = ConvertXSpaceToOpStats(space, OpStatsOptions());
EXPECT_EQ(1, op_stats.diagnostics().errors_size());
EXPECT_EQ(kError, op_stats.diagnostics().errors(0));
}
TEST(ConvertXPlaneToOpStats, Hostnames) {
XSpace space;
static constexpr char kHost[] = "host1";
*space.add_hostnames() = kHost;
OpStats op_stats = ConvertXSpaceToOpStats(space, OpStatsOptions());
EXPECT_EQ(
kHost,
op_stats.core_id_to_details().at(kDefaultGpuLocalCoreId).hostname());
}
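// Builds a minimal host-side XSpace for `hostname`: one step consisting of
// kTraceContext -> kFunctionRun -> kExecutorStateProcess plus an "aaa:bbb"
// op, and records the hostname. Used by the multi-host combination test
// below.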
void BuildXSpaceForTest(XSpace& xspace, absl::string_view hostname) {
constexpr int64_t kStepNum = 123;
constexpr int64_t kStepId = 456;
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(&xspace));
host_plane_builder.ReserveLines(2);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kTraceContext,
0, 100, {{StatType::kStepNum, kStepNum}});
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
10, 90,
{{StatType::kStepId, kStepId},
{StatType::kProducerType, int64_t{1}},
{StatType::kProducerId, kStepId}});
auto executor_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &executor_thread,
HostEventType::kExecutorStateProcess, 20, 80,
{{StatType::kStepId, kStepId},
{StatType::kConsumerType, int64_t{1}},
{StatType::kConsumerId, kStepId}});
CreateXEvent(&host_plane_builder, &executor_thread, "aaa:bbb", 30, 70);
xspace.add_hostnames(std::string(hostname));
}
TEST(ConvertXPlaneToOpStats, TestConvertMultiXSpacesToCombinedOpStats) {
static constexpr char kHost1[] = "host1";
static constexpr char kHost2[] = "host2";
auto xspace1 = std::make_unique<XSpace>();
auto xspace2 = std::make_unique<XSpace>();
BuildXSpaceForTest(*xspace1, kHost1);
BuildXSpaceForTest(*xspace2, kHost2);
std::vector<std::string> xspace_paths;
xspace_paths.push_back("host1.pb");
xspace_paths.push_back("host2.pb");
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(xspace1));
xspaces.push_back(std::move(xspace2));
auto session_snapshot_or =
SessionSnapshot::Create(std::move(xspace_paths), std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStatsOptions options;
options.generate_op_metrics_db = true;
options.generate_step_db = true;
OpStats combined_op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
options, &combined_op_stats))
<< "Failed to convert multi XSpace to OpStats";
ASSERT_EQ(combined_op_stats.host_op_metrics_db().metrics_db_size(), 2);
const auto& metric = combined_op_stats.host_op_metrics_db().metrics_db(1);
EXPECT_EQ(metric.name(), "aaa");
EXPECT_EQ(metric.category(), "bbb");
EXPECT_EQ(metric.self_time_ps(), 140);
ASSERT_EQ(combined_op_stats.step_db().step_sequence_size(), 1);
ASSERT_EQ(
combined_op_stats.step_db().step_sequence(0).step_info_per_core_size(),
2);
const auto& step_info_per_core =
combined_op_stats.step_db().step_sequence(0).step_info_per_core();
EXPECT_TRUE(step_info_per_core.contains(kDefaultGpuLocalCoreId));
EXPECT_TRUE(step_info_per_core.contains(1000 + kDefaultGpuLocalCoreId));
const auto& core_details_map = combined_op_stats.core_id_to_details();
EXPECT_EQ(kHost1, core_details_map.at(kDefaultGpuLocalCoreId).hostname());
EXPECT_EQ(kHost2,
core_details_map.at(1000 + kDefaultGpuLocalCoreId).hostname());
}
TEST(ConvertXPlaneToOpStats, RunEnvironmentExtractedFromTpuPlane) {
XSpace xspace;
for (int i : {0, 1, 2, 3}) {
GetOrCreateTpuXPlane(&xspace, i, "TPU V4", 0, 0);
}
OpStats op_stats = ConvertXSpaceToOpStats(xspace, OpStatsOptions());
EXPECT_EQ(op_stats.run_environment().device_type(), "TPU V4");
EXPECT_EQ(op_stats.run_environment().device_core_count(), 4);
}
TEST(ConvertXPlaneToOpStats, TpuPerfEnv) {
auto space = std::make_unique<XSpace>();
constexpr double kMaxError = 0.01;
constexpr int kClockRateKHz = 1530000;
constexpr int kCoreCount = 80;
constexpr uint64 kMemoryBandwidthBytesPerSecond =
uint64{900} * 1000 * 1000 * 1000;
constexpr int kComputeCapMajor = 7;
constexpr int kComputeCapMinor = 0;
constexpr double kDevCapPeakTeraflopsPerSecond = 141.0;
constexpr double kDevCapPeakHbmBwGigabytesPerSecond = 900.0;
XPlaneBuilder device_plane(GetOrCreateTpuXPlane(
space.get(), 0, "TPU V4",
kDevCapPeakTeraflopsPerSecond, kDevCapPeakHbmBwGigabytesPerSecond));
device_plane.AddStatValue(*device_plane.GetOrCreateStatMetadata("clock_rate"),
kClockRateKHz);
device_plane.AddStatValue(*device_plane.GetOrCreateStatMetadata("core_count"),
kCoreCount);
device_plane.AddStatValue(
*device_plane.GetOrCreateStatMetadata("memory_bandwidth"),
kMemoryBandwidthBytesPerSecond);
device_plane.AddStatValue(
*device_plane.GetOrCreateStatMetadata("compute_cap_major"),
kComputeCapMajor);
device_plane.AddStatValue(
*device_plane.GetOrCreateStatMetadata("compute_cap_minor"),
kComputeCapMinor);
OpStatsOptions options;
options.generate_op_metrics_db = true;
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(space));
auto session_snapshot_or =
SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStats op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
options, &op_stats));
const PerfEnv& perf_env = op_stats.perf_env();
EXPECT_NEAR(141, perf_env.peak_tera_flops_per_second(), kMaxError);
EXPECT_NEAR(
900,
perf_env.peak_bws_giga_bytes_per_second(MemBwType::MEM_BW_TYPE_HBM_RW),
kMaxError);
EXPECT_NEAR(156.67, perf_env.ridge_point(), kMaxError);
}
TEST(ConvertXPlaneToOpStats, TpuRunEnvironment) {
auto space = std::make_unique<XSpace>();
XPlaneBuilder device_plane1(
GetOrCreateTpuXPlane(space.get(), 0, "TPU V4", 0, 0));
XPlaneBuilder device_plane2(
GetOrCreateTpuXPlane(space.get(), 1, "TPU V4", 0, 0));
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(space));
auto session_snapshot_or =
SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStats op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(
session_snapshot_or.value(), OpStatsOptions(), &op_stats));
const RunEnvironment& run_env = op_stats.run_environment();
EXPECT_EQ("TPU V4", run_env.device_type());
EXPECT_EQ(1, run_env.host_count());
EXPECT_EQ(1, run_env.task_count());
EXPECT_EQ(2, run_env.device_core_count());
}
TEST(ConvertXPlaneToOpStats, TpuDeviceTraceToStepDb) {
auto space = std::make_unique<XSpace>();
constexpr double kDevCapPeakTeraflopsPerSecond = 141.0;
constexpr double kDevCapPeakHbmBwGigabytesPerSecond = 1000.0;
XPlaneBuilder xplane_builder(GetOrCreateTpuXPlane(
space.get(), 0, "TPU V4",
kDevCapPeakTeraflopsPerSecond, kDevCapPeakHbmBwGigabytesPerSecond));
XEventMetadata* event_metadata = xplane_builder.GetOrCreateEventMetadata(1);
event_metadata->set_name("op_name");
XStatsBuilder<XEventMetadata> stats(event_metadata, &xplane_builder);
stats.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kProgramId)),
1);
stats.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kSymbolId)),
1);
stats.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kSelfDurationPs)),
10);
stats.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kTfOp)),
"tf_op_name");
stats.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kHloCategory)),
"category");
XLineBuilder line = xplane_builder.GetOrCreateLine(1);
line.SetName(kTensorFlowOpLineName);
XEventBuilder event = line.AddEvent(*event_metadata);
event.SetOffsetNs(0);
event.SetDurationNs(10);
OpStatsOptions options;
options.generate_op_metrics_db = true;
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(space));
auto session_snapshot_or =
SessionSnapshot::Create({"test_xspace"}, std::move(xspaces));
TF_CHECK_OK(session_snapshot_or.status());
OpStats op_stats;
TF_CHECK_OK(ConvertMultiXSpacesToCombinedOpStats(session_snapshot_or.value(),
options, &op_stats));
EXPECT_THAT(op_stats.device_op_metrics_db().metrics_db(),
UnorderedElementsAre(Property(&OpMetrics::name, "op_name"),
Property(&OpMetrics::name, "IDLE")));
}
TEST(ConvertXPlaneToOpStats, TpuMultiDeviceStepDbTest) {
auto space = std::make_unique<XSpace>();
XPlaneBuilder device_plane_builder1(
GetOrCreateTpuXPlane(space.get(), 0, "TPU V4", 0, 0));
XPlaneBuilder device_plane_builder2(
GetOrCreateTpuXPlane(space.get(), 1, "TPU V4", 0, 0));
device_plane_builder1.ReserveLines(1);
device_plane_builder2.ReserveLines(1);
XStatMetadata* kGroupId1 = device_plane_builder1.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kGroupId));
XLineBuilder line = device_plane_builder1.GetOrCreateLine(1);
line.SetName(kXlaOpLineName);
XEventMetadata* event_metadata =
device_plane_builder1.GetOrCreateEventMetadata(1);
event_metadata->set_name("Step 1");
XEventBuilder event_builder = line.AddEvent(*event_metadata);
event_builder.AddStatValue(*kGroupId1, 1);
event_builder.SetDurationNs(100);
event_builder.SetOffsetNs(100);
line = device_plane_builder2.GetOrCreateLine(1);
line.SetName(kXlaOpLineName);
XStatMetadata* kGroupId2 = device_plane_builder2.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kGroupId));
XEventMetadata* event_metadata2 =
device_plane_builder2.GetOrCreateEventMetadata(2);
event_metadata2->set_name("Step 1");
XEventBuilder event_builder2 = line.AddEvent(*event_metadata2);
event_builder2.AddStatValue(*kGroupId2, 1);
event_builder2.SetDurationNs(100);
event_builder2.SetOffsetNs(300);
XStatMetadata* kGroupId3 = device_plane_builder2.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kGroupId));
XEventMetadata* event_metadata3 =
device_plane_builder2.GetOrCreateEventMetadata(2);
event_metadata3->set_name("Step 2");
XEventBuilder event_builder3 = line.AddEvent(*event_metadata3);
event_builder3.AddStatValue(*kGroupId3, 2);
event_builder3.SetDurationNs(100);
event_builder3.SetOffsetNs(300);
OpStatsOptions options;
options.generate_op_metrics_db = true;
options.generate_step_db = true;
OpStats op_stats = ConvertXSpaceToOpStats(*space, options);
const StepDatabaseResult& step_db = op_stats.step_db();
EXPECT_EQ(step_db.step_sequence_size(), 1);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_op_stats.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_op_stats_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6f511ca0-49b7-4aac-b89e-b29692c7d1e9 | cpp | tensorflow/tensorflow | xplane_to_tf_functions | tensorflow/core/profiler/convert/xplane_to_tf_functions.cc | tensorflow/core/profiler/convert/xplane_to_tf_functions_test.cc | #include "tensorflow/core/profiler/convert/xplane_to_tf_functions.h"
#include <algorithm>
#include <ostream>
#include <stack>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/timespan.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
namespace {
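// Decodes the execution mode and compiler of a tf-function activation from
// the mode string recorded in the trace (e.g. "traced-xla").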
std::pair<TfFunctionExecutionMode, TfFunctionCompiler> Decode(
absl::string_view function_name, absl::string_view mode) {
if (mode == "eager") return {EAGER_MODE, INVALID_COMPILER};
if (mode == "concrete") return {CONCRETE_MODE, INVALID_COMPILER};
if (mode == "traced-xla") return {TRACED_MODE, XLA_COMPILER};
if (mode == "traced-nonXla") return {TRACED_MODE, OTHER_COMPILER};
if (mode == "notTraced-xla") return {NOT_TRACED_MODE, XLA_COMPILER};
if (mode == "notTraced-nonXla") return {NOT_TRACED_MODE, OTHER_COMPILER};
  LOG(ERROR) << absl::StrCat("tf-function '", function_name,
                             "' has an unexpected execution mode '", mode,
                             "'");
  DCHECK(false);
  return {INVALID_MODE, INVALID_COMPILER};
}
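// Returns the percentage of the tf-function's total self time that was spent
// in expensive calls, i.e. calls executed in TRACED_MODE or EAGER_MODE.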
double ComputeExpensiveCallPercent(const TfFunction& tf_function) {
uint64 total_call_time_ps = 0;
uint64 expensive_call_time_ps = 0;
for (const auto& mode_metrics : tf_function.metrics()) {
const auto mode = mode_metrics.first;
const auto& metrics = mode_metrics.second;
total_call_time_ps += metrics.self_time_ps();
if (mode == TRACED_MODE || mode == EAGER_MODE) {
expensive_call_time_ps += metrics.self_time_ps();
}
}
return tsl::profiler::SafeDivide(100.0 * expensive_call_time_ps,
total_call_time_ps);
}
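// Records one activation (invocation) of a tf-function observed on an XLine.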
struct ActivationRecord {
std::string function_name;
tsl::profiler::Timespan timespan;
TfFunctionExecutionMode execution_mode;
TfFunctionCompiler compiler;
int64_t tracing_count;
uint64 children_duration_ps;
ActivationRecord()
: function_name(""),
execution_mode(INVALID_MODE),
compiler(INVALID_COMPILER),
tracing_count(0),
children_duration_ps(0) {}
ActivationRecord(absl::string_view name,
const tsl::profiler::Timespan& timespan,
TfFunctionExecutionMode exe_mode,
TfFunctionCompiler compiler, int64_t tracing_cnt)
: function_name(std::string(name)),
timespan(timespan),
execution_mode(exe_mode),
compiler(compiler),
tracing_count(tracing_cnt),
children_duration_ps(0) {}
std::string DebugString() const {
return absl::StrCat("{", function_name, ", ",
TfFunctionExecutionMode_Name(execution_mode), ", ",
TfFunctionCompiler_Name(compiler),
", tracing_count:", tracing_count,
", children_duration:", children_duration_ps,
" ps, timespan:", timespan.DebugString(), "}");
}
};
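// Marks the entry or exit point of an activation; `index` refers into the
// activations_ vector of TfFunctionExecutions below.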
struct EntryOrExit {
bool is_entry;
int64_t index;
uint64 timestamp_ps;
EntryOrExit() : is_entry(false), index(-1), timestamp_ps(0) {}
EntryOrExit(bool is_entry, int64_t index, uint64 timestamp_ps)
: is_entry(is_entry), index(index), timestamp_ps(timestamp_ps) {}
std::string DebugString() const {
std::string entry_or_exit = is_entry ? "entry, " : "exit, ";
return absl::StrCat("{", entry_or_exit, "idx:", index,
", timestamp:", timestamp_ps, "}");
}
};
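// Merges two compiler annotations; two distinct valid compilers combine to
// MIXED_COMPILER.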
TfFunctionCompiler CombineCompilers(TfFunctionCompiler a,
TfFunctionCompiler b) {
if (a == INVALID_COMPILER) return b;
if (b == INVALID_COMPILER) return a;
if (a == b) return a;
return MIXED_COMPILER;
}
void CombineTfFunctionMetrics(const TfFunctionMetrics& src,
TfFunctionMetrics* dst) {
dst->set_count(src.count() + dst->count());
dst->set_self_time_ps(src.self_time_ps() + dst->self_time_ps());
}
void CombineTfFunction(const TfFunction& src, TfFunction* dst) {
dst->set_total_tracing_count(
std::max(src.total_tracing_count(), dst->total_tracing_count()));
dst->set_compiler(CombineCompilers(src.compiler(), dst->compiler()));
for (const auto& mode_metrics : src.metrics()) {
int32_t execution_mode = mode_metrics.first;
const TfFunctionMetrics& src_metrics = mode_metrics.second;
TfFunctionMetrics* dst_metrics =
gtl::FindOrNull(*dst->mutable_metrics(), execution_mode);
if (dst_metrics == nullptr) {
(*dst->mutable_metrics())[execution_mode] = src_metrics;
} else {
CombineTfFunctionMetrics(src_metrics, dst_metrics);
}
}
dst->set_expensive_call_percent(ComputeExpensiveCallPercent(*dst));
}
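// Collects all tf-function activations on one XLine, sorts their entry/exit
// points by timestamp, and aggregates them into a TfFunctionDb.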
class TfFunctionExecutions {
public:
explicit TfFunctionExecutions(const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
absl::string_view mode;
int64_t tracing_count = 0;
event.ForEachStat([&mode, &tracing_count](const XStatVisitor& stat) {
if (!stat.Type().has_value()) return;
switch (stat.Type().value()) {
case StatType::kTfFunctionCall:
mode = stat.StrOrRefValue();
break;
case StatType::kTfFunctionTracingCount:
tracing_count = stat.IntValue();
break;
}
});
if (mode.empty()) return;
int64_t index = activations_.size();
auto timespan = event.GetTimespan();
auto mode_compiler = Decode(event.Name(), mode);
      activations_.emplace_back(event.Name(), timespan, mode_compiler.first,
                                mode_compiler.second, tracing_count);
      points_.emplace_back(/*is_entry=*/true, index, timespan.begin_ps());
      points_.emplace_back(/*is_entry=*/false, index, timespan.end_ps());
});
auto ascending_in_timestamp = [](const EntryOrExit& a,
const EntryOrExit& b) {
return a.timestamp_ps < b.timestamp_ps;
};
absl::c_sort(points_, ascending_in_timestamp);
CalculateChildrenDurations();
}
std::string DebugString() const {
std::string result = "\nActivations:\n";
for (int i = 0, end = activations_.size(); i < end; i++) {
absl::StrAppend(&result, "[", i, "] ", activations_[i].DebugString(),
"\n");
}
absl::StrAppend(&result, "tf-function Entry/Exit Points:\n");
for (const auto& pt : points_) {
absl::StrAppend(&result, pt.DebugString(), "\n");
}
return result;
}
TfFunctionDb ConvertToTfFunctionDb() {
TfFunctionDb result;
for (const auto& record : activations_) {
TfFunction* fun = &(*result.mutable_tf_functions())[record.function_name];
fun->set_total_tracing_count(
std::max(static_cast<int64_t>(fun->total_tracing_count()),
record.tracing_count));
fun->set_compiler(CombineCompilers(fun->compiler(), record.compiler));
uint64 self_time_ps =
record.timespan.duration_ps() - record.children_duration_ps;
TfFunctionMetrics* metrics =
&(*fun->mutable_metrics())[record.execution_mode];
metrics->set_count(metrics->count() + 1);
metrics->set_self_time_ps(metrics->self_time_ps() + self_time_ps);
}
for (auto& name_fun : *result.mutable_tf_functions()) {
TfFunction& fun = name_fun.second;
fun.set_expensive_call_percent(ComputeExpensiveCallPercent(fun));
}
return result;
}
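  // Replays the sorted entry/exit points with a call stack so that each
  // activation accumulates the duration of its direct children. Self time is
  // later derived as duration_ps minus children_duration_ps.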
void CalculateChildrenDurations() {
std::stack<int64_t> call_stack;
for (const auto& pt : points_) {
if (pt.is_entry) {
call_stack.push(pt.index);
} else {
        DCHECK(!call_stack.empty());
        DCHECK_EQ(call_stack.top(), pt.index);
uint64 call_duration = activations_[pt.index].timespan.duration_ps();
call_stack.pop();
if (!call_stack.empty()) {
activations_[call_stack.top()].children_duration_ps += call_duration;
}
}
}
}
private:
std::vector<ActivationRecord> activations_;
std::vector<EntryOrExit> points_;
};
}
std::string DebugString(const TfFunctionDb& tf_function_db) {
std::string str;
protobuf::TextFormat::PrintToString(tf_function_db, &str);
return str;
}
void CombineTfFunctionDb(const TfFunctionDb& src, TfFunctionDb* dst) {
for (const auto& name_function : src.tf_functions()) {
const auto& name = name_function.first;
const auto& src_fun = name_function.second;
TfFunction* dst_fun = gtl::FindOrNull(*dst->mutable_tf_functions(), name);
if (dst_fun == nullptr) {
(*dst->mutable_tf_functions())[name] = src_fun;
} else {
CombineTfFunction(src_fun, dst_fun);
}
}
}
TfFunctionDb ConvertHostThreadsXLineToTfFunctionDb(const XLineVisitor& line) {
  TfFunctionExecutions tf_function_executions(line);
return tf_function_executions.ConvertToTfFunctionDb();
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_tf_functions.h"
#include <string>
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/protobuf/tf_function.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
namespace {
const absl::string_view kEager = "eager";
const absl::string_view kConcrete = "concrete";
const absl::string_view kTracedNonXla = "traced-nonXla";
const absl::string_view kTracedXla = "traced-xla";
const absl::string_view kNotTracedNonXla = "notTraced-nonXla";
const absl::string_view kNotTracedXla = "notTraced-xla";
constexpr double kMaxError = 0.001;
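// Test helper: converts every host-thread line in `space` into a
// TfFunctionDb and combines the per-line results.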
TfFunctionDb ConvertXSpaceToTfFunctionDb(const XSpace& space) {
TfFunctionDb result;
const XPlane* host_plane = FindPlaneWithName(space, kHostThreadsPlaneName);
if (host_plane) {
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_plane);
plane.ForEachLine([&result](const XLineVisitor& line) {
TfFunctionDb tf_function_db = ConvertHostThreadsXLineToTfFunctionDb(line);
CombineTfFunctionDb(tf_function_db, &result);
});
}
return result;
}
TEST(ConvertXPlaneToTfFunctions, CombineTwoThreads) {
XSpace space;
XPlaneBuilder host_plane_builder(space.add_planes());
host_plane_builder.SetName(kHostThreadsPlaneName);
host_plane_builder.ReserveLines(2);
std::string kFunctionName = "decrement";
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateTfFunctionCallEvent(&host_plane_builder, &main_thread, kFunctionName,
10, 100, kTracedNonXla, 1);
CreateTfFunctionCallEvent(&host_plane_builder, &main_thread, kFunctionName,
150, 20, kNotTracedNonXla, 2);
CreateTfFunctionCallEvent(&host_plane_builder, &main_thread, kFunctionName,
200, 80, kTracedNonXla, 3);
auto other_thread = host_plane_builder.GetOrCreateLine(1);
CreateTfFunctionCallEvent(&host_plane_builder, &other_thread, kFunctionName,
20, 100, kTracedNonXla, 2);
CreateTfFunctionCallEvent(&host_plane_builder, &other_thread, kFunctionName,
160, 20, kNotTracedNonXla, 2);
CreateTfFunctionCallEvent(&host_plane_builder, &other_thread, kFunctionName,
210, 80, kTracedXla, 4);
TfFunctionDb tf_function_db = ConvertXSpaceToTfFunctionDb(space);
EXPECT_EQ(tf_function_db.tf_functions().size(), 1);
EXPECT_EQ(tf_function_db.tf_functions().count(kFunctionName), 1);
const TfFunction& tf_function =
tf_function_db.tf_functions().at(kFunctionName);
EXPECT_EQ(tf_function.total_tracing_count(), 4);
EXPECT_EQ(tf_function.compiler(), MIXED_COMPILER);
EXPECT_NEAR(tf_function.expensive_call_percent(), 90, kMaxError);
const auto& metrics = tf_function.metrics();
EXPECT_EQ(metrics.size(), 2);
EXPECT_EQ(metrics.count(TRACED_MODE), 1);
EXPECT_EQ(metrics.count(NOT_TRACED_MODE), 1);
const auto& traced_mode = metrics.at(TRACED_MODE);
EXPECT_EQ(traced_mode.count(), 4);
EXPECT_EQ(traced_mode.self_time_ps(), 360);
const auto& not_traced_mode = metrics.at(NOT_TRACED_MODE);
EXPECT_EQ(not_traced_mode.count(), 2);
EXPECT_EQ(not_traced_mode.self_time_ps(), 40);
}
TEST(ConvertXPlaneToTfFunctions, NestedFunctions) {
XSpace space;
XPlaneBuilder host_plane_builder(space.add_planes());
host_plane_builder.SetName(kHostThreadsPlaneName);
host_plane_builder.ReserveLines(1);
std::string kOuterFunctionName = "outer";
std::string kInnerFunctionName = "inner";
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateTfFunctionCallEvent(&host_plane_builder, &main_thread,
kOuterFunctionName, 10, 100, kTracedNonXla, 1);
CreateTfFunctionCallEvent(&host_plane_builder, &main_thread,
kInnerFunctionName, 30, 40, kNotTracedXla, 0);
TfFunctionDb tf_function_db = ConvertXSpaceToTfFunctionDb(space);
EXPECT_EQ(tf_function_db.tf_functions().size(), 2);
EXPECT_EQ(tf_function_db.tf_functions().count(kOuterFunctionName), 1);
EXPECT_EQ(tf_function_db.tf_functions().count(kInnerFunctionName), 1);
const TfFunction& outer =
tf_function_db.tf_functions().at(kOuterFunctionName);
EXPECT_EQ(outer.total_tracing_count(), 1);
EXPECT_EQ(outer.compiler(), OTHER_COMPILER);
EXPECT_NEAR(outer.expensive_call_percent(), 100, kMaxError);
const auto& outer_metrics = outer.metrics();
EXPECT_EQ(outer_metrics.size(), 1);
EXPECT_EQ(outer_metrics.count(TRACED_MODE), 1);
const auto& traced_mode = outer_metrics.at(TRACED_MODE);
EXPECT_EQ(traced_mode.count(), 1);
EXPECT_EQ(traced_mode.self_time_ps(), 60);
const TfFunction& inner =
tf_function_db.tf_functions().at(kInnerFunctionName);
EXPECT_EQ(inner.total_tracing_count(), 0);
EXPECT_EQ(inner.compiler(), XLA_COMPILER);
EXPECT_NEAR(inner.expensive_call_percent(), 0, kMaxError);
const auto& inner_metrics = inner.metrics();
EXPECT_EQ(inner_metrics.size(), 1);
EXPECT_EQ(inner_metrics.count(NOT_TRACED_MODE), 1);
const auto& not_traced_mode = inner_metrics.at(NOT_TRACED_MODE);
EXPECT_EQ(not_traced_mode.count(), 1);
EXPECT_EQ(not_traced_mode.self_time_ps(), 40);
}
TEST(ConvertXPlaneToTfFunctions, EagerPlusConcrete) {
XSpace space;
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(&space));
host_plane_builder.ReserveLines(2);
std::string kEagerFunctionName = "i_am_eager";
std::string kConcreteFunctionName = "i_am_concrete";
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateTfFunctionCallEvent(&host_plane_builder, &main_thread,
kEagerFunctionName, 10, 200, kEager);
auto other_thread = host_plane_builder.GetOrCreateLine(1);
CreateTfFunctionCallEvent(&host_plane_builder, &other_thread,
kConcreteFunctionName, 20, 40, kConcrete);
TfFunctionDb tf_function_db = ConvertXSpaceToTfFunctionDb(space);
EXPECT_EQ(tf_function_db.tf_functions().size(), 2);
EXPECT_EQ(tf_function_db.tf_functions().count(kEagerFunctionName), 1);
EXPECT_EQ(tf_function_db.tf_functions().count(kConcreteFunctionName), 1);
const TfFunction& eager =
tf_function_db.tf_functions().at(kEagerFunctionName);
EXPECT_EQ(eager.total_tracing_count(), 0);
EXPECT_EQ(eager.compiler(), INVALID_COMPILER);
EXPECT_NEAR(eager.expensive_call_percent(), 100, kMaxError);
const auto& eager_metrics = eager.metrics();
EXPECT_EQ(eager_metrics.size(), 1);
EXPECT_EQ(eager_metrics.count(EAGER_MODE), 1);
const auto& eager_mode = eager_metrics.at(EAGER_MODE);
EXPECT_EQ(eager_mode.count(), 1);
EXPECT_EQ(eager_mode.self_time_ps(), 200);
const TfFunction& concrete =
tf_function_db.tf_functions().at(kConcreteFunctionName);
EXPECT_EQ(concrete.total_tracing_count(), 0);
EXPECT_EQ(concrete.compiler(), INVALID_COMPILER);
EXPECT_NEAR(concrete.expensive_call_percent(), 0, kMaxError);
const auto& concrete_metrics = concrete.metrics();
EXPECT_EQ(concrete_metrics.size(), 1);
EXPECT_EQ(concrete_metrics.count(CONCRETE_MODE), 1);
const auto& concrete_mode = concrete_metrics.at(CONCRETE_MODE);
EXPECT_EQ(concrete_mode.count(), 1);
EXPECT_EQ(concrete_mode.self_time_ps(), 40);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_tf_functions.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_tf_functions_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
32138b08-241d-42a6-8f88-50a041a5c0d0 | cpp | tensorflow/tensorflow | xplane_to_tf_data_stats | tensorflow/core/profiler/convert/xplane_to_tf_data_stats.cc | tensorflow/core/profiler/convert/xplane_to_tf_data_stats_test.cc | #include "tensorflow/core/profiler/convert/xplane_to_tf_data_stats.h"
#include <algorithm>
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/group_events.h"
#include "xla/tsl/profiler/utils/tf_op_utils.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/timespan.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/profiler/protobuf/tf_data_stats.pb.h"
#include "tensorflow/core/profiler/utils/html_utils.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
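// Latency threshold above which a root-iterator call is considered slow:
// 50 us, expressed in picoseconds.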
const int64_t kSlowCallThresholdPs = 50 * 1000000;
namespace {
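// A root iterator event is named "Iterator::<name>", i.e. splitting the name
// on "::" yields exactly two parts.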
bool IsRootIteratorEvent(const XEventVisitor& iterator_event) {
std::vector<absl::string_view> split_result =
absl::StrSplit(iterator_event.Name(), "::");
return split_result.size() == 2;
}
bool IsAsyncIterator(absl::string_view iterator_event_name) {
static auto* kAsyncIterators = new absl::flat_hash_set<absl::string_view>(
{"Prefetch", "ParallelInterleave", "ParallelMap", "ParseExample",
"MapAndBatch", "DataService", "LegacyParallelInterleave",
"ParallelBatch"});
return kAsyncIterators->contains(iterator_event_name);
}
void SetIteratorMetadata(int64_t id, const XEventVisitor& event,
IteratorMetadata* metadata) {
metadata->set_id(id);
auto parent_id_stat = event.GetStat(StatType::kParentId);
if (parent_id_stat.has_value()) {
metadata->set_parent_id(parent_id_stat->IntValue());
}
metadata->set_name(tsl::profiler::IteratorName(event.Name()));
metadata->set_long_name(event.Name().data(), event.Name().size());
metadata->set_is_async(IsAsyncIterator(metadata->name()));
}
std::optional<int64_t> FindDeviceInputPipeline(const XEventVisitor& event) {
if (event.Type() == HostEventType::kDeviceInputPipelineSecondIterator) {
auto parent_id_stat = event.GetStat(StatType::kParentId);
if (parent_id_stat.has_value()) return parent_id_stat->IntValue();
}
return std::nullopt;
}
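// Walks the event forest to populate per-iterator metadata, record which
// iterator ids belong to device input pipelines, and collect root iterator
// events keyed by iterator id.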
void ProcessEventForest(
const tsl::profiler::EventForest& event_forest,
absl::flat_hash_set<int64_t>* device_input_pipeline_ids,
absl::flat_hash_map<int64_t, std::vector<const tsl::profiler::EventNode*>>*
root_iterator_event_map,
TfDataStats* tf_data_stats) {
const tsl::profiler::EventNodeMap& event_node_map =
event_forest.GetEventNodeMap();
auto* iterator_event_list =
gtl::FindOrNull(event_node_map, HostEventType::kIterator);
if (!iterator_event_list) return;
for (const tsl::profiler::EventNode& iterator_event : *iterator_event_list) {
const XEventVisitor& iterator_event_visitor =
iterator_event.GetEventVisitor();
auto iterator_id_stat = iterator_event_visitor.GetStat(StatType::kStepId);
if (!iterator_id_stat.has_value()) continue;
int64_t iterator_id = iterator_id_stat->IntValue();
auto result = tf_data_stats->mutable_iterator_metadata()->insert(
{iterator_id, IteratorMetadata()});
IteratorMetadata& metadata = result.first->second;
if (result.second) {
SetIteratorMetadata(iterator_id, iterator_event_visitor, &metadata);
}
if (IsRootIteratorEvent(iterator_event_visitor)) {
(*root_iterator_event_map)[iterator_id].push_back(&iterator_event);
}
}
auto* device_input_pipeline_second_iterator_events = gtl::FindOrNull(
event_node_map, HostEventType::kDeviceInputPipelineSecondIterator);
if (!device_input_pipeline_second_iterator_events) return;
for (const tsl::profiler::EventNode& iterator_event :
*device_input_pipeline_second_iterator_events) {
const XEventVisitor& iterator_event_visitor =
iterator_event.GetEventVisitor();
auto iterator_id_stat = iterator_event_visitor.GetStat(StatType::kStepId);
if (!iterator_id_stat.has_value()) continue;
int64_t iterator_id = iterator_id_stat->IntValue();
auto result = tf_data_stats->mutable_iterator_metadata()->insert(
{iterator_id, IteratorMetadata()});
IteratorMetadata& metadata = result.first->second;
if (result.second) {
SetIteratorMetadata(iterator_id, iterator_event_visitor, &metadata);
std::optional<int64_t> device_input_pipeline_id =
FindDeviceInputPipeline(iterator_event_visitor);
if (device_input_pipeline_id.has_value()) {
device_input_pipeline_ids->insert(*device_input_pipeline_id);
}
}
}
}
void SetInputPipelineMetadata(int64_t id, int64_t name_id,
bool is_device_input_pipeline,
InputPipelineMetadata* metadata) {
constexpr absl::string_view kHostInputPipelinePrefix = "Host:";
constexpr absl::string_view kDeviceInputPipelinePrefix = "Device:";
metadata->set_id(id);
if (is_device_input_pipeline) {
metadata->set_type(InputPipelineMetadata::DEVICE);
metadata->set_name(absl::StrCat(kDeviceInputPipelinePrefix, name_id));
} else {
metadata->set_type(InputPipelineMetadata::HOST);
metadata->set_name(absl::StrCat(kHostInputPipelinePrefix, name_id));
}
}
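// Recursively aggregates per-iterator stats for one root-iterator call.
// `is_blocking` is propagated to a child only where the child's timespan
// overlaps the parent's self time; recursion depth is capped at 100.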
void ProcessIteratorEvent(const tsl::profiler::EventNode& iterator_event,
InputPipelineStat* input_pipeline_stat,
bool is_blocking, int level = 0) {
if (level > 100) return;
const XEventVisitor& visitor = iterator_event.GetEventVisitor();
auto iterator_id_stat = visitor.GetStat(StatType::kStepId);
if (!iterator_id_stat.has_value()) return;
int64_t iterator_id = iterator_id_stat->IntValue();
auto result = input_pipeline_stat->mutable_iterator_stats()->insert(
{iterator_id, IteratorStat()});
IteratorStat& iterator_stat = result.first->second;
if (result.second) {
iterator_stat.set_id(iterator_id);
iterator_stat.set_start_time_ps(visitor.TimestampPs());
}
iterator_stat.set_duration_ps(iterator_stat.duration_ps() +
visitor.DurationPs());
int64_t self_time_ps = visitor.DurationPs();
tsl::profiler::Timespan self_time_span = visitor.GetTimespan();
for (const tsl::profiler::EventNode* child : iterator_event.GetChildren()) {
const XEventVisitor& child_visitor = child->GetEventVisitor();
if (tsl::profiler::ParseTfOpFullname(child_visitor.Name()).category ==
tsl::profiler::Category::kTfData) {
int64_t overlap_duration_ps =
self_time_span.OverlappedDurationPs(child_visitor.GetTimespan());
ProcessIteratorEvent(*child, input_pipeline_stat,
is_blocking && overlap_duration_ps, level + 1);
self_time_ps -= overlap_duration_ps;
}
}
iterator_stat.set_self_time_ps(iterator_stat.self_time_ps() + self_time_ps);
iterator_stat.set_is_blocking(iterator_stat.is_blocking() || is_blocking);
iterator_stat.set_num_calls(iterator_stat.num_calls() + 1);
}
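// The bottleneck iterator of a call is the blocking iterator with the
// largest self time.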
void SetBottleneckIteratorId(InputPipelineStat* input_pipeline_stat) {
int64_t bottleneck_iterator_id = 0;
int64_t max_self_time = 0;
for (const auto& pair : input_pipeline_stat->iterator_stats()) {
const auto& id = pair.first;
const auto& iterator_stat = pair.second;
if (iterator_stat.is_blocking() &&
iterator_stat.self_time_ps() > max_self_time) {
bottleneck_iterator_id = id;
max_self_time = iterator_stat.self_time_ps();
}
}
input_pipeline_stat->set_bottleneck_iterator_id(bottleneck_iterator_id);
input_pipeline_stat->set_bottleneck_iterator_latency_ps(max_self_time);
}
void ProcessInputPipelines(
const absl::flat_hash_set<int64_t>& device_input_pipeline_ids,
absl::flat_hash_map<int64_t, std::vector<const tsl::profiler::EventNode*>>*
root_iterator_event_map,
TfDataStats* tf_data_stats) {
auto* input_pipelines = tf_data_stats->mutable_input_pipelines();
int64_t num_host_input_pipelines = 0;
int64_t num_device_input_pipelines = 0;
for (auto& id_and_events : *root_iterator_event_map) {
auto& root_iterator_id = id_and_events.first;
auto& root_iterator_events = id_and_events.second;
absl::c_sort(root_iterator_events, [](const tsl::profiler::EventNode* lhs,
const tsl::profiler::EventNode* rhs) {
return lhs->GetEventVisitor().DurationPs() >
rhs->GetEventVisitor().DurationPs();
});
auto result =
input_pipelines->insert({root_iterator_id, InputPipelineStats()});
InputPipelineStats& input_pipeline_stats = result.first->second;
InputPipelineMetadata* metadata = input_pipeline_stats.mutable_metadata();
if (result.second) {
bool is_device_input_pipeline =
device_input_pipeline_ids.contains(root_iterator_id);
int64_t name_id = is_device_input_pipeline ? num_device_input_pipelines++
: num_host_input_pipelines++;
SetInputPipelineMetadata(root_iterator_id, name_id,
is_device_input_pipeline, metadata);
}
int64_t sum_latency_ps = 0;
int64_t min_latency_ps = INT64_MAX;
int64_t max_latency_ps = 0;
int64_t num_slow_calls = 0;
for (const tsl::profiler::EventNode* root_iterator_event :
root_iterator_events) {
InputPipelineStat* stat = input_pipeline_stats.add_stats();
ProcessIteratorEvent(*root_iterator_event, stat,
true);
SetBottleneckIteratorId(stat);
int64_t latency_ps = root_iterator_event->GetEventVisitor().DurationPs();
sum_latency_ps += latency_ps;
min_latency_ps = std::min(min_latency_ps, latency_ps);
max_latency_ps = std::max(max_latency_ps, latency_ps);
if (latency_ps > kSlowCallThresholdPs) num_slow_calls++;
}
input_pipeline_stats.set_avg_latency_ps(sum_latency_ps /
root_iterator_events.size());
input_pipeline_stats.set_min_latency_ps(min_latency_ps);
input_pipeline_stats.set_max_latency_ps(max_latency_ps);
input_pipeline_stats.set_num_slow_calls(num_slow_calls);
}
}
void SetBottleneckAnalysis(CombinedTfDataStats* combined_tf_data_stats) {
struct InputPipeline {
InputPipeline(absl::string_view host_name,
absl::string_view input_pipeline_name, int64_t max_latency_ps,
absl::string_view iterator_name,
absl::string_view iterator_long_name,
int64_t iterator_latency_ps)
: host_name(host_name),
input_pipeline_name(input_pipeline_name),
max_latency_ps(max_latency_ps),
iterator_name(iterator_name),
iterator_long_name(iterator_long_name),
iterator_latency_ps(iterator_latency_ps) {}
absl::string_view host_name;
absl::string_view input_pipeline_name;
int64_t max_latency_ps;
absl::string_view iterator_name;
absl::string_view iterator_long_name;
int64_t iterator_latency_ps;
bool operator<(const InputPipeline& rhs) const {
return max_latency_ps > rhs.max_latency_ps;
}
};
std::vector<InputPipeline> slow_input_pipelines;
for (const auto& host_name_and_tf_data_stats :
combined_tf_data_stats->tf_data_stats()) {
absl::string_view host_name = host_name_and_tf_data_stats.first;
const TfDataStats& tf_data_stats = host_name_and_tf_data_stats.second;
for (const auto& id_and_stats : tf_data_stats.input_pipelines()) {
const InputPipelineStats& input_pipeline_stats = id_and_stats.second;
if (input_pipeline_stats.metadata().type() ==
InputPipelineMetadata::DEVICE) {
continue;
}
const InputPipelineStat& input_pipeline_stat =
input_pipeline_stats.stats(0);
const IteratorMetadata& metadata = tf_data_stats.iterator_metadata().at(
input_pipeline_stat.bottleneck_iterator_id());
slow_input_pipelines.emplace_back(
host_name, input_pipeline_stats.metadata().name(),
input_pipeline_stats.max_latency_ps(), metadata.name(),
metadata.long_name(),
input_pipeline_stat.bottleneck_iterator_latency_ps());
}
}
std::sort(slow_input_pipelines.begin(), slow_input_pipelines.end());
for (const auto& input_pipeline : slow_input_pipelines) {
TfDataBottleneckAnalysis* bottleneck_analysis =
combined_tf_data_stats->add_bottleneck_analysis();
bottleneck_analysis->set_host(input_pipeline.host_name.data(),
input_pipeline.host_name.size());
bottleneck_analysis->set_input_pipeline(
input_pipeline.input_pipeline_name.data(),
input_pipeline.input_pipeline_name.size());
bottleneck_analysis->set_max_latency_ps(input_pipeline.max_latency_ps);
bottleneck_analysis->set_iterator_name(input_pipeline.iterator_name.data(),
input_pipeline.iterator_name.size());
bottleneck_analysis->set_iterator_long_name(
input_pipeline.iterator_long_name.data(),
input_pipeline.iterator_long_name.size());
bottleneck_analysis->set_iterator_latency_ps(
input_pipeline.iterator_latency_ps);
}
}
std::string GetSuggestion(BottleneckType type) {
constexpr absl::string_view kPlaybookLink =
"https:
constexpr absl::string_view kPlaybookSourceDatasetLink =
"https:
"data_performance_analysis#source_datasets";
constexpr absl::string_view kPlaybookCpuUtilizationLink =
"https:
"data_performance_analysis#3_are_you_reaching_high_cpu_utilization";
constexpr absl::string_view kPlaybookTransformationLink =
"https:
"data_performance_analysis#transformation_datasets";
constexpr absl::string_view kTfGuideParallelDataExtractionLink =
"https:
"data_performance#parallelizing_data_extraction";
constexpr absl::string_view kTfGuideParallelTransformationLink =
"https:
"data_performance#parallelizing_data_transformation";
constexpr absl::string_view kTfGuideCacheLink =
"https:
constexpr absl::string_view kTfDataServiceLink =
"https:
"service?version=nightly";
switch (type) {
case BottleneckType::kSlowSource:
return absl::StrFormat(
"1. Check the locality of a host and input data. Ideally, they "
"should be in the same cell (or very close, like the same "
"region).<br/>"
"2. Parallelize reading from this dataset source. See %s and %s for "
"more details.<br/>",
AnchorElement(kPlaybookSourceDatasetLink, "here"),
AnchorElement(kTfGuideParallelDataExtractionLink, "here"));
case BottleneckType::kSlowDataService:
return absl::StrFormat(
"1. Fetching data from tf.data service took a while. Profile the "
"tf.data service worker to analyze the issue further.<br/>"
"2. See %s for more details on tf.data service.<br/>"
"3. See %s for other suggestions.",
AnchorElement(kTfDataServiceLink, "this"),
AnchorElement(kPlaybookLink, "this"));
case BottleneckType::kSlowRemoteSource:
return absl::StrFormat(
"1. The remote data source is slow. Profile its host to analyze the "
"issue further.<br/>"
"2. See %s for other suggestions.",
AnchorElement(kPlaybookLink, "this"));
case BottleneckType::kSlowTransformationWithParallelVersion:
return absl::StrFormat(
"1. Parallelize this transformation by setting "
"<code>num_parallel_calls=tf.data.experimental.AUTOTUNE</code>. See "
"%s for more details.<br/>"
"2. Consider adding <code>cache</code> after this transformation if "
"your data fits into memory and it is appropriate (e.g., there is no "
"randomness in upstream transformations like <code>shuffle</code>). "
"See %s for more details.<br/>"
"3. Find more resources %s.",
AnchorElement(kTfGuideParallelTransformationLink, "this"),
AnchorElement(kTfGuideCacheLink, "this"),
AnchorElement(kPlaybookTransformationLink, "here"));
case BottleneckType::kSlowTransformationWithoutParallelVersion:
return absl::StrFormat(
"1. This transformation is inherently sequential. Add outer "
"parallelism by running multiple copies of the input pipeline over "
"sharded inputs and combining the results. See %s for more "
"details.<br/>"
"2. Consider adding <code>cache</code> after this transformation if "
"your data fits into memory and it is appropriate (e.g., there is no "
"randomness in upstream transformations like <code>shuffle</code>). "
"See %s for more details.<br/>"
"3. Find more resources %s.",
AnchorElement(kPlaybookTransformationLink, "this"),
AnchorElement(kTfGuideCacheLink, "this"),
AnchorElement(kPlaybookCpuUtilizationLink, "here"));
default:
return absl::StrFormat("See %s for suggestions.",
AnchorElement(kPlaybookLink, "this"));
}
}
void SetSuggestion(CombinedTfDataStats* combined_tf_data_stats) {
for (TfDataBottleneckAnalysis& bottleneck_analysis :
*combined_tf_data_stats->mutable_bottleneck_analysis()) {
bottleneck_analysis.set_suggestion(
GetSuggestion(GetBottleneckType(bottleneck_analysis.iterator_name())));
}
}
void SetSummary(CombinedTfDataStats* combined_tf_data_stats) {
int64_t max_latency_ps = 0;
if (combined_tf_data_stats->bottleneck_analysis_size()) {
max_latency_ps =
combined_tf_data_stats->bottleneck_analysis().at(0).max_latency_ps();
}
if (max_latency_ps > kSlowCallThresholdPs) {
combined_tf_data_stats->set_is_input_bound(true);
combined_tf_data_stats->set_summary(
"Your profile has a tf.data input pipeline slower than 50 us. For each "
"slow input pipeline, below shows a bottleneck in the input pipeline "
"and a suggestion on how to fix it.");
} else if (max_latency_ps > 0) {
combined_tf_data_stats->set_is_input_bound(false);
combined_tf_data_stats->set_summary(
"Your profile does not have any tf.data input pipeline slower than 50 "
"us. Your job could be still input bound if this profile didn't "
"capture all workers.");
} else {
combined_tf_data_stats->set_is_input_bound(false);
combined_tf_data_stats->set_summary(
"No tf.data activity captured in your profile. If your job uses "
"tf.data, try to capture a longer profile.");
}
}
}
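// Maps a bottleneck iterator name to a coarse bottleneck type: slow source,
// slow remote source, slow tf.data service, or slow transformation (with or
// without a parallel version).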
BottleneckType GetBottleneckType(absl::string_view bottleneck_iterator_name) {
static auto* kBottleneckTypeMap = new absl::flat_hash_map<absl::string_view,
BottleneckType>(
{
{"TFRecord", BottleneckType::kSlowSource},
{"SSTable", BottleneckType::kSlowSource},
{"RecordIO", BottleneckType::kSlowSource},
{"Spanner", BottleneckType::kSlowSource},
{"TFColumn", BottleneckType::kSlowSource},
{"SleepwalkRemoteDataset", BottleneckType::kSlowSource},
{"TextLine", BottleneckType::kSlowSource},
{"StitchedTimelineDataset", BottleneckType::kSlowSource},
{"DateKeyDataset", BottleneckType::kSlowSource},
{"CapacitorProto", BottleneckType::kSlowSource},
{"LMDB", BottleneckType::kSlowSource},
{"ExternalDataset", BottleneckType::kSlowSource},
{"PearModel", BottleneckType::kSlowSource},
{"FixedLengthRecordV2", BottleneckType::kSlowSource},
{"FromTensor", BottleneckType::kSlowSource},
{"TensorSlice", BottleneckType::kSlowSource},
{"Generator", BottleneckType::kSlowSource},
{"SyntheticDatasetOp", BottleneckType::kSlowSource},
{"DataService", BottleneckType::kSlowDataService},
{"GuzzlerDataGuzzlerRemoteDataset", BottleneckType::kSlowRemoteSource},
{"ReverbDataset", BottleneckType::kSlowRemoteSource},
{"DatasetSampleGame", BottleneckType::kSlowRemoteSource},
{"Courier", BottleneckType::kSlowRemoteSource},
{"ReverbEpisodeDataset", BottleneckType::kSlowRemoteSource},
{"Map", BottleneckType::kSlowTransformationWithParallelVersion},
{"Interleave", BottleneckType::kSlowTransformationWithParallelVersion},
{"Filter", BottleneckType::kSlowTransformationWithoutParallelVersion},
{"Batch", BottleneckType::kSlowTransformationWithoutParallelVersion},
{"Unbatch", BottleneckType::kSlowTransformationWithoutParallelVersion}});
if (auto type =
gtl::FindOrNull(*kBottleneckTypeMap, bottleneck_iterator_name)) {
return *type;
}
return BottleneckType::kOther;
}
void CombinedTfDataStatsBuilder::Add(absl::string_view host_name,
XPlane* host_plane) {
TfDataStats& tf_data_stats =
(*combined_tf_data_stats_
->mutable_tf_data_stats())[std::string(host_name)];
tsl::profiler::EventForest event_forest;
event_forest.AddPlanes(tsl::profiler::CreateTfXPlaneVisitor, {host_plane});
event_forest.ConnectEvents();
event_forest.ConnectTfDataEvents();
absl::flat_hash_set<int64_t> device_input_pipeline_ids;
absl::flat_hash_map<int64_t, std::vector<const tsl::profiler::EventNode*>>
root_iterator_event_map;
ProcessEventForest(event_forest, &device_input_pipeline_ids,
&root_iterator_event_map, &tf_data_stats);
ProcessInputPipelines(device_input_pipeline_ids, &root_iterator_event_map,
&tf_data_stats);
}
void CombinedTfDataStatsBuilder::Finalize() {
SetBottleneckAnalysis(combined_tf_data_stats_);
if (generate_suggestion_) SetSuggestion(combined_tf_data_stats_);
SetSummary(combined_tf_data_stats_);
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_tf_data_stats.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/protobuf/tf_data_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::testing::EqualsProto;
TEST(XPlaneToTfDataStatsTest, HostInputPipeline) {
constexpr int64_t kPrefetchIteratorId = 123;
constexpr int64_t kRangeIteratorId = 456;
constexpr int64_t kFirstElementId = 100;
constexpr int64_t kSecondElementId = 200;
XPlane host_plane;
XPlaneBuilder host_plane_builder(&host_plane);
host_plane_builder.ReserveLines(2);
auto consumer_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &consumer_thread, "Iterator::Prefetch", 0,
100000000, {{StatType::kStepId, kPrefetchIteratorId}});
CreateXEvent(&host_plane_builder, &consumer_thread,
HostEventType::kPrefetchConsume, 80000000, 20000000,
{{StatType::kElementId, kFirstElementId}});
CreateXEvent(&host_plane_builder, &consumer_thread, "Iterator::Prefetch",
200000000, 20000000, {{StatType::kStepId, kPrefetchIteratorId}});
CreateXEvent(&host_plane_builder, &consumer_thread,
HostEventType::kPrefetchConsume, 210000000, 10000000,
{{StatType::kElementId, kSecondElementId}});
auto producer_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &producer_thread,
HostEventType::kPrefetchProduce, 0, 80000000,
{{StatType::kElementId, kFirstElementId}});
CreateXEvent(&host_plane_builder, &producer_thread,
"Iterator::Prefetch::Range", 0, 80000000,
{{StatType::kStepId, kRangeIteratorId},
{StatType::kParentId, kPrefetchIteratorId}});
CreateXEvent(&host_plane_builder, &producer_thread,
HostEventType::kPrefetchProduce, 100000000, 80000000,
{{StatType::kElementId, kSecondElementId}});
CreateXEvent(&host_plane_builder, &producer_thread,
"Iterator::Prefetch::Range", 100000000, 80000000,
{{StatType::kStepId, kRangeIteratorId},
{StatType::kParentId, kPrefetchIteratorId}});
CombinedTfDataStats combined_tf_data_stats;
CombinedTfDataStatsBuilder builder(&combined_tf_data_stats);
builder.Add("host1", &host_plane);
builder.Finalize();
EXPECT_THAT(
combined_tf_data_stats, EqualsProto(R"pb(
bottleneck_analysis: {
host: "host1"
input_pipeline: "Host:0"
max_latency_ps: 100000000
iterator_name: "Range"
iterator_long_name: "Iterator::Prefetch::Range"
iterator_latency_ps: 80000000
suggestion: "See <a href=\"https:
}
tf_data_stats: {
key: "host1"
value: {
iterator_metadata: {
key: 123,
value: {
id: 123
name: "Prefetch"
long_name: "Iterator::Prefetch"
is_async: true
}
}
iterator_metadata: {
key: 456,
value: {
id: 456
parent_id: 123
name: "Range"
long_name: "Iterator::Prefetch::Range"
is_async: false
}
}
input_pipelines {
key: 123,
value: {
metadata { id: 123 type: HOST name: "Host:0" }
avg_latency_ps: 60000000
min_latency_ps: 20000000
max_latency_ps: 100000000
num_slow_calls: 1
stats {
bottleneck_iterator_id: 456
bottleneck_iterator_latency_ps: 80000000
iterator_stats {
key: 123,
value: {
id: 123
start_time_ps: 0
duration_ps: 100000000
self_time_ps: 20000000
is_blocking: true
num_calls: 1
}
}
iterator_stats {
key: 456,
value: {
id: 456
start_time_ps: 0
duration_ps: 80000000
self_time_ps: 80000000
is_blocking: true
num_calls: 1
}
}
}
stats {
bottleneck_iterator_id: 123
bottleneck_iterator_latency_ps: 20000000
iterator_stats {
key: 123,
value: {
id: 123
start_time_ps: 200000000
duration_ps: 20000000
self_time_ps: 20000000
is_blocking: true
num_calls: 1
}
}
iterator_stats {
key: 456,
value: {
id: 456
start_time_ps: 100000000
duration_ps: 80000000
self_time_ps: 80000000
is_blocking: false
num_calls: 1
}
}
}
}
}
}
}
is_input_bound: true
summary: "Your profile has a tf.data input pipeline slower than 50 us. For each slow input pipeline, below shows a bottleneck in the input pipeline and a suggestion on how to fix it."
)pb"));
}
TEST(XPlaneToTfDataStatsTest, DeviceInputPipeline) {
constexpr int64_t kPrefetchIteratorId = 123;
constexpr int64_t kRangeIteratorId = 456;
constexpr int64_t kElementId = 100;
XPlane host_plane;
XPlaneBuilder host_plane_builder(&host_plane);
host_plane_builder.ReserveLines(2);
auto consumer_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &consumer_thread, "Iterator::Prefetch", 0,
30000000, {{StatType::kStepId, kPrefetchIteratorId}});
CreateXEvent(&host_plane_builder, &consumer_thread, "Iterator::Prefetch",
100000000, 100000000,
{{StatType::kStepId, kPrefetchIteratorId}});
CreateXEvent(&host_plane_builder, &consumer_thread,
HostEventType::kPrefetchConsume, 180000000, 20000000,
{{StatType::kElementId, kElementId}});
auto producer_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &producer_thread,
HostEventType::kPrefetchProduce, 100000000, 80000000,
{{StatType::kElementId, kElementId}});
CreateXEvent(&host_plane_builder, &producer_thread,
"Iterator::Prefetch::Generator", 100000000, 80000000,
{{StatType::kStepId, kRangeIteratorId},
{StatType::kParentId, kPrefetchIteratorId}});
CombinedTfDataStats combined_tf_data_stats;
CombinedTfDataStatsBuilder builder(&combined_tf_data_stats);
builder.Add("host1", &host_plane);
builder.Finalize();
EXPECT_THAT(
combined_tf_data_stats, EqualsProto(R"pb(
tf_data_stats: {
key: "host1"
value: {
iterator_metadata: {
key: 123,
value: {
id: 123
name: "Prefetch"
long_name: "Iterator::Prefetch"
is_async: true
}
}
iterator_metadata: {
key: 456,
value: {
id: 456
parent_id: 123
name: "Generator"
long_name: "Iterator::Prefetch::Generator"
is_async: false
}
}
input_pipelines {
key: 123,
value: {
metadata { id: 123 type: DEVICE name: "Device:0" }
avg_latency_ps: 65000000
min_latency_ps: 30000000
max_latency_ps: 100000000
num_slow_calls: 1
stats {
bottleneck_iterator_id: 456
bottleneck_iterator_latency_ps: 80000000
iterator_stats {
key: 123,
value: {
id: 123
start_time_ps: 100000000
duration_ps: 100000000
self_time_ps: 20000000
is_blocking: true
num_calls: 1
}
}
iterator_stats {
key: 456,
value: {
id: 456
start_time_ps: 100000000
duration_ps: 80000000
self_time_ps: 80000000
is_blocking: true
num_calls: 1
}
}
}
stats {
bottleneck_iterator_id: 123
bottleneck_iterator_latency_ps: 30000000
iterator_stats {
key: 123,
value: {
id: 123
start_time_ps: 0
duration_ps: 30000000
self_time_ps: 30000000
is_blocking: true
num_calls: 1
}
}
}
}
}
}
}
summary: "No tf.data activity captured in your profile. If your job uses tf.data, try to capture a longer profile."
)pb"));
}
TEST(XPlaneToTfDataStatsTest, MapAndBatch) {
constexpr int64_t kMapAndBatchIteratorId = 123;
constexpr int64_t kRangeIteratorId = 456;
constexpr int64_t kElementId = 100;
XPlane host_plane;
XPlaneBuilder host_plane_builder(&host_plane);
host_plane_builder.ReserveLines(2);
XLineBuilder consumer_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &consumer_thread, "Iterator::MapAndBatch",
0, 100000000, {{StatType::kStepId, kMapAndBatchIteratorId}});
CreateXEvent(&host_plane_builder, &consumer_thread,
HostEventType::kMapAndBatchConsume, 80000000, 20000000,
{{StatType::kElementId, kElementId}});
XLineBuilder producer_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &producer_thread,
HostEventType::kMapAndBatchProduce, 0, 30000000,
{{StatType::kElementId, kElementId}});
CreateXEvent(&host_plane_builder, &producer_thread,
"Iterator::MapAndBatch::Range", 0, 30000000,
{{StatType::kStepId, kRangeIteratorId},
{StatType::kParentId, kMapAndBatchIteratorId}});
CreateXEvent(&host_plane_builder, &producer_thread,
HostEventType::kMapAndBatchProduce, 40000000, 30000000,
{{StatType::kElementId, kElementId}});
CreateXEvent(&host_plane_builder, &producer_thread,
"Iterator::MapAndBatch::Range", 40000000, 30000000,
{{StatType::kStepId, kRangeIteratorId},
{StatType::kParentId, kMapAndBatchIteratorId}});
CombinedTfDataStats combined_tf_data_stats;
CombinedTfDataStatsBuilder builder(&combined_tf_data_stats);
builder.Add("host1", &host_plane);
builder.Finalize();
EXPECT_THAT(
combined_tf_data_stats, EqualsProto(R"pb(
bottleneck_analysis: {
host: "host1"
input_pipeline: "Host:0"
max_latency_ps: 100000000
iterator_name: "Range"
iterator_long_name: "Iterator::MapAndBatch::Range"
iterator_latency_ps: 60000000
suggestion: "See <a href=\"https:
}
tf_data_stats: {
key: "host1"
value: {
iterator_metadata: {
key: 123,
value: {
id: 123
name: "MapAndBatch"
long_name: "Iterator::MapAndBatch"
is_async: true
}
}
iterator_metadata: {
key: 456,
value: {
id: 456
parent_id: 123
name: "Range"
long_name: "Iterator::MapAndBatch::Range"
is_async: false
}
}
input_pipelines {
key: 123,
value: {
metadata { id: 123 type: HOST name: "Host:0" }
avg_latency_ps: 100000000
min_latency_ps: 100000000
max_latency_ps: 100000000
num_slow_calls: 1
stats {
bottleneck_iterator_id: 456
bottleneck_iterator_latency_ps: 60000000
iterator_stats {
key: 123,
value: {
id: 123
start_time_ps: 0
duration_ps: 100000000
self_time_ps: 40000000
is_blocking: true
num_calls: 1
}
}
iterator_stats {
key: 456,
value: {
id: 456
start_time_ps: 0
duration_ps: 60000000
self_time_ps: 60000000
is_blocking: true
num_calls: 2
}
}
}
}
}
}
}
is_input_bound: true
summary: "Your profile has a tf.data input pipeline slower than 50 us. For each slow input pipeline, below shows a bottleneck in the input pipeline and a suggestion on how to fix it."
)pb"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_tf_data_stats.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_tf_data_stats_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
00b7dd90-cd98-4777-86c0-fa8ab725430b | cpp | tensorflow/tensorflow | xplane_to_step_events | tensorflow/core/profiler/convert/xplane_to_step_events.cc | tensorflow/core/profiler/convert/xplane_to_step_events_test.cc | #include "tensorflow/core/profiler/convert/xplane_to_step_events.h"
#include <cstdint>
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/match.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "xla/tsl/profiler/utils/tf_op_utils.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/timespan.h"
#include "xla/tsl/profiler/utils/tpu_xplane_utils.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/event_span.h"
#include "tensorflow/core/profiler/utils/op_metrics_db_utils.h"
#include "tensorflow/core/profiler/utils/trace_utils.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
namespace {
inline bool IsExplicitHostStepMarker(absl::string_view event_name) {
return (absl::StartsWith(event_name, "train") ||
absl::StartsWith(event_name, "test") ||
absl::StartsWith(event_name, "TraceContext")) &&
!absl::StrContains(event_name, "/");
}
inline bool IsRealCpuCompute(absl::string_view event_name) {
bool not_real = absl::StartsWith(event_name, "EagerExecute") ||
absl::StartsWith(event_name, "EagerLocalExecute") ||
absl::StartsWith(event_name, "EagerKernelExecute") ||
absl::StartsWith(event_name, "FunctionRun") ||
IsExplicitHostStepMarker(event_name);
return !not_real;
}
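// Extracts the value of the "num_bytes" key from a memcpy detail string,
// which is assumed to hold key/value tokens separated by ':' or '\n'.
// For example (illustrative input), "num_bytes:1024\nkind:HtoD" yields
// 1024; a missing or unparsable value yields 0.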
uint64 ParseNumBytesFromMemcpyDetail(absl::string_view memcpy_detail) {
const std::vector<absl::string_view> params =
absl::StrSplit(memcpy_detail, absl::ByAnyChar(":\n"));
for (uint32 ii = 0; ii < params.size(); ii += 2) {
if (params[ii] != "num_bytes") continue;
uint64 value = 0;
if (absl::SimpleAtoi(params[ii + 1], &value)) return value;
break;
}
return 0ULL;
}
EventType ClassifyGpuCompute(absl::string_view event_name,
absl::string_view tensor_shapes) {
if (tensor_shapes.empty()) {
return (absl::StrContains(event_name, "half") ||
absl::StrContains(event_name, "fp16"))
? DEVICE_COMPUTE_16
: DEVICE_COMPUTE_32;
} else {
return (absl::StrContains(tensor_shapes, "half")) ? DEVICE_COMPUTE_16
: DEVICE_COMPUTE_32;
}
}
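// Classifies a GPU event by its TF op name: memcpy ops map to the matching
// transfer type, kernels whose names start with "nccl" map to
// DEVICE_COLLECTIVES, and anything else is treated as 16- or 32-bit compute.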
EventType ClassifyGpuEvent(absl::string_view event_name,
absl::string_view tensor_shapes) {
tsl::profiler::TfOp tf_op = tsl::profiler::ParseTfOpFullname(event_name);
if (tsl::profiler::IsMemcpyHToDOp(tf_op)) {
return HOST_TO_DEVICE;
} else if (tsl::profiler::IsMemcpyDToHOp(tf_op)) {
return DEVICE_TO_HOST;
} else if (tsl::profiler::IsMemcpyDToDOp(tf_op)) {
return DEVICE_TO_DEVICE;
} else if (absl::StartsWithIgnoreCase(event_name, "nccl")) {
return DEVICE_COLLECTIVES;
} else {
return ClassifyGpuCompute(event_name, tensor_shapes);
}
}
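// Classifies a host event. When a device is present, events that carry a
// correlation id or are named "ExecutorState::Process..." count as
// HOST_PREPARE (host-side work that feeds the device), while
// "IteratorGetNext..." events count as HOST_WAIT_INPUT.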
EventType ClassifyCpuEvent(absl::string_view event_name, bool has_device,
bool has_correlation_id) {
tsl::profiler::TfOp tf_op = tsl::profiler::ParseTfOpFullname(event_name);
if (tsl::profiler::IsInfeedEnqueueOp(tf_op) ||
tsl::profiler::IsMemcpyHToDOp(tf_op)) {
return HOST_TO_DEVICE;
} else if (tsl::profiler::IsMemcpyHToHOp(tf_op)) {
return HOST_TO_HOST;
} else if (has_device && (has_correlation_id ||
absl::StartsWithIgnoreCase(
event_name, "ExecutorState::Process"))) {
return HOST_PREPARE;
} else if (absl::StartsWithIgnoreCase(event_name, "IteratorGetNext")) {
return HOST_WAIT_INPUT;
} else {
return HOST_COMPUTE;
}
}
}
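// Converts the events of one host thread into per-step StepEvents. Events
// without a group id are skipped, and when device step events are supplied,
// host events whose group id never ran on the device are dropped.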
StepEvents ConvertHostThreadsXLineToStepEvents(
const XLineVisitor& line, const StepEvents* device_step_events) {
StepEvents result;
line.ForEachEvent([&](const XEventVisitor& event) {
int64_t correlation_id = -1;
int64_t group_id = -1;
absl::string_view step_name;
event.ForEachStat([&](const XStatVisitor& stat) {
if (!stat.Type().has_value()) return;
switch (stat.Type().value()) {
case StatType::kCorrelationId:
correlation_id = stat.IntValue();
break;
case StatType::kGroupId:
group_id = stat.IntValue();
break;
case StatType::kStepName:
step_name = stat.StrOrRefValue();
break;
}
});
if (group_id < 0) return;
bool has_device = (device_step_events != nullptr);
if (has_device && !device_step_events->contains(group_id)) return;
if (IsExplicitHostStepMarker(event.Name())) {
result[group_id].AddMarker(
StepMarker(StepMarkerType::kExplicitHostStepMarker, event.Name(),
event.GetTimespan()));
} else if (!step_name.empty()) {
result[group_id].AddMarker(
StepMarker(StepMarkerType::kImplicitHostStepMarker, event.Name(),
event.GetTimespan()));
} else if (IsRealCpuCompute(event.Name())) {
result[group_id].AddEvent(EventTypeSpan(
ClassifyCpuEvent(event.Name(), has_device, correlation_id >= 0),
event.GetTimespan()));
}
if (!step_name.empty()) {
result[group_id].SetStepName(std::string(step_name));
}
});
return result;
}
StepEvents ConvertHostThreadsXPlaneToStepEvents(
const XPlane& host_trace, const StepEvents* device_step_events) {
StepEvents host_step_events;
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(&host_trace);
plane.ForEachLine([&](const XLineVisitor& line) {
StepEvents thread_step_events =
ConvertHostThreadsXLineToStepEvents(line, device_step_events);
UnionCombineStepEvents(thread_step_events, &host_step_events);
});
return host_step_events;
}
StepEvents ConvertDeviceStepInfoToStepMarkers(const XLineVisitor& line) {
StepEvents result;
line.ForEachEvent([&](const XEventVisitor& event) {
if (std::optional<XStatVisitor> stat = event.GetStat(StatType::kGroupId)) {
result[stat->IntValue()].AddMarker(
StepMarker(StepMarkerType::kDeviceStepMarker, event.Name(),
event.GetTimespan()));
}
});
return result;
}
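// Converts one device stream line into StepEvents. Only events carrying
// both a correlation id and a group id are kept; collectives additionally
// record their timing, and memcpy events record the bytes transferred as
// parsed from the memcpy-details stat.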
StepEvents ConvertDeviceTraceXLineToStepEvents(const uint64 device_id,
const XLineVisitor& line) {
StepEvents result;
line.ForEachEvent([&](const XEventVisitor& event) {
int64_t correlation_id = -1;
int64_t group_id = -1;
absl::string_view tensor_shapes;
absl::string_view memcpy_details;
event.ForEachStat([&](const XStatVisitor& stat) {
if (!stat.Type().has_value()) return;
switch (stat.Type().value()) {
case StatType::kCorrelationId:
correlation_id = stat.IntValue();
break;
case StatType::kGroupId:
group_id = stat.IntValue();
break;
case StatType::kTensorShapes:
tensor_shapes = stat.StrOrRefValue();
break;
case StatType::kMemcpyDetails:
memcpy_details = stat.StrOrRefValue();
break;
}
});
if (correlation_id >= 0 && group_id >= 0) {
EventType event_type = ClassifyGpuEvent(event.Name(), tensor_shapes);
EventTypeSpan event_type_span(event_type, event.GetTimespan());
result[group_id].AddEvent(event_type_span);
switch (event_type) {
case DEVICE_COLLECTIVES: {
AllReduceInfo collective_ops;
collective_ops.set_start_time_ps(event.TimestampPs());
collective_ops.set_end_time_ps(event.EndOffsetPs());
result[group_id].AddCollectiveOpEvent(device_id, collective_ops);
break;
}
case HOST_TO_DEVICE:
case DEVICE_TO_DEVICE:
case DEVICE_TO_HOST: {
uint64 bytes_transferred =
ParseNumBytesFromMemcpyDetail(memcpy_details);
result[group_id].AddDeviceMemoryTransferEvent(
event_type, event.GetTimespan(), bytes_transferred);
break;
}
default:
return;
}
}
});
return result;
}
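// TPU variant of the above: rather than classifying individual events, all
// events of a group are folded into a per-step OpMetricsDb for this core.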
StepEvents ConvertTpuDeviceTraceXLineToStepEvents(const uint64 device_id,
const XLineVisitor& line) {
StepEvents result;
  absl::flat_hash_map<int64_t /*group_id*/, XEventsOpMetricsDbBuilder>
      op_metrics_builder;
line.ForEachEvent([&](const XEventVisitor& event) {
auto group_id = event.GetStat(StatType::kGroupId);
if (!group_id.has_value()) return;
op_metrics_builder[group_id->IntOrUintValue()].AddOpMetric(event);
});
for (auto& [group_id, builder] : op_metrics_builder) {
result[group_id].SetPerCoreOpMetricsDb(builder.Finalize(), device_id);
}
return result;
}
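// Note the asymmetry below: GPU stream results are merged with a union
// (a step may appear on only some streams), while TPU and SparseCore line
// results are merged with an intersection, keeping only steps observed on
// every core.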
StepEvents ConvertDeviceTraceXPlaneToStepEvents(const XPlane& device_trace) {
StepEvents device_step_events;
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(&device_trace);
std::optional<int> tpu_core_id = tsl::profiler::GetTensorCoreId(plane.Name());
std::optional<int> sc_core_id = tsl::profiler::GetSparseCoreId(plane.Name());
plane.ForEachLine([&](const XLineVisitor& line) {
int64_t line_id = line.Id();
if (line_id == kThreadIdStepInfo ||
(tpu_core_id.has_value() &&
line.Name() == tsl::profiler::kStepLineName) ||
(sc_core_id.has_value() &&
line.Name() == tsl::profiler::kSparseCoreStepLineName)) {
StepEvents step_marker_events = ConvertDeviceStepInfoToStepMarkers(line);
UnionCombineStepEvents(step_marker_events, &device_step_events);
} else if (IsDerivedThreadId(line_id)) {
return;
} else {
StepEvents stream_step_events;
if (tpu_core_id.has_value()) {
stream_step_events =
ConvertTpuDeviceTraceXLineToStepEvents(*tpu_core_id, line);
IntersectCombineStepEvents(stream_step_events, &device_step_events);
} else if (sc_core_id.has_value()) {
stream_step_events = ConvertTpuDeviceTraceXLineToStepEvents(
kSparseCoreIndexStart + *sc_core_id, line);
IntersectCombineStepEvents(stream_step_events, &device_step_events);
} else {
stream_step_events =
ConvertDeviceTraceXLineToStepEvents(plane.Id(), line);
UnionCombineStepEvents(stream_step_events, &device_step_events);
}
}
});
return device_step_events;
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_step_events.h"
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/tsl/profiler/utils/group_events.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/event_span.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
TEST(ConvertXPlaneToOpStats, CpuOnlyStepDbTest) {
constexpr int64_t kFirstStepNum = 123;
constexpr int64_t kSecondStepNum = 456;
constexpr int64_t kFirstStepId = 0;
constexpr int64_t kSecondStepId = 1;
constexpr int64_t kFirstCorrelationId = 100;
constexpr int64_t kSecondCorrelationId = 200;
XSpace space;
XPlane* host_plane = GetOrCreateHostXPlane(&space);
XPlaneBuilder host_plane_builder(host_plane);
host_plane_builder.ReserveLines(2);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kTraceContext,
0, 100, {{StatType::kStepNum, kFirstStepNum}});
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
10, 90,
{{StatType::kStepId, kFirstStepId},
{StatType::kProducerType, int64_t{1}},
{StatType::kProducerId, kFirstStepId}});
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kTraceContext,
300, 100, {{StatType::kStepNum, kSecondStepNum}});
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
310, 90,
{{StatType::kStepId, kSecondStepId},
{StatType::kProducerType, int64_t{1}},
{StatType::kProducerId, kSecondStepId}});
auto tf_executor_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &tf_executor_thread,
HostEventType::kExecutorStateProcess, 20, 20,
{{StatType::kStepId, kFirstStepId},
{StatType::kConsumerType, int64_t{1}},
{StatType::kConsumerId, kFirstStepId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 30, 10,
{{StatType::kCorrelationId, kFirstCorrelationId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread,
HostEventType::kExecutorStateProcess, 320, 20,
{{StatType::kStepId, kSecondStepId},
{StatType::kConsumerType, int64_t{1}},
{StatType::kConsumerId, kSecondStepId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 330, 10,
{{StatType::kCorrelationId, kSecondCorrelationId}});
XPlane* device_plane = space.add_planes();
XPlaneBuilder device_plane_builder(device_plane);
device_plane_builder.ReserveLines(1);
auto stream = device_plane_builder.GetOrCreateLine(0);
CreateXEvent(&device_plane_builder, &stream, "matmul", 50, 40,
{{StatType::kCorrelationId, kFirstCorrelationId}});
tsl::profiler::GroupTfEvents(&space);
StepEvents device_step_events =
ConvertDeviceTraceXPlaneToStepEvents(*device_plane);
EXPECT_EQ(device_step_events.size(), 1);
EXPECT_EQ(device_step_events[0].Events().size(), 1);
StepEvents host_step_events =
ConvertHostThreadsXPlaneToStepEvents(*host_plane, &device_step_events);
EXPECT_EQ(host_step_events.size(), 1);
EXPECT_EQ(host_step_events[0].Markers().size(), 1);
EXPECT_EQ(host_step_events[0].Events().size(), 2);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_step_events.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_step_events_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
10c3c9a1-2e41-4864-a9be-cd9d7a285c38 | cpp | tensorflow/tensorflow | xplane_to_tool_names | tensorflow/core/profiler/convert/xplane_to_tool_names.cc | tensorflow/core/profiler/convert/xplane_to_tool_names_test.cc | #include "tensorflow/core/profiler/convert/xplane_to_tool_names.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/str_join.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/profiler/convert/repository.h"
#include "tensorflow/core/profiler/convert/xplane_to_dcn_collective_stats.h"
#include "tensorflow/core/profiler/convert/xplane_to_hlo.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
namespace tensorflow {
namespace profiler {
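// Builds the comma-separated list of profiler tools that can be served for
// this session. "trace_viewer@" is offered when a run directory is
// accessible; kernel_stats, memory_viewer/graph_viewer, and
// dcn_collective_stats are added only if GPU planes, HLO protos, or DCN
// stats, respectively, are present.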
absl::StatusOr<std::string> GetAvailableToolNames(
const SessionSnapshot& session_snapshot) {
std::vector<std::string> tools;
bool is_cloud_vertex_ai = !session_snapshot.HasAccessibleRunDir();
if (session_snapshot.XSpaceSize() != 0) {
tools.reserve(11);
tools.push_back(is_cloud_vertex_ai ? "trace_viewer" : "trace_viewer@");
tools.push_back("overview_page");
tools.push_back("input_pipeline_analyzer");
tools.push_back("framework_op_stats");
tools.push_back("memory_profile");
tools.push_back("pod_viewer");
tools.push_back("tf_data_bottleneck_analysis");
tools.push_back("op_profile");
TF_ASSIGN_OR_RETURN(std::unique_ptr<XSpace> xspace,
session_snapshot.GetXSpace(0));
if (!FindPlanesWithPrefix(*xspace, kGpuPlanePrefix).empty()) {
tools.push_back("kernel_stats");
}
TF_ASSIGN_OR_RETURN(bool has_hlo,
ConvertMultiXSpaceToHloProto(session_snapshot));
if (has_hlo) {
tools.push_back("memory_viewer");
tools.push_back("graph_viewer");
}
TF_ASSIGN_OR_RETURN(bool has_dcn_collective_stats,
HasDcnCollectiveStatsInMultiXSpace(session_snapshot));
if (has_dcn_collective_stats) {
tools.push_back("dcn_collective_stats");
}
}
return absl::StrJoin(tools, ",");
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_tool_names.h"
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/convert/repository.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
struct XPlaneToToolsTestCase {
std::string test_name;
std::string_view plane_name;
bool has_hlo_module;
bool has_dcn_collective_stats;
std::vector<std::string> expected_tools;
};
SessionSnapshot CreateSessionSnapshot(std::unique_ptr<XSpace> xspace,
bool has_hlo_module,
bool has_dcn_collective_stats) {
std::string test_name =
::testing::UnitTest::GetInstance()->current_test_info()->name();
std::string path = absl::StrCat("ram:
std::unique_ptr<WritableFile> xplane_file;
tensorflow::Env::Default()
->NewAppendableFile(absl::StrCat(path, "hostname.xplane.pb"),
&xplane_file)
.IgnoreError();
std::vector<std::string> paths = {path};
if (has_hlo_module) {
tensorflow::Env::Default()
->NewAppendableFile(absl::StrCat(path, "module_name.hlo_proto.pb"),
&xplane_file)
.IgnoreError();
} else {
tensorflow::Env::Default()
->NewAppendableFile(absl::StrCat(path, "NO_MODULE.hlo_proto.pb"),
&xplane_file)
.IgnoreError();
}
if (has_dcn_collective_stats) {
tensorflow::Env::Default()
->NewAppendableFile(
absl::StrCat(path, "hostname.dcn_collective_stats.pb"),
&xplane_file)
.IgnoreError();
tensorflow::Env::Default()
->NewAppendableFile(
absl::StrCat(path, "ALL_HOSTS.dcn_collective_stats.pb"),
&xplane_file)
.IgnoreError();
} else {
tensorflow::Env::Default()
->NewAppendableFile(
absl::StrCat(path, "NO_HOST.dcn_collective_stats.pb"), &xplane_file)
.IgnoreError();
}
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(xspace));
absl::StatusOr<SessionSnapshot> session_snapshot =
SessionSnapshot::Create(paths, std::move(xspaces));
TF_CHECK_OK(session_snapshot.status());
return std::move(session_snapshot.value());
}
using XPlaneToToolsTest = ::testing::TestWithParam<XPlaneToToolsTestCase>;
TEST_P(XPlaneToToolsTest, ToolsList) {
const XPlaneToToolsTestCase& test_case = GetParam();
auto xspace = std::make_unique<XSpace>();
FindOrAddMutablePlaneWithName(xspace.get(), test_case.plane_name);
SessionSnapshot sessionSnapshot =
CreateSessionSnapshot(std::move(xspace), test_case.has_hlo_module,
test_case.has_dcn_collective_stats);
absl::StatusOr<std::string> toolsString =
GetAvailableToolNames(sessionSnapshot);
ASSERT_TRUE(toolsString.ok());
std::vector<std::string> tools = absl::StrSplit(toolsString.value(), ',');
std::vector<std::string> expected_tools = {"trace_viewer",
"overview_page",
"input_pipeline_analyzer",
"framework_op_stats",
"memory_profile",
"pod_viewer",
"tf_data_bottleneck_analysis",
"op_profile"};
expected_tools.insert(expected_tools.end(), test_case.expected_tools.begin(),
test_case.expected_tools.end());
EXPECT_THAT(tools, ::testing::UnorderedElementsAreArray(expected_tools));
}
INSTANTIATE_TEST_SUITE_P(
XPlaneToToolsTests, XPlaneToToolsTest,
::testing::ValuesIn<XPlaneToToolsTestCase>({
{"ToolsForTpuWithoutHloModule", kTpuPlanePrefix, false, false, {}},
{"ToolsForTpuWithHloModule",
kTpuPlanePrefix,
true,
false,
{"graph_viewer", "memory_viewer"}},
{"ToolsForGpuWithoutHloModule",
kGpuPlanePrefix,
false,
false,
{"kernel_stats"}},
{"ToolsForGpuWithHloModule",
kGpuPlanePrefix,
true,
false,
{"kernel_stats", "graph_viewer", "memory_viewer"}},
{"ToolsForTpuWithDcnCollectiveStats",
kTpuPlanePrefix,
false,
true,
{"dcn_collective_stats"}},
}),
[](const ::testing::TestParamInfo<XPlaneToToolsTest::ParamType>& info) {
return info.param.test_name;
});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_tool_names.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_tool_names_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c60900b9-5117-4a35-921c-e7e73ded1358 | cpp | tensorflow/tensorflow | xplane_to_dcn_collective_stats | tensorflow/core/profiler/convert/xplane_to_dcn_collective_stats.cc | tensorflow/core/profiler/convert/xplane_to_dcn_collective_stats_test.cc | #include "tensorflow/core/profiler/convert/xplane_to_dcn_collective_stats.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/strings/match.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/profiler/convert/dcn_slack_analysis_combiner.h"
#include "tensorflow/core/profiler/convert/repository.h"
#include "tensorflow/core/profiler/convert/xspace_to_dcn_slack_analysis.h"
#include "tensorflow/core/profiler/protobuf/dcn_slack_analysis.pb.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
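// DCN collective traffic is detected by host events whose names start with
// "MegaScale:".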
bool HasDcnCollectiveStatsInXSpace(const XSpace& xspace) {
if (const tensorflow::profiler::XPlane* xplane = FindPlaneWithName(
xspace, tensorflow::profiler::kHostThreadsPlaneName);
xplane != nullptr) {
for (const auto& [_, metadata] : xplane->event_metadata()) {
if (absl::StartsWith(metadata.name(), "MegaScale:")) {
return true;
}
}
}
return false;
}
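// Computes per-host DCN slack analyses, writes each to the session cache,
// and writes a combined ALL_HOSTS proto. If any host lacks DCN stats, an
// empty NO_HOST marker proto is written instead and false is returned, so
// later calls can answer from the cache without rescanning the XSpaces.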
absl::StatusOr<bool> GetDcnCollectiveStatsFromMultiXSpaceAndSaveToFile(
const SessionSnapshot& session_snapshot) {
DcnSlackAnalysisCombiner combiner;
for (int idx = 0; idx < session_snapshot.XSpaceSize(); idx++) {
std::string hostname = session_snapshot.GetHostname(idx);
TF_ASSIGN_OR_RETURN(std::unique_ptr<XSpace> xspace,
session_snapshot.GetXSpace(idx));
if (!HasDcnCollectiveStatsInXSpace(*xspace)) {
DcnSlackAnalysis dcnSlackAnalysis;
TF_RETURN_IF_ERROR(WriteBinaryProto(session_snapshot,
StoredDataType::DCN_COLLECTIVE_STATS,
kNoHostIdentifier, dcnSlackAnalysis));
return false;
}
DcnSlackAnalysis dcnSlackAnalysis =
ConvertXSpaceToDcnSlackAnalysis(*xspace, nullptr, nullptr);
TF_RETURN_IF_ERROR(WriteBinaryProto(session_snapshot,
StoredDataType::DCN_COLLECTIVE_STATS,
hostname, dcnSlackAnalysis));
combiner.Combine(dcnSlackAnalysis);
}
DcnSlackAnalysis dcnSlackAnalysis = combiner.Finalize();
TF_RETURN_IF_ERROR(WriteBinaryProto(session_snapshot,
StoredDataType::DCN_COLLECTIVE_STATS,
kAllHostsIdentifier, dcnSlackAnalysis));
return true;
}
}
absl::StatusOr<bool> HasDcnCollectiveStatsInMultiXSpace(
const SessionSnapshot& session_snapshot) {
std::pair<bool, std::string> hasCacheFile;
TF_ASSIGN_OR_RETURN(hasCacheFile, session_snapshot.HasCacheFile(
StoredDataType::DCN_COLLECTIVE_STATS));
if (!hasCacheFile.first) {
for (int idx = 0; idx < session_snapshot.XSpaceSize(); idx++) {
std::string hostname = session_snapshot.GetHostname(idx);
TF_ASSIGN_OR_RETURN(std::unique_ptr<XSpace> xspace,
session_snapshot.GetXSpace(idx));
if (HasDcnCollectiveStatsInXSpace(*xspace)) {
return true;
}
}
return false;
}
if (hasCacheFile.second.empty()) {
return false;
} else {
return true;
}
}
absl::StatusOr<bool> ConvertMultiXSpaceToDcnCollectiveStats(
const SessionSnapshot& session_snapshot) {
std::pair<bool, std::string> hasCacheFile;
TF_ASSIGN_OR_RETURN(hasCacheFile, session_snapshot.HasCacheFile(
StoredDataType::DCN_COLLECTIVE_STATS));
if (!hasCacheFile.first) {
return GetDcnCollectiveStatsFromMultiXSpaceAndSaveToFile(session_snapshot);
}
if (hasCacheFile.second.empty()) {
return false;
} else {
return true;
}
}
absl::StatusOr<DcnSlackAnalysis> GetDcnSlackAnalysisByHostName(
const SessionSnapshot& session_snapshot, const std::string hostname) {
TF_ASSIGN_OR_RETURN(bool hasDcnCollectiveStats,
ConvertMultiXSpaceToDcnCollectiveStats(session_snapshot));
DcnSlackAnalysis dcnSlackAnalysis;
if (hasDcnCollectiveStats) {
TF_RETURN_IF_ERROR(ReadBinaryProto(session_snapshot,
StoredDataType::DCN_COLLECTIVE_STATS,
hostname, &dcnSlackAnalysis));
}
return dcnSlackAnalysis;
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_dcn_collective_stats.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/profiler/convert/repository.h"
#include "tensorflow/core/profiler/protobuf/dcn_slack_analysis.pb.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
#include "tsl/platform/status.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
DcnSlackAnalysis CreateDcnSlackAnalysisProto() {
DcnSlackAnalysis dcn_slack_analysis;
DcnSlackSummary* dcn_slack_summary =
dcn_slack_analysis.add_dcn_slack_summary();
dcn_slack_summary->set_rendezvous("collective");
dcn_slack_summary->set_recv_op_name("recv-done");
dcn_slack_summary->set_send_op_name("send");
dcn_slack_summary->set_slack_us(2);
dcn_slack_summary->set_observed_duration_us(12);
dcn_slack_summary->set_stall_duration_us(5);
dcn_slack_summary->set_occurrences(4);
dcn_slack_summary->set_bytes_transmitted_over_network(819200);
return dcn_slack_analysis;
}
SessionSnapshot CreateSessionSnapshot(bool create_cache_file,
bool has_dcn_collective_stats) {
std::string test_name =
::testing::UnitTest::GetInstance()->current_test_info()->name();
std::string path = absl::StrCat("ram:
std::unique_ptr<WritableFile> xplane_file;
std::vector<std::string> paths = {absl::StrCat(path, "hostname.xplane.pb")};
auto xspace = std::make_unique<XSpace>();
XPlane* xplane = FindOrAddMutablePlaneWithName(xspace.get(), "/host:CPU");
if (has_dcn_collective_stats) {
XPlaneBuilder xplane_builder(xplane);
xplane_builder.GetOrCreateEventMetadata("MegaScale:");
}
if (create_cache_file) {
if (has_dcn_collective_stats) {
tensorflow::Env::Default()
->NewAppendableFile(
absl::StrCat(path, "hostname.dcn_collective_stats.pb"),
&xplane_file)
.IgnoreError();
tensorflow::Env::Default()
->NewAppendableFile(
absl::StrCat(path, "ALL_HOSTS.dcn_collective_stats.pb"),
&xplane_file)
.IgnoreError();
} else {
tensorflow::Env::Default()
->NewAppendableFile(
absl::StrCat(path, "NO_HOST.dcn_collective_stats.pb"),
&xplane_file)
.IgnoreError();
}
}
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(xspace));
absl::StatusOr<SessionSnapshot> session_snapshot_status =
SessionSnapshot::Create(paths, std::move(xspaces));
TF_CHECK_OK(session_snapshot_status.status());
SessionSnapshot session_snapshot = std::move(session_snapshot_status.value());
if (has_dcn_collective_stats) {
DcnSlackAnalysis dcn_slack_analysis = CreateDcnSlackAnalysisProto();
TF_CHECK_OK(session_snapshot.WriteBinaryProto(
DCN_COLLECTIVE_STATS, "hostname", dcn_slack_analysis));
TF_CHECK_OK(session_snapshot.WriteBinaryProto(
DCN_COLLECTIVE_STATS, kAllHostsIdentifier, dcn_slack_analysis));
}
return session_snapshot;
}
TEST(ConvertXplaneToDcnCollectiveStats,
HasAllHostsDcnCollectiveStatsCacheFile) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(true, true);
absl::StatusOr<bool> status =
HasDcnCollectiveStatsInMultiXSpace(session_snapshot);
EXPECT_EQ(status.value(), true);
}
TEST(ConvertXplaneToDcnCollectiveStats, HasNoHostDcnCollectiveStatsCacheFile) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(true, false);
absl::StatusOr<bool> status =
HasDcnCollectiveStatsInMultiXSpace(session_snapshot);
EXPECT_EQ(status.value(), false);
}
TEST(ConvertXplaneToDcnCollectiveStats,
NoCacheFileButTraceHasDcnCollectiveStats) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(false, true);
absl::StatusOr<bool> status =
HasDcnCollectiveStatsInMultiXSpace(session_snapshot);
EXPECT_EQ(status.value(), true);
}
TEST(ConvertXplaneToDcnCollectiveStats,
NoCacheFileNoDcnCollectiveStatsPresent) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(false, false);
absl::StatusOr<bool> status =
HasDcnCollectiveStatsInMultiXSpace(session_snapshot);
EXPECT_EQ(status.value(), false);
}
TEST(ConvertXplaneToDcnCollectiveStats,
ConvertXSpaceToDcnCollectiveStatsWhenStatsPresent) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(false, true);
absl::StatusOr<bool> status =
ConvertMultiXSpaceToDcnCollectiveStats(session_snapshot);
absl::StatusOr<std::optional<std::string>> all_hosts_filepath =
session_snapshot.GetHostDataFilePath(StoredDataType::DCN_COLLECTIVE_STATS,
kAllHostsIdentifier);
absl::StatusOr<std::optional<std::string>> host_filepath =
session_snapshot.GetHostDataFilePath(StoredDataType::DCN_COLLECTIVE_STATS,
"hostname");
EXPECT_EQ(status.value(), true);
TF_EXPECT_OK(all_hosts_filepath.status());
EXPECT_TRUE(all_hosts_filepath.value().has_value());
EXPECT_FALSE(all_hosts_filepath.value().value().empty());
TF_EXPECT_OK(host_filepath.status());
EXPECT_TRUE(host_filepath.value().has_value());
EXPECT_FALSE(host_filepath.value().value().empty());
}
TEST(ConvertXplaneToDcnCollectiveStats,
ConvertXSpaceToDcnCollectiveStatsWhenStatsNotPresent) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(false, false);
absl::StatusOr<bool> status =
ConvertMultiXSpaceToDcnCollectiveStats(session_snapshot);
absl::StatusOr<std::optional<std::string>> filepath =
session_snapshot.GetHostDataFilePath(StoredDataType::DCN_COLLECTIVE_STATS,
kNoHostIdentifier);
EXPECT_EQ(status.value(), false);
TF_EXPECT_OK(filepath.status());
EXPECT_TRUE(filepath.value().has_value());
EXPECT_FALSE(filepath.value().value().empty());
}
TEST(ConvertXplaneToDcnCollectiveStats,
GetHostDcnSlackAnalysisWhenStatsNotPresent) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(false, false);
absl::StatusOr<DcnSlackAnalysis> host_dcn_slack_analysis =
GetDcnSlackAnalysisByHostName(session_snapshot, "hostname");
TF_EXPECT_OK(host_dcn_slack_analysis.status());
EXPECT_EQ(host_dcn_slack_analysis.value().dcn_slack_summary_size(), 0);
}
TEST(ConvertXplaneToDcnCollectiveStats,
GetHostDcnSlackAnalysisWhenStatsPresent) {
SessionSnapshot session_snapshot = CreateSessionSnapshot(true, true);
absl::StatusOr<DcnSlackAnalysis> host_dcn_slack_analysis =
GetDcnSlackAnalysisByHostName(session_snapshot, "hostname");
TF_EXPECT_OK(host_dcn_slack_analysis.status());
EXPECT_EQ(host_dcn_slack_analysis.value().dcn_slack_summary_size(), 1);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_dcn_collective_stats.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_dcn_collective_stats_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
429e12ca-dd05-4f23-8bf5-04fa9872c11b | cpp | tensorflow/tensorflow | op_stats_combiner | tensorflow/core/profiler/convert/op_stats_combiner.cc | tensorflow/core/profiler/convert/op_stats_combiner_test.cc | #include "tensorflow/core/profiler/convert/op_stats_combiner.h"
#include <algorithm>
#include <cstddef>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/profiler/convert/op_metrics_db_combiner.h"
#include "tensorflow/core/profiler/convert/xplane_to_tf_functions.h"
#include "tensorflow/core/profiler/protobuf/diagnostics.pb.h"
#include "tensorflow/core/profiler/protobuf/hardware_types.pb.h"
#include "tensorflow/core/profiler/protobuf/kernel_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/power_metrics.pb.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/protobuf/topology.pb.h"
#include "tensorflow/core/profiler/utils/hardware_type_utils.h"
#include "tensorflow/core/profiler/utils/kernel_stats_utils.h"
#include "tensorflow/core/profiler/utils/step_intersection.h"
namespace tensorflow {
namespace profiler {
namespace {
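// Merges one host's per-core step info into the combined step: core ids are
// namespaced by host via CombineCoreIdMap so cores from different hosts do
// not collide, and each core's step number is rewritten to the combined
// step's number. Steps from hosts that used incomplete steps are excluded
// from the complete-steps-only HLO metrics db.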
void CombinePerCoreStepInfo(
int src_host_id, const PerCoreStepInfo& src, bool use_incomplete_step,
PerCoreStepInfo* dst,
OpMetricsDbCombiner* hlo_metrics_db_complete_steps_only_combiner,
OpMetricsDbCombiner* hlo_metrics_db_per_step_combiner) {
CombineCoreIdMap(src_host_id, src.step_info_per_core(),
dst->mutable_step_info_per_core());
uint32 new_step_num = dst->step_num();
for (auto& percore_stepinfo : *dst->mutable_step_info_per_core()) {
auto& stepinfo = percore_stepinfo.second;
stepinfo.set_step_num(new_step_num);
}
if (!use_incomplete_step) {
hlo_metrics_db_complete_steps_only_combiner->Combine(src.hlo_metrics_db());
}
hlo_metrics_db_per_step_combiner->Combine(src.hlo_metrics_db());
CombineCoreIdMap(src_host_id, src.all_reduce_db_per_core(),
dst->mutable_all_reduce_db_per_core());
CombineCoreIdMap(src_host_id, src.core_id_to_replica_id_map(),
dst->mutable_core_id_to_replica_id_map());
}
void CombineStepDatabase(
int src_host_id, const StepIntersection& step_intersection,
const StepDatabaseResult& src, StepDatabaseResult* dst,
OpMetricsDbCombiner* hlo_metrics_db_complete_steps_only_combiner,
std::vector<OpMetricsDbCombiner>* hlo_metrics_db_per_step_combiners) {
if (src.use_incomplete_step()) dst->set_use_incomplete_step(true);
uint32 src_first_step_idx = step_intersection.FirstStepIndex(src_host_id);
for (uint32 i = 0; i < step_intersection.NumSteps(); i++) {
CombinePerCoreStepInfo(
src_host_id, src.step_sequence(src_first_step_idx + i),
src.use_incomplete_step(), dst->mutable_step_sequence(i),
hlo_metrics_db_complete_steps_only_combiner,
&(*hlo_metrics_db_per_step_combiners)[i]);
}
}
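// Combines power metrics by weighting each side by the number of hosts it
// already represents; e.g. merging a 1-host src into a 3-host dst averages
// avg_power with weights 0.25 and 0.75, while max_power takes the max.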
void CombinePowerMetrics(const RunEnvironment& src, RunEnvironment* dst) {
const size_t src_hosts = src.hostnames_size();
const size_t dst_hosts = dst->hostnames_size();
const double src_weight = src_hosts * 1.0 / (src_hosts + dst_hosts);
const double dst_weight = dst_hosts * 1.0 / (src_hosts + dst_hosts);
for (const auto& src_metric : src.power_metrics().power_component_metrics()) {
for (auto& dst_metric :
*dst->mutable_power_metrics()->mutable_power_component_metrics()) {
if (src_metric.component_name() != dst_metric.component_name()) continue;
dst_metric.set_max_power(
std::max(src_metric.max_power(), dst_metric.max_power()));
dst_metric.set_avg_power(src_metric.avg_power() * src_weight +
dst_metric.avg_power() * dst_weight);
}
}
}
void CombineRunEnvironment(const RunEnvironment& src, RunEnvironment* dst) {
dst->mutable_hostnames()->insert(src.hostnames().begin(),
src.hostnames().end());
dst->set_host_count(dst->hostnames_size());
if (src.device_type() != "CPU" && src.device_type() != "Device") {
dst->set_device_type(src.device_type());
dst->set_device_core_count(src.device_core_count() +
dst->device_core_count());
dst->set_replica_count(std::max(src.replica_count(), dst->replica_count()));
dst->set_num_cores_per_replica(
std::max(src.num_cores_per_replica(), dst->num_cores_per_replica()));
*dst->mutable_system_topology() = src.system_topology();
} else if (dst->device_type().empty()) {
dst->set_device_type(src.device_type());
}
dst->set_task_count(src.task_count() + dst->task_count());
if (src.host_independent_job_info().profile_duration_ms() > 0) {
(*dst->mutable_host_independent_job_info()) =
src.host_independent_job_info();
}
for (const auto& job_info : src.host_dependent_job_info()) {
*(dst->add_host_dependent_job_info()) = job_info;
}
dst->set_host_trace_level(src.host_trace_level());
dst->set_is_training(src.is_training());
CombinePowerMetrics(src, dst);
}
void CombinePerfEnv(const PerfEnv& src, PerfEnv* dst) {
if (src.peak_tera_flops_per_second() > 0) {
dst->set_peak_tera_flops_per_second(src.peak_tera_flops_per_second());
}
if (src.peak_bws_giga_bytes_per_second_size() > 0 &&
dst->peak_bws_giga_bytes_per_second_size() == 0) {
*dst->mutable_peak_bws_giga_bytes_per_second() =
src.peak_bws_giga_bytes_per_second();
}
if (src.ridge_point() > 0) {
dst->set_ridge_point(src.ridge_point());
}
}
void CombineDiagnostics(const Diagnostics& src, Diagnostics* dst) {
dst->mutable_info()->MergeFrom(src.info());
dst->mutable_warnings()->MergeFrom(src.warnings());
dst->mutable_errors()->MergeFrom(src.errors());
}
void CombineOpStats(
bool no_accelerator_in_system, int src_host_id, HardwareType hardware_type,
const StepIntersection& step_intersection, const OpStats& src, OpStats* dst,
OpMetricsDbCombiner* host_op_metrics_db_combiner,
OpMetricsDbCombiner* device_op_metrics_db_combiner,
OpMetricsDbCombiner* hlo_metrics_db_complete_steps_only_combiner,
std::vector<OpMetricsDbCombiner>* hlo_metrics_db_per_step_combiners) {
  host_op_metrics_db_combiner->Combine(src.host_op_metrics_db(), false);
device_op_metrics_db_combiner->Combine(src.device_op_metrics_db());
if (!IsCoordinator(no_accelerator_in_system, hardware_type)) {
CombineStepDatabase(src_host_id, step_intersection, src.step_db(),
dst->mutable_step_db(),
hlo_metrics_db_complete_steps_only_combiner,
hlo_metrics_db_per_step_combiners);
}
CombineRunEnvironment(src.run_environment(), dst->mutable_run_environment());
CombinePerfEnv(src.perf_env(), dst->mutable_perf_env());
CombineDiagnostics(src.diagnostics(), dst->mutable_diagnostics());
dst->mutable_kernel_stats_db()->mutable_reports()->MergeFrom(
src.kernel_stats_db().reports());
CombineTfFunctionDb(src.tf_function_db(), dst->mutable_tf_function_db());
CombineCoreIdMap(src_host_id, src.core_id_to_details(),
dst->mutable_core_id_to_details());
dst->mutable_performance_counter_result()
->set_matrix_unit_utilization_percent(
dst->performance_counter_result().matrix_unit_utilization_percent() +
src.performance_counter_result().matrix_unit_utilization_percent());
}
}
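// A host is a "coordinator" if it has no accelerator itself while some
// other host in the profile does; coordinators contribute no step database
// to the combined result.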
bool IsCoordinator(bool no_accelerator_in_system, HardwareType hardware_type) {
return !HasDevice(hardware_type) && !no_accelerator_in_system;
}
bool NoAcceleratorInSystem(const std::vector<OpStatsInfo>& all_op_stats_info) {
for (const auto& op_stats_info : all_op_stats_info) {
if (HasDevice(op_stats_info.hardware_type)) {
return false;
}
}
return true;
}
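// Flattens (host_id, device_ordinal) into one global core id, assuming at
// most 1000 devices per host; e.g. GlobalCoreId(2, 5) == 2005.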
uint32 GlobalCoreId(int host_id, uint32 device_ordinal) {
constexpr uint32 kMaxDevicesPerHost = 1000;
return host_id * kMaxDevicesPerHost + device_ordinal;
}
StepIntersection ComputeStepIntersectionToMergeOpStats(
const std::vector<OpStatsInfo>& all_op_stats_info,
uint32 max_step_per_host) {
bool no_accelerator_in_system = NoAcceleratorInSystem(all_op_stats_info);
absl::flat_hash_map<uint32, const StepDatabaseResult*> per_host_step_db;
for (const auto& op_stats_info : all_op_stats_info) {
if (IsCoordinator(no_accelerator_in_system, op_stats_info.hardware_type))
continue;
per_host_step_db[op_stats_info.src_host_id] =
&op_stats_info.op_stats->step_db();
}
return StepIntersection(max_step_per_host, per_host_step_db);
}
void CombineAllOpStats(const std::vector<OpStatsInfo>& all_op_stats_info,
const StepIntersection& step_intersection,
OpStats* combined_op_stats) {
if (all_op_stats_info.size() == 1) {
*combined_op_stats = *all_op_stats_info[0].op_stats;
return;
}
StepDatabaseResult* combined_step_db = combined_op_stats->mutable_step_db();
for (uint32 dst_step_num : step_intersection.DstStepNumbers()) {
combined_step_db->add_step_sequence()->set_step_num(dst_step_num);
}
combined_step_db->set_num_steps_dropped(step_intersection.StepsDropped());
combined_step_db->set_empty_intersect(step_intersection.EmptyIntersect());
OpMetricsDbCombiner host_op_metrics_db_combiner(
combined_op_stats->mutable_host_op_metrics_db());
OpMetricsDbCombiner device_op_metrics_db_combiner(
combined_op_stats->mutable_device_op_metrics_db());
OpMetricsDbCombiner hlo_metrics_db_complete_steps_only_combiner(
combined_op_stats->mutable_hlo_metrics_db_complete_steps_only());
std::vector<OpMetricsDbCombiner> hlo_metrics_db_per_step_combiners;
hlo_metrics_db_per_step_combiners.reserve(
combined_step_db->step_sequence_size());
for (PerCoreStepInfo& step_info :
*combined_step_db->mutable_step_sequence()) {
hlo_metrics_db_per_step_combiners.emplace_back(
step_info.mutable_hlo_metrics_db());
}
bool no_accelerator_in_system = NoAcceleratorInSystem(all_op_stats_info);
for (const auto& op_stats_info : all_op_stats_info) {
CombineOpStats(no_accelerator_in_system, op_stats_info.src_host_id,
op_stats_info.hardware_type, step_intersection,
*op_stats_info.op_stats, combined_op_stats,
&host_op_metrics_db_combiner, &device_op_metrics_db_combiner,
&hlo_metrics_db_complete_steps_only_combiner,
&hlo_metrics_db_per_step_combiners);
}
SortAndKeepTopKDurationKernelReportsInDb(
combined_op_stats->mutable_kernel_stats_db());
combined_op_stats->mutable_performance_counter_result()
->set_matrix_unit_utilization_percent(
combined_op_stats->performance_counter_result()
.matrix_unit_utilization_percent() /
all_op_stats_info.size());
}
}
} | #include "tensorflow/core/profiler/convert/op_stats_combiner.h"
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/hardware_types.pb.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/utils/step_intersection.h"
namespace tensorflow {
namespace profiler {
namespace {
TEST(CombineAllOpStatsTest, CombineRunEnvironment) {
OpStats dst_op_stats, op_stats_1, op_stats_2;
op_stats_1.mutable_run_environment()
->mutable_host_independent_job_info()
->set_profile_duration_ms(100);
op_stats_2.mutable_run_environment()
->mutable_host_independent_job_info()
->set_profile_duration_ms(0);
OpStatsInfo op_stats_info_1(&op_stats_1, TPU, 0),
op_stats_info_2(&op_stats_2, TPU, 0);
std::vector<OpStatsInfo> all_op_stats_info = {op_stats_info_1,
op_stats_info_2};
StepDatabaseResult dummy_step_db_result;
  absl::flat_hash_map<uint32, const StepDatabaseResult*> result;
result.insert({0, &dummy_step_db_result});
StepIntersection dummy_step_intersection = StepIntersection(1, result);
CombineAllOpStats(all_op_stats_info, dummy_step_intersection, &dst_op_stats);
EXPECT_EQ(100, dst_op_stats.run_environment()
.host_independent_job_info()
.profile_duration_ms());
}
TEST(CombineAllOpStatsTest, CombineRunEnvironmentWithUnknownDevice) {
OpStats dst_op_stats, op_stats_1, op_stats_2;
op_stats_1.mutable_run_environment()->set_device_type("TPU");
op_stats_2.mutable_run_environment()->set_device_type("Device");
OpStatsInfo op_stats_info_1(&op_stats_1, TPU, 0),
op_stats_info_2(&op_stats_2, TPU, 0);
std::vector<OpStatsInfo> all_op_stats_info = {op_stats_info_1,
op_stats_info_2};
StepDatabaseResult dummy_step_db_result;
  absl::flat_hash_map<uint32, const StepDatabaseResult*> result;
result.insert({0, &dummy_step_db_result});
StepIntersection dummy_step_intersection = StepIntersection(1, result);
CombineAllOpStats(all_op_stats_info, dummy_step_intersection, &dst_op_stats);
EXPECT_EQ("TPU", dst_op_stats.run_environment().device_type());
}
TEST(CombineAllOpStatsTest, CombinePerfEnvOrderZero) {
OpStats dst_op_stats1, dst_op_stats2, op_stats_1, op_stats_2;
op_stats_1.mutable_perf_env()->set_peak_tera_flops_per_second(100);
op_stats_2.mutable_perf_env()->set_peak_tera_flops_per_second(0);
  absl::flat_hash_map<uint32, const StepDatabaseResult*> result;
StepIntersection dummy_step_intersection = StepIntersection(1, result);
OpStatsInfo op_stats_info_1(&op_stats_1, TPU, 0),
op_stats_info_2(&op_stats_2, TPU, 0);
std::vector<OpStatsInfo> all_op_stats_info = {op_stats_info_1,
op_stats_info_2};
CombineAllOpStats(all_op_stats_info, dummy_step_intersection, &dst_op_stats1);
EXPECT_EQ(100, dst_op_stats1.perf_env().peak_tera_flops_per_second());
all_op_stats_info = {
op_stats_info_2,
op_stats_info_1,
};
CombineAllOpStats(all_op_stats_info, dummy_step_intersection, &dst_op_stats2);
EXPECT_EQ(100, dst_op_stats2.perf_env().peak_tera_flops_per_second());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/op_stats_combiner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/op_stats_combiner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fc1c3508-aaa8-4b66-b170-3982278d9920 | cpp | tensorflow/tensorflow | op_stats_to_tf_stats | tensorflow/core/profiler/convert/op_stats_to_tf_stats.cc | tensorflow/core/profiler/convert/op_stats_to_tf_stats_test.cc | #include "tensorflow/core/profiler/convert/op_stats_to_tf_stats.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/convert/op_metrics_to_record.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/tf_stats.pb.h"
#include "tensorflow/core/profiler/utils/kernel_stats_utils.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
#include "tensorflow/core/profiler/utils/op_metrics_db_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
const int kMaxNumOfOps = 500;
TfStatsRecord ConvertOpMetricsToTfStatsRecord(
bool on_device, const OpMetrics& metrics,
double ridge_point_operational_intensity) {
TfStatsRecord record;
record.set_host_or_device(on_device ? "Device" : "Host");
record.set_is_eager(metrics.is_eager());
record.set_op_type(metrics.category());
record.set_op_name(metrics.name());
SetExecutionTimes(metrics, &record);
SetRooflineMetrics(metrics, ridge_point_operational_intensity, &record);
return record;
}
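// Builds the device-op rows followed by the host-op rows. A sentinel record
// with rank 0 and zero cumulative fractions seeds the running cumulative
// self-time computation for the first real row.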
TfStatsTable GenerateTfStatsTable(
const OpMetricsDb& host_tf_metrics_db,
const OpMetricsDb& device_tf_metrics_db,
const KernelStatsByOpName& kernel_stats_by_op_name, double ridge_point,
bool exclude_idle) {
TfStatsTable tf_stats_table;
TfStatsRecord sentinel;
sentinel.set_rank(0);
sentinel.set_device_cumulative_total_self_time_as_fraction(0.0);
sentinel.set_host_cumulative_total_self_time_as_fraction(0.0);
const TfStatsRecord* prev_record = &sentinel;
uint64 total_device_time_ps = TotalTimePs(device_tf_metrics_db, exclude_idle);
double total_device_time_us =
tsl::profiler::PicoToMicro(total_device_time_ps);
for (const OpMetrics* metrics :
SortedOpMetricsDb(device_tf_metrics_db, kMaxNumOfOps)) {
if (exclude_idle && IsIdleOp(*metrics)) continue;
TfStatsRecord* record = tf_stats_table.add_tf_stats_record();
*record = ConvertOpMetricsToTfStatsRecord(
        /*on_device=*/true, *metrics, ridge_point);
auto iter = kernel_stats_by_op_name.find(record->op_name());
if (iter != kernel_stats_by_op_name.end()) {
record->set_gpu_tensorcore_utilization(
tsl::profiler::SafeDivide(iter->second.tensor_core_duration_ns,
iter->second.total_duration_ns));
} else {
record->set_gpu_tensorcore_utilization(0.0);
}
SetRankAndDeviceTimeFractions(total_device_time_us, *prev_record, record);
prev_record = record;
}
uint64 total_host_time_ps = TotalTimePs(host_tf_metrics_db, exclude_idle);
double total_host_time_us = tsl::profiler::PicoToMicro(total_host_time_ps);
for (const OpMetrics* metrics : tensorflow::profiler::SortedOpMetricsDb(
host_tf_metrics_db, kMaxNumOfOps)) {
if (exclude_idle && IsIdleOp(*metrics)) continue;
TfStatsRecord* record = tf_stats_table.add_tf_stats_record();
*record = ConvertOpMetricsToTfStatsRecord(
        /*on_device=*/false, *metrics, ridge_point);
record->set_gpu_tensorcore_utilization(0.0);
SetRankAndHostTimeFractions(total_host_time_us, *prev_record, record);
prev_record = record;
}
return tf_stats_table;
}
}
TfStatsDatabase ConvertOpStatsToTfStats(const OpStats& op_stats) {
const OpMetricsDb& host_tf_metrics_db = op_stats.host_op_metrics_db();
OpMetricsDb device_tf_metrics_db =
CreateTfMetricsDbFromDeviceOpMetricsDb(op_stats.device_op_metrics_db());
double ridge_point = op_stats.perf_env().ridge_point();
KernelStatsByOpName kernel_stats_by_op_name =
GroupKernelReportsByOpName(op_stats.kernel_stats_db());
TfStatsDatabase tf_stats_db;
*tf_stats_db.mutable_with_idle() = GenerateTfStatsTable(
host_tf_metrics_db, device_tf_metrics_db, kernel_stats_by_op_name,
      ridge_point, /*exclude_idle=*/false);
*tf_stats_db.mutable_without_idle() = GenerateTfStatsTable(
host_tf_metrics_db, device_tf_metrics_db, kernel_stats_by_op_name,
      ridge_point, /*exclude_idle=*/true);
tf_stats_db.set_device_type(op_stats.run_environment().device_type());
return tf_stats_db;
}
}
} | #include "tensorflow/core/profiler/convert/op_stats_to_tf_stats.h"
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/convert/xplane_to_op_stats.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/tf_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
XEventBuilder AddTensorFlowOpEvent(std::string&& tf_op_fullname,
int64_t start_timestamp_ns,
int64_t duration_ns, bool on_device,
absl::string_view kernel_name,
XPlaneBuilder* plane, XLineBuilder* line) {
absl::string_view name = on_device ? kernel_name : tf_op_fullname;
XEventBuilder event = line->AddEvent(*plane->GetOrCreateEventMetadata(name));
event.SetTimestampNs(start_timestamp_ns);
event.SetDurationNs(duration_ns);
if (!on_device) return event;
event.AddStatValue(
*plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kTfOp)),
*plane->GetOrCreateStatMetadata(std::move(tf_op_fullname)));
return event;
}
void AddTensorFlowOpEventWithKernelDetails(std::string&& tf_op_fullname,
int64_t start_timestamp_ns,
int64_t duration_ns, bool on_device,
absl::string_view kernel_name,
absl::string_view kernel_details,
XPlaneBuilder* plane,
XLineBuilder* line) {
XEventBuilder event =
AddTensorFlowOpEvent(std::move(tf_op_fullname), start_timestamp_ns,
duration_ns, on_device, kernel_name, plane, line);
if (!on_device) return;
event.ParseAndAddStatValue(*plane->GetOrCreateStatMetadata("kernel_details"),
kernel_details);
}
TEST(OpStatsToTfStats, GpuTfStats) {
static constexpr char kTfOp1[] = "TfOp1";
static constexpr char kTfOp2[] = "TfOp2";
static constexpr char kTfOp3[] = "Conv2D";
static constexpr char kKernel1[] = "kernel1";
static constexpr char kKernel2[] = "kernel2";
static constexpr char kKernel3[] = "kernel3";
static constexpr char kKernel4[] = "volta_fp16_s884gemm";
static constexpr char kKernel5[] = "kernel5";
constexpr int64_t kKernel1StartNs = 100000;
constexpr int64_t kKernel1DurationNs = 8000;
constexpr int64_t kKernel2StartNs = 110000;
constexpr int64_t kKernel2DurationNs = 10000;
constexpr int64_t kKernel3StartNs = 120000;
constexpr int64_t kKernel3DurationNs = 10000;
constexpr int64_t kKernel4StartNs = 130000;
constexpr int64_t kKernel4DurationNs = 10000;
constexpr int64_t kKernel5StartNs = 150000;
constexpr int64_t kKernel5DurationNs = 10000;
const std::string kKernelDetails = R"MULTI(regs:32
static_shared:0
dynamic_shared:16384
grid:2,1,1
block:32,1,1
occ_pct:100)MULTI";
XSpace space;
XPlaneBuilder device_plane(
GetOrCreateGpuXPlane(&space, 0));
XLineBuilder stream1 = device_plane.GetOrCreateLine(10);
AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kKernel1StartNs,
                       kKernel1DurationNs, /*on_device=*/true, kKernel1,
&device_plane, &stream1);
AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kKernel2StartNs,
                       kKernel2DurationNs, /*on_device=*/true, kKernel2,
&device_plane, &stream1);
XLineBuilder stream2 = device_plane.GetOrCreateLine(20);
AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kKernel1StartNs,
                       kKernel1DurationNs, /*on_device=*/true, kKernel1,
&device_plane, &stream2);
AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kKernel2StartNs,
                       kKernel2DurationNs, /*on_device=*/true, kKernel2,
&device_plane, &stream2);
AddTensorFlowOpEvent(absl::StrCat(kTfOp2, ":", kTfOp2), kKernel3StartNs,
                       kKernel3DurationNs, /*on_device=*/true, kKernel3,
&device_plane, &stream2);
AddTensorFlowOpEventWithKernelDetails(
absl::StrCat(kTfOp3, ":", kTfOp3), kKernel4StartNs, kKernel4DurationNs,
      /*on_device=*/true, kKernel4, kKernelDetails, &device_plane, &stream2);
AddTensorFlowOpEventWithKernelDetails(
absl::StrCat(kTfOp3, ":", kTfOp3), kKernel5StartNs, kKernel5DurationNs,
      /*on_device=*/true, kKernel5, kKernelDetails, &device_plane, &stream2);
OpStatsOptions options;
options.generate_kernel_stats_db = true;
options.generate_op_metrics_db = true;
const OpStats op_stats = ConvertXSpaceToOpStats(space, options);
const TfStatsDatabase tf_stats = ConvertOpStatsToTfStats(op_stats);
EXPECT_EQ(tf_stats.device_type(), op_stats.run_environment().device_type());
EXPECT_EQ(4, tf_stats.with_idle().tf_stats_record_size());
const TfStatsRecord& record_0 = tf_stats.with_idle().tf_stats_record(0);
EXPECT_EQ(kTfOp1, record_0.op_name());
EXPECT_EQ(kTfOp1, record_0.op_type());
EXPECT_EQ(2, record_0.occurrences());
EXPECT_EQ(tsl::profiler::NanoToMicro(kKernel1DurationNs) * 2 +
tsl::profiler::NanoToMicro(kKernel2DurationNs) * 2,
record_0.total_self_time_in_us());
const TfStatsRecord& record_1 = tf_stats.with_idle().tf_stats_record(1);
EXPECT_EQ(kTfOp3, record_1.op_name());
EXPECT_EQ(kTfOp3, record_1.op_type());
EXPECT_EQ(1, record_1.occurrences());
EXPECT_EQ(tsl::profiler::NanoToMicro(kKernel4DurationNs) +
tsl::profiler::NanoToMicro(kKernel5DurationNs),
record_1.total_self_time_in_us());
EXPECT_DOUBLE_EQ(0.5, record_1.gpu_tensorcore_utilization());
const TfStatsRecord& record_2 = tf_stats.with_idle().tf_stats_record(2);
EXPECT_EQ(kTfOp2, record_2.op_name());
EXPECT_EQ(kTfOp2, record_2.op_type());
EXPECT_EQ(1, record_2.occurrences());
EXPECT_EQ(tsl::profiler::NanoToMicro(kKernel3DurationNs),
record_2.total_self_time_in_us());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/op_stats_to_tf_stats.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/op_stats_to_tf_stats_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6e6903fa-08d5-4c96-8b91-e495d0ae1ec7 | cpp | tensorflow/tensorflow | op_stats_to_pod_viewer | tensorflow/core/profiler/convert/op_stats_to_pod_viewer.cc | tensorflow/core/profiler/convert/op_stats_to_pod_viewer_test.cc | #include "tensorflow/core/profiler/convert/op_stats_to_pod_viewer.h"
#include <utility>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/profiler/convert/op_stats_to_pod_stats.h"
#include "tensorflow/core/profiler/protobuf/pod_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/utils/diagnostics.h"
namespace tensorflow {
namespace profiler {
namespace {
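// Redistributes the flat list of pod stats records into one PodStatsMap per
// step, consuming the records in the order ConvertOpStatsToPodStats appears
// to have produced them (hence the running index i and the DCHECK).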
PodStatsSequence ConvertOpStatsToPodStatsSequence(const OpStats& op_stats,
PodStatsDatabase pod_stats) {
PodStatsSequence result_db;
int i = 0;
for (const auto& step_sequence : op_stats.step_db().step_sequence()) {
PodStatsMap* pod_stats_map = result_db.add_pod_stats_map();
pod_stats_map->set_step_num(step_sequence.step_num());
for (const auto& entry : step_sequence.step_info_per_core()) {
PodStatsRecord& record =
(*pod_stats_map->mutable_pod_stats_per_core())[entry.first];
DCHECK_LE(i, pod_stats.pod_stats_record_size());
record = std::move(*pod_stats.mutable_pod_stats_record(i++));
}
}
return result_db;
}
}
PodViewerDatabase ConvertOpStatsToPodViewer(const OpStats& op_stats) {
PodViewerDatabase database;
database.set_device_type(op_stats.run_environment().device_type());
PodStatsDatabase pod_stats = ConvertOpStatsToPodStats(op_stats);
database.mutable_step_breakdown_events()->Swap(
pod_stats.mutable_step_breakdown_events());
*database.mutable_pod_stats_sequence() =
ConvertOpStatsToPodStatsSequence(op_stats, std::move(pod_stats));
PopulateStepDiagnostics(op_stats, database.mutable_diagnostics());
return database;
}
}
} | #include "tensorflow/core/profiler/convert/op_stats_to_pod_viewer.h"
#include "google/protobuf/any.pb.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/diagnostics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/pod_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/utils/diagnostics.h"
#include "tensorflow/core/profiler/utils/event_span.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
const double kMaxError = 1e-6;
constexpr int kStepNum = 2;
constexpr int kCoreId = 1001;
constexpr int kStepTimePs = 1000;
constexpr int kHostComputePs = 50;
constexpr int kHostCompilePs = 50;
constexpr int kHostToHostPs = 50;
constexpr int kHostToDevicePs = 50;
constexpr int kHostPreparePs = 50;
constexpr int kDeviceCollectivePs = 350;
constexpr int kHostWaitInputPs = 50;
constexpr int kDeviceToDevicePs = 50;
constexpr int kDeviceToHostPs = 50;
constexpr int kDeviceCompute32Ps = 50;
constexpr int kDeviceCompute16Ps = 50;
constexpr int kDeviceWaitDevicePs = 50;
constexpr int kDeviceWaitHostPs = 50;
constexpr int kUnknownTimePs = 50;
static constexpr char kHostname[] = "host:123";
void CreateOpStats(OpStats* op_stats) {
PerCoreStepInfo* info = op_stats->mutable_step_db()->add_step_sequence();
info->set_step_num(kStepNum);
StepInfoResult& step_info = (*info->mutable_step_info_per_core())[kCoreId];
step_info.set_step_num(kStepNum);
step_info.set_duration_ps(kStepTimePs);
GenericStepBreakdown breakdown;
auto& type_ps = *breakdown.mutable_type_ps();
type_ps[HOST_COMPUTE] = kHostComputePs;
type_ps[HOST_COMPILE] = kHostCompilePs;
type_ps[HOST_TO_HOST] = kHostToHostPs;
type_ps[HOST_TO_DEVICE] = kHostToDevicePs;
type_ps[HOST_PREPARE] = kHostPreparePs;
type_ps[DEVICE_COLLECTIVES] = kDeviceCollectivePs;
type_ps[HOST_WAIT_INPUT] = kHostWaitInputPs;
type_ps[DEVICE_TO_DEVICE] = kDeviceToDevicePs;
type_ps[DEVICE_TO_HOST] = kDeviceToHostPs;
type_ps[DEVICE_COMPUTE_32] = kDeviceCompute32Ps;
type_ps[DEVICE_COMPUTE_16] = kDeviceCompute16Ps;
type_ps[DEVICE_WAIT_DEVICE] = kDeviceWaitDevicePs;
type_ps[DEVICE_WAIT_HOST] = kDeviceWaitHostPs;
type_ps[UNKNOWN_TIME] = kUnknownTimePs;
step_info.mutable_step_breakdown()->PackFrom(breakdown);
CoreDetails& details = (*op_stats->mutable_core_id_to_details())[kCoreId];
details.set_hostname(kHostname);
}
TEST(OpStatsToPodViewer, GpuPodViewer) {
OpStats op_stats;
CreateOpStats(&op_stats);
PodViewerDatabase pod_viewer_db = ConvertOpStatsToPodViewer(op_stats);
EXPECT_EQ(1, pod_viewer_db.pod_stats_sequence().pod_stats_map_size());
const PodStatsMap& pod_stats_map =
pod_viewer_db.pod_stats_sequence().pod_stats_map(0);
EXPECT_EQ(kStepNum, pod_stats_map.step_num());
const PodStatsRecord& record = pod_stats_map.pod_stats_per_core().at(kCoreId);
EXPECT_EQ(kStepNum, record.step_num());
EXPECT_EQ(kHostname, record.host_name());
EXPECT_NEAR(tsl::profiler::PicoToMicro(kStepTimePs),
record.total_duration_us(), kMaxError);
const auto& breakdown = record.step_breakdown_us();
EXPECT_NEAR(
tsl::profiler::PicoToMicro(kDeviceCompute32Ps + kDeviceCompute16Ps),
breakdown.at(kDeviceCompute), kMaxError);
EXPECT_NEAR(
tsl::profiler::PicoToMicro(kDeviceToDevicePs + kDeviceWaitDevicePs),
breakdown.at(kDeviceToDevice), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kDeviceCollectivePs),
breakdown.at(kDeviceCollectives), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostComputePs),
breakdown.at(kHostCompute), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostPreparePs),
breakdown.at(kHostPrepare), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostWaitInputPs + kHostToDevicePs +
kDeviceWaitHostPs),
breakdown.at(kInput), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kDeviceToHostPs),
breakdown.at(kOutput), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostCompilePs),
breakdown.at(kCompile), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kUnknownTimePs),
breakdown.at(kAllOthers), kMaxError);
EXPECT_EQ(GetGenericEventTypeStr(kDeviceCollectives), record.bottleneck());
}
TEST(OpStatsToPodViewer, Diagnostics) {
OpStats op_stats;
op_stats.mutable_step_db()->set_use_incomplete_step(true);
PodViewerDatabase pod_viewer_db = ConvertOpStatsToPodViewer(op_stats);
EXPECT_EQ(1, pod_viewer_db.diagnostics().warnings_size());
EXPECT_EQ(kErrorIncompleteStep, pod_viewer_db.diagnostics().warnings(0));
}
TEST(OpStatsToPodViewer, DeviceType) {
OpStats op_stats;
op_stats.mutable_run_environment()->set_device_type("GPU");
PodViewerDatabase pod_viewer_db = ConvertOpStatsToPodViewer(op_stats);
EXPECT_EQ("GPU", pod_viewer_db.device_type());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/op_stats_to_pod_viewer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/op_stats_to_pod_viewer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3fd2b1bc-61fe-4b2b-8e06-c2efe6bb07af | cpp | tensorflow/tensorflow | trace_viewer_visibility | tensorflow/core/profiler/convert/trace_viewer/trace_viewer_visibility.cc | tensorflow/core/profiler/convert/trace_viewer/trace_viewer_visibility_test.cc | #include "tensorflow/core/profiler/convert/trace_viewer/trace_viewer_visibility.h"
#include <cstdint>
#include "absl/log/check.h"
#include "xla/tsl/profiler/utils/timespan.h"
#include "tensorflow/core/profiler/protobuf/trace_events.pb.h"
namespace tensorflow {
namespace profiler {
TraceViewerVisibility::TraceViewerVisibility(
tsl::profiler::Timespan visible_span, uint64_t resolution_ps)
: visible_span_(visible_span), resolution_ps_(resolution_ps) {}
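// An event is visible if it overlaps the visible span and, when a nonzero
// resolution is set, also survives downsampling. An instant visible span
// disables filtering altogether.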
bool TraceViewerVisibility::Visible(const TraceEvent& event) {
if (visible_span_.Instant()) return true;
tsl::profiler::Timespan span(event.timestamp_ps(), event.duration_ps());
if (!visible_span_.Overlaps(span)) return false;
if (resolution_ps_ == 0) return true;
return VisibleAtResolution(event);
}
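// Downsampling policy: a complete event is kept if it spans at least
// resolution_ps_, or if it begins at least resolution_ps_ after the last
// visible event at the same nesting depth of its row. A flow event that
// would otherwise be dropped inherits the visibility decided when its flow
// was first seen.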
bool TraceViewerVisibility::VisibleAtResolution(const TraceEvent& event) {
DCHECK_NE(resolution_ps_, 0);
if (!event.has_resource_id()) {
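    // Counter events (no resource id) are currently always kept; the
    // downsampling branch below is compiled out (see the
    // DISABLED_CounterEventsDownsampling test).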
#if 1
return true;
#else
CounterRowId counter_row_id(event.device_id(), event.name());
auto iter = last_counter_timestamp_ps_.find(counter_row_id);
bool found = (iter != last_counter_timestamp_ps_.end());
bool visible =
!found || ((event.timestamp_ps() - iter->second) >= resolution_ps_);
if (visible) {
if (found) {
iter->second = event.timestamp_ps();
} else {
last_counter_timestamp_ps_.emplace(counter_row_id,
event.timestamp_ps());
}
}
return visible;
#endif
}
tsl::profiler::Timespan span(event.timestamp_ps(), event.duration_ps());
bool visible = (span.duration_ps() >= resolution_ps_);
auto& row = rows_[RowId(event.device_id(), event.resource_id())];
size_t depth = row.Depth(span.begin_ps());
if (!visible) {
auto last_end_timestamp_ps = row.LastEndTimestampPs(depth);
visible = !last_end_timestamp_ps ||
(span.begin_ps() - *last_end_timestamp_ps >= resolution_ps_);
}
if (event.has_flow_id()) {
auto result = flows_.try_emplace(event.flow_id(), visible);
if (!visible) {
if (result.second) {
auto last_flow_timestamp_ps = row.LastFlowTimestampPs();
result.first->second =
!last_flow_timestamp_ps ||
(span.end_ps() - *last_flow_timestamp_ps >= resolution_ps_);
}
visible = result.first->second;
}
if (event.flow_entry_type() == TraceEvent::FLOW_END) {
flows_.erase(result.first);
}
if (visible) {
row.SetLastFlowTimestampPs(span.end_ps());
}
}
if (visible) {
row.SetLastEndTimestampPs(depth, span.end_ps());
}
return visible;
}
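// Records the event as visible without testing it, updating the per-row and
// per-flow bookkeeping that VisibleAtResolution consults.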
void TraceViewerVisibility::SetVisibleAtResolution(const TraceEvent& event) {
DCHECK_NE(resolution_ps_, 0);
if (!event.has_resource_id()) {
CounterRowId counter_row_id(event.device_id(), event.name());
last_counter_timestamp_ps_.insert_or_assign(counter_row_id,
event.timestamp_ps());
} else {
tsl::profiler::Timespan span(event.timestamp_ps(), event.duration_ps());
auto& row = rows_[RowId(event.device_id(), event.resource_id())];
if (event.has_flow_id()) {
if (event.flow_entry_type() == TraceEvent::FLOW_END) {
flows_.erase(event.flow_id());
} else {
flows_.try_emplace(event.flow_id(), true);
}
row.SetLastFlowTimestampPs(span.end_ps());
}
size_t depth = row.Depth(span.begin_ps());
row.SetLastEndTimestampPs(depth, span.end_ps());
}
}
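// Returns the smallest nesting depth whose previous event has already ended
// by begin_timestamp_ps, i.e. the depth at which a new event can start.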
size_t TraceViewerVisibility::RowVisibility::Depth(
uint64_t begin_timestamp_ps) const {
size_t depth = 0;
for (; depth < last_end_timestamp_ps_.size(); ++depth) {
if (last_end_timestamp_ps_[depth] <= begin_timestamp_ps) break;
}
return depth;
}
}
} | #include "tensorflow/core/profiler/convert/trace_viewer/trace_viewer_visibility.h"
#include <cstdint>
#include "xla/tsl/profiler/utils/timespan.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/protobuf/trace_events.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
using tsl::profiler::Timespan;
constexpr uint32_t kDeviceId = 10;
constexpr uint32_t kResourceId = 1;
constexpr uint32_t kSrcResourceId = 2;
constexpr uint32_t kDstResourceId = 4;
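// Helpers that build the minimal complete, counter, and flow events used by
// the tests below.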
TraceEvent Complete(Timespan span, uint32_t resource_id = kResourceId) {
TraceEvent event;
event.set_device_id(kDeviceId);
event.set_resource_id(resource_id);
event.set_timestamp_ps(span.begin_ps());
event.set_duration_ps(span.duration_ps());
return event;
}
TraceEvent Counter(uint64_t time_ps) {
TraceEvent event;
event.set_device_id(kDeviceId);
event.set_timestamp_ps(time_ps);
return event;
}
TraceEvent Flow(Timespan span, uint64_t flow_id, uint32_t resource_id) {
TraceEvent event;
event.set_flow_id(flow_id);
event.set_device_id(kDeviceId);
event.set_resource_id(resource_id);
event.set_timestamp_ps(span.begin_ps());
event.set_duration_ps(span.duration_ps());
return event;
}
TEST(TraceViewerVisibilityTest, VisibilityNoDownsampling) {
TraceViewerVisibility v(Timespan(1000, 1000));
EXPECT_FALSE(v.Visible(Complete(Timespan(999))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1000))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1500))));
EXPECT_TRUE(v.Visible(Complete(Timespan(2000))));
EXPECT_FALSE(v.Visible(Complete(Timespan(2001))));
EXPECT_FALSE(v.Visible(Complete(Timespan(900, 99))));
EXPECT_TRUE(v.Visible(Complete(Timespan(900, 100))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1450, 100))));
EXPECT_TRUE(v.Visible(Complete(Timespan(2000, 50))));
EXPECT_FALSE(v.Visible(Complete(Timespan(2001, 50))));
}
TEST(TraceViewerVisibilityTest, DISABLED_CounterEventsDownsampling) {
TraceViewerVisibility v(Timespan(1000, 1000), 100);
EXPECT_FALSE(v.Visible(Counter(999)));
EXPECT_TRUE(v.Visible(Counter(1000)));
EXPECT_FALSE(v.Visible(Counter(1099)));
EXPECT_TRUE(v.Visible(Counter(1100)));
EXPECT_TRUE(v.Visible(Counter(2000)));
EXPECT_FALSE(v.Visible(Counter(2001)));
}
TEST(TraceViewerVisibilityTest, CompleteEventsDownsampling) {
TraceViewerVisibility v(Timespan(1000, 1000), 100);
EXPECT_TRUE(v.Visible(Complete(Timespan(950, 50))));
EXPECT_FALSE(v.Visible(Complete(Timespan(1050, 50))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1055, 200))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1355, 50))));
}
TEST(TraceViewerVisibilityTest, CompleteNestedEventsDownsampling) {
TraceViewerVisibility v(Timespan(1000, 1000), 100);
EXPECT_TRUE(v.Visible(Complete(Timespan(1000, 200))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1200, 190))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1250, 20))));
EXPECT_FALSE(v.Visible(Complete(Timespan(1270, 20))));
EXPECT_TRUE(v.Visible(Complete(Timespan(1290, 100))));
}
TEST(TraceViewerVisibilityTest, FlowEventsDownsampling) {
TraceViewerVisibility v(Timespan(1000, 1000), 100);
EXPECT_TRUE(v.Visible(Flow(Timespan(1000, 50), 1, kSrcResourceId)));
EXPECT_FALSE(v.Visible(Flow(Timespan(1050, 50), 2, kSrcResourceId)));
EXPECT_TRUE(v.Visible(Flow(Timespan(1100, 50), 3, kSrcResourceId)));
EXPECT_TRUE(v.Visible(Flow(Timespan(1100, 50), 1, kDstResourceId)));
EXPECT_FALSE(v.Visible(Flow(Timespan(1200, 52), 2, kDstResourceId)));
EXPECT_TRUE(v.Visible(Flow(Timespan(1252, 10), 3, kDstResourceId)));
EXPECT_TRUE(v.Visible(Complete(Timespan(1300, 50))));
EXPECT_FALSE(v.Visible(Complete(Timespan(1350, 50))));
EXPECT_FALSE(v.Visible(Complete(Timespan(1400, 50))));
EXPECT_TRUE(v.Visible(Flow(Timespan(1600, 50), 4, kResourceId)));
EXPECT_TRUE(v.Visible(Flow(Timespan(1700, 52), 5, kResourceId)));
EXPECT_FALSE(v.Visible(Flow(Timespan(1752, 10), 6, kResourceId)));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/trace_viewer/trace_viewer_visibility.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/trace_viewer/trace_viewer_visibility_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
30f62b59-33de-4ae1-862a-1be8642281f9 | cpp | tensorflow/tensorflow | tpu_embedding_errors | tensorflow/core/tpu/tpu_embedding_errors.cc | tensorflow/core/tpu/tpu_embedding_errors_test.cc | #include "tensorflow/core/tpu/tpu_embedding_errors.h"
#include <string>
#include "absl/strings/cord.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/tpu/tpu_embedding_configuration.pb.h"
namespace tensorflow::tpu {
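// Prepends kTpuEmbeddingErrorMessage to the error message and attaches an
// empty TPUEmbeddingError payload under kTpuEmbeddingErrorUrl; OK statuses
// pass through unchanged.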
Status AppendTpuEmbeddingErrorPayload(Status obj) {
if (obj.ok()) {
return absl::OkStatus();
} else {
const std::string error_message =
absl::StrCat(kTpuEmbeddingErrorMessage, ". ", obj.message());
Status status(obj.code(), error_message);
TPUEmbeddingError error_payload;
status.SetPayload(kTpuEmbeddingErrorUrl,
absl::Cord(error_payload.SerializeAsString()));
return status;
}
}
bool HasTpuEmbeddingErrorPayload(const Status& status) {
return status.GetPayload(kTpuEmbeddingErrorUrl).has_value();
}
bool HasTpuEmbeddingErrorMessage(const Status& status) {
return absl::StrContains(status.message(), kTpuEmbeddingErrorMessage);
}
} | #include "tensorflow/core/tpu/tpu_embedding_errors.h"
#include <string>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow::tpu {
namespace {
using absl::Status;
using absl::StatusOr;
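// Returns the value when code is kOk; otherwise returns a Status with the
// given code and message.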
StatusOr<std::string> GenerateTFStatusOr(absl::StatusCode code,
absl::string_view value = "") {
if (code == absl::StatusCode::kOk) {
return std::string(value);
} else {
return absl::Status(code, value);
}
}
TEST(TpuEmbeddingErrors, StatusOk) {
constexpr absl::string_view kValue = "success";
{
const Status status = AppendTpuEmbeddingErrorPayload(absl::OkStatus());
TF_EXPECT_OK(status);
EXPECT_FALSE(HasTpuEmbeddingErrorPayload(status));
EXPECT_FALSE(HasTpuEmbeddingErrorMessage(status));
}
{
TF_ASSERT_OK_AND_ASSIGN(const std::string value,
AppendTpuEmbeddingErrorPayload(GenerateTFStatusOr(
absl::StatusCode::kOk, kValue)));
EXPECT_EQ(value, kValue);
}
}
TEST(TpuEmbeddingErrors, StatusFailed) {
{
const Status status =
AppendTpuEmbeddingErrorPayload(errors::InvalidArgument(""));
EXPECT_EQ(status.code(), error::Code::INVALID_ARGUMENT);
EXPECT_TRUE(HasTpuEmbeddingErrorPayload(status));
EXPECT_TRUE(HasTpuEmbeddingErrorMessage(status));
}
{
StatusOr<std::string> status_or = AppendTpuEmbeddingErrorPayload(
GenerateTFStatusOr(absl::StatusCode::kResourceExhausted));
EXPECT_FALSE(status_or.ok());
const Status& status = status_or.status();
EXPECT_EQ(status.code(), error::Code::RESOURCE_EXHAUSTED);
EXPECT_TRUE(HasTpuEmbeddingErrorPayload(status));
EXPECT_TRUE(HasTpuEmbeddingErrorMessage(status));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tpu/tpu_embedding_errors.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tpu/tpu_embedding_errors_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
af90d2c7-6e27-4c92-88ac-00e9bdbb24f6 | cpp | tensorflow/tensorflow | tpu_embedding_configuration_proto_rewrite | tensorflow/core/tpu/tpu_embedding_configuration_proto_rewrite.cc | tensorflow/core/tpu/tpu_embedding_configuration_proto_rewrite_test.cc | #include "tensorflow/core/tpu/tpu_embedding_configuration_proto_rewrite.h"
#include <cstdint>
#include <functional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "tensorflow/core/lib/math/math_util.h"
#include "tensorflow/core/protobuf/tpu/tpu_embedding_configuration.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace tensorflow {
namespace {
absl::Status ValidateBatchSizeAndFeatureCounts(
const tpu::TPUEmbeddingConfiguration& config) {
if (config.batch_size_per_tensor_core() <= 0) {
return absl::InvalidArgumentError(absl::StrFormat(
"Invalid batch_size_per_tensor_core: %d found in the TPU embedding "
"configuration. Valid values are >0.",
config.batch_size_per_tensor_core()));
}
for (const auto& table_config : config.table_descriptor()) {
if (table_config.num_features() <= 0) {
return absl::InvalidArgumentError(absl::StrFormat(
"Invalid num_features: %d found for table: %s in the TPU embedding "
"configuration. Valid values are >0.",
table_config.num_features(), table_config.name()));
}
}
return absl::OkStatus();
}
absl::Status ValidateBatchSizeAndFeatureCountsAreEmpty(
const tpu::TPUEmbeddingConfiguration& config) {
if (config.batch_size_per_tensor_core() != 0) {
return absl::InvalidArgumentError(
"Invalid TPU embedding configuration. The batch_size_per_tensor_core "
"field must NOT be populated when the feature_descriptor fields are "
"filled in.");
}
for (const auto& table_config : config.table_descriptor()) {
if (table_config.num_features() != 0) {
return absl::InvalidArgumentError(absl::StrFormat(
"Invalid TPU embedding configuration. The "
"TableDescriptor.num_features field must NOT be populated when the "
"feature_descriptor fields are filled in, num_features is set to %d "
"for table %s.",
table_config.num_features(), table_config.name()));
}
}
return absl::OkStatus();
}
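// Checks that every feature_descriptor references a valid table and has a
// non-empty input shape with positive dimensions, and that every table is
// referenced by at least one feature.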
absl::Status ValidateFeatureDescriptors(
const tpu::TPUEmbeddingConfiguration& config) {
const int table_count = config.table_descriptor_size();
std::vector<bool> tables_present(table_count, false);
for (const auto& feature_config : config.feature_descriptor()) {
const int table_id = feature_config.table_id();
const auto& input_shape = feature_config.input_shape();
if (table_id < 0 || table_id >= table_count) {
return absl::InvalidArgumentError(absl::StrFormat(
"Invalid table_id: %d found in feature_descriptor: %s, all table_ids "
"must be in the range[0, %d)",
table_id, feature_config.ShortDebugString(), table_count));
}
if (input_shape.empty()) {
return absl::InvalidArgumentError(absl::StrFormat(
"The input_shape field cannot be empty in feature_descriptor: %s",
feature_config.ShortDebugString()));
}
for (const int dim_size : input_shape) {
if (dim_size <= 0) {
return absl::InvalidArgumentError(absl::StrFormat(
"The input_shape dimension sizes must all be >0 in "
"feature_descriptor: %s, found dimension size set to %d",
feature_config.ShortDebugString(), dim_size));
}
}
tables_present[table_id] = true;
}
for (int table_id = 0; table_id < table_count; ++table_id) {
if (!tables_present[table_id]) {
return absl::InvalidArgumentError(absl::StrFormat(
"No feature_descriptor fields found for table: %s (ID: %d) in "
"the TPU embedding configuration.",
config.table_descriptor(table_id).name(), table_id));
}
}
return absl::OkStatus();
}
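// Legacy-to-new rewrite: synthesizes one feature per table whose flat input
// shape is batch_size_per_tensor_core * num_features.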
void PopulateFeatureDescriptors(tpu::TPUEmbeddingConfiguration* config) {
for (int table_id = 0; table_id < config->table_descriptor_size();
++table_id) {
tpu::TPUEmbeddingConfiguration::FeatureDescriptor* feature_descriptor =
config->add_feature_descriptor();
feature_descriptor->set_table_id(table_id);
feature_descriptor->add_input_shape(
config->batch_size_per_tensor_core() *
config->table_descriptor(table_id).num_features());
}
}
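// The input batch size of a feature is the product of its input shape
// dimensions.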
std::vector<int> ComputeInputFeatureBatchSizes(
const tpu::TPUEmbeddingConfiguration& config) {
  std::vector<int> input_feature_batch_sizes;
for (int i = 0; i < config.feature_descriptor_size(); ++i) {
const int32_t batch_size =
absl::c_accumulate(config.feature_descriptor(i).input_shape(),
1, std::multiplies<>());
input_feature_batch_sizes.push_back(batch_size);
}
return input_feature_batch_sizes;
}
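// The per-TensorCore batch size is the GCD of all input feature batch
// sizes, so that it evenly divides every feature's batch.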
int ComputeBatchSizePerTensorCore(
absl::Span<const int> input_feature_batch_sizes) {
uint32_t batch_size = input_feature_batch_sizes[0];
for (const uint32_t input_feature_batch_size : input_feature_batch_sizes) {
batch_size =
tensorflow::MathUtil::GCD(batch_size, input_feature_batch_size);
}
return batch_size;
}
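// Each feature contributes input_batch / batch_size_per_tensor_core TPU
// features to the table it references.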
std::vector<int> ComputeTpuFeatureCounts(
const tpu::TPUEmbeddingConfiguration& config,
absl::Span<const int> input_feature_batch_sizes,
int batch_size_per_tensor_core) {
DCHECK_EQ(input_feature_batch_sizes.size(), config.feature_descriptor_size());
std::vector<int> tpu_feature_counts(config.table_descriptor_size(), 0);
for (int i = 0; i < config.feature_descriptor_size(); ++i) {
DCHECK_EQ(input_feature_batch_sizes[i] % batch_size_per_tensor_core, 0);
tpu_feature_counts[config.feature_descriptor(i).table_id()] +=
(input_feature_batch_sizes[i] / batch_size_per_tensor_core);
}
return tpu_feature_counts;
}
void PopulateBatchSizeAndFeatureCounts(tpu::TPUEmbeddingConfiguration* config) {
const std::vector<int> input_feature_batch_sizes =
ComputeInputFeatureBatchSizes(*config);
const int batch_size_per_tensor_core =
ComputeBatchSizePerTensorCore(input_feature_batch_sizes);
const std::vector<int> tpu_feature_counts = ComputeTpuFeatureCounts(
*config, input_feature_batch_sizes, batch_size_per_tensor_core);
config->set_batch_size_per_tensor_core(batch_size_per_tensor_core);
for (int table_id = 0; table_id < config->table_descriptor_size();
++table_id) {
auto* table_config = config->mutable_table_descriptor(table_id);
table_config->set_num_features(tpu_feature_counts[table_id]);
}
}
}
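// Rewrites the config in whichever direction is needed: synthesizes
// feature_descriptor fields from the legacy batch size and feature counts,
// or derives those legacy fields from the feature_descriptor fields.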
absl::Status PopulateMissingFieldsInTPUEmbeddingConfig(
tpu::TPUEmbeddingConfiguration* config) {
if (config->feature_descriptor_size() == 0) {
TF_RETURN_IF_ERROR(ValidateBatchSizeAndFeatureCounts(*config));
PopulateFeatureDescriptors(config);
} else {
TF_RETURN_IF_ERROR(ValidateBatchSizeAndFeatureCountsAreEmpty(*config));
TF_RETURN_IF_ERROR(ValidateFeatureDescriptors(*config));
PopulateBatchSizeAndFeatureCounts(config);
}
return absl::OkStatus();
}
} | #include "tensorflow/core/tpu/tpu_embedding_configuration_proto_rewrite.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/protobuf/tpu/tpu_embedding_configuration.pb.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace {
Status ParseTextProto(absl::string_view text_proto,
tpu::TPUEmbeddingConfiguration* parsed_proto) {
tsl::protobuf::TextFormat::Parser parser;
tsl::protobuf::io::ArrayInputStream input_stream(text_proto.data(),
text_proto.size());
if (parser.Parse(&input_stream, parsed_proto)) {
return absl::OkStatus();
}
parsed_proto->Clear();
return errors::InvalidArgument("Could not parse text proto: ", text_proto);
}
TEST(TPUEmbeddingConfigurationProtoRewriteTest, FillFeatureDescriptor) {
const std::string config_str = R"pb(
table_descriptor {
name: "T0"
vocabulary_size: 35324928
dimension: 128
num_features: 3
optimization_parameters {
adagrad {}
learning_rate { constant: 0.1 }
}
}
table_descriptor {
name: "T1"
vocabulary_size: 3122176
dimension: 128
num_features: 2
optimization_parameters {
adagrad {}
learning_rate { constant: 0.1 }
}
}
mode: TRAINING
batch_size_per_tensor_core: 256
num_hosts: 16
num_tensor_cores: 128
pipeline_execution_with_tensor_core: true
)pb";
tpu::TPUEmbeddingConfiguration tpu_embedding_config;
TF_ASSERT_OK(ParseTextProto(config_str, &tpu_embedding_config));
TF_ASSERT_OK(
PopulateMissingFieldsInTPUEmbeddingConfig(&tpu_embedding_config));
EXPECT_EQ(tpu_embedding_config.feature_descriptor_size(), 2);
const auto& feature_0 = tpu_embedding_config.feature_descriptor(0);
EXPECT_EQ(feature_0.table_id(), 0);
EXPECT_THAT(feature_0.input_shape(), ::testing::ElementsAre(256 * 3));
const auto& feature_1 = tpu_embedding_config.feature_descriptor(1);
EXPECT_EQ(feature_1.table_id(), 1);
EXPECT_THAT(feature_1.input_shape(), ::testing::ElementsAre(256 * 2));
}
TEST(TPUEmbeddingConfigurationProtoRewriteTest, FillBatchSizeAndNumFeatures) {
const std::string config_str = R"pb(
table_descriptor {
name: "T0"
vocabulary_size: 35324928
dimension: 128
optimization_parameters {
adagrad {}
learning_rate { constant: 0.1 }
}
}
table_descriptor {
name: "T1"
vocabulary_size: 3122176
dimension: 128
optimization_parameters {
adagrad {}
learning_rate { constant: 0.1 }
}
}
feature_descriptor {
name: "F0"
table_id: 0
input_shape: [ 100, 5 ]
}
feature_descriptor {
name: "F1"
table_id: 1
input_shape: [ 200, 5, 20 ]
}
feature_descriptor {
name: "F2"
table_id: 0
input_shape: [ 50 ]
}
feature_descriptor {
name: "F3"
table_id: 0
input_shape: [ 100, 2, 3 ]
}
mode: TRAINING
num_hosts: 16
num_tensor_cores: 128
pipeline_execution_with_tensor_core: true
)pb";
tpu::TPUEmbeddingConfiguration tpu_embedding_config;
TF_ASSERT_OK(ParseTextProto(config_str, &tpu_embedding_config));
TF_ASSERT_OK(
PopulateMissingFieldsInTPUEmbeddingConfig(&tpu_embedding_config));
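  // batch_size_per_tensor_core = GCD(100 * 5, 200 * 5 * 20, 50, 100 * 2 * 3)
  //                            = GCD(500, 20000, 50, 600) = 50.
  // Table T0: 500/50 + 50/50 + 600/50 = 10 + 1 + 12 = 23 features;
  // table T1: 20000/50 = 400 features.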
EXPECT_EQ(tpu_embedding_config.batch_size_per_tensor_core(), 50);
const auto& table_0 = tpu_embedding_config.table_descriptor(0);
EXPECT_EQ(table_0.num_features(), 23);
const auto& table_1 = tpu_embedding_config.table_descriptor(1);
EXPECT_EQ(table_1.num_features(), 400);
}
TEST(TPUEmbeddingConfigurationProtoRewriteTest, InvalidBatchSizeOrNumFeatures) {
const std::string config_str = R"pb(
table_descriptor {
name: "T0"
vocabulary_size: 35324928
dimension: 128
num_features: 3
optimization_parameters {
adagrad {}
learning_rate { constant: 0.1 }
}
}
feature_descriptor {
table_id: 0
input_shape: [ 768 ]
}
mode: TRAINING
batch_size_per_tensor_core: 256
num_hosts: 16
num_tensor_cores: 128
pipeline_execution_with_tensor_core: true
)pb";
tpu::TPUEmbeddingConfiguration tpu_embedding_config;
TF_ASSERT_OK(ParseTextProto(config_str, &tpu_embedding_config));
{
tpu::TPUEmbeddingConfiguration invalid_config = tpu_embedding_config;
invalid_config.clear_feature_descriptor();
invalid_config.clear_batch_size_per_tensor_core();
EXPECT_THAT(
PopulateMissingFieldsInTPUEmbeddingConfig(&invalid_config),
tensorflow::testing::StatusIs(
absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("Invalid batch_size_per_tensor_core")));
}
{
tpu::TPUEmbeddingConfiguration invalid_config = tpu_embedding_config;
invalid_config.clear_feature_descriptor();
invalid_config.mutable_table_descriptor(0)->clear_num_features();
EXPECT_THAT(PopulateMissingFieldsInTPUEmbeddingConfig(&invalid_config),
tensorflow::testing::StatusIs(
absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("Invalid num_features")));
}
{
tpu::TPUEmbeddingConfiguration invalid_config = tpu_embedding_config;
EXPECT_THAT(
PopulateMissingFieldsInTPUEmbeddingConfig(&invalid_config),
tensorflow::testing::StatusIs(
absl::StatusCode::kInvalidArgument,
::testing::HasSubstr(
"The batch_size_per_tensor_core field must NOT be populated")));
}
{
tpu::TPUEmbeddingConfiguration invalid_config = tpu_embedding_config;
invalid_config.clear_batch_size_per_tensor_core();
EXPECT_THAT(PopulateMissingFieldsInTPUEmbeddingConfig(&invalid_config),
tensorflow::testing::StatusIs(
absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("The TableDescriptor.num_features "
"field must NOT be populated")));
}
}
TEST(TPUEmbeddingConfigurationProtoRewriteTest, InvalidFeatureDescriptor) {
const std::string config_str = R"pb(
table_descriptor {
name: "T0"
vocabulary_size: 35324928
dimension: 128
optimization_parameters {
adagrad {}
learning_rate { constant: 0.1 }
}
}
table_descriptor {
name: "T1"
vocabulary_size: 3122176
dimension: 128
optimization_parameters {
adagrad {}
learning_rate { constant: 0.1 }
}
}
feature_descriptor {
name: "F1"
table_id: 0
input_shape: [ 768 ]
}
feature_descriptor {
name: "F2"
table_id: 1
input_shape: [ 512 ]
}
mode: TRAINING
num_hosts: 16
num_tensor_cores: 128
pipeline_execution_with_tensor_core: true
)pb";
tpu::TPUEmbeddingConfiguration tpu_embedding_config;
TF_ASSERT_OK(ParseTextProto(config_str, &tpu_embedding_config));
{
tpu::TPUEmbeddingConfiguration invalid_config = tpu_embedding_config;
invalid_config.mutable_feature_descriptor(0)->set_table_id(2);
EXPECT_THAT(PopulateMissingFieldsInTPUEmbeddingConfig(&invalid_config),
tensorflow::testing::StatusIs(
absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("Invalid table_id")));
}
{
tpu::TPUEmbeddingConfiguration invalid_config = tpu_embedding_config;
invalid_config.mutable_feature_descriptor(0)->clear_input_shape();
EXPECT_THAT(
PopulateMissingFieldsInTPUEmbeddingConfig(&invalid_config),
tensorflow::testing::StatusIs(
absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("The input_shape field cannot be empty")));
}
{
tpu::TPUEmbeddingConfiguration invalid_config = tpu_embedding_config;
invalid_config.mutable_feature_descriptor(0)->set_input_shape(0, -5);
EXPECT_THAT(
PopulateMissingFieldsInTPUEmbeddingConfig(&invalid_config),
tensorflow::testing::StatusIs(
absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("The input_shape dimension sizes must all")));
}
{
tpu::TPUEmbeddingConfiguration invalid_config = tpu_embedding_config;
invalid_config.mutable_feature_descriptor(1)->set_table_id(0);
EXPECT_THAT(PopulateMissingFieldsInTPUEmbeddingConfig(&invalid_config),
tensorflow::testing::StatusIs(
absl::StatusCode::kInvalidArgument,
::testing::HasSubstr(
"No feature_descriptor fields found for table: T1")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tpu/tpu_embedding_configuration_proto_rewrite.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tpu/tpu_embedding_configuration_proto_rewrite_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e436db0a-f077-4f69-8a2a-f6758b0a0ae3 | cpp | tensorflow/tensorflow | sharding_util_ops | tensorflow/compiler/tf2xla/kernels/sharding_util_ops.cc | tensorflow/core/tpu/kernels/sharding_util_ops_test.cc | #include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace {
constexpr absl::string_view kNumSplitsAttrName = "num_splits";
constexpr absl::string_view kNumConcatsAttrName = "num_concats";
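// Reads and validates 'num_splits' (split) or 'num_concats' (concat) along
// with 'N' and 'paddings': every partition count must be positive, 'N' must
// equal their product, and 'paddings' defaults to all zeros when empty.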
template <bool Split>
Status GetAndValidateAttributes(OpKernelConstruction* ctx,
std::vector<int64_t>& num_partitions,
int& num_slices, std::vector<int64_t>& paddings,
bool& has_paddings) {
absl::string_view num_partitions_attr_name =
Split ? kNumSplitsAttrName : kNumConcatsAttrName;
TF_RETURN_IF_ERROR(ctx->GetAttr(num_partitions_attr_name, &num_partitions));
int num_dims_to_split = 0;
for (int i = 0, e = num_partitions.size(); i < e; ++i) {
const auto& split = num_partitions[i];
if (split <= 0) {
return errors::InvalidArgument("'", num_partitions_attr_name,
"' at index ", i,
" must be positive, but got ", split, ".");
}
if (split > 1) {
++num_dims_to_split;
}
num_slices *= split;
}
int n;
TF_RETURN_IF_ERROR(ctx->GetAttr("N", &n));
if (n != num_slices) {
return errors::InvalidArgument(
"'N' must match number of slices ", num_slices, " from '",
num_partitions_attr_name, "', but got ", n, ".");
}
TF_RETURN_IF_ERROR(ctx->GetAttr("paddings", &paddings));
const int expected_rank = num_partitions.size();
if (!paddings.empty()) {
if (paddings.size() != expected_rank) {
return errors::InvalidArgument(
"'paddings' length must match '", num_partitions_attr_name,
"' length ", expected_rank, ", but got ", paddings.size(), ".");
}
for (int dim = 0; dim < expected_rank; ++dim) {
if (paddings[dim] < 0) {
return errors::InvalidArgument(
"'padding' must be all non-negative, but got ", paddings[dim],
" at index ", dim, ".");
}
if (paddings[dim] > 0) {
has_paddings = true;
}
}
} else {
paddings.assign(expected_rank, 0);
}
return absl::OkStatus();
}
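// Maps a flat slice index to per-dimension start offsets, treating the index
// as a row-major coordinate over num_partitions. For example, with
// num_partitions = {2, 2} and slice_shape = {3, 4}, index 3 maps to {3, 4}.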
std::vector<int64_t> GetSliceIndices(absl::Span<const int64_t> num_partitions,
                                     absl::Span<const int64_t> slice_shape,
                                     const int index) {
DCHECK_EQ(num_partitions.size(), slice_shape.size());
std::vector<int64_t> slice_indices(num_partitions.size());
if (num_partitions.empty()) {
return slice_indices;
}
auto divisor = [&](const int dim) {
int divisor = 1;
for (int i = num_partitions.size() - 1; i > dim; --i) {
divisor *= num_partitions[i];
}
return divisor;
};
for (int dim = num_partitions.size() - 1; dim > 0; --dim) {
slice_indices[dim] =
((index / divisor(dim)) % num_partitions[dim]) * slice_shape[dim];
}
slice_indices[0] = (index / divisor(0)) * slice_shape[0];
return slice_indices;
}
constexpr absl::string_view kTensorName = "'input' tensor";
constexpr absl::string_view kResourceName = "'resource' variable tensor";
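// Shared compilation logic for XlaSplitND and ReadVariableXlaSplitND:
// validates the input shape against 'num_splits' and 'paddings', then emits
// one slice (zero-padded or broadcast-zero where needed) per output.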
template <bool Resource>
class XlaSplitNDBaseOp : public XlaOpKernel {
public:
explicit XlaSplitNDBaseOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
OP_REQUIRES_OK(ctx,
GetAndValidateAttributes<true>(ctx, num_splits_, num_slices_,
paddings_, has_paddings_));
}
protected:
Status CompileInternal(XlaOpKernelContext* ctx, const xla::XlaOp input,
const TensorShape& input_shape,
const DataType input_dtype) {
xla::PrimitiveType type;
TF_RETURN_IF_ERROR(DataTypeToPrimitiveType(input_dtype, &type));
absl::string_view input_name = Resource ? kResourceName : kTensorName;
const int rank = input_shape.dims();
if (rank != num_splits_.size()) {
return errors::InvalidArgument(
input_name, " rank must be the same as 'num_splits' length ",
num_splits_.size(), ", but got rank ", rank, ".");
}
for (int dim = 0; dim < rank; ++dim) {
if ((input_shape.dim_size(dim) + paddings_[dim]) % num_splits_[dim] !=
0) {
return errors::InvalidArgument(
input_name, " shape dimension ", dim, " (",
input_shape.dim_size(dim), ") with padding ", paddings_[dim],
" must be evenly divisible by 'num_splits' ", num_splits_[dim],
".");
}
}
if (num_slices_ == 1 && has_paddings_) {
xla::PaddingConfig padding_config;
for (int dim = 0; dim < rank; ++dim) {
auto* padding_dim = padding_config.add_dimensions();
padding_dim->set_edge_padding_low(0);
padding_dim->set_edge_padding_high(paddings_[dim]);
padding_dim->set_interior_padding(0);
}
ctx->SetOutput(
0,
xla::Pad(input,
xla::ConstantR0WithType(ctx->builder(), type, 0),
padding_config));
return absl::OkStatus();
} else if (num_slices_ == 1) {
ctx->SetOutput(0, input);
return absl::OkStatus();
}
std::vector<int64_t> slice_shape(rank);
for (int dim = 0; dim < rank; ++dim) {
slice_shape[dim] =
(input_shape.dim_size(dim) + paddings_[dim]) / num_splits_[dim];
}
const std::vector<int64_t> slice_strides(rank, 1);
for (int i = 0; i < num_slices_; ++i) {
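      // Classify each dimension of slice i: entirely past the input
      // (complete pad), straddling the input boundary (partial pad), or
      // fully inside the input.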
int num_complete_pad_dims = 0;
int num_partial_pad_dims = 0;
std::vector<int64_t> slice_start_indices =
GetSliceIndices(num_splits_, slice_shape, i);
std::vector<int64_t> slice_limit_indices(slice_shape.size());
xla::PaddingConfig slice_padding_config;
for (int dim = 0; dim < rank; ++dim) {
auto* padding_dim = slice_padding_config.add_dimensions();
padding_dim->set_edge_padding_low(0);
padding_dim->set_edge_padding_high(0);
padding_dim->set_interior_padding(0);
}
for (int dim = 0; dim < rank; ++dim) {
        const int64_t dim_size = input_shape.dim_size(dim);
if (slice_start_indices[dim] >= dim_size) {
slice_start_indices[dim] = dim_size;
slice_limit_indices[dim] = dim_size;
slice_padding_config.mutable_dimensions(dim)->set_edge_padding_high(
slice_shape[dim]);
++num_complete_pad_dims;
} else if (slice_start_indices[dim] + slice_shape[dim] > dim_size) {
slice_limit_indices[dim] = dim_size;
slice_padding_config.mutable_dimensions(dim)->set_edge_padding_high(
slice_start_indices[dim] + slice_shape[dim] - dim_size);
++num_partial_pad_dims;
} else {
slice_limit_indices[dim] =
slice_start_indices[dim] + slice_shape[dim];
}
}
if (num_complete_pad_dims == rank) {
ctx->SetOutput(i, xla::Broadcast(xla::ConstantR0WithType(
ctx->builder(), type, 0),
slice_shape));
} else if (num_complete_pad_dims > 0 || num_partial_pad_dims > 0) {
ctx->SetOutput(
i,
xla::Pad(xla::Slice(input, slice_start_indices, slice_limit_indices,
slice_strides),
xla::ConstantR0WithType(ctx->builder(), type, 0),
slice_padding_config));
} else {
ctx->SetOutput(i, xla::Slice(input, slice_start_indices,
slice_limit_indices, slice_strides));
}
}
return absl::OkStatus();
}
private:
std::vector<int64_t> num_splits_;
int num_slices_ = 1;
std::vector<int64_t> paddings_;
bool has_paddings_ = false;
};
class XlaSplitNDOp : public XlaSplitNDBaseOp<false> {
public:
explicit XlaSplitNDOp(OpKernelConstruction* ctx)
: XlaSplitNDBaseOp<false>(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
OP_REQUIRES_OK(ctx,
this->CompileInternal(ctx, ctx->Input(0), ctx->InputShape(0),
ctx->input_type(0)));
}
};
REGISTER_XLA_OP(Name("XlaSplitND"), XlaSplitNDOp);
class ReadVariableXlaSplitNDOp : public XlaSplitNDBaseOp<true> {
public:
explicit ReadVariableXlaSplitNDOp(OpKernelConstruction* ctx)
: XlaSplitNDBaseOp<true>(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_));
}
void Compile(XlaOpKernelContext* ctx) override {
DataType variable_input_dtype;
TensorShape variable_input_shape;
OP_REQUIRES_OK(
ctx, ctx->GetVariableTypeAndShape(0, &variable_input_dtype,
&variable_input_shape));
OP_REQUIRES(
ctx, variable_input_dtype == dtype_,
errors::InvalidArgument("'T' must match 'resource' variable dtype ",
DataTypeString(variable_input_dtype),
", but got ", dtype_));
xla::XlaOp handle;
OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, dtype_,
nullptr, &handle));
OP_REQUIRES_OK(
ctx, this->CompileInternal(ctx, handle, variable_input_shape, dtype_));
}
private:
DataType dtype_;
};
REGISTER_XLA_OP(Name("ReadVariableXlaSplitND"), ReadVariableXlaSplitNDOp);
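// Shared compilation logic for XlaConcatND and AssignVariableXlaConcatND:
// stitches N equally shaped slices into one output via DynamicUpdateSlice,
// dropping 'paddings' from the high end of each dimension.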
class XlaConcatNDBaseOp : public XlaOpKernel {
public:
explicit XlaConcatNDBaseOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
OP_REQUIRES_OK(
ctx, GetAndValidateAttributes<false>(ctx, num_concats_, num_slices_,
paddings_, has_paddings_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_));
}
protected:
absl::StatusOr<xla::XlaOp> CompileInternal(XlaOpKernelContext* ctx) {
xla::PrimitiveType type;
TF_RETURN_IF_ERROR(DataTypeToPrimitiveType(dtype_, &type));
std::vector<xla::XlaOp> input_handles;
std::vector<TensorShape> input_shapes;
std::vector<int64_t> output_shape;
TF_RETURN_IF_ERROR(GetInputsAndOutputShape(ctx, input_handles, input_shapes,
output_shape));
const int rank = output_shape.size();
if (num_slices_ == 1 && has_paddings_) {
return xla::Slice(input_handles[0],
std::vector<int64_t>(rank, 0),
output_shape,
std::vector<int64_t>(rank, 1));
} else if (num_slices_ == 1) {
return input_handles[0];
}
auto slice_shape = input_shapes[0].dim_sizes();
xla::XlaOp output = xla::Broadcast(
xla::ConstantR0WithType(ctx->builder(), type, 0),
output_shape);
const std::vector<int64_t> input_slice_start_indices(rank, 0);
const std::vector<int64_t> slice_strides(rank, 1);
for (int i = 0; i < num_slices_; ++i) {
std::vector<int64_t> slice_start_indices =
GetSliceIndices(num_concats_, slice_shape, i);
int num_complete_pad_dims = 0;
int num_partial_pad_dims = 0;
std::vector<int64_t> slice_limit_indices(rank);
for (int dim = 0; dim < rank; ++dim) {
const int64_t dim_size = output_shape[dim];
if (slice_start_indices[dim] >= dim_size) {
slice_start_indices[dim] = dim_size;
slice_limit_indices[dim] = dim_size;
++num_complete_pad_dims;
} else if (slice_start_indices[dim] + slice_shape[dim] > dim_size) {
slice_limit_indices[dim] = dim_size;
++num_partial_pad_dims;
} else {
slice_limit_indices[dim] =
slice_start_indices[dim] + slice_shape[dim];
}
}
if (num_complete_pad_dims == rank) {
continue;
}
xla::XlaOp input_slice = input_handles[i];
if (num_complete_pad_dims > 0 || num_partial_pad_dims > 0) {
std::vector<int64_t> input_slice_limit_indices(rank);
for (int dim = 0; dim < rank; ++dim) {
input_slice_limit_indices[dim] =
slice_limit_indices[dim] - slice_start_indices[dim];
}
input_slice = xla::Slice(input_slice, input_slice_start_indices,
input_slice_limit_indices, slice_strides);
}
std::vector<xla::XlaOp> update_slice_start_indices;
update_slice_start_indices.reserve(rank);
      for (int64_t start_index : slice_start_indices) {
update_slice_start_indices.push_back(
xla::ConstantR0<int32>(ctx->builder(), start_index));
}
output = xla::DynamicUpdateSlice(output, input_slice,
update_slice_start_indices);
}
return output;
}
DataType dtype_;
private:
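  // Collects the input slices, checks that they all share one shape of the
  // expected rank, and derives the output shape as
  // slice_dim * num_concats - padding per dimension.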
Status GetInputsAndOutputShape(XlaOpKernelContext* ctx,
std::vector<xla::XlaOp>& input_handles,
std::vector<TensorShape>& input_shapes,
std::vector<int64_t>& output_shape) {
TF_RETURN_IF_ERROR(ctx->InputList("inputs", &input_handles, &input_shapes));
const TensorShape& slice_shape = input_shapes[0];
if (slice_shape.dims() != num_concats_.size()) {
return errors::InvalidArgument(
"'inputs' rank must be the same as 'num_concats' length ",
num_concats_.size(), ", but got rank ", slice_shape.dims(), ".");
}
for (int i = 1; i < num_slices_; ++i) {
const TensorShape& slice_shape_i = input_shapes[i];
if (slice_shape != slice_shape_i) {
return errors::InvalidArgument(
"'inputs' must all have the same expected shape ", slice_shape,
", but got ", slice_shape_i, " at index ", i, ".");
}
}
const int rank = input_shapes[0].dims();
for (int dim = 0; dim < rank; ++dim) {
const int max_dim_size = slice_shape.dim_size(dim) * num_concats_[dim];
if (paddings_[dim] > max_dim_size) {
return errors::InvalidArgument(
"'paddings' must not exceed expected output shape dimension ",
max_dim_size, " at index ", dim, ", but got ", paddings_[dim], ".");
}
output_shape.push_back(max_dim_size - paddings_[dim]);
}
return absl::OkStatus();
}
std::vector<int64_t> num_concats_;
int num_slices_ = 1;
std::vector<int64_t> paddings_;
bool has_paddings_ = false;
};
class XlaConcatNDOp : public XlaConcatNDBaseOp {
public:
explicit XlaConcatNDOp(OpKernelConstruction* ctx) : XlaConcatNDBaseOp(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
auto output_or = this->CompileInternal(ctx);
OP_REQUIRES_OK(ctx, output_or.status());
ctx->SetOutput(0, output_or.value());
}
};
REGISTER_XLA_OP(Name("XlaConcatND"), XlaConcatNDOp);
class AssignVariableXlaConcatNDOp : public XlaConcatNDBaseOp {
public:
explicit AssignVariableXlaConcatNDOp(OpKernelConstruction* ctx)
: XlaConcatNDBaseOp(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
auto output_or = this->CompileInternal(ctx);
OP_REQUIRES_OK(ctx, output_or.status());
OP_REQUIRES_OK(ctx,
ctx->AssignVariable("resource", dtype_, output_or.value()));
}
};
REGISTER_XLA_OP(Name("AssignVariableXlaConcatND"), AssignVariableXlaConcatNDOp);
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <numeric>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
#include "tsl/platform/errors.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
MATCHER_P2(IsStatus, error_code, error_message, "") {
return arg.code() == error_code &&
absl::StrContains(arg.message(), error_message);
}
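// Runs the graph in a fresh session and returns the requested fetches.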
Status RunGraph(const Graph& graph,
const std::vector<std::string>& output_tensor_names,
const std::vector<std::string>& target_tensor_names,
std::vector<Tensor>* output_tensors) {
GraphDef graph_def;
graph.ToGraphDef(&graph_def);
SessionOptions session_options;
std::unique_ptr<Session> session(NewSession(session_options));
TF_RETURN_IF_ERROR(session->Create(graph_def));
RunOptions run_options;
return session->Run(run_options, {}, output_tensor_names,
target_tensor_names, output_tensors,
nullptr);
}
TEST(ReadVariableXlaSplitNDOpTest, VariableMissing) {
Graph graph(OpRegistry::Global());
Node* var_handle = nullptr;
DataType data_type = DataTypeToEnum<int32_t>::value;
const TensorShape input_shape({4, 4});
TF_ASSERT_OK(NodeBuilder(graph.NewName("var_handle"), "VarHandleOp")
.Attr("dtype", data_type)
.Attr("shape", input_shape)
.Finalize(&graph, &var_handle));
Node* xla_op = nullptr;
const std::vector<int32_t> num_splits = {2, 2};
const int num_outputs = 4;
TF_ASSERT_OK(NodeBuilder(graph.NewName("xla_op"), "ReadVariableXlaSplitND")
.Input(var_handle)
.Attr("num_splits", num_splits)
.Attr("T", data_type)
.Attr("N", num_outputs)
.Finalize(&graph, &xla_op));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, {xla_op->name()},
{}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT, "cannot be found"));
}
TEST(ReadVariableXlaSplitNDOpTest, DTypeInvalid) {
Graph graph(OpRegistry::Global());
Node* var_handle = nullptr;
DataType data_type = DataTypeToEnum<int32_t>::value;
const TensorShape input_shape({4, 4});
TF_ASSERT_OK(NodeBuilder(graph.NewName("var_handle"), "VarHandleOp")
.Attr("dtype", data_type)
.Attr("shape", input_shape)
.Finalize(&graph, &var_handle));
Tensor input_tensor(data_type, input_shape);
test::FillIota<int32_t>(&input_tensor, 0);
Node* input = test::graph::Constant(&graph, input_tensor);
Node* assign_var = nullptr;
TF_ASSERT_OK(NodeBuilder(graph.NewName("assign_var"), "AssignVariableOp")
.Input(var_handle)
.Input(input)
.Attr("dtype", data_type)
.Finalize(&graph, &assign_var));
Node* xla_op = nullptr;
const std::vector<int32_t> num_splits = {2, 2};
const int num_outputs = 4;
TF_ASSERT_OK(NodeBuilder(graph.NewName("xla_op"), "ReadVariableXlaSplitND")
.Input(var_handle)
.ControlInput(assign_var)
.Attr("num_splits", num_splits)
.Attr("T", DataTypeToEnum<float>::value)
.Attr("N", num_outputs)
.Finalize(&graph, &xla_op));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, {xla_op->name()},
{}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT, "'T' must match 'resource'"));
}
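// Graph builders for the two split variants: one feeds an iota-filled int32
// tensor directly into XlaSplitND, the other routes it through a resource
// variable into ReadVariableXlaSplitND.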
Status CreateSplitTensorGraph(const TensorShape& input_shape,
absl::Span<const int32_t> num_splits,
absl::Span<const int32_t> paddings,
const int num_outputs, Graph* graph,
std::vector<std::string>* output_tensor_names) {
DataType data_type = DataTypeToEnum<int32_t>::value;
Tensor input_tensor(data_type, input_shape);
test::FillIota<int32_t>(&input_tensor, 0);
Node* input = test::graph::Constant(graph, input_tensor);
Node* xla_op = nullptr;
TF_RETURN_IF_ERROR(NodeBuilder(graph->NewName("xla_op"), "XlaSplitND")
.Input(input)
.Attr("num_splits", num_splits)
.Attr("paddings", paddings)
.Attr("T", data_type)
.Attr("N", num_outputs)
.Finalize(graph, &xla_op));
output_tensor_names->reserve(num_outputs);
for (int i = 0; i < num_outputs; ++i) {
output_tensor_names->push_back(absl::StrCat(xla_op->name(), ":", i));
}
return absl::OkStatus();
}
Status CreateSplitResourceGraph(const TensorShape& input_shape,
absl::Span<const int32_t> num_splits,
absl::Span<const int32_t> paddings,
const int num_outputs, Graph* graph,
std::vector<std::string>* output_tensor_names) {
Node* var_handle = nullptr;
DataType data_type = DataTypeToEnum<int32_t>::value;
TF_RETURN_IF_ERROR(NodeBuilder(graph->NewName("var_handle"), "VarHandleOp")
.Attr("dtype", data_type)
.Attr("shape", input_shape)
.Finalize(graph, &var_handle));
Tensor input_tensor(data_type, input_shape);
test::FillIota<int32_t>(&input_tensor, 0);
Node* input = test::graph::Constant(graph, input_tensor);
Node* assign_var = nullptr;
TF_RETURN_IF_ERROR(
NodeBuilder(graph->NewName("assign_var"), "AssignVariableOp")
.Input(var_handle)
.Input(input)
.Attr("dtype", data_type)
.Finalize(graph, &assign_var));
Node* xla_op = nullptr;
TF_RETURN_IF_ERROR(
NodeBuilder(graph->NewName("xla_op"), "ReadVariableXlaSplitND")
.Input(var_handle)
.ControlInput(assign_var)
.Attr("num_splits", num_splits)
.Attr("paddings", paddings)
.Attr("T", data_type)
.Attr("N", num_outputs)
.Finalize(graph, &xla_op));
output_tensor_names->reserve(num_outputs);
for (int i = 0; i < num_outputs; ++i) {
output_tensor_names->push_back(absl::StrCat(xla_op->name(), ":", i));
}
return absl::OkStatus();
}
struct XlaSplitNDTestParam {
std::string name;
std::function<Status(const TensorShape&, absl::Span<const int32_t>,
absl::Span<const int32_t>, const int num_outputs, Graph*,
std::vector<std::string>*)>
graph_creator;
};
using XlaSplitNDOpTest = ::testing::TestWithParam<XlaSplitNDTestParam>;
TEST_P(XlaSplitNDOpTest, SplitDimensionZero) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({1, 1, 1});
const std::vector<int32_t> num_splits = {1, 1, 0};
const std::vector<int32_t> paddings;
const int num_outputs = 1;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(
RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT, "index 2 must be positive, but got 0"));
}
TEST_P(XlaSplitNDOpTest, SplitDimensionNegative) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({1, 1, 1});
const std::vector<int32_t> num_splits = {1, -1, 1};
const std::vector<int32_t> paddings;
const int num_outputs = 1;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT,
"index 1 must be positive, but got -1"));
}
TEST_P(XlaSplitNDOpTest, NumOutputsMismatch) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2});
const std::vector<int32_t> num_splits = {2};
const std::vector<int> paddings;
const int num_outputs = 1;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(
RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT, "'N' must match number of slices 2"));
}
TEST_P(XlaSplitNDOpTest, PaddingsLengthMismatch) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2, 2});
const std::vector<int32_t> num_splits = {2, 2};
const std::vector<int32_t> paddings = {0};
const int num_outputs = 4;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT, "length 2, but got 1"));
}
TEST_P(XlaSplitNDOpTest, PaddingsNegative) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2, 2});
const std::vector<int32_t> num_splits = {2, 2};
const std::vector<int32_t> paddings = {0, -1};
const int num_outputs = 4;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(
RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT, "non-negative, but got -1 at index 1"));
}
TEST_P(XlaSplitNDOpTest, InputRank0) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({});
const std::vector<int32_t> num_splits = {2};
const std::vector<int32_t> paddings;
const int num_outputs = 2;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT, "range (0, 8], but got 0"));
}
TEST_P(XlaSplitNDOpTest, InputRank9) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2, 2, 2, 2, 2, 2, 2, 2, 2});
const std::vector<int32_t> num_splits(9, 2);
const std::vector<int32_t> paddings;
const int num_outputs = 512;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT, "range (0, 8], but got 9"));
}
TEST_P(XlaSplitNDOpTest, InputRankSplitMismatch) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2, 2});
const std::vector<int32_t> num_splits = {2, 2, 2};
const std::vector<int32_t> paddings;
const int num_outputs = 8;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT,
"'num_splits' length 3, but got rank 2"));
}
TEST_P(XlaSplitNDOpTest, DimNotEvenlySplit) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({4, 2});
const std::vector<int32_t> num_splits = {3, 2};
const std::vector<int32_t> paddings;
const int num_outputs = 6;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT, "divisible by 'num_splits' 3"));
}
TEST_P(XlaSplitNDOpTest, DimWithPaddingNotEvenlySplit) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({4, 2});
const std::vector<int32_t> num_splits = {2, 2};
const std::vector<int32_t> paddings = {0, 1};
const int num_outputs = 4;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT, "divisible by 'num_splits' 2"));
}
TEST_P(XlaSplitNDOpTest, NoSplits) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2, 2, 2});
const std::vector<int32_t> num_splits = {1, 1, 1};
const std::vector<int> paddings;
const int num_outputs = 1;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names, {},
&output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0], test::AsTensor<int32_t>({0, 1, 2, 3, 4, 5, 6, 7},
TensorShape({2, 2, 2})));
}
TEST_P(XlaSplitNDOpTest, NoSplitsWithPadding) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2, 1, 1});
const std::vector<int32_t> num_splits = {1, 1, 1};
const std::vector<int> paddings = {0, 1, 1};
const int num_outputs = 1;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names, {},
&output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0], test::AsTensor<int32_t>({0, 0, 0, 0, 1, 0, 0, 0},
TensorShape({2, 2, 2})));
}
TEST_P(XlaSplitNDOpTest, SplitNoPadding) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({4, 4});
const std::vector<int32_t> num_splits = {2, 2};
const std::vector<int32_t> paddings;
const int num_outputs = 4;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names, {},
&output_tensors));
ASSERT_EQ(output_tensors.size(), num_outputs);
test::ExpectTensorEqual<int32_t>(
output_tensors[0],
test::AsTensor<int32_t>({0, 1, 4, 5}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[1],
test::AsTensor<int32_t>({2, 3, 6, 7}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[2],
test::AsTensor<int32_t>({8, 9, 12, 13}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[3],
test::AsTensor<int32_t>({10, 11, 14, 15}, TensorShape({2, 2})));
}
TEST_P(XlaSplitNDOpTest, SplitPartialPadding) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({3, 3});
const std::vector<int32_t> num_splits = {2, 2};
const std::vector<int32_t> paddings = {1, 1};
const int num_outputs = 4;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names, {},
&output_tensors));
ASSERT_EQ(output_tensors.size(), num_outputs);
test::ExpectTensorEqual<int32_t>(
output_tensors[0],
test::AsTensor<int32_t>({0, 1, 3, 4}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[1],
test::AsTensor<int32_t>({2, 0, 5, 0}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[2],
test::AsTensor<int32_t>({6, 7, 0, 0}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[3],
test::AsTensor<int32_t>({8, 0, 0, 0}, TensorShape({2, 2})));
}
TEST_P(XlaSplitNDOpTest, SplitCompletePadding) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2, 1});
const std::vector<int32_t> num_splits = {2, 2};
const std::vector<int32_t> paddings = {2, 3};
const int num_outputs = 4;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names, {},
&output_tensors));
ASSERT_EQ(output_tensors.size(), num_outputs);
test::ExpectTensorEqual<int32_t>(
output_tensors[0],
test::AsTensor<int32_t>({0, 0, 1, 0}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[1],
test::AsTensor<int32_t>({0, 0, 0, 0}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[2],
test::AsTensor<int32_t>({0, 0, 0, 0}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[3],
test::AsTensor<int32_t>({0, 0, 0, 0}, TensorShape({2, 2})));
}
INSTANTIATE_TEST_SUITE_P(
XlaSplitNDOpTest, XlaSplitNDOpTest,
::testing::ValuesIn<XlaSplitNDTestParam>(
{{"Tensor", CreateSplitTensorGraph},
{"Resource", CreateSplitResourceGraph}}),
[](const ::testing::TestParamInfo<XlaSplitNDOpTest::ParamType>& info) {
return info.param.name;
});
struct RankedXlaSplitNDTestParam {
std::string name;
int rank = 0;
std::function<Status(const TensorShape&, absl::Span<const int32_t>,
absl::Span<const int32_t>, const int num_outputs, Graph*,
std::vector<std::string>*)>
graph_creator;
};
class RankedXlaSplitNDOpTest
: public ::testing::TestWithParam<RankedXlaSplitNDTestParam> {};
TEST_P(RankedXlaSplitNDOpTest, TestSubscriptRank) {
const int rank = GetParam().rank;
const std::vector<int32_t> num_splits(rank, 2);
Graph graph(OpRegistry::Global());
const TensorShape input_shape(std::vector<int64_t>(rank, 2));
const std::vector<int32_t> paddings;
const int num_outputs = 2 << (rank - 1);
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names, {},
&output_tensors));
ASSERT_EQ(output_tensors.size(), num_outputs);
TensorShape output_shape(std::vector<int64_t>(rank, 1));
for (int i = 0; i < num_outputs; ++i) {
test::ExpectTensorEqual<int32_t>(
output_tensors[i], test::AsTensor<int32_t>({i}, output_shape));
}
}
INSTANTIATE_TEST_SUITE_P(
RankedXlaSplitNDOpTest, RankedXlaSplitNDOpTest,
::testing::ValuesIn<RankedXlaSplitNDTestParam>(
{{"TensorRanked1", 1, CreateSplitTensorGraph},
{"TensorRanked2", 2, CreateSplitTensorGraph},
{"TensorRanked3", 3, CreateSplitTensorGraph},
{"TensorRanked4", 4, CreateSplitTensorGraph},
{"TensorRanked5", 5, CreateSplitTensorGraph},
{"TensorRanked6", 6, CreateSplitTensorGraph},
{"TensorRanked7", 7, CreateSplitTensorGraph},
{"TensorRanked8", 8, CreateSplitTensorGraph},
{"ResourceRanked1", 1, CreateSplitResourceGraph},
{"ResourceRanked2", 2, CreateSplitResourceGraph},
{"ResourceRanked3", 3, CreateSplitResourceGraph},
{"ResourceRanked4", 4, CreateSplitResourceGraph},
{"ResourceRanked5", 5, CreateSplitResourceGraph},
{"ResourceRanked6", 6, CreateSplitResourceGraph},
{"ResourceRanked7", 7, CreateSplitResourceGraph},
{"ResourceRanked8", 8, CreateSplitResourceGraph}}),
[](const ::testing::TestParamInfo<RankedXlaSplitNDOpTest::ParamType>&
info) { return info.param.name; });
TEST(AssignVariableXlaConcatNDOpTest, HandleDTypeInvalid) {
Graph graph(OpRegistry::Global());
Node* var_handle = nullptr;
DataType handle_dtype = DataTypeToEnum<int32_t>::value;
PartialTensorShape handle_shape;
TF_ASSERT_OK(NodeBuilder(graph.NewName("var_handle"), "VarHandleOp")
.Attr("dtype", handle_dtype)
.Attr("shape", handle_shape)
.Finalize(&graph, &var_handle));
DataType update_data_type = DataTypeToEnum<float>::value;
const TensorShape update_input_shape({4, 4});
Tensor update_input_tensor(update_data_type, update_input_shape);
test::FillIota<float>(&update_input_tensor, 0.f);
Node* update_input = test::graph::Constant(&graph, update_input_tensor);
Node* xla_op = nullptr;
const std::vector<int32_t> num_concats = {1, 1};
const int num_inputs = 1;
TF_ASSERT_OK(NodeBuilder(graph.NewName("xla_op"), "AssignVariableXlaConcatND")
.Input(var_handle)
.Input(std::vector<NodeBuilder::NodeOut>{update_input})
.Attr("num_concats", num_concats)
.Attr("T", update_data_type)
.Attr("N", num_inputs)
.Finalize(&graph, &xla_op));
std::vector<Tensor> output_tensors;
EXPECT_THAT(
RunGraph(graph, {},
{xla_op->name()}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT, "dtype int32, but got float"));
}
TEST(AssignVariableXlaConcatNDOpTest, TensorDTypeInvalid) {
Graph graph(OpRegistry::Global());
Node* var_handle = nullptr;
DataType handle_dtype = DataTypeToEnum<float>::value;
PartialTensorShape handle_shape;
TF_ASSERT_OK(NodeBuilder(graph.NewName("var_handle"), "VarHandleOp")
.Attr("dtype", handle_dtype)
.Attr("shape", handle_shape)
.Finalize(&graph, &var_handle));
DataType init_data_type = DataTypeToEnum<int32_t>::value;
const TensorShape init_input_shape({4, 4});
Tensor init_input_tensor(init_data_type, init_input_shape);
test::FillIota<int32_t>(&init_input_tensor, 0);
Node* input = test::graph::Constant(&graph, init_input_tensor);
Node* assign_var = nullptr;
TF_ASSERT_OK(NodeBuilder(graph.NewName("assign_var"), "AssignVariableOp")
.Input(var_handle)
.Input(input)
.Attr("dtype", init_data_type)
.Finalize(&graph, &assign_var));
DataType update_data_type = DataTypeToEnum<float>::value;
const TensorShape update_input_shape({4, 4});
Tensor update_input_tensor(update_data_type, update_input_shape);
test::FillIota<float>(&update_input_tensor, 0.f);
Node* update_input = test::graph::Constant(&graph, update_input_tensor);
Node* xla_op = nullptr;
const std::vector<int32_t> num_concats = {1, 1};
const int num_inputs = 1;
TF_ASSERT_OK(NodeBuilder(graph.NewName("xla_op"), "AssignVariableXlaConcatND")
.Input(var_handle)
.Input(std::vector<NodeBuilder::NodeOut>{update_input})
.ControlInput(assign_var)
.Attr("num_concats", num_concats)
.Attr("T", update_data_type)
.Attr("N", num_inputs)
.Finalize(&graph, &xla_op));
std::vector<Tensor> output_tensors;
EXPECT_THAT(
RunGraph(graph, {},
{xla_op->name()}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT, "dtype int32, but got float"));
}
TEST(AssignVariableXlaConcatNDOpTest, HandleShapeIncompatible) {
Graph graph(OpRegistry::Global());
Node* var_handle = nullptr;
DataType handle_dtype = DataTypeToEnum<float>::value;
PartialTensorShape handle_shape({});
TF_ASSERT_OK(NodeBuilder(graph.NewName("var_handle"), "VarHandleOp")
.Attr("dtype", handle_dtype)
.Attr("shape", handle_shape)
.Finalize(&graph, &var_handle));
DataType update_data_type = DataTypeToEnum<float>::value;
const TensorShape update_input_shape({4, 4});
Tensor update_input_tensor(update_data_type, update_input_shape);
test::FillIota<float>(&update_input_tensor, 0.f);
Node* update_input = test::graph::Constant(&graph, update_input_tensor);
Node* xla_op = nullptr;
const std::vector<int32_t> num_concats = {1, 1};
const int num_inputs = 1;
TF_ASSERT_OK(NodeBuilder(graph.NewName("xla_op"), "AssignVariableXlaConcatND")
.Input(var_handle)
.Input(std::vector<NodeBuilder::NodeOut>{update_input})
.Attr("num_concats", num_concats)
.Attr("T", update_data_type)
.Attr("N", num_inputs)
.Finalize(&graph, &xla_op));
std::vector<Tensor> output_tensors;
EXPECT_THAT(
RunGraph(graph, {},
{xla_op->name()}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT, "expected shape [4,4], but got []"));
}
TEST(AssignVariableXlaConcatNDOpTest, HandleShapeWithPaddingIncompatible) {
Graph graph(OpRegistry::Global());
Node* var_handle = nullptr;
DataType handle_dtype = DataTypeToEnum<float>::value;
PartialTensorShape handle_shape({4, 4});
TF_ASSERT_OK(NodeBuilder(graph.NewName("var_handle"), "VarHandleOp")
.Attr("dtype", handle_dtype)
.Attr("shape", handle_shape)
.Finalize(&graph, &var_handle));
DataType update_data_type = DataTypeToEnum<float>::value;
const TensorShape update_input_shape({4, 4});
Tensor update_input_tensor(update_data_type, update_input_shape);
test::FillIota<float>(&update_input_tensor, 0.f);
Node* update_input = test::graph::Constant(&graph, update_input_tensor);
Node* xla_op = nullptr;
const std::vector<int32_t> num_concats = {1, 1};
const std::vector<int32_t> paddings = {1, 1};
const int num_inputs = 1;
TF_ASSERT_OK(NodeBuilder(graph.NewName("xla_op"), "AssignVariableXlaConcatND")
.Input(var_handle)
.Input(std::vector<NodeBuilder::NodeOut>{update_input})
.Attr("num_concats", num_concats)
.Attr("paddings", paddings)
.Attr("T", update_data_type)
.Attr("N", num_inputs)
.Finalize(&graph, &xla_op));
std::vector<Tensor> output_tensors;
EXPECT_THAT(
RunGraph(graph, {},
{xla_op->name()}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT, "expected shape [3,3], but got [4,4]"));
}
TEST(AssignVariableXlaConcatNDOpTest, AssignDifferentShape) {
Graph graph(OpRegistry::Global());
Node* var_handle = nullptr;
DataType data_type = DataTypeToEnum<float>::value;
TF_ASSERT_OK(NodeBuilder(graph.NewName("var_handle"), "VarHandleOp")
.Attr("dtype", data_type)
.Attr("shape", PartialTensorShape({4, -1}))
.Finalize(&graph, &var_handle));
const TensorShape init_input_shape({4, 2});
Tensor init_input_tensor(data_type, init_input_shape);
test::FillFn<float>(&init_input_tensor, [](int unused) { return -1.f; });
Node* init_input = test::graph::Constant(&graph, init_input_tensor);
Node* assign_var = nullptr;
TF_ASSERT_OK(NodeBuilder(graph.NewName("assign_var"), "AssignVariableOp")
.Input(var_handle)
.Input(init_input)
.Attr("dtype", data_type)
.Finalize(&graph, &assign_var));
const TensorShape update_input_shape({4, 4});
Tensor update_input_tensor(data_type, update_input_shape);
test::FillIota<float>(&update_input_tensor, 0.f);
Node* update_input = test::graph::Constant(&graph, update_input_tensor);
Node* xla_op = nullptr;
const std::vector<int32_t> num_concats = {1, 1};
const int num_inputs = 1;
TF_ASSERT_OK(NodeBuilder(graph.NewName("xla_op"), "AssignVariableXlaConcatND")
.Input(var_handle)
.Input(std::vector<NodeBuilder::NodeOut>{update_input})
.ControlInput(assign_var)
.Attr("num_concats", num_concats)
.Attr("T", data_type)
.Attr("N", num_inputs)
.Finalize(&graph, &xla_op));
Node* read_var = nullptr;
TF_ASSERT_OK(NodeBuilder(graph.NewName("read_var"), "ReadVariableOp")
.Input(var_handle)
.ControlInput(xla_op)
.Attr("dtype", data_type)
.Finalize(&graph, &read_var));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(
graph, {absl::StrCat(read_var->name(), ":", 0)},
{}, &output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorNear<float>(output_tensors[0], update_input_tensor,
1e-6);
}
Status CreateConcatTensorGraph(absl::Span<const TensorShape> input_shapes,
absl::Span<const int32_t> num_concats,
absl::Span<const int32_t> paddings, Graph* graph,
std::vector<std::string>* output_tensor_names) {
int32_t val = 0;
DataType data_type = DataTypeToEnum<int32_t>::value;
std::vector<NodeBuilder::NodeOut> inputs;
inputs.reserve(input_shapes.size());
for (const TensorShape& input_shape : input_shapes) {
Tensor input_tensor(data_type, input_shape);
test::FillIota<int32_t>(&input_tensor, val);
val += input_tensor.NumElements();
inputs.push_back(test::graph::Constant(graph, input_tensor));
}
Node* xla_op = nullptr;
TF_RETURN_IF_ERROR(NodeBuilder(graph->NewName("xla_op"), "XlaConcatND")
.Input(inputs)
.Attr("num_concats", num_concats)
.Attr("paddings", paddings)
.Attr("T", data_type)
.Attr("N", static_cast<int64_t>(input_shapes.size()))
.Finalize(graph, &xla_op));
output_tensor_names->push_back(absl::StrCat(xla_op->name(), ":", 0));
return absl::OkStatus();
}
template <bool Init>
Status CreateConcatResourceGraph(
absl::Span<const TensorShape> input_shapes,
absl::Span<const int32_t> num_concats, absl::Span<const int32_t> paddings,
Graph* graph, std::vector<std::string>* output_tensor_names) {
Node* var_handle = nullptr;
DataType data_type = DataTypeToEnum<int32_t>::value;
TF_RETURN_IF_ERROR(NodeBuilder(graph->NewName("var_handle"), "VarHandleOp")
.Attr("dtype", data_type)
.Attr("shape", PartialTensorShape())
.Finalize(graph, &var_handle));
Node* assign_var = nullptr;
if (Init) {
Tensor init_input_tensor(data_type, input_shapes.front());
test::FillFn<int32_t>(&init_input_tensor, [](int unused) { return -1; });
Node* init_input = test::graph::Constant(graph, init_input_tensor);
TF_RETURN_IF_ERROR(
NodeBuilder(graph->NewName("assign_var"), "AssignVariableOp")
.Input(var_handle)
.Input(init_input)
.Attr("dtype", data_type)
.Finalize(graph, &assign_var));
}
int32_t val = 0;
std::vector<NodeBuilder::NodeOut> inputs;
inputs.reserve(input_shapes.size());
for (const TensorShape& input_shape : input_shapes) {
Tensor input_tensor(data_type, input_shape);
test::FillIota<int32_t>(&input_tensor, val);
val += input_tensor.NumElements();
inputs.push_back(test::graph::Constant(graph, input_tensor));
}
Node* xla_op = nullptr;
NodeBuilder builder(graph->NewName("xla_op"), "AssignVariableXlaConcatND");
builder.Input(var_handle);
builder.Input(inputs);
if (assign_var != nullptr) {
builder.ControlInput(assign_var);
}
TF_RETURN_IF_ERROR(builder.Attr("num_concats", num_concats)
.Attr("paddings", paddings)
.Attr("T", data_type)
.Attr("N", static_cast<int64_t>(input_shapes.size()))
.Finalize(graph, &xla_op));
Node* read_var = nullptr;
TF_RETURN_IF_ERROR(NodeBuilder(graph->NewName("read_var"), "ReadVariableOp")
.Input(var_handle)
.ControlInput(xla_op)
.Attr("dtype", data_type)
.Finalize(graph, &read_var));
output_tensor_names->push_back(absl::StrCat(read_var->name(), ":", 0));
return absl::OkStatus();
}
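// Note: the `Init` template parameter selects between the two resource test
// variants: when true, the variable is first initialized with -1s via
// AssignVariableOp before AssignVariableXlaConcatND runs; when false, the op
// must handle an uninitialized resource. Both variants read the result back
// through ReadVariableOp so the tests can compare tensors.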
struct XlaConcatNDTestParam {
std::string name;
std::function<Status(absl::Span<const TensorShape>, absl::Span<const int32_t>,
absl::Span<const int32_t>, Graph*,
std::vector<std::string>*)>
graph_creator;
};
using XlaConcatNDOpTest = ::testing::TestWithParam<XlaConcatNDTestParam>;
TEST_P(XlaConcatNDOpTest, ConcatDimensionZero) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({1, 1, 1});
const std::vector<int32_t> num_concats = {1, 1, 0};
const std::vector<int32_t> paddings;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator({input_shape}, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(
RunGraph(graph, output_tensor_names,
{}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT, "index 2 must be positive, but got 0"));
}
TEST_P(XlaConcatNDOpTest, ConcatDimensionNegative) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({1, 1, 1});
const std::vector<int32_t> num_splits = {1, -1, 1};
const std::vector<int32_t> paddings;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator({input_shape}, num_splits, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names,
{}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT,
"index 1 must be positive, but got -1"));
}
TEST_P(XlaConcatNDOpTest, NumInputsMismatch) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2});
const std::vector<int32_t> num_concats = {2};
const std::vector<int> paddings;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator({input_shape}, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(
RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT, "'N' must match number of slices 2"));
}
TEST_P(XlaConcatNDOpTest, PaddingsLengthMismatch) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2, 2});
const std::vector<int32_t> num_concats = {1, 1};
const std::vector<int32_t> paddings = {0};
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator({input_shape}, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names,
{}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT, "length 2, but got 1"));
}
TEST_P(XlaConcatNDOpTest, PaddingsNegative) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2, 2});
const std::vector<int32_t> num_concats = {1, 1};
const std::vector<int32_t> paddings = {0, -1};
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator({input_shape}, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(
RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT, "non-negative, but got -1 at index 1"));
}
TEST_P(XlaConcatNDOpTest, InputRank0) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({});
const std::vector<int32_t> num_concats;
const std::vector<int32_t> paddings;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator({input_shape}, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names,
{}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT, "range (0, 8], but got 0"));
}
TEST_P(XlaConcatNDOpTest, InputRank9) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({1, 1, 1, 1, 1, 1, 1, 1, 1});
const std::vector<int32_t> num_concats(9, 1);
const std::vector<int32_t> paddings;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator({input_shape}, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names,
{}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT, "range (0, 8], but got 9"));
}
TEST_P(XlaConcatNDOpTest, InputRankConcatMismatch) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({1});
const std::vector<int32_t> num_concats = {1, 1};
const std::vector<int32_t> paddings;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator({input_shape}, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names,
{}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT,
"'num_concats' length 2, but got rank 1"));
}
TEST_P(XlaConcatNDOpTest, DifferentShapedInputs) {
Graph graph(OpRegistry::Global());
const std::vector<TensorShape> input_shapes{{1}, {2}};
const std::vector<int32_t> num_concats = {2};
const std::vector<int32_t> paddings;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shapes, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names,
{}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT,
"same expected shape [1], but got [2] at index 1"));
}
TEST_P(XlaConcatNDOpTest, PaddingExceedsOutputDimSize) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({1});
const std::vector<int32_t> num_concats = {1};
const std::vector<int32_t> paddings = {2};
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator({input_shape}, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(
RunGraph(graph, output_tensor_names,
{}, &output_tensors),
IsStatus(
error::INVALID_ARGUMENT,
"exceed expected output shape dimension 1 at index 0, but got 2"));
}
TEST_P(XlaConcatNDOpTest, NoConcats) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2, 2, 2});
const std::vector<int32_t> num_concats = {1, 1, 1};
const std::vector<int> paddings;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator({input_shape}, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names, {},
&output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0], test::AsTensor<int32_t>({0, 1, 2, 3, 4, 5, 6, 7},
TensorShape({2, 2, 2})));
}
TEST_P(XlaConcatNDOpTest, NoConcatsWithPadding) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2, 2, 2});
const std::vector<int32_t> num_concats = {1, 1, 1};
const std::vector<int> paddings = {1, 1, 1};
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator({input_shape}, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names,
{}, &output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0], test::AsTensor<int32_t>({0}, TensorShape({1, 1, 1})));
}
TEST_P(XlaConcatNDOpTest, ConcatNoPadding) {
Graph graph(OpRegistry::Global());
const std::vector<TensorShape> input_shapes{{2, 2}, {2, 2}, {2, 2}, {2, 2}};
const std::vector<int32_t> num_concats = {2, 2};
const std::vector<int32_t> paddings;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shapes, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names,
{}, &output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0], test::AsTensor<int32_t>({0, 1, 4, 5, 2, 3, 6, 7, 8, 9,
12, 13, 10, 11, 14, 15},
TensorShape({4, 4})));
}
TEST_P(XlaConcatNDOpTest, ConcatPartialPadding) {
Graph graph(OpRegistry::Global());
const std::vector<TensorShape> input_shapes{{2, 2}, {2, 2}, {2, 2}, {2, 2}};
const std::vector<int32_t> num_concats = {2, 2};
const std::vector<int32_t> paddings = {1, 1};
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shapes, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names,
{}, &output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0], test::AsTensor<int32_t>({0, 1, 4, 2, 3, 6, 8, 9, 12},
TensorShape({3, 3})));
}
TEST_P(XlaConcatNDOpTest, ConcatCompletePadding) {
Graph graph(OpRegistry::Global());
const std::vector<TensorShape> input_shapes{{2, 2}, {2, 2}, {2, 2}, {2, 2}};
const std::vector<int32_t> num_concats = {2, 2};
const std::vector<int32_t> paddings = {2, 2};
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shapes, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names,
{}, &output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0],
test::AsTensor<int32_t>({0, 1, 2, 3}, TensorShape({2, 2})));
}
INSTANTIATE_TEST_SUITE_P(
XlaConcatNDOpTest, XlaConcatNDOpTest,
::testing::ValuesIn<XlaConcatNDTestParam>(
{{"Tensor", CreateConcatTensorGraph},
{"InitializedResource", CreateConcatResourceGraph<true>},
{"UninitializedResource", CreateConcatResourceGraph<false>}}),
[](const ::testing::TestParamInfo<XlaConcatNDOpTest::ParamType>& info) {
return info.param.name;
});
struct RankedXlaConcatNDTestParam {
std::string name;
int rank = 0;
std::function<Status(absl::Span<const TensorShape>, absl::Span<const int32_t>,
absl::Span<const int32_t>, Graph*,
std::vector<std::string>*)>
graph_creator;
};
class RankedXlaConcatNDOpTest
: public ::testing::TestWithParam<RankedXlaConcatNDTestParam> {};
TEST_P(RankedXlaConcatNDOpTest, TestSubscriptRank) {
const int rank = GetParam().rank;
const std::vector<int32_t> num_concats(rank, 2);
Graph graph(OpRegistry::Global());
const int num_inputs = 2 << (rank - 1);
const TensorShape base_input_shape(std::vector<int64_t>(rank, 1));
const std::vector<TensorShape> input_shapes(num_inputs, base_input_shape);
const std::vector<int32_t> paddings;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shapes, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names, {},
&output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
std::vector<int32_t> expected_values(num_inputs);
std::iota(expected_values.begin(), expected_values.end(), 0);
test::ExpectTensorEqual<int32_t>(
output_tensors[0],
test::AsTensor<int32_t>(expected_values,
TensorShape(std::vector<int64_t>(rank, 2))));
}
INSTANTIATE_TEST_SUITE_P(
RankedXlaConcatNDOpTest, RankedXlaConcatNDOpTest,
::testing::ValuesIn<RankedXlaConcatNDTestParam>(
{{"TensorRanked1", 1, CreateConcatTensorGraph},
{"TensorRanked2", 2, CreateConcatTensorGraph},
{"TensorRanked3", 3, CreateConcatTensorGraph},
{"TensorRanked4", 4, CreateConcatTensorGraph},
{"TensorRanked5", 5, CreateConcatTensorGraph},
{"TensorRanked6", 6, CreateConcatTensorGraph},
{"TensorRanked7", 7, CreateConcatTensorGraph},
{"TensorRanked8", 8, CreateConcatTensorGraph},
{"InitializedResourceRanked1", 1, CreateConcatResourceGraph<true>},
{"InitializedResourceRanked2", 2, CreateConcatResourceGraph<true>},
{"InitializedResourceRanked3", 3, CreateConcatResourceGraph<true>},
{"InitializedResourceRanked4", 4, CreateConcatResourceGraph<true>},
{"InitializedResourceRanked5", 5, CreateConcatResourceGraph<true>},
{"InitializedResourceRanked6", 6, CreateConcatResourceGraph<true>},
{"InitializedResourceRanked7", 7, CreateConcatResourceGraph<true>},
{"InitializedResourceRanked8", 8, CreateConcatResourceGraph<true>},
{"UninitializedResourceRanked1", 1, CreateConcatResourceGraph<false>},
{"UninitializedResourceRanked2", 2, CreateConcatResourceGraph<false>},
{"UninitializedResourceRanked3", 3, CreateConcatResourceGraph<false>},
{"UninitializedResourceRanked4", 4, CreateConcatResourceGraph<false>},
{"UninitializedResourceRanked5", 5, CreateConcatResourceGraph<false>},
{"UninitializedResourceRanked6", 6, CreateConcatResourceGraph<false>},
{"UninitializedResourceRanked7", 7, CreateConcatResourceGraph<false>},
{"UninitializedResourceRanked8", 8,
CreateConcatResourceGraph<false>}}),
[](const ::testing::TestParamInfo<RankedXlaConcatNDOpTest::ParamType>&
info) { return info.param.name; });
Status CreateRoundtripTensorGraph(
const TensorShape& input_shape, absl::Span<const int32_t> num_partitions,
absl::Span<const int32_t> paddings, Graph* graph,
std::vector<std::string>* output_tensor_names) {
const int32_t num_partitions_size =
std::accumulate(num_partitions.begin(), num_partitions.end(), 1,
std::multiplies<int32_t>());
DataType data_type = DataTypeToEnum<int32_t>::value;
Tensor input_tensor(data_type, input_shape);
test::FillIota<int32_t>(&input_tensor, 0);
Node* input = test::graph::Constant(graph, input_tensor);
Node* xla_split_op = nullptr;
TF_RETURN_IF_ERROR(NodeBuilder(graph->NewName("xla_split_op"), "XlaSplitND")
.Input(input)
.Attr("num_splits", num_partitions)
.Attr("paddings", paddings)
.Attr("T", data_type)
.Attr("N", num_partitions_size)
.Finalize(graph, &xla_split_op));
std::vector<NodeBuilder::NodeOut> concat_inputs;
concat_inputs.reserve(num_partitions_size);
for (int i = 0; i < num_partitions_size; ++i) {
concat_inputs.push_back({xla_split_op, i});
}
Node* xla_concat_op = nullptr;
TF_RETURN_IF_ERROR(NodeBuilder(graph->NewName("xla_concat_op"), "XlaConcatND")
.Input(concat_inputs)
.Attr("num_concats", num_partitions)
.Attr("paddings", paddings)
.Attr("T", data_type)
.Attr("N", num_partitions_size)
.Finalize(graph, &xla_concat_op));
Node* equal = nullptr;
TF_RETURN_IF_ERROR(NodeBuilder(graph->NewName("equal"), "Equal")
.Input(input)
.Input(xla_concat_op)
.Attr("T", data_type)
.Finalize(graph, &equal));
output_tensor_names->push_back(absl::StrCat(equal->name(), ":", 0));
return absl::OkStatus();
}
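// The roundtrip graphs check that XlaSplitND followed by XlaConcatND with the
// same partition counts and paddings reconstructs the original tensor: the
// trailing Equal node compares the concatenated result against the input, so
// every element of the boolean output is expected to be true.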
Status CreateRoundtripResourceGraph(
const TensorShape& input_shape, absl::Span<const int32_t> num_partitions,
absl::Span<const int32_t> paddings, Graph* graph,
std::vector<std::string>* output_tensor_names) {
const int32_t num_partitions_size =
std::accumulate(num_partitions.begin(), num_partitions.end(), 1,
std::multiplies<int32_t>());
Node* var_handle = nullptr;
DataType data_type = DataTypeToEnum<int32_t>::value;
TF_RETURN_IF_ERROR(NodeBuilder(graph->NewName("var_handle"), "VarHandleOp")
.Attr("dtype", data_type)
.Attr("shape", PartialTensorShape())
.Finalize(graph, &var_handle));
Tensor input_tensor(data_type, input_shape);
test::FillIota<int32_t>(&input_tensor, 0);
Node* input = test::graph::Constant(graph, input_tensor);
Node* assign_var = nullptr;
TF_RETURN_IF_ERROR(
NodeBuilder(graph->NewName("assign_var"), "AssignVariableOp")
.Input(var_handle)
.Input(input)
.Attr("dtype", data_type)
.Finalize(graph, &assign_var));
Node* xla_split_op = nullptr;
TF_RETURN_IF_ERROR(
NodeBuilder(graph->NewName("xla_split_op"), "ReadVariableXlaSplitND")
.Input(var_handle)
.ControlInput(assign_var)
.Attr("num_splits", num_partitions)
.Attr("paddings", paddings)
.Attr("T", data_type)
.Attr("N", num_partitions_size)
.Finalize(graph, &xla_split_op));
std::vector<NodeBuilder::NodeOut> concat_inputs;
concat_inputs.reserve(num_partitions_size);
for (int i = 0; i < num_partitions_size; ++i) {
concat_inputs.push_back({xla_split_op, i});
}
Node* xla_concat_op = nullptr;
TF_RETURN_IF_ERROR(
NodeBuilder(graph->NewName("xla_op"), "AssignVariableXlaConcatND")
.Input(var_handle)
.Input(concat_inputs)
.Attr("num_concats", num_partitions)
.Attr("paddings", paddings)
.Attr("T", data_type)
.Attr("N", num_partitions_size)
.Finalize(graph, &xla_concat_op));
Node* read_var = nullptr;
TF_RETURN_IF_ERROR(NodeBuilder(graph->NewName("read_var"), "ReadVariableOp")
.Input(var_handle)
.ControlInput(xla_concat_op)
.Attr("dtype", data_type)
.Finalize(graph, &read_var));
Node* equal = nullptr;
TF_RETURN_IF_ERROR(NodeBuilder(graph->NewName("equal"), "Equal")
.Input(input)
.Input(read_var)
.Attr("T", data_type)
.Finalize(graph, &equal));
output_tensor_names->push_back(absl::StrCat(equal->name(), ":", 0));
return absl::OkStatus();
}
struct RoundtripXlaSplitConcatNDTestParam {
std::string name;
int rank = 0;
std::function<Status(const TensorShape&, absl::Span<const int32_t>,
absl::Span<const int32_t>, Graph*,
std::vector<std::string>*)>
graph_creator;
};
class RoundtripXlaSplitConcatNDTest
: public ::testing::TestWithParam<RoundtripXlaSplitConcatNDTestParam> {};
template <typename T>
Tensor Constant(T v, TensorShape shape) {
Tensor ret(DataTypeToEnum<T>::value, shape);
ret.flat<T>().setConstant(v);
return ret;
}
TEST_P(RoundtripXlaSplitConcatNDTest, NoPadding) {
const int rank = GetParam().rank;
const std::vector<int32_t> num_partitions(rank, 2);
Graph graph(OpRegistry::Global());
const TensorShape input_shape(std::vector<int64_t>(rank, 4));
const std::vector<int32_t> paddings;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_partitions, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names, {},
&output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<bool>(
output_tensors[0],
Constant<bool>(true, TensorShape(std::vector<int64_t>(rank, 4))));
}
TEST_P(RoundtripXlaSplitConcatNDTest, PartialPadding) {
const int rank = GetParam().rank;
const std::vector<int32_t> num_partitions(rank, 2);
Graph graph(OpRegistry::Global());
const TensorShape input_shape(std::vector<int64_t>(rank, 4));
const std::vector<int32_t> paddings(rank, 2);
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_partitions, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names, {},
&output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<bool>(
output_tensors[0],
Constant<bool>(true, TensorShape(std::vector<int64_t>(rank, 4))));
}
TEST_P(RoundtripXlaSplitConcatNDTest, CompletePadding) {
const int rank = GetParam().rank;
const std::vector<int32_t> num_partitions(rank, 2);
Graph graph(OpRegistry::Global());
const TensorShape input_shape(std::vector<int64_t>(rank, 4));
const std::vector<int32_t> paddings(rank, 4);
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_partitions, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names, {},
&output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<bool>(
output_tensors[0],
Constant<bool>(true, TensorShape(std::vector<int64_t>(rank, 4))));
}
INSTANTIATE_TEST_SUITE_P(
RoundtripXlaSplitConcatNDTest, RoundtripXlaSplitConcatNDTest,
::testing::ValuesIn<RoundtripXlaSplitConcatNDTestParam>(
{{"TensorRanked1", 1, CreateRoundtripTensorGraph},
{"TensorRanked2", 2, CreateRoundtripTensorGraph},
{"TensorRanked3", 3, CreateRoundtripTensorGraph},
{"TensorRanked4", 4, CreateRoundtripTensorGraph},
{"TensorRanked5", 5, CreateRoundtripTensorGraph},
{"TensorRanked6", 6, CreateRoundtripTensorGraph},
{"TensorRanked7", 7, CreateRoundtripTensorGraph},
{"TensorRanked8", 8, CreateRoundtripTensorGraph},
{"ResourceRanked1", 1, CreateRoundtripResourceGraph},
{"ResourceRanked2", 2, CreateRoundtripResourceGraph},
{"ResourceRanked3", 3, CreateRoundtripResourceGraph},
{"ResourceRanked4", 4, CreateRoundtripResourceGraph},
{"ResourceRanked5", 5, CreateRoundtripResourceGraph},
{"ResourceRanked6", 6, CreateRoundtripResourceGraph},
{"ResourceRanked7", 7, CreateRoundtripResourceGraph},
{"ResourceRanked8", 8, CreateRoundtripResourceGraph}}),
[](const ::testing::TestParamInfo<RoundtripXlaSplitConcatNDTest::ParamType>&
info) { return info.param.name; });
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/sharding_util_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tpu/kernels/sharding_util_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fe09ef8b-81a3-4fba-97de-713f5e1c309e | cpp | tensorflow/tensorflow | sparse_core_ops_utils | tensorflow/core/tpu/kernels/sparse_core_ops_utils.cc | tensorflow/core/tpu/kernels/sparse_core_ops_utils_test.cc | #include "tensorflow/core/tpu/kernels/sparse_core_ops_utils.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <functional>
#include <string>
#include <tuple>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/container/flat_hash_map.h"
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/jit/flags.h"
#include "xla/stream_executor/tpu/status_helper.h"
#include "xla/stream_executor/tpu/tpu_api.h"
#include "xla/stream_executor/tpu/tpu_ops_c_api.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
std::vector<int> ConvertBinarySplitsToBucketSplits(int64 split,
int max_division_level) {
std::vector<int> bucket_splits;
uint32 current_index = 0;
while (split > 0) {
if (split % 2 == 1) {
int split_level = absl::bit_width(current_index + 1) - 1;
int split_offset = current_index - (1 << split_level) + 1;
int split_size = 1 << (max_division_level - 1 - split_level);
bucket_splits.push_back(split_size + split_offset * split_size * 2);
}
split >>= 1;
current_index += 1;
}
absl::c_sort(bucket_splits);
return bucket_splits;
}
int64 ConvertBucketSplitsToBinarySplits(std::vector<int> bucket_splits,
int max_division_level) {
int64 binary_splits = 0;
for (auto& bucket_split : bucket_splits) {
int split_level = max_division_level - 1;
while (bucket_split > 0 && bucket_split % 2 == 0) {
--split_level;
bucket_split = bucket_split >> 1;
}
binary_splits |= (1LL << ((1 << split_level) - 1 + bucket_split / 2));
}
return binary_splits;
}
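// Worked example (a sketch; the values come from the ConvertSplitsAndBackTest
// unit tests): with max_division_level = 6 the id space has 2^6 = 64 leaf
// buckets, and each set bit in the binary encoding selects one boundary.
//
//   // Bits 0 and 1 of split = 3 select the level-0 midpoint (32) and the
//   // first level-1 boundary (16).
//   std::vector<int> buckets =
//       ConvertBinarySplitsToBucketSplits(/*split=*/3, /*max_division_level=*/6);
//   // buckets == {16, 32}
//   int64 round_trip = ConvertBucketSplitsToBinarySplits(buckets, 6);
//   // round_trip == 3, i.e. the two encodings are inverses of each other.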
Status ValidateInputCombiner(const std::string& combiner) {
if (combiner != "sum" && combiner != "mean" && combiner != "sqrtn") {
return absl::InvalidArgumentError(
"Invalid combiner: only \"sum\", \"mean\", and "
"\"sqrtn\" are supported.");
}
return absl::OkStatus();
}
std::function<float(float)> GetCombinerScaleContributionFunction(
absl::string_view combiner) {
if (combiner == "sum") {
return [](float x) -> float { return 1.f; };
} else if (combiner == "mean") {
return [](float x) -> float { return x; };
} else {
return [](float x) -> float { return x * x; };
}
}
std::function<float(float)> GetCombinerScaleTransformFunction(
absl::string_view combiner) {
if (combiner == "sum") {
    return [](float x) -> float { return 1.f; };
} else if (combiner == "mean") {
return [](float x) -> float { return x == 0.0f ? 0.0f : 1.0 / x; };
} else {
return
[](float x) -> float { return x == 0.0f ? 0.0f : 1.0 / std::sqrt(x); };
}
}
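// Sketch of how the two combiner helpers compose (inferred from their shapes;
// the actual call sites live elsewhere): per-gain contributions are
// accumulated first, then the transform maps the accumulated value to a final
// scale. For "mean" with gains {g0, g1}:
//
//   auto contrib = GetCombinerScaleContributionFunction("mean");   // x
//   auto transform = GetCombinerScaleTransformFunction("mean");    // 1/x
//   float acc = contrib(g0) + contrib(g1);  // g0 + g1
//   float scale = transform(acc);           // 1 / (g0 + g1), 0 if acc == 0
//
// "sqrtn" accumulates squared gains and applies an inverse square root, and
// "sum" contributes 1 per gain and always scales by 1.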
Status GetMaxIdsAndUniquesExternal(const std::string& program_key,
const std::string& table_name,
int64_t num_samples_per_sparse_core,
int64_t feature_width,
int64_t* max_ids_per_partition,
int64_t* max_unique_ids_per_partition) {
SparseCore_GetMaxIdsAndUniques_Params params;
params.program_key = program_key.c_str();
params.table_name = table_name.c_str();
params.num_samples_per_sparse_core = num_samples_per_sparse_core;
params.feature_width = feature_width;
StatusHelper status;
params.status = status.c_status;
stream_executor::tpu::OpsApiFn()->SparseCore_GetMaxIdsAndUniquesFn(¶ms);
*max_ids_per_partition = params.max_ids_per_partition;
*max_unique_ids_per_partition = params.max_unique_ids_per_partition;
return status.status();
}
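// This follows the usual TPU C API pattern: inputs are packed into a params
// struct, StatusHelper supplies the C status out-slot, and the outputs are
// read back from the struct after the call; the returned status reflects any
// error raised by the underlying SparseCore_GetMaxIdsAndUniques
// implementation.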
std::vector<std::vector<std::string>> GetTableStacks(
const std::vector<int64_t>& table_height,
const std::vector<int64_t>& table_width,
const std::vector<int64_t>& table_num_samples,
const std::vector<int64_t>& table_group,
const std::vector<std::string>& table_names, int64_t num_tpu_chips) {
if (GetDisableTableStacking()) {
std::vector<std::vector<std::string>> stacks(table_names.size());
for (int i = 0; i < table_names.size(); ++i) stacks[i] = {table_names[i]};
return stacks;
}
std::vector<std::tuple<int64_t, int64_t, int64_t, int64_t, std::string>>
table_data(table_height.size());
for (int i = 0; i < table_height.size(); ++i)
table_data[i] =
std::make_tuple(table_height[i], table_width[i], table_num_samples[i],
table_group[i], table_names[i]);
std::sort(table_data.begin(), table_data.end(), [](auto& lh, auto& rh) {
return std::get<4>(lh) < std::get<4>(rh);
});
absl::flat_hash_map<int64_t, std::vector<std::vector<std::string>>>
stacks_by_group;
absl::flat_hash_map<int64_t, std::vector<int64_t>> stacks_height_by_group;
absl::flat_hash_map<int64_t, std::vector<int64_t>> stacks_width_by_group;
absl::flat_hash_map<int64_t, std::vector<int64_t>> stacks_samples_by_group;
const int64_t mem_limit = GetXlaSparseCoreStackingMemLimit();
const int64_t table_shard_limit = GetXlaSparseCoreStackingTableShardLimit();
for (const auto& table : table_data) {
int64_t height;
int64_t width;
int64_t num_samples;
int64_t group;
std::string name;
std::tie(height, width, num_samples, group, name) = table;
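    // NOTE(assumption): the division by 4 below presumably converts
    // per-partition sample counts to per-SparseCore counts (4 SparseCores per
    // partition), mirroring samples_per_sparse_core in sparse_core_layout.cc.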
num_samples /= 4;
int64_t stack_id = 0;
for (; stack_id < stacks_by_group[group].size(); ++stack_id)
if (((mem_limit == 0) ||
(sizeof(float) * width *
(num_samples + stacks_samples_by_group[group][stack_id]) <
mem_limit)) &&
((table_shard_limit == 0) ||
(sizeof(float) * (height + stacks_height_by_group[group][stack_id]) *
width / num_tpu_chips <
table_shard_limit)))
break;
if (stack_id == stacks_by_group[group].size()) {
stacks_by_group[group].resize(stacks_by_group[group].size() + 1);
stacks_height_by_group[group].push_back(0);
stacks_width_by_group[group].push_back(width);
stacks_samples_by_group[group].push_back(0);
}
stacks_by_group[group][stack_id].push_back(name);
stacks_height_by_group[group][stack_id] += height;
stacks_samples_by_group[group][stack_id] += num_samples;
}
std::vector<std::vector<std::string>> table_stacks;
for (const auto& [group, stacks] : stacks_by_group)
table_stacks.insert(table_stacks.end(), stacks.begin(), stacks.end());
return table_stacks;
}
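// In short: tables are sorted by name for determinism, then packed first-fit
// into stacks within their group, subject to the optional activation-memory
// and per-chip table-shard byte limits (a limit of 0 disables that check).
// With table stacking disabled, every table becomes its own one-element stack.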
ABSL_ATTRIBUTE_WEAK int GetMinibatchMaxDivisionLevel() {
XlaSparseCoreFlags* sparse_core_flags = GetXlaSparseCoreFlags();
return sparse_core_flags->tf_xla_sparse_core_minibatch_max_division_level;
}
ABSL_ATTRIBUTE_WEAK bool GetDisableTableStacking() {
XlaSparseCoreFlags* sparse_core_flags = GetXlaSparseCoreFlags();
return sparse_core_flags->tf_xla_sparse_core_disable_table_stacking;
}
ABSL_ATTRIBUTE_WEAK int64_t GetXlaSparseCoreStackingMemLimit() {
XlaSparseCoreFlags* sparse_core_flags = GetXlaSparseCoreFlags();
return sparse_core_flags->tf_xla_sparse_core_stacking_mem_limit_bytes;
}
ABSL_ATTRIBUTE_WEAK int64_t GetXlaSparseCoreStackingTableShardLimit() {
XlaSparseCoreFlags* sparse_core_flags = GetXlaSparseCoreFlags();
return sparse_core_flags->tf_xla_sparse_core_stacking_table_shard_limit_bytes;
}
} | #include "tensorflow/core/tpu/kernels/sparse_core_ops_utils.h"
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
TEST(ConvertSplitsAndBackTest, Split0) {
const int max_division_level = 6;
int64 original_split = 0;
std::vector<int> actual_buckets =
ConvertBinarySplitsToBucketSplits(original_split, max_division_level);
std::vector<int> expected_buckets = {};
int64 re_split =
ConvertBucketSplitsToBinarySplits(expected_buckets, max_division_level);
ASSERT_EQ(re_split, original_split);
}
TEST(ConvertSplitsAndBackTest, Split2) {
const int max_division_level = 6;
int64 original_split = 2;
std::vector<int> actual_buckets =
ConvertBinarySplitsToBucketSplits(original_split, max_division_level);
std::vector<int> expected_buckets = {16};
int64 re_split =
ConvertBucketSplitsToBinarySplits(expected_buckets, max_division_level);
ASSERT_EQ(re_split, original_split);
}
TEST(ConvertSplitsAndBackTest, Split3) {
const int max_division_level = 6;
int64 original_split = 3;
std::vector<int> actual_buckets =
ConvertBinarySplitsToBucketSplits(original_split, max_division_level);
std::vector<int> expected_buckets = {16, 32};
int64 re_split =
ConvertBucketSplitsToBinarySplits(expected_buckets, max_division_level);
ASSERT_EQ(re_split, original_split);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tpu/kernels/sparse_core_ops_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tpu/kernels/sparse_core_ops_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5b8f066c-2a97-4e81-9265-a2cf2cc649cd | cpp | tensorflow/tensorflow | sparse_core_layout | tensorflow/core/tpu/kernels/sparse_core_layout.cc | tensorflow/core/tpu/kernels/sparse_core_layout_test.cc | #include "tensorflow/core/tpu/kernels/sparse_core_layout.h"
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/substitute.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/core/tpu/kernels/sparse_core_layout.pb.h"
#include "tsl/platform/stringpiece.h"
namespace tensorflow {
ABSL_ATTRIBUTE_WEAK bool GetDisableTableStacking(bool disable_table_stacking) {
bool should_disable_stacking = false;
XlaSparseCoreFlags *sparse_core_flags = GetXlaSparseCoreFlags();
should_disable_stacking =
sparse_core_flags->tf_xla_sparse_core_disable_table_stacking;
return should_disable_stacking || disable_table_stacking;
}
ABSL_ATTRIBUTE_WEAK int64_t GetXlaSparseCoreStackingMemLimit() {
XlaSparseCoreFlags *sparse_core_flags = GetXlaSparseCoreFlags();
return sparse_core_flags->tf_xla_sparse_core_stacking_mem_limit_bytes;
}
ABSL_ATTRIBUTE_WEAK int64_t GetXlaSparseCoreStackingTableShardLimit() {
XlaSparseCoreFlags *sparse_core_flags = GetXlaSparseCoreFlags();
return sparse_core_flags->tf_xla_sparse_core_stacking_table_shard_limit_bytes;
}
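// These flag accessors are defined with ABSL_ATTRIBUTE_WEAK so that another
// translation unit (e.g. a test or an embedder build) can supply strong
// definitions that override the defaults, which simply read the
// tf_xla_sparse_core_* flags.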
namespace tpu {
static int64_t NextLargestMultiple(int64_t n, int64_t factor) {
int64_t extra = n % factor;
if (extra == 0) return n;
return n + factor - extra;
}
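// Example (numbers match the StacksTwoTablesAndPads unit test): with
// num_partitions = 2 and 4 SparseCores per partition, num_sparse_cores_ = 8,
// so AddTable below pads a 100x6 table to
// NextLargestMultiple(100, 8 * 8) x NextLargestMultiple(6, 8) = 128 x 8.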
SparseCoreLayoutStacker::SparseCoreLayoutStacker(int num_partitions,
bool disable_table_stacking,
int sparse_cores_per_partition)
: num_partitions_(num_partitions),
sparse_cores_per_partition_(sparse_cores_per_partition),
num_sparse_cores_(num_partitions_ * sparse_cores_per_partition_),
stacking_enabled_(!GetDisableTableStacking(disable_table_stacking)),
activation_mem_bytes_limit_(GetXlaSparseCoreStackingMemLimit()),
variable_shard_bytes_limit_(GetXlaSparseCoreStackingTableShardLimit()) {}
absl::Status SparseCoreLayoutStacker::AddTable(absl::string_view table_name,
int64_t table_height,
int64_t table_width,
absl::string_view group,
int64_t output_samples) {
if (stacks_by_group_.empty()) {
VLOG(1) << "Stacking parameters: stacking_enabled_ = " << stacking_enabled_
<< ", activation_mem_bytes_limit_ = " << activation_mem_bytes_limit_
<< ", variable_shard_bytes_limit_ = " << variable_shard_bytes_limit_
<< ", row_limit_ = " << row_limit_
<< ", table_limit_ = " << table_limit_;
}
VLOG(2) << "Table " << table_name << ":";
int64_t samples_per_sparse_core =
output_samples / sparse_cores_per_partition_;
int64_t padded_width = NextLargestMultiple(table_width, 8);
int64_t padded_height =
NextLargestMultiple(table_height, num_sparse_cores_ * 8);
VLOG(2) << " Original size: " << table_height << "x" << table_width
<< " padded size: " << padded_height << "x" << padded_width;
int64_t activation_mem_bytes =
sizeof(float) * padded_width * samples_per_sparse_core;
int64_t variable_shard_bytes =
sizeof(float) * padded_width * padded_height / num_partitions_;
VLOG(2) << " activation mem = " << activation_mem_bytes
<< ", variable shard bytes = " << variable_shard_bytes;
std::vector<TableStack> &candidate_stacks =
stacks_by_group_[std::make_pair(padded_width, std::string(group))];
TableStack *stack = nullptr;
if (stacking_enabled_) {
for (TableStack &ts : candidate_stacks) {
if (ts.incomplete_tables.size() >= table_limit_) continue;
if (activation_mem_bytes_limit_ != 0 &&
ts.total_activation_mem_bytes + activation_mem_bytes >=
activation_mem_bytes_limit_) {
continue;
}
if (variable_shard_bytes_limit_ != 0 &&
ts.total_variable_shard_bytes + variable_shard_bytes >=
variable_shard_bytes_limit_) {
continue;
}
if (row_limit_ != 0 &&
ts.unsharded_height + padded_height >= row_limit_) {
continue;
}
stack = &ts;
break;
}
}
if (stack == nullptr) {
candidate_stacks.emplace_back();
stack = &candidate_stacks.back();
stack->padded_width = padded_width;
stack->temporary_name = absl::Substitute("w$0_i$1_$2", padded_width,
candidate_stacks.size(), group);
}
stack->incomplete_tables.emplace_back();
SparseCoreTableLayout &layout = stack->incomplete_tables.back();
layout.set_table_name(std::string(table_name));
layout.set_num_sparse_cores(num_sparse_cores_);
layout.set_num_partitions(num_partitions_);
layout.add_unsharded_shape(table_height);
layout.add_unsharded_shape(table_width);
layout.add_unsharded_padded_shape(padded_height);
layout.add_unsharded_padded_shape(padded_width);
layout.set_sparse_core_shard_row_offset(stack->unsharded_height /
num_sparse_cores_);
layout.set_sparse_core_shard_rotation(((stack->incomplete_tables.size() - 1) *
num_sparse_cores_ / num_partitions_) %
num_sparse_cores_);
stack->unsharded_height += padded_height;
stack->total_variable_shard_bytes += variable_shard_bytes;
stack->total_activation_mem_bytes += activation_mem_bytes;
return absl::OkStatus();
}
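// Bookkeeping above: each table records the row offset it starts at within
// every SparseCore shard (the stack's unsharded height so far divided by
// num_sparse_cores_) plus a rotation of i * (num_sparse_cores_ /
// num_partitions_) shards for the i-th table in the stack, so stacked tables
// start on different cores; the stack totals then feed the limit checks in
// AddTable.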
absl::StatusOr<SparseCoreTableLayouts> SparseCoreLayoutStacker::GetLayouts() {
SparseCoreTableLayouts layouts;
for (const auto &[key, stacks] : stacks_by_group_) {
VLOG(1) << "Stack group: padded width " << key.first
<< ", name = " << key.second;
for (const TableStack &stack : stacks) {
VLOG(1) << " Stack " << stack.temporary_name
<< ": unsharded_height = " << stack.unsharded_height
<< ", total_activation_mem_bytes = "
<< stack.total_activation_mem_bytes
<< ", total_variable_shard_bytes = "
<< stack.total_variable_shard_bytes;
std::string stacked_table_name;
for (const SparseCoreTableLayout &incomplete_layout :
stack.incomplete_tables) {
if (!stacked_table_name.empty()) stacked_table_name += "_";
absl::StrAppend(&stacked_table_name, incomplete_layout.table_name());
}
for (const SparseCoreTableLayout &incomplete_layout :
stack.incomplete_tables) {
SparseCoreTableLayout *out_layout = layouts.add_tables();
*out_layout = incomplete_layout;
out_layout->set_stacked_table_name(stacked_table_name);
VLOG(1) << " Contains " << out_layout->table_name();
out_layout->set_total_rows_per_sparse_core_shard(
stack.unsharded_height / num_sparse_cores_);
}
}
}
return layouts;
}
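// Minimal usage sketch (mirroring the StacksTwoTablesAndPads unit test):
//
//   SparseCoreLayoutStacker stacker(/*num_partitions=*/2);
//   TF_CHECK_OK(stacker.AddTable("table1", /*table_height=*/100,
//                                /*table_width=*/6, /*group=*/"stack1",
//                                /*output_samples=*/10));
//   TF_CHECK_OK(stacker.AddTable("table2", 50, 5, "stack1", 10));
//   auto layouts = stacker.GetLayouts();
//   // Both returned layouts report stacked_table_name "table1_table2" and
//   // 24 total rows per SparseCore shard ((128 + 64) / 8).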
}
} | #include "tensorflow/core/tpu/kernels/sparse_core_layout.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/tpu/kernels/sparse_core_layout.pb.h"
namespace tensorflow {
namespace tpu {
namespace {
using ::testing::EqualsProto;
using ::testing::proto::Partially;
using ::testing::status::IsOkAndHolds;
TEST(SparseCoreLayoutStacker, StacksTwoTablesAndPads) {
SparseCoreLayoutStacker stacker(2);
ASSERT_OK(stacker.AddTable("table1", 100, 6, "stack1", 10));
ASSERT_OK(stacker.AddTable("table2", 50, 5, "stack1", 10));
EXPECT_THAT(stacker.GetLayouts(), IsOkAndHolds(EqualsProto(R"pb(
tables {
table_name: 'table1'
stacked_table_name: 'table1_table2'
num_sparse_cores: 8
num_partitions: 2
total_rows_per_sparse_core_shard: 24 # = (128 + 64) / 8
unsharded_shape: [ 100, 6 ]
unsharded_padded_shape: [ 128, 8 ]
sparse_core_shard_row_offset: 0
sparse_core_shard_rotation: 0
}
tables {
table_name: 'table2'
stacked_table_name: 'table1_table2'
num_sparse_cores: 8
num_partitions: 2
total_rows_per_sparse_core_shard: 24
unsharded_shape: [ 50, 5 ]
unsharded_padded_shape: [ 64, 8 ]
sparse_core_shard_row_offset: 16 # = 128/8
sparse_core_shard_rotation: 4
}
)pb")));
}
TEST(SparseCoreLayoutStacker, RespectsDisableStacking) {
SparseCoreLayoutStacker stacker(2);
stacker.SetStackingEnabled(false);
ASSERT_OK(stacker.AddTable("table1", 100, 6, "stack1", 10));
ASSERT_OK(stacker.AddTable("table2", 50, 5, "stack1", 10));
EXPECT_THAT(stacker.GetLayouts(), IsOkAndHolds(EqualsProto(R"pb(
tables {
table_name: 'table1'
stacked_table_name: 'table1'
num_sparse_cores: 8
num_partitions: 2
total_rows_per_sparse_core_shard: 16 # = 128 / 8
unsharded_shape: [ 100, 6 ]
unsharded_padded_shape: [ 128, 8 ]
sparse_core_shard_row_offset: 0
sparse_core_shard_rotation: 0
}
tables {
table_name: 'table2'
stacked_table_name: 'table2'
num_sparse_cores: 8
num_partitions: 2
total_rows_per_sparse_core_shard: 8 # = 64/8
unsharded_shape: [ 50, 5 ]
unsharded_padded_shape: [ 64, 8 ]
sparse_core_shard_row_offset: 0
sparse_core_shard_rotation: 0
}
)pb")));
}
TEST(SparseCoreLayoutStacker, RespectsActivationMemLimit) {
SparseCoreLayoutStacker stacker(2);
stacker.SetActivationMemoryBytesLimit(16384 + 1);
ASSERT_OK(stacker.AddTable("table1", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table2", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table3", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table4", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table5", 128, 8, "stack1", 1024));
EXPECT_THAT(
stacker.GetLayouts(), IsOkAndHolds(Partially(EqualsProto(R"pb(
tables { table_name: 'table1' stacked_table_name: 'table1_table2' }
tables { table_name: 'table2' stacked_table_name: 'table1_table2' }
tables { table_name: 'table3' stacked_table_name: 'table3_table4' }
tables { table_name: 'table4' stacked_table_name: 'table3_table4' }
tables { table_name: 'table5' stacked_table_name: 'table5' }
)pb"))));
}
TEST(SparseCoreLayoutStacker, RespectsVariableShardLimit) {
SparseCoreLayoutStacker stacker(2);
stacker.SetVariableShardBytesLimit(4096 + 1);
ASSERT_OK(stacker.AddTable("table1", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table2", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table3", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table4", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table5", 128, 8, "stack1", 1024));
EXPECT_THAT(
stacker.GetLayouts(), IsOkAndHolds(Partially(EqualsProto(R"pb(
tables { table_name: 'table1' stacked_table_name: 'table1_table2' }
tables { table_name: 'table2' stacked_table_name: 'table1_table2' }
tables { table_name: 'table3' stacked_table_name: 'table3_table4' }
tables { table_name: 'table4' stacked_table_name: 'table3_table4' }
tables { table_name: 'table5' stacked_table_name: 'table5' }
)pb"))));
}
TEST(SparseCoreLayoutStacker, RespectsRowLimit) {
SparseCoreLayoutStacker stacker(2);
stacker.SetActivationMemoryBytesLimit(0);
stacker.SetVariableShardBytesLimit(0);
ASSERT_OK(stacker.AddTable("table1", 1 << 29, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table2", 1 << 29, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table3", 1 << 29, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table4", 1 << 29, 8, "stack1", 1024));
EXPECT_THAT(stacker.GetLayouts(), IsOkAndHolds(Partially(EqualsProto(R"pb(
tables {
table_name: 'table1'
stacked_table_name: 'table1_table2_table3'
}
tables {
table_name: 'table2'
stacked_table_name: 'table1_table2_table3'
}
tables {
table_name: 'table3'
stacked_table_name: 'table1_table2_table3'
}
tables { table_name: 'table4' stacked_table_name: 'table4' }
)pb"))));
}
TEST(SparseCoreLayoutStacker, RespectsTableLimit) {
SparseCoreLayoutStacker stacker(2);
stacker.SetActivationMemoryBytesLimit(0);
stacker.SetVariableShardBytesLimit(0);
stacker.SetStackingTableLimit(2);
ASSERT_OK(stacker.AddTable("table1", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table2", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table3", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table4", 128, 8, "stack1", 1024));
EXPECT_THAT(
stacker.GetLayouts(), IsOkAndHolds(Partially(EqualsProto(R"pb(
tables { table_name: 'table1' stacked_table_name: 'table1_table2' }
tables { table_name: 'table2' stacked_table_name: 'table1_table2' }
tables { table_name: 'table3' stacked_table_name: 'table3_table4' }
tables { table_name: 'table4' stacked_table_name: 'table3_table4' }
)pb"))));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tpu/kernels/sparse_core_layout.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tpu/kernels/sparse_core_layout_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b171af87-ba8d-4846-b355-b1c99330705c | cpp | tensorflow/tensorflow | encapsulate_tpu_computations_pass | tensorflow/core/tpu/graph_rewrite/encapsulate_tpu_computations_pass.cc | tensorflow/core/tpu/graph_rewrite/encapsulate_tpu_computations_pass_test.cc | #include "tensorflow/core/tpu/graph_rewrite/encapsulate_tpu_computations_pass.h"
#include <algorithm>
#include <cstdint>
#include <map>
#include <memory>
#include <optional>
#include <queue>
#include <set>
#include <string>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/node_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h"
#include "tensorflow/compiler/jit/encapsulate_util.h"
#include "tensorflow/compiler/jit/extract_outside_compilation_pass.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/compiler/tf2xla/side_effect_util.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "xla/status_macros.h"
#include "tensorflow/core/common_runtime/function_body.h"
#include "tensorflow/core/common_runtime/function_def_utils.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/tpu/tpu_compile_interface.h"
#include "tensorflow/core/tpu/tpu_defs.h"
#include "tensorflow/core/util/dump_graph.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
const char* const kTPUReplicatedInput = "TPUReplicatedInput";
const char* const kTPUReplicatedOutput = "TPUReplicatedOutput";
const char* const kPivotForClusterAttr = "_pivot_for_cluster";
const char* const kTPUPartitionedInput = "TPUPartitionedInput";
const char* const kTPUPartitionedInputV2 = "TPUPartitionedInputV2";
Status GetIndexAttr(const Node& n, int num_args, int* index) {
TF_RETURN_IF_ERROR(GetNodeAttr(n.attrs(), "index", index));
if (*index < 0 || *index >= num_args) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid ", n.type_string(), " number ", *index));
}
return absl::OkStatus();
}
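// Rewrites the encapsulated subgraph of a TPU computation: sorts the _Arg
// nodes so that replicated inputs come first (packed ones last among them),
// followed by non-replicated inputs, with resources after non-resources and
// guaranteed constants at the very end; renumbers _Arg/_Retval indices and
// records the input/output permutations; copies the TPUReplicateMetadata
// attributes onto the call node; and appends a deterministic fingerprint of
// the rewritten graph to the call op name.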
Status RewriteSubgraph(const std::vector<OutputTensor>& arg_source_tensors,
std::unique_ptr<Graph>* graph_ptr,
std::vector<int>* input_permutation,
std::vector<int>* output_permutation,
NodeDef* call_def) {
auto is_replicated_input = [&](const Node& n, bool* is_packed = nullptr) {
CHECK_EQ("_Arg", n.type_string());
int index;
TF_CHECK_OK(GetIndexAttr(n, arg_source_tensors.size(), &index));
bool ret =
arg_source_tensors.at(index).node->type_string() == kTPUReplicatedInput;
if (is_packed) {
if (!ret || !GetNodeAttr(arg_source_tensors.at(index).node->attrs(),
"is_packed", is_packed)
.ok()) {
*is_packed = false;
}
}
return ret;
};
auto is_guaranteed_constant = [&](const Node& n) {
bool guaranteed_constant = false;
if (!GetNodeAttr(n.attrs(), "_is_guaranteed_constant", &guaranteed_constant)
.ok()) {
return false;
}
return guaranteed_constant && !is_replicated_input(n);
};
Graph* graph = graph_ptr->get();
Node* metadata_node = nullptr;
const int num_args = input_permutation->size();
const int num_retvals = output_permutation->size();
std::vector<Node*> args;
std::vector<Node*> retvals;
args.reserve(num_args);
retvals.reserve(num_retvals);
for (Node* n : graph->nodes()) {
if (n->type_string() == "_Arg") {
args.push_back(n);
} else if (n->type_string() == "_Retval") {
retvals.push_back(n);
} else if (n->type_string() == "TPUReplicateMetadata") {
metadata_node = n;
} else if (!absl::StrContains(n->requested_device(),
DEVICE_TPU_REPLICATED_CORE)) {
n->set_assigned_device_name(
absl::StrCat("/device:", DEVICE_TPU_REPLICATED_CORE));
}
}
if (metadata_node == nullptr) {
return absl::InvalidArgumentError("Missing TPUReplicateMetadata node");
}
for (const auto& attr : metadata_node->attrs()) {
if (attr.first == "computation_shape") {
std::vector<int> shape;
TF_RETURN_IF_ERROR(
GetNodeAttr(metadata_node->attrs(), "computation_shape", &shape));
if (!shape.empty()) {
int64_t num_cores_per_replica = 1LL;
for (int dim : shape) {
num_cores_per_replica *= dim;
}
call_def->mutable_attr()->erase("num_cores_per_replica");
AddNodeAttr("num_cores_per_replica", num_cores_per_replica, call_def);
}
} else {
call_def->mutable_attr()->insert(attr);
}
}
MergeDebugInfo(NodeDebugInfo(metadata_node->def()), call_def);
graph->RemoveNode(metadata_node);
if (std::find(args.begin(), args.end(), nullptr) != args.end()) {
return absl::InvalidArgumentError("Missing or non-consecutive arguments");
}
std::sort(args.begin(), args.end(), [&](Node* a, Node* b) {
bool a_is_guaranteed_constant = is_guaranteed_constant(*a);
bool b_is_guaranteed_constant = is_guaranteed_constant(*b);
bool a_is_packed;
bool b_is_packed;
bool a_not_replicated = !is_replicated_input(*a, &a_is_packed);
bool b_not_replicated = !is_replicated_input(*b, &b_is_packed);
bool a_is_resource = (a->output_type(0) == DT_RESOURCE);
bool b_is_resource = (b->output_type(0) == DT_RESOURCE);
absl::string_view a_name(a->name());
absl::string_view b_name(b->name());
return std::tie(a_is_guaranteed_constant, a_not_replicated, a_is_packed,
a_is_resource, a_name) <
std::tie(b_is_guaranteed_constant, b_not_replicated, b_is_packed,
b_is_resource, b_name);
});
std::sort(retvals.begin(), retvals.end(),
[](Node* a, Node* b) { return a->name() < b->name(); });
int variable_start_index = num_args;
int guaranteed_const_start_index = num_args;
for (int i = 0; i < num_args; ++i) {
int index;
TF_RETURN_IF_ERROR(GetIndexAttr(*args[i], num_args, &index));
if (args[i]->output_type(0) == DT_RESOURCE &&
!is_replicated_input(*args[i]) && variable_start_index == num_args) {
variable_start_index = i;
} else if (is_guaranteed_constant(*args[i]) &&
guaranteed_const_start_index == num_args) {
guaranteed_const_start_index = i;
}
(*input_permutation)[index] = i;
args[i]->AddAttr("index", i);
}
VLOG(4) << "variable_start_index: " << variable_start_index
<< " guaranteed_const_start_index: " << guaranteed_const_start_index;
for (int i = 0; i < num_retvals; ++i) {
int index;
TF_RETURN_IF_ERROR(GetIndexAttr(*retvals[i], num_retvals, &index));
(*output_permutation)[index] = i;
retvals[i]->AddAttr("index", i);
}
AddNodeAttr(kTPUReplicateAttr, call_def->name(), call_def);
AddNodeAttr("_variable_start_index", variable_start_index, call_def);
AddNodeAttr("_guaranteed_const_start_index", guaranteed_const_start_index,
call_def);
TF_ASSIGN_OR_RETURN(std::string serialized,
SerializeGraphDeterministic(*graph));
uint64_t fingerprint =
TpuCompileInterface::Get()->FingerprintString(serialized);
LOG(INFO) << "Subgraph fingerprint:" << fingerprint;
call_def->set_op(absl::StrCat(call_def->op(), "_", fingerprint));
return absl::OkStatus();
}
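// Returns the data type expected at the destination of `edge`.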
DataType EdgeType(const Edge* edge) {
return edge->dst()->input_type(edge->dst_input());
}
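// Inserts the control-edge predecessors of `node` into `deps`.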
void AddControlInputs(const Node& node, gtl::FlatSet<Node*>* deps) {
for (const Edge* edge : node.in_edges()) {
if (edge->IsControlEdge()) {
deps->insert(edge->src());
}
}
}
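// Inserts the control-edge successors of `node` into `deps`.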
void AddControlOutputs(const Node& node, gtl::FlatSet<Node*>* deps) {
for (const Edge* edge : node.out_edges()) {
if (edge->IsControlEdge()) {
deps->insert(edge->dst());
}
}
}
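// Removes Identity nodes tagged "_tpu_input_identity" or
// "_tpu_output_identity", rewiring their output edges directly to the
// Identity's input source.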
Status RemoveIdentityNodesForArgRetval(Graph* g) {
std::vector<Node*> identity_nodes;
for (Node* n : g->nodes()) {
if (n->type_string() == "Identity" &&
(HasNodeAttr(n->def(), "_tpu_input_identity") ||
HasNodeAttr(n->def(), "_tpu_output_identity"))) {
identity_nodes.push_back(n);
}
}
for (Node* n : identity_nodes) {
const Edge* input_edge;
TF_RETURN_IF_ERROR(n->input_edge(0, &input_edge));
std::vector<const Edge*> output_edges;
for (const Edge* e : n->out_edges()) {
output_edges.push_back(e);
}
for (const Edge* e : output_edges) {
if (e->IsControlEdge()) {
Node* dst = e->dst();
g->RemoveEdge(e);
g->AddControlEdge(input_edge->src(), dst);
} else {
Node* dst = e->dst();
int dst_input = e->dst_input();
g->RemoveEdge(e);
g->AddEdge(input_edge->src(), input_edge->src_output(), dst, dst_input);
}
}
g->RemoveNode(n);
}
return absl::OkStatus();
}
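// Shifts the mirrored-variable indices recorded on `xla_node` by
// `additional_per_replica_inputs` to account for newly added per-replica
// inputs.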
Status UpdateMirroredVariableIndices(int additional_per_replica_inputs,
Node* xla_node) {
std::vector<int> mirrored_variable_indices;
if (xla_node->attrs().Find(TPUREPLICATE_MIRRORED_VAR_INDICES_ATTR) !=
nullptr) {
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->def(),
TPUREPLICATE_MIRRORED_VAR_INDICES_ATTR,
&mirrored_variable_indices));
}
if (!mirrored_variable_indices.empty()) {
for (int i = 0; i < mirrored_variable_indices.size(); ++i)
mirrored_variable_indices[i] += additional_per_replica_inputs;
xla_node->ClearAttr(TPUREPLICATE_MIRRORED_VAR_INDICES_ATTR);
xla_node->AddAttr(TPUREPLICATE_MIRRORED_VAR_INDICES_ATTR,
mirrored_variable_indices);
}
return absl::OkStatus();
}
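// Moves outside-compilation nodes at the head of `xla_graph` (nodes whose
// inputs come only from _Arg nodes or other head outside-compilation nodes)
// into the host graph `g`, creating one copy per replica. Their outputs
// become additional per-replica inputs of the TPUReplicate node `xla_node`,
// so input-related attributes and _Arg indices are updated to match.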
Status MoveHeadOutsideCompilationToHost(
const std::string& outside_compilation_attr_name,
const std::string& xla_func_name, const std::string& cluster_name, Graph* g,
Graph* xla_graph, Node* xla_node, Node* pivot_node) {
std::vector<Node*> oc_nodes_at_head;
const std::string kOnlyArgOrOcInputAttrName = "_xla_only_arg_or_oc_input";
ReverseDFS(
*xla_graph, nullptr,
[&](Node* n) {
bool has_non_arg_or_oc_input = false;
for (const Edge* e : n->in_edges()) {
if (e->src() == xla_graph->source_node()) {
continue;
}
if (!e->src()->IsArg() &&
(!HasNodeAttr(e->src()->def(), outside_compilation_attr_name) ||
!HasNodeAttr(e->src()->def(), kOnlyArgOrOcInputAttrName))) {
has_non_arg_or_oc_input = true;
break;
}
}
if (HasNodeAttr(n->def(), outside_compilation_attr_name) &&
!has_non_arg_or_oc_input &&
!HasNodeAttr(n->def(), kXlaIsPlaceholderForArg)) {
n->AddAttr(kOnlyArgOrOcInputAttrName, true);
oc_nodes_at_head.push_back(n);
}
},
NodeComparatorName());
std::vector<Node*> const_nodes_to_remove;
for (Node* n : oc_nodes_at_head) {
if (n->type_string() != "Const") {
continue;
}
std::vector<const Edge*> edges_to_replace;
for (const Edge* e : n->out_edges()) {
if (!e->IsControlEdge() &&
HasNodeAttr(e->dst()->def(), outside_compilation_attr_name) &&
!HasNodeAttr(e->dst()->def(), kOnlyArgOrOcInputAttrName)) {
edges_to_replace.push_back(e);
}
}
if (edges_to_replace.empty()) {
continue;
}
Node* const_copy = xla_graph->CopyNode(n);
for (const Edge* e : edges_to_replace) {
Node* dst = e->dst();
int dst_input = e->dst_input();
xla_graph->RemoveEdge(e);
xla_graph->AddEdge(const_copy, 0, dst, dst_input);
}
xla_graph->AddControlEdge(xla_graph->source_node(), const_copy);
bool has_output_edge = false;
for (const Edge* e : n->out_edges()) {
if (!e->IsControlEdge()) {
has_output_edge = true;
break;
}
}
if (!has_output_edge) {
const_nodes_to_remove.push_back(n);
}
}
for (Node* n : const_nodes_to_remove) {
xla_graph->RemoveNode(n);
oc_nodes_at_head.erase(
std::remove(oc_nodes_at_head.begin(), oc_nodes_at_head.end(), n),
oc_nodes_at_head.end());
}
if (VLOG_IS_ON(5)) {
for (Node* n : oc_nodes_at_head) {
VLOG(5) << "oc_nodes_at_head: " << n->DebugString();
}
}
std::vector<const Edge*> input_edges;
TF_RETURN_IF_ERROR(xla_node->input_edges(&input_edges));
std::vector<DataType> input_types;
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->attrs(), "Tinputs", &input_types));
int num_distributed_vars;
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->attrs(), "num_distributed_variables",
&num_distributed_vars));
int num_replicas;
TF_RETURN_IF_ERROR(
GetNodeAttr(xla_node->attrs(), "num_replicas", &num_replicas));
int old_num_per_replica_inputs =
(input_types.size() - num_distributed_vars) / num_replicas;
VLOG(5) << "old_num_per_replica_inputs: " << old_num_per_replica_inputs;
absl::flat_hash_map<Node*, std::vector<Node*>> node_images;
for (Node* n : oc_nodes_at_head) {
for (int replica_id = 0; replica_id < num_replicas; replica_id++) {
NodeDef copy_def = n->def();
copy_def.set_name(absl::StrCat(n->name(), "_head_oc/R", replica_id));
copy_def.clear_device();
TF_ASSIGN_OR_RETURN(Node * copy_node, g->AddNode(copy_def));
copy_node->AddAttr(kXlaReplicaIdAttrName, replica_id);
copy_node->AddAttr(kTPUReplicateAttr, cluster_name);
for (const Edge* e : n->in_edges()) {
if (e->src() == xla_graph->source_node()) {
continue;
}
if (e->src()->IsArg()) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(e->src()->attrs(), "index", &index));
const int new_index =
(index < old_num_per_replica_inputs)
? (old_num_per_replica_inputs * replica_id + index)
: (old_num_per_replica_inputs * num_replicas +
(index - old_num_per_replica_inputs));
const Edge* original_edge = input_edges.at(new_index);
g->AddEdge(original_edge->src(), original_edge->src_output(),
copy_node, e->dst_input());
} else {
g->AddEdge(node_images[e->src()][replica_id], e->src_output(),
copy_node, e->dst_input());
}
}
g->AddControlEdge(copy_node, xla_node);
if (pivot_node) {
g->AddControlEdge(pivot_node, copy_node);
}
node_images[n].push_back(copy_node);
}
}
std::vector<const Edge*> oc_output_edges;
std::vector<DataType> new_arg_types;
for (Node* n : oc_nodes_at_head) {
for (const Edge* e : n->out_edges()) {
if (!e->IsControlEdge() &&
node_images.find(e->dst()) == node_images.end()) {
VLOG(5) << "oc_output_edges: " << e->DebugString();
oc_output_edges.push_back(e);
new_arg_types.push_back(e->src()->output_type(e->src_output()));
}
}
}
int new_num_per_replica_inputs =
old_num_per_replica_inputs + oc_output_edges.size();
VLOG(5) << "new_num_per_replica_inputs: " << new_num_per_replica_inputs;
int num_variables;
TF_RETURN_IF_ERROR(
GetNodeAttr(xla_node->attrs(), "NumVariables", &num_variables));
std::vector<DataType> broadcast_input_types, guaranteed_constant_types;
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->attrs(), "Tbroadcast_inputs",
&broadcast_input_types));
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->attrs(), "Tguaranteed_constants",
&guaranteed_constant_types));
int num_other_inputs = num_distributed_vars + num_variables +
broadcast_input_types.size() +
guaranteed_constant_types.size();
VLOG(5) << "num_other_inputs: " << num_other_inputs;
std::vector<DataType> new_input_types;
new_input_types.reserve(num_replicas * new_num_per_replica_inputs +
num_distributed_vars);
for (int replica_id = 0; replica_id < num_replicas; ++replica_id) {
for (int i = 0; i < old_num_per_replica_inputs; ++i) {
new_input_types.push_back(input_types[i]);
}
for (int i = old_num_per_replica_inputs; i < new_num_per_replica_inputs;
++i) {
new_input_types.push_back(new_arg_types[i - old_num_per_replica_inputs]);
}
}
const int num_new_per_replica_input_types = new_input_types.size();
for (int i = input_types.size() - num_distributed_vars;
i < input_types.size(); i++) {
new_input_types.push_back(input_types[i]);
}
xla_node->ClearAttr("Tinputs");
xla_node->AddAttr("Tinputs", new_input_types);
  TF_RETURN_IF_ERROR(UpdateMirroredVariableIndices(
      /*additional_per_replica_inputs=*/oc_output_edges.size(), xla_node));
int new_variable_start_index =
num_new_per_replica_input_types / num_replicas + num_distributed_vars +
broadcast_input_types.size();
if (xla_node->attrs().Find("_variable_start_index") != nullptr) {
xla_node->ClearAttr("_variable_start_index");
xla_node->AddAttr("_variable_start_index", new_variable_start_index);
}
int new_guaranteed_const_start_index =
new_variable_start_index + num_variables;
if (xla_node->attrs().Find("_guaranteed_const_start_index") != nullptr) {
xla_node->ClearAttr("_guaranteed_const_start_index");
xla_node->AddAttr("_guaranteed_const_start_index",
new_guaranteed_const_start_index);
}
std::vector<const Edge*> new_input_edges(
num_replicas * new_num_per_replica_inputs + num_other_inputs);
int end_input_index =
num_replicas * new_num_per_replica_inputs + num_other_inputs - 1;
int start_input_index = end_input_index + 1 - num_other_inputs;
for (int input_index = end_input_index; input_index >= start_input_index;
input_index--) {
const Edge* e =
input_edges.at(input_index - num_replicas * new_arg_types.size());
Node* src = e->src();
int src_output = e->src_output();
g->RemoveEdge(e);
const Edge* new_input_edge =
g->AddEdge(src, src_output, xla_node, input_index);
new_input_edges[input_index] = new_input_edge;
}
std::vector<std::pair<Node*, int>> per_replica_inputs;
std::vector<const Edge*> old_per_replica_edges;
for (int i = 0; i < old_num_per_replica_inputs * num_replicas; i++) {
const Edge* e = input_edges.at(i);
per_replica_inputs.push_back(std::make_pair(e->src(), e->src_output()));
old_per_replica_edges.push_back(e);
}
for (const Edge* e : old_per_replica_edges) {
g->RemoveEdge(e);
}
for (int replica_id = 0; replica_id < num_replicas; replica_id++) {
for (int input_index = 0; input_index < old_num_per_replica_inputs;
input_index++) {
Node* src = per_replica_inputs[replica_id * old_num_per_replica_inputs +
input_index]
.first;
int src_output =
per_replica_inputs[replica_id * old_num_per_replica_inputs +
input_index]
.second;
const Edge* new_input_edge =
g->AddEdge(src, src_output, xla_node,
replica_id * new_num_per_replica_inputs + input_index);
new_input_edges[input_index] = new_input_edge;
}
for (int input_index = old_num_per_replica_inputs;
input_index < new_num_per_replica_inputs; input_index++) {
Node* original_src =
oc_output_edges[input_index - old_num_per_replica_inputs]->src();
int original_src_output =
oc_output_edges[input_index - old_num_per_replica_inputs]
->src_output();
Node* src = node_images[original_src][replica_id];
const Edge* new_input_edge =
g->AddEdge(src, original_src_output, xla_node,
replica_id * new_num_per_replica_inputs + input_index);
new_input_edges[input_index] = new_input_edge;
}
}
for (Node* n : xla_graph->nodes()) {
if (n->IsArg()) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
if (index >= old_num_per_replica_inputs) {
index += new_arg_types.size();
n->ClearAttr("index");
n->AddAttr("index", index);
}
}
}
for (int i = old_num_per_replica_inputs; i < new_num_per_replica_inputs;
i++) {
NodeDefBuilder arg_builder(absl::StrCat("arg_", i),
FunctionLibraryDefinition::kArgOp);
arg_builder.Attr("T", new_arg_types[i - old_num_per_replica_inputs]);
arg_builder.Attr("index", i);
NodeDef arg_def;
TF_RETURN_IF_ERROR(arg_builder.Finalize(&arg_def));
TF_ASSIGN_OR_RETURN(Node * arg_node, xla_graph->AddNode(arg_def));
const Edge* original_edge = oc_output_edges[i - old_num_per_replica_inputs];
Node* dst = original_edge->dst();
int dst_input = original_edge->dst_input();
xla_graph->RemoveEdge(original_edge);
xla_graph->AddEdge(arg_node, 0, dst, dst_input);
}
for (Node* n : oc_nodes_at_head) {
bool is_lifted_arg;
std::string outside_compilation_attr;
if (!TryGetNodeAttr(n->def(), kXlaIsLiftedArgAttrName, &is_lifted_arg) ||
!TryGetNodeAttr(n->def(), kOutsideCompilationAttr,
&outside_compilation_attr)) {
continue;
}
TF_RET_CHECK(n->IsIdentity());
NodeDefBuilder ph_builder(absl::StrCat("placeholder_", n->name()),
"Placeholder");
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "T", &dtype));
ph_builder.Attr("dtype", dtype);
ph_builder.Attr(kXlaIsLiftedArgAttrName, true);
ph_builder.Attr(kOutsideCompilationAttr, outside_compilation_attr);
NodeDef ph_def;
TF_RETURN_IF_ERROR(ph_builder.Finalize(&ph_def));
Status s;
xla_graph->AddNode(ph_def, &s);
TF_RETURN_IF_ERROR(s);
Node* input_node;
TF_RETURN_IF_ERROR(n->input_node(0, &input_node));
TF_RET_CHECK(input_node->type_string() == "_Arg");
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(input_node->def(), "index", &index));
TF_RET_CHECK(index >= new_num_per_replica_inputs + num_distributed_vars);
const Edge* input_edge =
new_input_edges.at(num_replicas * new_num_per_replica_inputs + index -
new_num_per_replica_inputs);
NodeDefBuilder id_builder(absl::StrCat("lifted_arg_input_", index),
"IdentityN");
DataType input_dtype =
input_edge->src()->output_type(input_edge->src_output());
id_builder.Attr("T", std::vector<DataType>(num_replicas, input_dtype));
std::vector<NodeDefBuilder::NodeOut> inputs(
num_replicas,
NodeDefBuilder::NodeOut{input_edge->src()->name(),
input_edge->src_output(), input_dtype});
id_builder.Attr(kXlaOutsideCompilationInputsAttrName,
outside_compilation_attr);
id_builder.Input(inputs);
NodeDef id_def;
TF_RETURN_IF_ERROR(id_builder.Finalize(&id_def));
TF_ASSIGN_OR_RETURN(Node * id_node, g->AddNode(id_def));
for (int i = 0; i < num_replicas; i++) {
g->AddEdge(input_edge->src(), input_edge->src_output(), id_node, i);
}
}
for (Node* n : oc_nodes_at_head) {
xla_graph->RemoveNode(n);
}
VLOG(4) << "MoveHeadOutsideCompilationToHost host graph: "
<< DumpGraphToFile(absl::StrCat("move_head_oc_host_", xla_func_name),
*g);
VLOG(4) << "MoveHeadOutsideCompilationToHost XLA graph: "
<< DumpGraphToFile(absl::StrCat("move_head_oc_xla_", xla_func_name),
*xla_graph);
return absl::OkStatus();
}
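// Deletes _Arg nodes in `xla_graph` that have no consumers, removes the
// corresponding inputs of `xla_node` in the host graph, and renumbers the
// remaining arguments together with the input-related attributes (Tinputs,
// num_distributed_variables, Tbroadcast_inputs, NumVariables,
// Tguaranteed_constants, start indices and mirrored-variable indices).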
Status RemoveUnusedXlaInput(const std::string& xla_func_name, Graph* g,
Graph* xla_graph, Node* xla_node) {
std::vector<DataType> input_types;
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->def(), "Tinputs", &input_types));
std::vector<int> mirrored_variable_indices;
if (xla_node->attrs().Find(TPUREPLICATE_MIRRORED_VAR_INDICES_ATTR) !=
nullptr) {
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->def(),
TPUREPLICATE_MIRRORED_VAR_INDICES_ATTR,
&mirrored_variable_indices));
}
std::vector<DataType> broadcast_input_types;
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->def(), "Tbroadcast_inputs",
&broadcast_input_types));
std::vector<DataType> guaranteed_constant_types;
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->def(), "Tguaranteed_constants",
&guaranteed_constant_types));
int num_variables;
TF_RETURN_IF_ERROR(
GetNodeAttr(xla_node->def(), "NumVariables", &num_variables));
int num_replicas;
TF_RETURN_IF_ERROR(
GetNodeAttr(xla_node->def(), "num_replicas", &num_replicas));
int num_distributed_vars;
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->attrs(), "num_distributed_variables",
&num_distributed_vars));
int num_per_replica_inputs =
(input_types.size() - num_distributed_vars) / num_replicas;
std::set<int> arg_indices_to_remove;
std::vector<Node*> arg_nodes_to_update, nodes_to_remove;
int num_args = 0, num_removed_per_replica_inputs = 0,
num_removed_distributed_vars = 0;
for (Node* n : xla_graph->nodes()) {
if (!n->IsArg()) {
continue;
}
bool has_output = false;
for (const Edge* e : n->out_edges()) {
if (e->dst() != xla_graph->sink_node()) {
has_output = true;
break;
}
}
num_args++;
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "index", &index));
if (has_output) {
arg_nodes_to_update.push_back(n);
continue;
}
arg_indices_to_remove.insert(index);
if (index < num_per_replica_inputs) {
num_removed_per_replica_inputs++;
} else if (index < num_per_replica_inputs + num_distributed_vars) {
num_removed_distributed_vars++;
}
nodes_to_remove.push_back(n);
}
for (Node* n : nodes_to_remove) {
xla_graph->RemoveNode(n);
}
std::map<int, int> arg_index_mapping;
int new_arg_index = 0;
for (int i = 0; i < num_args; i++) {
if (arg_indices_to_remove.find(i) != arg_indices_to_remove.end()) {
continue;
} else {
arg_index_mapping[i] = new_arg_index;
new_arg_index++;
}
}
for (Node* n : arg_nodes_to_update) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "index", &index));
n->ClearAttr("index");
n->AddAttr("index", arg_index_mapping[index]);
}
std::vector<const Edge*> input_edges;
TF_RETURN_IF_ERROR(xla_node->input_edges(&input_edges));
const int num_new_per_replica_inputs =
num_per_replica_inputs - num_removed_per_replica_inputs;
for (int i = 0; i < num_replicas; i++) {
for (int j = 0; j < num_per_replica_inputs; j++) {
auto iter = arg_index_mapping.find(j);
if (iter != arg_index_mapping.end()) {
const Edge* e = input_edges.at(i * num_per_replica_inputs + j);
Node* src = e->src();
int src_output = e->src_output();
int dst_input = i * num_new_per_replica_inputs + iter->second;
g->RemoveEdge(e);
g->AddEdge(src, src_output, xla_node, dst_input);
} else {
const Edge* e = input_edges.at(i * num_per_replica_inputs + j);
g->RemoveEdge(e);
}
}
}
for (int i = num_replicas * num_per_replica_inputs;
i < xla_node->num_inputs(); i++) {
int arg_index =
num_per_replica_inputs + i - num_replicas * num_per_replica_inputs;
auto iter = arg_index_mapping.find(arg_index);
if (iter != arg_index_mapping.end()) {
const Edge* e = input_edges.at(i);
Node* src = e->src();
int src_output = e->src_output();
int dst_input = num_replicas * num_new_per_replica_inputs + iter->second -
num_new_per_replica_inputs;
g->RemoveEdge(e);
g->AddEdge(src, src_output, xla_node, dst_input);
} else {
const Edge* e = input_edges.at(i);
g->RemoveEdge(e);
}
}
std::vector<DataType> new_input_types;
for (int i = 0; i < num_replicas; i++) {
for (int j = 0; j < num_per_replica_inputs; j++) {
auto iter = arg_index_mapping.find(j);
if (iter != arg_index_mapping.end()) {
new_input_types.push_back(input_types[iter->first]);
}
}
}
for (int i = 0; i < num_distributed_vars; ++i) {
auto iter = arg_index_mapping.find(i + num_per_replica_inputs);
if (iter != arg_index_mapping.end()) {
new_input_types.push_back(
input_types[iter->first - num_per_replica_inputs +
num_per_replica_inputs * num_replicas]);
}
}
xla_node->ClearAttr("Tinputs");
xla_node->AddAttr("Tinputs", new_input_types);
const int num_new_distributed_vars =
num_distributed_vars - num_removed_distributed_vars;
xla_node->ClearAttr("num_distributed_variables");
xla_node->AddAttr("num_distributed_variables", num_new_distributed_vars);
if (!mirrored_variable_indices.empty()) {
std::vector<int> new_mirrored_variable_indices;
absl::flat_hash_set<int> old_mirrored_variable_indices_set;
for (int index : mirrored_variable_indices) {
old_mirrored_variable_indices_set.insert(index);
}
for (int i = 0; i < num_per_replica_inputs + num_distributed_vars; i++) {
auto iter = arg_index_mapping.find(i);
if (iter != arg_index_mapping.end() &&
old_mirrored_variable_indices_set.contains(iter->first)) {
new_mirrored_variable_indices.push_back(iter->second);
}
}
xla_node->ClearAttr(TPUREPLICATE_MIRRORED_VAR_INDICES_ATTR);
xla_node->AddAttr(TPUREPLICATE_MIRRORED_VAR_INDICES_ATTR,
new_mirrored_variable_indices);
}
int num_replicated_inputs = num_per_replica_inputs + num_distributed_vars;
std::vector<DataType> new_broadcast_input_types;
for (int i = 0; i < broadcast_input_types.size(); i++) {
int arg_index = num_replicated_inputs + i;
if (arg_index_mapping.find(arg_index) != arg_index_mapping.end()) {
new_broadcast_input_types.push_back(broadcast_input_types[i]);
}
}
xla_node->ClearAttr("Tbroadcast_inputs");
xla_node->AddAttr("Tbroadcast_inputs", new_broadcast_input_types);
int new_num_variables = 0;
for (int i = 0; i < num_variables; i++) {
int arg_index = num_replicated_inputs + broadcast_input_types.size() + i;
if (arg_index_mapping.find(arg_index) != arg_index_mapping.end()) {
new_num_variables++;
}
}
xla_node->ClearAttr("NumVariables");
xla_node->AddAttr("NumVariables", new_num_variables);
std::vector<DataType> new_guaranteed_constant_types;
for (int i = 0; i < guaranteed_constant_types.size(); i++) {
int arg_index = num_replicated_inputs + broadcast_input_types.size() +
num_variables + i;
if (arg_index_mapping.find(arg_index) != arg_index_mapping.end()) {
new_guaranteed_constant_types.push_back(guaranteed_constant_types[i]);
}
}
xla_node->ClearAttr("Tguaranteed_constants");
xla_node->AddAttr("Tguaranteed_constants", new_guaranteed_constant_types);
int new_variable_start_index = num_new_per_replica_inputs +
num_new_distributed_vars +
new_broadcast_input_types.size();
if (xla_node->attrs().Find("_variable_start_index") != nullptr) {
xla_node->ClearAttr("_variable_start_index");
xla_node->AddAttr("_variable_start_index", new_variable_start_index);
}
int new_guaranteed_const_start_index =
new_variable_start_index + new_num_variables;
if (xla_node->attrs().Find("_guaranteed_const_start_index") != nullptr) {
xla_node->ClearAttr("_guaranteed_const_start_index");
xla_node->AddAttr("_guaranteed_const_start_index",
new_guaranteed_const_start_index);
}
VLOG(4) << "RemoveUnusedXlaInput host graph: "
<< DumpGraphToFile(
absl::StrCat("remove_unused_input_host_", xla_func_name), *g);
VLOG(4) << "RemoveUnusedXlaInput XLA graph: "
<< DumpGraphToFile(
absl::StrCat("remove_unused_input_xla_", xla_func_name),
*xla_graph);
return absl::OkStatus();
}
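// Moves outside-compilation nodes at the tail of `xla_graph` (nodes whose
// outputs reach only _Retval nodes or other tail outside-compilation nodes)
// into the host graph `g`, one copy per replica. Their inputs become
// additional replicated outputs of `xla_node`; _Retval nodes left without an
// input are fed from tagged Placeholder nodes so that RemoveUnusedXlaOutput
// can drop them later.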
Status MoveTailOutsideCompilationToHost(
const std::string& outside_compilation_attr_name,
const std::string& xla_func_name, const std::string& cluster_name, Graph* g,
Graph* xla_graph, Node* xla_node, Node* pivot_node) {
std::vector<Node*> oc_nodes_at_tail;
const std::string kOnlyRetOrOcOutputAttrName = "_xla_only_ret_or_oc_output";
DFS(
*xla_graph, nullptr,
[&](Node* n) {
bool has_non_ret_or_oc_output = false;
for (const Edge* e : n->out_edges()) {
if (e->dst() == xla_graph->sink_node()) {
continue;
}
if (!e->dst()->IsRetval() &&
(!HasNodeAttr(e->dst()->def(), outside_compilation_attr_name) ||
!HasNodeAttr(e->dst()->def(), kOnlyRetOrOcOutputAttrName))) {
has_non_ret_or_oc_output = true;
break;
}
}
if (HasNodeAttr(n->def(), outside_compilation_attr_name) &&
!has_non_ret_or_oc_output) {
n->AddAttr(kOnlyRetOrOcOutputAttrName, true);
oc_nodes_at_tail.push_back(n);
}
},
NodeComparatorName());
if (VLOG_IS_ON(5)) {
for (Node* n : oc_nodes_at_tail) {
VLOG(5) << "oc_nodes_at_tail: " << n->DebugString();
}
}
std::vector<const Edge*> oc_input_edges;
std::vector<DataType> new_ret_types;
for (Node* n : oc_nodes_at_tail) {
for (const Edge* e : n->in_edges()) {
if (!e->IsControlEdge() &&
!HasNodeAttr(e->src()->def(), kOnlyRetOrOcOutputAttrName)) {
VLOG(5) << "oc_input_edges: " << e->DebugString();
oc_input_edges.push_back(e);
new_ret_types.push_back(e->src()->output_type(e->src_output()));
}
}
}
std::vector<DataType> output_types;
TF_RETURN_IF_ERROR(
GetNodeAttr(xla_node->attrs(), "output_types", &output_types));
int num_replicas;
TF_RETURN_IF_ERROR(
GetNodeAttr(xla_node->attrs(), "num_replicas", &num_replicas));
int old_num_replicated_outputs = output_types.size() / num_replicas;
int new_num_replicated_outputs =
old_num_replicated_outputs + oc_input_edges.size();
VLOG(5) << "old_num_replicated_outputs: " << old_num_replicated_outputs;
VLOG(5) << "new_num_replicated_outputs: " << new_num_replicated_outputs;
std::vector<DataType> new_output_types;
for (int replica_id = 0; replica_id < num_replicas; replica_id++) {
for (int i = 0; i < old_num_replicated_outputs; i++) {
new_output_types.push_back(output_types[i]);
}
for (int i = old_num_replicated_outputs; i < new_num_replicated_outputs;
i++) {
new_output_types.push_back(new_ret_types[i - old_num_replicated_outputs]);
}
}
xla_node->ClearAttr("output_types");
xla_node->AddAttr("output_types", new_output_types);
std::vector<std::vector<std::pair<Node*, int>>> replicated_outputs(
old_num_replicated_outputs * num_replicas);
std::vector<const Edge*> old_replicated_edges;
for (const Edge* e : xla_node->out_edges()) {
if (e->src_output() >= 0 &&
e->src_output() < old_num_replicated_outputs * num_replicas) {
replicated_outputs[e->src_output()].push_back(
std::make_pair(e->dst(), e->dst_input()));
old_replicated_edges.push_back(e);
}
}
for (const Edge* e : old_replicated_edges) {
g->RemoveEdge(e);
}
for (int replica_id = 0; replica_id < num_replicas; replica_id++) {
for (int output_index = 0; output_index < old_num_replicated_outputs;
output_index++) {
for (const auto& node_input_pair :
replicated_outputs[replica_id * old_num_replicated_outputs +
output_index]) {
Node* dst = node_input_pair.first;
int dst_input = node_input_pair.second;
g->AddEdge(xla_node,
replica_id * new_num_replicated_outputs + output_index, dst,
dst_input);
}
}
}
absl::flat_hash_map<Node*, std::vector<Node*>> node_images;
for (Node* n : oc_nodes_at_tail) {
for (int replica_id = 0; replica_id < num_replicas; replica_id++) {
NodeDef copy_def = n->def();
copy_def.set_name(absl::StrCat(n->name(), "_tail_oc/R", replica_id));
copy_def.clear_device();
TF_ASSIGN_OR_RETURN(Node * copy_node, g->AddNode(copy_def));
copy_node->AddAttr(kXlaReplicaIdAttrName, replica_id);
copy_node->AddAttr(kTPUReplicateAttr, cluster_name);
for (const Edge* e : n->out_edges()) {
if (e->dst() == xla_graph->sink_node()) {
continue;
}
if (e->dst()->IsRetval()) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(e->dst()->attrs(), "index", &index));
for (const auto& output :
replicated_outputs[replica_id * old_num_replicated_outputs +
index]) {
const Edge* original_edge;
Status s = output.first->input_edge(output.second, &original_edge);
if (s.ok()) {
g->RemoveEdge(original_edge);
}
g->AddEdge(copy_node, e->src_output(), output.first, output.second);
}
} else {
g->AddEdge(copy_node, e->src_output(),
node_images[e->dst()][replica_id], e->dst_input());
}
}
copy_node->AddAttr("_xla_tail_outside_compilation", true);
g->AddControlEdge(xla_node, copy_node);
if (pivot_node) {
g->AddControlEdge(pivot_node, copy_node);
}
node_images[n].push_back(copy_node);
}
}
for (int i = 0; i < new_ret_types.size(); i++) {
const Edge* original_edge = oc_input_edges[i];
for (int replica_id = 0; replica_id < num_replicas; replica_id++) {
int src_output = replica_id * new_num_replicated_outputs +
old_num_replicated_outputs + i;
Node* dst = node_images[original_edge->dst()][replica_id];
g->AddEdge(xla_node, src_output, dst, original_edge->dst_input());
}
}
for (int i = old_num_replicated_outputs; i < new_num_replicated_outputs;
i++) {
NodeDefBuilder ret_builder(absl::StrCat("ret_", i),
FunctionLibraryDefinition::kRetOp);
ret_builder.Attr("T", new_ret_types[i - old_num_replicated_outputs]);
ret_builder.Attr("index", i);
const Edge* original_edge = oc_input_edges[i - old_num_replicated_outputs];
Node* src = original_edge->src();
int src_output = original_edge->src_output();
ret_builder.Input(src->name(), src_output, src->output_type(src_output));
NodeDef ret_def;
TF_RETURN_IF_ERROR(ret_builder.Finalize(&ret_def));
TF_ASSIGN_OR_RETURN(Node * ret_node, xla_graph->AddNode(ret_def));
xla_graph->RemoveEdge(original_edge);
xla_graph->AddEdge(src, src_output, ret_node, 0);
}
for (Node* n : oc_nodes_at_tail) {
xla_graph->RemoveNode(n);
}
std::vector<Node*> unused_rets;
for (Node* n : xla_graph->nodes()) {
if (n->IsRetval() && n->in_edges().empty()) {
unused_rets.push_back(n);
}
}
for (Node* n : unused_rets) {
NodeDefBuilder builder(absl::StrCat("placeholder_", n->name()),
"Placeholder");
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "T", &dtype));
builder.Attr("dtype", dtype);
builder.Attr(kXlaIsPlaceholderForTailOcAttrName, true);
NodeDef def;
TF_RETURN_IF_ERROR(builder.Finalize(&def));
TF_ASSIGN_OR_RETURN(Node * placeholder, xla_graph->AddNode(def));
xla_graph->AddEdge(placeholder, 0, n, 0);
}
VLOG(4) << "MoveTailOutsideCompilationToHost host graph: "
<< DumpGraphToFile(absl::StrCat("move_tail_oc_host_", xla_func_name),
*g);
VLOG(4) << "MoveTaildOutsideCompilationToHost XLA graph: "
<< DumpGraphToFile(absl::StrCat("move_tail_oc_xla_", xla_func_name),
*xla_graph);
return absl::OkStatus();
}
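// For every DT_RESOURCE _Arg in `xla_graph` that feeds outside-compilation
// nodes, builds a host-side IdentityN forwarding the per-replica input
// values and replaces the arg's edges into outside compilation with tagged
// Placeholder nodes that reference that IdentityN.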
Status ReplaceArgUsedByOutsideCompilationWithPlaceholder(
const std::string& outside_compilation_attr_name,
const std::string& xla_func_name, Graph* g, Graph* xla_graph,
Node* xla_node) {
std::vector<DataType> input_types;
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->attrs(), "Tinputs", &input_types));
int num_distributed_vars;
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->attrs(), "num_distributed_variables",
&num_distributed_vars));
int num_replicas;
TF_RETURN_IF_ERROR(
GetNodeAttr(xla_node->attrs(), "num_replicas", &num_replicas));
int num_per_replica_inputs =
(input_types.size() - num_distributed_vars) / num_replicas;
for (Node* n : xla_graph->op_nodes()) {
if (!n->IsArg()) {
continue;
}
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "T", &dtype));
if (dtype != DT_RESOURCE) {
continue;
}
std::vector<const Edge*> oc_out_edges;
for (const Edge* e : n->out_edges()) {
if (e->IsControlEdge() ||
!HasNodeAttr(e->dst()->def(), kOutsideCompilationAttr)) {
continue;
}
oc_out_edges.push_back(e);
}
if (oc_out_edges.empty()) {
continue;
}
std::vector<const Edge*> input_edges;
TF_RETURN_IF_ERROR(xla_node->input_edges(&input_edges));
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "index", &index));
std::string oc_identifier = absl::StrCat("oc_only_arg_", index);
NodeDefBuilder id_builder(absl::StrCat(oc_identifier, "_inputs"),
"IdentityN");
std::vector<DataType> dtypes(num_replicas, dtype);
id_builder.Attr("T", dtypes);
id_builder.Attr(kXlaOutsideCompilationInputsAttrName, oc_identifier);
std::vector<NodeDefBuilder::NodeOut> inputs(num_replicas);
if (index >= num_per_replica_inputs) {
const Edge* e = input_edges.at(num_replicas * num_per_replica_inputs +
(index - num_per_replica_inputs));
for (int i = 0; i < num_replicas; i++) {
inputs[i] =
NodeDefBuilder::NodeOut{e->src()->name(), e->src_output(),
e->src()->output_type(e->src_output())};
}
} else {
for (int i = 0; i < num_replicas; i++) {
const Edge* e = input_edges.at(i * num_per_replica_inputs + index);
inputs[i] =
NodeDefBuilder::NodeOut{e->src()->name(), e->src_output(),
e->src()->output_type(e->src_output())};
}
}
id_builder.Input(inputs);
NodeDef id_def;
TF_RETURN_IF_ERROR(id_builder.Finalize(&id_def));
TF_ASSIGN_OR_RETURN(Node * id_node, g->AddNode(id_def));
if (index >= num_per_replica_inputs) {
const Edge* e = input_edges.at(num_replicas * num_per_replica_inputs +
(index - num_per_replica_inputs));
for (int i = 0; i < num_replicas; i++) {
g->AddEdge(e->src(), e->src_output(), id_node, i);
}
} else {
for (int i = 0; i < num_replicas; i++) {
const Edge* e = input_edges.at(i * num_per_replica_inputs + index);
g->AddEdge(e->src(), e->src_output(), id_node, i);
}
}
for (const Edge* e : oc_out_edges) {
NodeDefBuilder ph_builder(xla_graph->NewName("ph_for_arg_in_oc_"),
"Placeholder");
ph_builder.Attr("dtype", dtype);
std::string outside_compilation_attr;
TF_RETURN_IF_ERROR(GetNodeAttr(e->dst()->def(), kOutsideCompilationAttr,
&outside_compilation_attr));
ph_builder.Attr(kOutsideCompilationAttr, outside_compilation_attr);
ph_builder.Attr(kXlaOutsideCompilationInputsAttrName, oc_identifier);
ph_builder.Attr(kXlaIsPlaceholderForArg, true);
NodeDef ph_def;
TF_RETURN_IF_ERROR(ph_builder.Finalize(&ph_def));
TF_ASSIGN_OR_RETURN(Node * ph_node, xla_graph->AddNode(ph_def));
Node* dst = e->dst();
int dst_input = e->dst_input();
xla_graph->RemoveEdge(e);
xla_graph->AddEdge(ph_node, 0, dst, dst_input);
xla_graph->AddControlEdge(xla_graph->source_node(), ph_node);
}
}
VLOG(4) << "ReplaceOutsideCompilationOnlyArgWithPlaceholder host graph: "
<< DumpGraphToFile(
absl::StrCat("replace_oc_only_arg_host_", xla_func_name), *g);
VLOG(4) << "ReplaceOutsideCompilationOnlyArgWithPlaceholder XLA graph: "
<< DumpGraphToFile(
absl::StrCat("replace_oc_only_arg_xla_", xla_func_name),
*xla_graph);
return absl::OkStatus();
}
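// Deletes _Retval nodes in `xla_graph` that are fed only by tail
// outside-compilation Placeholders, renumbers the remaining return values,
// and rewires the corresponding outputs of `xla_node` in the host graph.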
Status RemoveUnusedXlaOutput(const std::string& xla_func_name, Graph* g,
Graph* xla_graph, Node* xla_node) {
std::vector<DataType> output_types;
TF_RETURN_IF_ERROR(
GetNodeAttr(xla_node->def(), "output_types", &output_types));
int num_replicas;
TF_RETURN_IF_ERROR(
GetNodeAttr(xla_node->def(), "num_replicas", &num_replicas));
int num_replicated_outputs = output_types.size() / num_replicas;
std::set<int> ret_indices_to_remove;
std::vector<Node*> ret_nodes_to_update, nodes_to_remove;
int num_rets = 0;
for (Node* n : xla_graph->nodes()) {
if (!n->IsRetval()) {
continue;
}
num_rets++;
const Edge* e;
TF_RETURN_IF_ERROR(n->input_edge(0, &e));
if (e->src()->type_string() != "Placeholder" ||
!HasNodeAttr(e->src()->def(), kXlaIsPlaceholderForTailOcAttrName)) {
ret_nodes_to_update.push_back(n);
continue;
}
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "index", &index));
ret_indices_to_remove.insert(index);
nodes_to_remove.push_back(e->src());
nodes_to_remove.push_back(n);
}
for (Node* n : nodes_to_remove) {
xla_graph->RemoveNode(n);
}
std::map<int, int> ret_index_mapping;
int new_ret_index = 0;
for (int i = 0; i < num_rets; i++) {
if (ret_indices_to_remove.find(i) != ret_indices_to_remove.end()) {
continue;
} else {
ret_index_mapping[i] = new_ret_index;
new_ret_index++;
}
}
for (Node* n : ret_nodes_to_update) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "index", &index));
n->ClearAttr("index");
n->AddAttr("index", ret_index_mapping[index]);
}
std::vector<DataType> new_output_types;
for (int i = 0; i < num_replicas; i++) {
for (const auto& e : ret_index_mapping) {
new_output_types.push_back(output_types[e.first]);
}
}
xla_node->ClearAttr("output_types");
xla_node->AddAttr("output_types", new_output_types);
std::vector<std::vector<const Edge*>> output_edges(num_replicas *
num_replicated_outputs);
for (const Edge* e : xla_node->out_edges()) {
if (e->src_output() >= 0 &&
e->src_output() < num_replicas * num_replicated_outputs) {
output_edges[e->src_output()].push_back(e);
}
}
for (int i = 0; i < num_replicas; i++) {
for (int j = 0; j < num_replicated_outputs; j++) {
auto iter = ret_index_mapping.find(j);
if (iter != ret_index_mapping.end()) {
for (const Edge* e : output_edges[i * num_replicated_outputs + j]) {
Node* dst = e->dst();
int dst_input = e->dst_input();
int src_output =
i * (num_replicated_outputs - ret_indices_to_remove.size()) +
iter->second;
g->RemoveEdge(e);
g->AddEdge(xla_node, src_output, dst, dst_input);
}
} else {
TF_RET_CHECK(output_edges[i * num_replicated_outputs + j].empty())
<< "Output edge not removed: "
<< output_edges[i * num_replicated_outputs + j][0]->DebugString();
}
}
}
VLOG(4) << "RemoveUnusedXlaOutput host graph: "
<< DumpGraphToFile(
absl::StrCat("remove_unused_output_host_", xla_func_name), *g);
VLOG(4) << "RemoveUnusedXlaOutput XLA graph: "
<< DumpGraphToFile(
absl::StrCat("remove_unused_output_xla_", xla_func_name),
*xla_graph);
return absl::OkStatus();
}
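// For each direct _Arg -> _Retval edge in `xla_graph`, reroutes the host
// consumers of the matching `xla_node` output to the matching input source,
// then replaces the edge inside `xla_graph` with a tagged Placeholder so the
// now-unused return value can be removed later.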
Status RemoveEdgesBetweenArgAndRetval(const std::string& xla_func_name,
Graph* g, Graph* xla_graph,
Node* xla_node) {
int num_replicas;
TF_RETURN_IF_ERROR(
GetNodeAttr(xla_node->def(), "num_replicas", &num_replicas));
std::vector<DataType> input_types;
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->def(), "Tinputs", &input_types));
int num_distributed_vars;
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->attrs(), "num_distributed_variables",
&num_distributed_vars));
int old_num_per_replica_inputs =
(input_types.size() - num_distributed_vars) / num_replicas;
std::vector<DataType> output_types;
TF_RETURN_IF_ERROR(
GetNodeAttr(xla_node->def(), "output_types", &output_types));
int old_num_outputs = output_types.size() / num_replicas;
std::vector<const Edge*> edges;
for (const Edge* e : xla_graph->edges()) {
if (!e->IsControlEdge() && e->src()->IsArg() && e->dst()->IsRetval()) {
edges.push_back(e);
}
}
std::vector<std::vector<const Edge*>> xla_node_out_edges(
xla_node->num_outputs());
for (const Edge* e : xla_node->out_edges()) {
if (!e->IsControlEdge()) {
xla_node_out_edges[e->src_output()].push_back(e);
}
}
std::vector<const Edge*> input_edges;
TF_RETURN_IF_ERROR(xla_node->input_edges(&input_edges));
for (const Edge* e : edges) {
int arg_index;
TF_RETURN_IF_ERROR(GetNodeAttr(e->src()->def(), "index", &arg_index));
int ret_index;
TF_RETURN_IF_ERROR(GetNodeAttr(e->dst()->def(), "index", &ret_index));
for (int replica_id = 0; replica_id < num_replicas; replica_id++) {
int input_index;
if (arg_index < old_num_per_replica_inputs) {
input_index = replica_id * old_num_per_replica_inputs + arg_index;
} else {
input_index = num_replicas * old_num_per_replica_inputs +
(arg_index - old_num_per_replica_inputs);
}
const Edge* input_edge = input_edges.at(input_index);
int output_index = replica_id * old_num_outputs + ret_index;
for (const Edge* output_edge : xla_node_out_edges[output_index]) {
Node* dst = output_edge->dst();
int dst_input = output_edge->dst_input();
g->RemoveEdge(output_edge);
g->AddEdge(input_edge->src(), input_edge->src_output(), dst, dst_input);
}
}
}
for (const Edge* e : edges) {
NodeDefBuilder placeholder_builder(
absl::StrCat("placeholder_", e->dst()->name()), "Placeholder");
placeholder_builder.Attr("dtype", e->src()->output_type(e->src_output()));
placeholder_builder.Attr(kXlaIsPlaceholderForTailOcAttrName, true);
NodeDef placeholder_def;
TF_RETURN_IF_ERROR(placeholder_builder.Finalize(&placeholder_def));
TF_ASSIGN_OR_RETURN(Node * placeholder_node,
xla_graph->AddNode(placeholder_def));
Node* dst = e->dst();
int dst_input = e->dst_input();
xla_graph->RemoveEdge(e);
xla_graph->AddEdge(placeholder_node, 0, dst, dst_input);
}
VLOG(4) << "RemoveUnusedArgRetvalPair host graph: "
<< DumpGraphToFile(
absl::StrCat("remove_unused_arg_ret_host_", xla_func_name),
*g);
VLOG(4) << "RemoveUnusedArgRetvalPair XLA graph: "
<< DumpGraphToFile(
absl::StrCat("remove_unused_arg_ret_xla_", xla_func_name),
*xla_graph);
return absl::OkStatus();
}
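// Deletes TPUReplicatedInput nodes whose outputs all lead to the sink,
// together with any TPUPartitionedInput(V2) nodes feeding them.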
void RemoveUnusedTPUReplicatedInputs(Graph* graph) {
for (Node* n : graph->nodes()) {
if (n->type_string() == kTPUReplicatedInput) {
bool has_output = false;
for (const Edge* e : n->out_edges()) {
if (!e->dst()->IsSink()) {
has_output = true;
break;
}
}
if (!has_output) {
std::vector<Node*> to_be_removed_src_nodes;
for (const auto& e_in : n->in_edges()) {
if (!e_in->IsControlEdge() &&
(e_in->src()->type_string() == kTPUPartitionedInput ||
e_in->src()->type_string() == kTPUPartitionedInputV2))
to_be_removed_src_nodes.push_back(e_in->src());
}
graph->RemoveNode(n);
for (Node* node : to_be_removed_src_nodes) {
graph->RemoveNode(node);
}
}
}
}
}
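// When several TPUReplicateMetadata nodes share a cluster name, gives each
// duplicate cluster a fresh "<name>_<suffix>" name and propagates it to the
// cluster's nodes and to any TPUCompilationResult consumers.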
Status RenameClustersWithDuplicatedNames(Graph* g) {
std::unordered_map<std::string, std::vector<Node*>>
cluster_name_to_metadata_nodes;
std::unordered_set<std::string> cluster_names;
for (Node* n : g->nodes()) {
if (n->type_string() != "TPUReplicateMetadata") {
continue;
}
std::string cluster_name;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), kTPUReplicateAttr, &cluster_name));
cluster_name_to_metadata_nodes[cluster_name].push_back(n);
cluster_names.insert(cluster_name);
}
for (const auto& iter : cluster_name_to_metadata_nodes) {
if (iter.second.size() == 1) {
continue;
}
for (int i = 1; i < iter.second.size(); i++) {
std::string new_cluster_name;
int cluster_name_suffix = 1;
while (true) {
new_cluster_name = absl::StrCat(iter.first, "_", cluster_name_suffix);
if (cluster_names.find(new_cluster_name) == cluster_names.end()) {
break;
}
cluster_name_suffix++;
}
cluster_names.insert(new_cluster_name);
std::queue<Node*> queue;
queue.push(iter.second.at(i));
absl::flat_hash_set<Node*> visited;
while (!queue.empty()) {
Node* n = queue.front();
queue.pop();
visited.insert(n);
n->ClearAttr(kTPUReplicateAttr);
n->AddAttr(kTPUReplicateAttr, new_cluster_name);
std::string cluster_name;
for (const Edge* e : n->out_edges()) {
if (GetNodeAttr(e->dst()->def(), kTPUReplicateAttr, &cluster_name)
.ok() &&
cluster_name == iter.first &&
visited.find(e->dst()) == visited.end()) {
queue.push(e->dst());
}
}
}
for (const Edge* e : iter.second.at(i)->out_edges()) {
if (e->dst()->type_string() == "TPUCompilationResult") {
e->dst()->ClearAttr("_tpu_compilation_status");
e->dst()->AddAttr("_tpu_compilation_status", new_cluster_name);
}
}
}
}
return absl::OkStatus();
}
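// Instantiates the function referenced by the `function_name_attr` attribute
// of node `n` (e.g. "then_branch" or "body") and returns its FunctionBody.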
absl::StatusOr<std::unique_ptr<FunctionBody>> InstantiateAssociatedFunction(
const Node& n, absl::string_view function_name_attr,
FunctionLibraryDefinition* fld) {
std::unique_ptr<FunctionBody> fbody;
NameAttrList func_attr_list;
TF_RETURN_IF_ERROR(GetNodeAttr(n.def(), function_name_attr, &func_attr_list));
const FunctionDef* fdef = fld->Find(func_attr_list.name());
if (fdef == nullptr) {
return absl::InternalError(absl::StrCat("Cannot find ", function_name_attr,
" function", "for node ",
n.DebugString()));
}
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*fdef, AttrSlice(&func_attr_list.attr()), fld, &fbody));
return fbody;
}
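// Returns the indices of DT_RESOURCE arguments of `if_node` that are used in
// at least one branch but consumed only by outside-compilation nodes (or via
// control edges), and can therefore be lifted out of the If node.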
absl::StatusOr<absl::flat_hash_set<int>> FindArgsToLiftForIfNode(
const Node& if_node, FunctionLibraryDefinition* fld) {
absl::flat_hash_set<int> args_to_lift_indices;
std::vector<DataType> dtypes;
TF_RETURN_IF_ERROR(GetNodeAttr(if_node.def(), "Tin", &dtypes));
int num_args = dtypes.size();
for (int i = 0; i < num_args; i++) {
if (dtypes[i] == DT_RESOURCE) {
args_to_lift_indices.insert(i);
}
}
TF_ASSIGN_OR_RETURN(
std::unique_ptr<FunctionBody> then_branch_fbody,
InstantiateAssociatedFunction(if_node, "then_branch", fld));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<FunctionBody> else_branch_fbody,
InstantiateAssociatedFunction(if_node, "else_branch", fld));
for (int i = 0; i < num_args; ++i) {
bool used = false;
const Node* then_arg_node = then_branch_fbody->arg_nodes[i];
for (const Edge* e : then_arg_node->out_edges()) {
used = true;
if (e->IsControlEdge() ||
HasNodeAttr(e->dst()->def(), kOutsideCompilationAttr))
continue;
args_to_lift_indices.erase(i);
break;
}
const Node* else_arg_node = else_branch_fbody->arg_nodes[i];
for (const Edge* e : else_arg_node->out_edges()) {
used = true;
if (e->IsControlEdge() ||
HasNodeAttr(e->dst()->def(), kOutsideCompilationAttr))
continue;
args_to_lift_indices.erase(i);
break;
}
if (!used) args_to_lift_indices.erase(i);
}
return args_to_lift_indices;
}
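// Returns the indices of DT_RESOURCE loop variables of `while_node` that can
// be lifted: unused by the cond function, passed through the body unchanged
// (possibly via Identity nodes), and consumed in the body only by
// outside-compilation nodes besides the pass-through to the matching
// _Retval.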
absl::StatusOr<absl::flat_hash_set<int>> FindArgsToLiftForWhileNode(
Node* while_node, FunctionLibraryDefinition* fld) {
absl::flat_hash_set<int> result;
std::vector<DataType> dtypes;
TF_RETURN_IF_ERROR(GetNodeAttr(while_node->def(), "T", &dtypes));
for (int i = 0; i < dtypes.size(); i++) {
if (dtypes[i] == DT_RESOURCE) {
result.insert(i);
}
}
NameAttrList cond_func;
TF_RETURN_IF_ERROR(GetNodeAttr(while_node->def(), "cond", &cond_func));
const FunctionDef* cond_fdef = fld->Find(cond_func.name());
if (cond_fdef == nullptr) {
return absl::InternalError(
absl::StrCat("Cannot find cond function ", cond_func.name(),
" for while node ", while_node->DebugString()));
}
std::unique_ptr<FunctionBody> cond_fbody;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*cond_fdef, AttrSlice(&cond_func.attr()), fld, &cond_fbody));
for (int i = 0; i < cond_fbody->arg_nodes.size(); i++) {
const Node* arg_node = cond_fbody->arg_nodes[i];
for (const Edge* e : arg_node->out_edges()) {
if (!e->IsControlEdge()) {
result.erase(i);
}
}
}
NameAttrList body_func;
TF_RETURN_IF_ERROR(GetNodeAttr(while_node->def(), "body", &body_func));
const FunctionDef* body_fdef = fld->Find(body_func.name());
if (body_fdef == nullptr) {
return absl::InternalError(
absl::StrCat("Cannot find body function ", body_func.name(),
" for while node ", while_node->DebugString()));
}
std::unique_ptr<FunctionBody> body_fbody;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*body_fdef, AttrSlice(&body_func.attr()), fld, &body_fbody));
for (int i = 0; i < body_fbody->ret_nodes.size(); i++) {
const Node* node = body_fbody->ret_nodes[i];
do {
TF_RETURN_IF_ERROR(node->input_node(0, &node));
} while (node->IsIdentity());
if (node != body_fbody->arg_nodes[i]) {
result.erase(i);
}
}
for (int i = 0; i < body_fbody->arg_nodes.size(); i++) {
const Node* arg_node = body_fbody->arg_nodes[i];
int data_edge_count = std::count_if(
arg_node->out_edges().begin(), arg_node->out_edges().end(),
[](const Edge* e) { return !e->IsControlEdge(); });
if (data_edge_count == 1) {
result.erase(i);
}
}
for (int i = 0; i < body_fbody->arg_nodes.size(); i++) {
const Node* arg_node = body_fbody->arg_nodes[i];
for (const Edge* e : arg_node->out_edges()) {
if (!e->dst()->IsRetval() &&
!HasNodeAttr(e->dst()->def(), kOutsideCompilationAttr)) {
result.erase(i);
break;
}
}
}
return result;
}
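// Returns the indices of DT_RESOURCE arguments of a function call node whose
// _Arg nodes in `fbody` have consumers, all of them outside-compilation
// nodes.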
absl::StatusOr<absl::flat_hash_set<int>> FindArgsToLiftForCallNode(
Node* call_node, const FunctionBody& fbody) {
absl::flat_hash_set<int> result;
std::vector<DataType> dtypes(call_node->input_types().begin(),
call_node->input_types().end());
for (int i = 0; i < dtypes.size(); i++) {
if (dtypes[i] == DT_RESOURCE) {
result.insert(i);
}
}
for (int i = 0; i < fbody.arg_nodes.size(); i++) {
const Node* arg_node = fbody.arg_nodes[i];
if (arg_node->out_edges().empty()) {
result.erase(i);
continue;
}
for (const Edge* e : arg_node->out_edges()) {
if (!HasNodeAttr(e->dst()->def(), kOutsideCompilationAttr)) {
result.erase(i);
break;
}
}
}
return result;
}
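// Lifts outside-compilation-only args from nodes in `g`. Forward-declared
// here because it is defined later in this file and used mutually
// recursively with the helpers below.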
Status LiftOutsideCompilationOnlyArgs(Graph* g, FunctionLibraryRuntime* flr,
FunctionLibraryDefinition* fld,
int* lifted_arg_count, bool* rewritten);
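// Runs LiftOutsideCompilationOnlyArgs on a function body and, if anything
// was rewritten, stores the result in the function library, either under
// `new_func_name` or replacing the original function.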
Status LiftOutsideCompilationOnlyArgsAndReplaceFunctionDef(
const FunctionBody& fbody, FunctionLibraryRuntime* flr,
FunctionLibraryDefinition* fld, int* lifted_arg_count,
std::optional<std::string> new_func_name, bool* rewritten) {
*rewritten = false;
TF_RETURN_IF_ERROR(LiftOutsideCompilationOnlyArgs(
fbody.graph, flr, fld, lifted_arg_count, rewritten));
if (*rewritten) {
FunctionDef rewritten_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(
*(fbody.graph), fbody.record->fdef().signature().name(),
&rewritten_fdef));
if (new_func_name) {
rewritten_fdef.mutable_signature()->set_name(*new_func_name);
TF_RETURN_IF_ERROR(fld->AddFunctionDef(rewritten_fdef));
} else {
TF_RETURN_IF_ERROR(fld->ReplaceFunction(
fbody.record->fdef().signature().name(), rewritten_fdef));
}
}
return absl::OkStatus();
}
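// For each argument being lifted, creates a host-side Identity node tagged as
// a lifted arg in its own outside-compilation cluster, feeds it from the
// corresponding input edge of `n`, and records the generated cluster name per
// argument index in `lifted_arg_index_to_oc_cluster_name`.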
Status MakeIdentityNodesForArgsToLift(
const absl::flat_hash_set<int>& args_to_lift,
const int arg_to_input_edge_offset, Graph* g, Node* n,
absl::flat_hash_map<int, std::string>* lifted_arg_index_to_oc_cluster_name,
int* lifted_arg_count) {
int num_input = n->num_inputs();
for (int arg_index = 0; arg_index < num_input; ++arg_index) {
if (!args_to_lift.contains(arg_index)) continue;
int input_edge_index = arg_index + arg_to_input_edge_offset;
const Edge* arg_edge;
TF_RETURN_IF_ERROR(n->input_edge(input_edge_index, &arg_edge));
std::string node_name =
g->NewName(absl::StrCat("lifted_arg", *lifted_arg_count));
(*lifted_arg_count)++;
(*lifted_arg_index_to_oc_cluster_name)[arg_index] = node_name;
NodeDefBuilder id_builder(node_name, "Identity");
id_builder.Attr("T", n->input_type(input_edge_index));
id_builder.Attr(kOutsideCompilationAttr, id_builder.node_name());
id_builder.Attr(kXlaIsLiftedArgAttrName, true);
id_builder.Input(arg_edge->src()->name(), arg_edge->src_output(),
n->input_type(input_edge_index));
NodeDef id_def;
TF_RETURN_IF_ERROR(id_builder.Finalize(&id_def));
TF_ASSIGN_OR_RETURN(Node * id_node, g->AddNode(id_def));
g->AddEdge(arg_edge->src(), arg_edge->src_output(), id_node, 0);
g->AddControlEdge(id_node, n);
}
return absl::OkStatus();
}
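// Deletes lifted _Arg nodes from a function body: each outside-compilation
// consumer is rewired to a new Placeholder that names the host-side
// lifted-arg Identity it stands for, and the surviving _Arg nodes are
// renumbered and retyped according to `index_mapping` and `arg_dtypes`.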
Status RemoveArgsToLiftFromFunctionBody(
const absl::flat_hash_set<int>& args_to_lift,
const std::vector<DataType>& arg_dtypes,
const absl::flat_hash_map<int, std::string>&
lifted_arg_index_to_oc_cluster_name,
const absl::flat_hash_map<int, int>& index_mapping,
const FunctionBody* fbody) {
for (int i = 0; i < fbody->arg_nodes.size(); ++i) {
Node* arg_node = fbody->arg_nodes[i];
if (!args_to_lift.contains(i)) {
int new_index = index_mapping.at(i);
arg_node->ClearAttr("index");
arg_node->AddAttr("index", new_index);
arg_node->ClearAttr("T");
arg_node->AddAttr("T", arg_dtypes[i]);
continue;
}
std::vector<const Edge*> out_edges_to_oc;
for (const Edge* e : arg_node->out_edges()) {
if (HasNodeAttr(e->dst()->def(), kOutsideCompilationAttr)) {
out_edges_to_oc.push_back(e);
}
}
for (const Edge* e : out_edges_to_oc) {
std::string outside_compilation_cluster;
TF_RETURN_IF_ERROR(GetNodeAttr(e->dst()->def(), kOutsideCompilationAttr,
&outside_compilation_cluster));
NodeDefBuilder ph_builder(fbody->graph->NewName("lifted_arg"),
"Placeholder");
ph_builder.Attr("dtype", arg_dtypes[i]);
ph_builder.Attr(kOutsideCompilationAttr, outside_compilation_cluster);
TF_RET_CHECK(lifted_arg_index_to_oc_cluster_name.contains(i));
ph_builder.Attr(kXlaLiftedArgOutsideCompilationAttrName,
lifted_arg_index_to_oc_cluster_name.at(i));
NodeDef ph_def;
TF_RETURN_IF_ERROR(ph_builder.Finalize(&ph_def));
TF_ASSIGN_OR_RETURN(Node * ph_node, fbody->graph->AddNode(ph_def));
Node* dst = e->dst();
int dst_input = e->dst_input();
fbody->graph->RemoveEdge(e);
fbody->graph->AddEdge(ph_node, 0, dst, dst_input);
}
fbody->graph->RemoveNode(arg_node);
}
return absl::OkStatus();
}
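// Rewires the input edges of `n` to their new positions according to
// `index_mapping`, dropping edges for lifted arguments. Inputs before
// `arg_to_input_edge_offset` (e.g. an If node's predicate) are untouched.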
Status CleanUpInEdges(const absl::flat_hash_map<int, int>& index_mapping,
const int arg_to_input_edge_offset, Graph* g, Node* n) {
int num_inputs = n->num_inputs();
for (int i = 0; i < num_inputs; ++i) {
if (i < arg_to_input_edge_offset) continue;
int arg_idx = i - arg_to_input_edge_offset;
const Edge* e;
TF_RETURN_IF_ERROR(n->input_edge(i, &e));
if (!index_mapping.contains(arg_idx)) {
g->RemoveEdge(e);
continue;
}
if (index_mapping.at(arg_idx) == arg_idx) continue;
g->AddEdge(e->src(), e->src_output(), n,
index_mapping.at(arg_idx) + arg_to_input_edge_offset);
g->RemoveEdge(e);
}
return absl::OkStatus();
}
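// Bypasses and deletes the Identity nodes that consume a WhileV2 node's
// outputs, connecting their consumers directly to the While node.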
void RemoveOutputIdentityNodesForWhileV2(Graph* g, Node* while_node) {
std::vector<const Edge*> edges_to_identity_node;
for (const Edge* e : while_node->out_edges()) {
if (!e->IsControlEdge() && e->dst()->IsIdentity()) {
edges_to_identity_node.push_back(e);
}
}
for (const Edge* e : edges_to_identity_node) {
Node* identity = e->dst();
std::vector<const Edge*> out_edges(identity->out_edges().begin(),
identity->out_edges().end());
for (const Edge* out_edge : out_edges) {
if (out_edge->IsControlEdge()) {
g->AddControlEdge(while_node, out_edge->dst());
} else {
Node* dst = out_edge->dst();
int dst_input = out_edge->dst_input();
g->RemoveEdge(out_edge);
g->AddEdge(while_node, e->src_output(), dst, dst_input);
}
}
g->RemoveNode(identity);
}
}
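// For loop variables being lifted, redirects consumers of the While node's
// outputs to the corresponding input sources; this is safe because lifted
// resource variables pass through the loop body unchanged.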
Status ReplaceOutputEdgesWithInputEdgeSourceForWhile(
const absl::flat_hash_set<int>& args_to_lift, Graph* g, Node* while_node) {
std::vector<const Edge*> edges_to_replace;
for (const Edge* e : while_node->out_edges()) {
if (args_to_lift.contains(e->src_output())) {
edges_to_replace.push_back(e);
}
}
for (const Edge* e : edges_to_replace) {
const Edge* input_edge;
TF_RETURN_IF_ERROR(while_node->input_edge(e->src_output(), &input_edge));
Node* dst = e->dst();
int dst_input = e->dst_input();
g->RemoveEdge(e);
g->AddEdge(input_edge->src(), input_edge->src_output(), dst, dst_input);
}
return absl::OkStatus();
}
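// Builds the old-index -> new-index mapping for arguments that survive after
// the entries in `args_to_lift` are removed.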
absl::flat_hash_map<int, int> ArgIndexMapping(
const int num_args, const absl::flat_hash_set<int>& args_to_lift) {
absl::flat_hash_map<int, int> index_mapping;
int new_index = 0;
for (int i = 0; i < num_args; i++) {
if (!args_to_lift.contains(i)) {
index_mapping[i] = new_index;
++new_index;
}
}
return index_mapping;
}
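// Renumbers and retypes the _Retval nodes of a while body after lifting,
// deleting the retvals that correspond to lifted loop variables.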
void CleanUpRetvalsForWhileBody(
const absl::flat_hash_map<int, int>& index_mapping,
const std::vector<DataType>& dtypes, FunctionBody* fbody) {
for (int i = 0; i < fbody->ret_nodes.size(); i++) {
Node* ret_node = fbody->ret_nodes[i];
if (index_mapping.contains(i)) {
int new_index = index_mapping.at(i);
ret_node->ClearAttr("index");
ret_node->AddAttr("index", new_index);
ret_node->ClearAttr("T");
ret_node->AddAttr("T", dtypes[i]);
} else {
fbody->graph->RemoveNode(ret_node);
}
}
}
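// Lifts DT_RESOURCE loop variables that are consumed only by outside
// compilation out of a While node: rewires host-side output consumers, adds
// lifted-arg Identity nodes, rewrites the cond and body functions, and
// shrinks the node's "T" type attribute accordingly.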
Status LiftOutsideCompilationOnlyArgsFromWhileNode(
Graph* g, Node* while_node, FunctionLibraryDefinition* fld,
int* lifted_arg_count, bool* rewritten) {
*rewritten = false;
TF_ASSIGN_OR_RETURN(absl::flat_hash_set<int> args_to_lift,
FindArgsToLiftForWhileNode(while_node, fld));
if (args_to_lift.empty()) return absl::OkStatus();
RemoveOutputIdentityNodesForWhileV2(g, while_node);
TF_RETURN_IF_ERROR(ReplaceOutputEdgesWithInputEdgeSourceForWhile(
args_to_lift, g, while_node));
std::vector<DataType> dtypes;
TF_RETURN_IF_ERROR(GetNodeAttr(while_node->def(), "T", &dtypes));
absl::flat_hash_map<int, int> index_mapping =
ArgIndexMapping(dtypes.size(), args_to_lift);
absl::flat_hash_map<int, std::string> lifted_arg_index_to_oc_cluster_name;
TF_RETURN_IF_ERROR(MakeIdentityNodesForArgsToLift(
args_to_lift, /*arg_to_input_edge_offset=*/0, g, while_node,
&lifted_arg_index_to_oc_cluster_name, lifted_arg_count));
TF_ASSIGN_OR_RETURN(std::unique_ptr<FunctionBody> cond_fbody,
InstantiateAssociatedFunction(*while_node, "cond", fld));
TF_RETURN_IF_ERROR(RemoveArgsToLiftFromFunctionBody(
args_to_lift, dtypes, lifted_arg_index_to_oc_cluster_name, index_mapping,
cond_fbody.get()));
FunctionDef rewritten_cond_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(
*(cond_fbody->graph), cond_fbody->record->fdef().signature().name(),
&rewritten_cond_fdef));
TF_RETURN_IF_ERROR(fld->ReplaceFunction(
cond_fbody->record->fdef().signature().name(), rewritten_cond_fdef));
TF_ASSIGN_OR_RETURN(std::unique_ptr<FunctionBody> body_fbody,
InstantiateAssociatedFunction(*while_node, "body", fld));
TF_RETURN_IF_ERROR(RemoveArgsToLiftFromFunctionBody(
args_to_lift, dtypes, lifted_arg_index_to_oc_cluster_name, index_mapping,
body_fbody.get()));
CleanUpRetvalsForWhileBody(index_mapping, dtypes, body_fbody.get());
FunctionDef rewritten_body_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(
*(body_fbody->graph), body_fbody->record->fdef().signature().name(),
&rewritten_body_fdef));
TF_RETURN_IF_ERROR(fld->ReplaceFunction(
body_fbody->record->fdef().signature().name(), rewritten_body_fdef));
TF_RETURN_IF_ERROR(CleanUpInEdges(index_mapping, /*arg_to_input_edge_offset=*/0, g, while_node));
TF_RETURN_IF_ERROR(while_node->ShrinkTypeInfo(index_mapping, "T",
true));
*rewritten = true;
return absl::OkStatus();
}
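// Same lifting transformation for an If node. The input-edge offset of 1
// skips the boolean condition input; both then/else branch functions are
// rewritten and the node's "Tin" attr is shrunk.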
Status LiftOutsideCompilationOnlyArgsFromIfNode(Graph* g, Node* if_node,
FunctionLibraryDefinition* fld,
int* lifted_arg_count,
bool* rewritten) {
*rewritten = false;
TF_ASSIGN_OR_RETURN(absl::flat_hash_set<int> args_to_lift,
FindArgsToLiftForIfNode(*if_node, fld));
if (args_to_lift.empty()) return absl::OkStatus();
std::vector<DataType> dtypes;
TF_RETURN_IF_ERROR(GetNodeAttr(if_node->def(), "Tin", &dtypes));
absl::flat_hash_map<int, int> index_mapping =
ArgIndexMapping(dtypes.size(), args_to_lift);
absl::flat_hash_map<int, std::string> lifted_arg_index_to_oc_cluster_name;
TF_RETURN_IF_ERROR(MakeIdentityNodesForArgsToLift(
args_to_lift, /*arg_to_input_edge_offset=*/1, g, if_node,
&lifted_arg_index_to_oc_cluster_name, lifted_arg_count));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<FunctionBody> then_branch_fbody,
InstantiateAssociatedFunction(*if_node, "then_branch", fld));
TF_RETURN_IF_ERROR(RemoveArgsToLiftFromFunctionBody(
args_to_lift, dtypes, lifted_arg_index_to_oc_cluster_name, index_mapping,
then_branch_fbody.get()));
FunctionDef rewritten_then_branch_fdef;
TF_RETURN_IF_ERROR(
GraphToFunctionDef(*(then_branch_fbody->graph),
then_branch_fbody->record->fdef().signature().name(),
&rewritten_then_branch_fdef));
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(then_branch_fbody->record->fdef().signature().name(),
rewritten_then_branch_fdef));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<FunctionBody> else_branch_fbody,
InstantiateAssociatedFunction(*if_node, "else_branch", fld));
TF_RETURN_IF_ERROR(RemoveArgsToLiftFromFunctionBody(
args_to_lift, dtypes, lifted_arg_index_to_oc_cluster_name, index_mapping,
else_branch_fbody.get()));
FunctionDef rewritten_else_branch_fdef;
TF_RETURN_IF_ERROR(
GraphToFunctionDef(*(else_branch_fbody->graph),
else_branch_fbody->record->fdef().signature().name(),
&rewritten_else_branch_fdef));
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(else_branch_fbody->record->fdef().signature().name(),
rewritten_else_branch_fdef));
TF_RETURN_IF_ERROR(CleanUpInEdges(index_mapping, /*arg_to_input_edge_offset=*/1, g, if_node));
TF_RETURN_IF_ERROR(if_node->ShrinkTypeInfo(index_mapping, "Tin",
false));
*rewritten = true;
return absl::OkStatus();
}
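// Lifting transformation for function call nodes (direct function calls,
// PartitionedCall, and SymbolicGradient). The rewritten function is added to
// `fld` under a fresh unique name and the call node is replaced by one that
// targets it.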
Status LiftOutsideCompilationOnlyArgsFromCallNode(
Graph* g, Node* call_node, FunctionLibraryRuntime* flr,
FunctionLibraryDefinition* fld, int* lifted_arg_count, bool* rewritten) {
*rewritten = false;
NameAttrList func;
if (fld->Contains(call_node->type_string())) {
func.set_name(call_node->type_string());
*func.mutable_attr() = call_node->def().attr();
} else if (call_node->IsPartitionedCall()) {
TF_RETURN_IF_ERROR(GetNodeAttr(call_node->def(), "f", &func));
} else {
TF_RET_CHECK(call_node->type_string() ==
FunctionLibraryDefinition::kGradientOp);
func.set_name(FunctionLibraryDefinition::kGradientOp);
*func.mutable_attr() = call_node->def().attr();
}
FunctionLibraryRuntime::Handle handle;
TF_RETURN_IF_ERROR(
flr->Instantiate(func.name(), AttrSlice(&func.attr()), &handle));
auto cleanup_handle = gtl::MakeCleanup(
[&flr, &handle]() { flr->ReleaseHandle(handle).IgnoreError(); });
const FunctionBody* fbody = flr->GetFunctionBody(handle);
TF_ASSIGN_OR_RETURN(absl::flat_hash_set<int> args_to_lift,
FindArgsToLiftForCallNode(call_node, *fbody));
if (args_to_lift.empty()) return absl::OkStatus();
std::vector<DataType> dtypes;
dtypes = std::vector<DataType>(call_node->input_types().begin(),
call_node->input_types().end());
absl::flat_hash_map<int, int> index_mapping =
ArgIndexMapping(dtypes.size(), args_to_lift);
absl::flat_hash_map<int, std::string> lifted_arg_index_to_oc_cluster_name;
TF_RETURN_IF_ERROR(MakeIdentityNodesForArgsToLift(
args_to_lift, /*arg_to_input_edge_offset=*/0, g, call_node,
&lifted_arg_index_to_oc_cluster_name, lifted_arg_count));
TF_RETURN_IF_ERROR(RemoveArgsToLiftFromFunctionBody(
args_to_lift, dtypes, lifted_arg_index_to_oc_cluster_name, index_mapping,
fbody));
FunctionDef rewritten_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(
*(fbody->graph), fbody->record->fdef().signature().name(),
&rewritten_fdef));
std::string new_func_name =
fld->UniqueFunctionName(fbody->record->fdef().signature().name());
rewritten_fdef.mutable_signature()->set_name(new_func_name);
TF_RETURN_IF_ERROR(fld->AddFunctionDef(rewritten_fdef));
TF_RETURN_IF_ERROR(CleanUpInEdges(index_mapping, /*arg_to_input_edge_offset=*/0, g, call_node));
NodeDef node_def;
node_def.set_name(g->NewName(call_node->name()));
node_def.set_op(new_func_name);
if (call_node->IsPartitionedCall()) {
NameAttrList f;
TF_RETURN_IF_ERROR(GetNodeAttr(call_node->def(), "f", &f));
*node_def.mutable_attr() = f.attr();
} else if (fld->Contains(call_node->type_string())) {
*node_def.mutable_attr() = call_node->def().attr();
} else {
TF_RET_CHECK(call_node->type_string() ==
FunctionLibraryDefinition::kGradientOp);
*node_def.mutable_attr() = call_node->def().attr();
node_def.mutable_attr()->erase(FunctionLibraryDefinition::kFuncAttr);
}
TF_ASSIGN_OR_RETURN(call_node, ReplaceNode(g, call_node, node_def));
*rewritten = true;
return absl::OkStatus();
}
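// Driver: walks the graph, first lifting outside-compilation-only args
// inside the functions attached to While/If/call nodes, then applying the
// node-level rewrites above to the nodes themselves.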
Status LiftOutsideCompilationOnlyArgs(Graph* g, FunctionLibraryRuntime* flr,
FunctionLibraryDefinition* fld,
int* lifted_arg_count, bool* rewritten) {
*rewritten = false;
std::vector<Node*> while_nodes, if_nodes, call_nodes;
for (Node* n : g->op_nodes()) {
if (HasNodeAttr(n->def(), kOutsideCompilationAttr)) {
continue;
}
if (n->IsWhileNode()) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<FunctionBody> body_fbody,
InstantiateAssociatedFunction(*n, "body", fld));
bool func_rewritten = false;
TF_RETURN_IF_ERROR(LiftOutsideCompilationOnlyArgsAndReplaceFunctionDef(
*body_fbody, flr, fld, lifted_arg_count,
/*new_func_name=*/std::nullopt, &func_rewritten));
*rewritten = *rewritten || func_rewritten;
while_nodes.push_back(n);
} else if (n->IsIfNode()) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<FunctionBody> then_branch_fbody,
InstantiateAssociatedFunction(*n, "then_branch", fld));
bool func_rewritten = false;
TF_RETURN_IF_ERROR(LiftOutsideCompilationOnlyArgsAndReplaceFunctionDef(
*then_branch_fbody, flr, fld, lifted_arg_count,
/*new_func_name=*/std::nullopt, &func_rewritten));
*rewritten |= func_rewritten;
TF_ASSIGN_OR_RETURN(
std::unique_ptr<FunctionBody> else_branch_fbody,
InstantiateAssociatedFunction(*n, "else_branch", fld));
func_rewritten = false;
TF_RETURN_IF_ERROR(LiftOutsideCompilationOnlyArgsAndReplaceFunctionDef(
*else_branch_fbody, flr, fld, lifted_arg_count,
/*new_func_name=*/std::nullopt, &func_rewritten));
*rewritten |= func_rewritten;
if_nodes.push_back(n);
} else if (IsFunctionCall(*fld, *n)) {
call_nodes.push_back(n);
}
}
std::vector<Node*> rewritten_call_nodes;
for (Node* call_node : call_nodes) {
if (call_node->IsPartitionedCall()) {
std::unique_ptr<FunctionBody> function_fbody;
TF_ASSIGN_OR_RETURN(function_fbody,
InstantiateAssociatedFunction(*call_node, "f", fld));
bool func_rewritten = false;
std::string new_func_name = fld->UniqueFunctionName(
function_fbody->record->fdef().signature().name());
TF_RETURN_IF_ERROR(LiftOutsideCompilationOnlyArgsAndReplaceFunctionDef(
*function_fbody, flr, fld, lifted_arg_count, new_func_name,
&func_rewritten));
if (func_rewritten) {
NameAttrList f;
TF_RETURN_IF_ERROR(GetNodeAttr(call_node->def(), "f", &f));
f.set_name(new_func_name);
call_node->ClearAttr("f");
call_node->AddAttr("f", f);
}
*rewritten |= func_rewritten;
rewritten_call_nodes.push_back(call_node);
} else if (fld->Contains(call_node->type_string())) {
std::unique_ptr<FunctionBody> function_fbody;
const FunctionDef* fdef = fld->Find(call_node->type_string());
TF_RET_CHECK(fdef);
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*fdef, call_node->attrs(), fld,
&function_fbody));
bool func_rewritten = false;
std::string new_func_name = fld->UniqueFunctionName(
function_fbody->record->fdef().signature().name());
TF_RETURN_IF_ERROR(LiftOutsideCompilationOnlyArgsAndReplaceFunctionDef(
*function_fbody, flr, fld, lifted_arg_count, new_func_name,
&func_rewritten));
if (func_rewritten) {
NodeDef node_def;
node_def.set_name(g->NewName(call_node->name()));
node_def.set_op(new_func_name);
*node_def.mutable_attr() = call_node->def().attr();
TF_ASSIGN_OR_RETURN(call_node, ReplaceNode(g, call_node, node_def));
}
*rewritten |= func_rewritten;
rewritten_call_nodes.push_back(call_node);
} else {
TF_RET_CHECK(call_node->type_string() ==
FunctionLibraryDefinition::kGradientOp);
FunctionLibraryRuntime::Handle handle;
TF_RETURN_IF_ERROR(flr->Instantiate(call_node->type_string(),
call_node->attrs(), &handle));
auto cleanup_handle = gtl::MakeCleanup(
[&flr, &handle]() { flr->ReleaseHandle(handle).IgnoreError(); });
bool func_rewritten = false;
std::string new_func_name = fld->UniqueFunctionName(
absl::StrCat(call_node->name(), "_lift_args"));
const FunctionBody* function_fbody = flr->GetFunctionBody(handle);
TF_RETURN_IF_ERROR(LiftOutsideCompilationOnlyArgsAndReplaceFunctionDef(
*function_fbody, flr, fld, lifted_arg_count, new_func_name,
&func_rewritten));
if (func_rewritten) {
NodeDef node_def;
node_def.set_name(g->NewName(call_node->name()));
node_def.set_op(new_func_name);
*node_def.mutable_attr() = call_node->def().attr();
node_def.mutable_attr()->erase(FunctionLibraryDefinition::kFuncAttr);
TF_ASSIGN_OR_RETURN(call_node, ReplaceNode(g, call_node, node_def));
}
*rewritten |= func_rewritten;
rewritten_call_nodes.push_back(call_node);
}
}
for (Node* n : while_nodes) {
bool node_rewritten = false;
TF_RETURN_IF_ERROR(LiftOutsideCompilationOnlyArgsFromWhileNode(
g, n, fld, lifted_arg_count, &node_rewritten));
*rewritten = *rewritten || node_rewritten;
}
for (Node* n : if_nodes) {
bool node_rewritten = false;
TF_RETURN_IF_ERROR(LiftOutsideCompilationOnlyArgsFromIfNode(
g, n, fld, lifted_arg_count, &node_rewritten));
*rewritten = *rewritten || node_rewritten;
}
for (Node* n : rewritten_call_nodes) {
bool node_rewritten = false;
TF_RETURN_IF_ERROR(LiftOutsideCompilationOnlyArgsFromCallNode(
g, n, flr, fld, lifted_arg_count, &node_rewritten));
*rewritten = *rewritten || node_rewritten;
}
if (*rewritten) {
VLOG(4) << DumpGraphToFile("after_lifting_args", *g, fld);
}
return absl::OkStatus();
}
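// Controlled by a global flag; when set, Encapsulate() returns early for
// graphs that contain no TPU-replicated node.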
bool ShouldSkipEncapsulationForNonTPUGraph() {
return flags::Global().enable_skip_encapsulation_for_non_tpu_graphs.value();
}
}  // namespace
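// Validates that every data edge leaving a TPU cluster goes through a
// TPUReplicatedOutput node, then rewrites each cluster into a function call
// via EncapsulateSubgraphsInFunctions.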
Status EncapsulateTPUComputationsPass::Encapsulate(
std::unique_ptr<Graph>* graph, FunctionLibraryDefinition* flib_def) {
if (ShouldSkipEncapsulationForNonTPUGraph()) {
bool found_tpu_replicate = false;
for (const Node* n : (*graph)->nodes()) {
if (n->attrs().Find(kTPUReplicateAttr) != nullptr) {
found_tpu_replicate = true;
break;
}
}
if (!found_tpu_replicate) {
VLOG(1) << "No TPU replicate found, skipping encapsulation";
return absl::OkStatus();
}
}
for (const Edge* e : (*graph)->edges()) {
if (!e->IsControlEdge() &&
e->src()->attrs().Find(kTPUReplicateAttr) != nullptr &&
e->src()->attrs().Find(kOutsideCompilationAttr) == nullptr &&
e->dst()->attrs().Find(kTPUReplicateAttr) == nullptr &&
e->dst()->type_string() != kTPUReplicatedOutput) {
return absl::InvalidArgumentError(absl::StrCat(
"Undeclared output of TPU computation. A common cause of this error "
"is variable initializers that depend on the TPU computation. Edge: ",
FormatNodeForError(*e->src()), ":", e->src_output(), " -> ",
FormatNodeForError(*e->dst()), ":", e->dst_input()));
}
}
RemoveUnusedTPUReplicatedInputs(graph->get());
TF_RETURN_IF_ERROR(RenameClustersWithDuplicatedNames(graph->get()));
TF_RETURN_IF_ERROR(
PerformStaticShapeInferenceBeforeEncapsulation(graph->get()));
auto output = std::make_unique<Graph>((*graph)->op_registry());
TF_RETURN_WITH_CONTEXT_IF_ERROR(
EncapsulateSubgraphsInFunctions(
kTPUReplicateAttr, **graph, RewriteSubgraph,
/*reuse_existing_functions=*/true, &output, flib_def),
"EncapsulateTPUComputationsPass failed");
graph->swap(output);
return absl::OkStatus();
}
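// Replaces each encapsulated cluster node with a single _TPUReplicate op:
// GuaranteeConst nodes become Identity, TPUReplicatedInput/Output nodes are
// folded away, and the per-replica, distributed-variable, broadcast,
// variable, and guaranteed-constant inputs are flattened into the new node's
// edge lists.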
Status EncapsulateTPUComputationsPass::BuildTPUReplicateOps(
Graph* graph) {
std::vector<Node*> replicate_nodes;
std::vector<Node*> guarantee_const_nodes;
for (Node* n : graph->nodes()) {
std::string name;
if (TryGetNodeAttr(n->attrs(), kTPUReplicateAttr, &name) &&
!TryGetNodeAttr(n->attrs(), kOutsideCompilationAttr, &name)) {
replicate_nodes.push_back(n);
} else if (n->type_string() == "GuaranteeConst") {
guarantee_const_nodes.push_back(n);
}
}
for (Node* n : guarantee_const_nodes) {
std::vector<std::pair<Node*, int>> predecessors;
for (const Edge* e : n->in_edges()) {
predecessors.emplace_back(e->src(), e->src_output());
}
std::vector<std::pair<Node*, int>> successors;
for (const Edge* e : n->out_edges()) {
successors.emplace_back(e->dst(), e->dst_input());
}
NodeDef ndef;
ndef.set_name(n->name());
ndef.set_op("Identity");
ndef.set_device(n->requested_device());
MergeDebugInfo(NodeDebugInfo(n->def()), &ndef);
AddNodeAttr("T", n->output_type(0), &ndef);
graph->RemoveNode(n);
TF_ASSIGN_OR_RETURN(Node * id_node, graph->AddNode(ndef));
for (const auto& pred : predecessors) {
if (pred.second < 0) {
graph->AddControlEdge(pred.first, id_node);
} else {
graph->AddEdge(pred.first, pred.second, id_node, 0);
}
}
for (const auto& succ : successors) {
if (succ.second < 0) {
graph->AddControlEdge(id_node, succ.first);
} else {
graph->AddEdge(id_node, 0, succ.first, succ.second);
}
}
}
for (Node* replicate : replicate_nodes) {
int num_replicas;
TF_RETURN_IF_ERROR(
GetNodeAttr(replicate->attrs(), "num_replicas", &num_replicas));
int variable_start_index;
TF_RETURN_IF_ERROR(GetNodeAttr(replicate->attrs(), "_variable_start_index",
&variable_start_index));
int guaranteed_const_start_index;
TF_RETURN_IF_ERROR(GetNodeAttr(replicate->attrs(),
"_guaranteed_const_start_index",
&guaranteed_const_start_index));
if (HasNodeAttr(replicate->def(), "use_tpu")) {
bool use_tpu;
TF_RETURN_IF_ERROR(GetNodeAttr(replicate->attrs(), "use_tpu", &use_tpu));
if (!use_tpu) {
LOG(WARNING) << "use_tpu=false attr on a TPUReplicate node is ignored.";
}
}
std::vector<const Edge*> in_edges;
TF_RETURN_IF_ERROR(replicate->input_edges(&in_edges));
int pos = 0;
std::vector<int> mirrored_variable_indices;
int distributed_var_start_index = 0;
while (pos < in_edges.size() &&
in_edges[pos]->src()->type_string() == kTPUReplicatedInput) {
int input_num_replicas;
TF_RETURN_IF_ERROR(
GetNodeAttr(in_edges[pos]->src()->attrs(), "N", &input_num_replicas));
bool is_mirrored_variable;
TF_CHECK_OK(GetNodeAttr(in_edges[pos]->src()->attrs(),
"is_mirrored_variable", &is_mirrored_variable));
if (is_mirrored_variable) {
mirrored_variable_indices.push_back(pos);
}
bool is_packed = false;
GetNodeAttr(in_edges[pos]->src()->attrs(), "is_packed", &is_packed)
.IgnoreError();
bool is_distributed_variable =
is_packed && (in_edges[pos]->src()->output_type(
in_edges[pos]->src_output()) == DT_RESOURCE);
if (!is_distributed_variable && input_num_replicas != num_replicas) {
return absl::InvalidArgumentError(absl::StrCat(
"Mismatched number of replicas. Computation has ", num_replicas,
" replicas, input '", FormatNodeForError(*in_edges[pos]->src()),
"' has ", input_num_replicas, " replicas."));
}
if (!is_distributed_variable) {
if (distributed_var_start_index < pos) {
return absl::InvalidArgumentError(
absl::StrCat("Expect a distributed resource after index ",
distributed_var_start_index,
", but got a replicated resource at index ", pos));
} else {
++distributed_var_start_index;
}
}
++pos;
}
const int num_replicated_inputs = distributed_var_start_index;
const int num_distributed_vars = pos - num_replicated_inputs;
const int num_variables =
std::max(0, guaranteed_const_start_index - variable_start_index);
const int num_guaranteed_constants =
in_edges.size() - guaranteed_const_start_index;
TF_RET_CHECK(num_guaranteed_constants >= 0);
VLOG(1) << "Replicate node '" << replicate->name() << "'"
<< " input edges: " << in_edges.size()
<< " num_replicated_inputs: " << num_replicated_inputs
<< " num_distributed_vars: " << num_distributed_vars
<< " num_variables: " << num_variables
<< " num_guaranteed_constants: " << num_guaranteed_constants
<< " num_mirrored_variables: " << mirrored_variable_indices.size();
const int num_broadcast_inputs =
in_edges.size() - (num_replicated_inputs + num_distributed_vars +
num_variables + num_guaranteed_constants);
TF_RET_CHECK(num_broadcast_inputs >= 0);
const int num_inputs = num_replicated_inputs * num_replicas +
num_distributed_vars + num_broadcast_inputs +
num_guaranteed_constants + num_variables;
std::vector<Node*> nodes_to_remove = {replicate};
std::vector<std::pair<Node*, int>> data_inputs(num_inputs);
gtl::FlatSet<Node*> control_inputs;
AddControlInputs(*replicate, &control_inputs);
DataTypeVector replicated_input_types(num_replicated_inputs * num_replicas +
num_distributed_vars);
for (int i = 0; i < num_replicated_inputs; ++i) {
std::vector<const Edge*> replica_in_edges;
TF_RETURN_IF_ERROR(in_edges[i]->src()->input_edges(&replica_in_edges));
for (int replica = 0; replica < num_replicas; ++replica) {
int pos = replica * num_replicated_inputs + i;
const Edge* edge = replica_in_edges[replica];
data_inputs[pos] = {edge->src(), edge->src_output()};
replicated_input_types[pos] = EdgeType(edge);
}
AddControlInputs(*in_edges[i]->src(), &control_inputs);
nodes_to_remove.push_back(in_edges[i]->src());
}
for (int i = 0; i < num_distributed_vars; ++i) {
int pos = num_replicas * num_replicated_inputs + i;
std::vector<const Edge*> replica_in_edges;
TF_RETURN_IF_ERROR(
in_edges[num_replicated_inputs + i]->src()->input_edges(
&replica_in_edges));
TF_RET_CHECK(replica_in_edges.size() == 1);
const Edge* edge = replica_in_edges[0];
data_inputs[pos] = {edge->src(), edge->src_output()};
replicated_input_types[pos] = EdgeType(edge);
AddControlInputs(*in_edges[num_replicated_inputs + i]->src(),
&control_inputs);
nodes_to_remove.push_back(in_edges[num_replicated_inputs + i]->src());
}
DataTypeVector broadcast_input_types(num_broadcast_inputs);
for (int i = 0; i < num_broadcast_inputs; ++i) {
int pos = num_replicas * num_replicated_inputs + num_distributed_vars + i;
const Edge* edge =
in_edges[num_replicated_inputs + num_distributed_vars + i];
data_inputs[pos] = {edge->src(), edge->src_output()};
broadcast_input_types[i] = EdgeType(edge);
}
for (int i = 0; i < num_variables; ++i) {
int pos = num_replicas * num_replicated_inputs + num_distributed_vars +
num_broadcast_inputs + i;
const Edge* edge = in_edges[num_replicated_inputs + num_distributed_vars +
num_broadcast_inputs + i];
data_inputs[pos] = {edge->src(), edge->src_output()};
}
DataTypeVector guaranteed_constant_types(num_guaranteed_constants);
for (int i = 0; i < num_guaranteed_constants; ++i) {
int pos = num_replicas * num_replicated_inputs + num_distributed_vars +
num_broadcast_inputs + num_variables + i;
const Edge* edge = in_edges[num_replicated_inputs + num_distributed_vars +
num_broadcast_inputs + num_variables + i];
data_inputs[pos] = {edge->src(), edge->src_output()};
guaranteed_constant_types[i] = EdgeType(edge);
}
const int num_outputs = replicate->output_types().size();
gtl::FlatSet<Node*> control_outputs;
std::vector<Node*> replicated_outputs(num_outputs);
for (const Edge* e : replicate->out_edges()) {
if (e->IsControlEdge()) {
control_outputs.insert(e->dst());
} else {
TF_RET_CHECK(e->src_output() < num_outputs);
TF_RET_CHECK(e->dst()->type_string() == kTPUReplicatedOutput)
<< e->DebugString();
TF_RET_CHECK(e->dst()->output_types().size() == num_replicas);
replicated_outputs[e->src_output()] = e->dst();
nodes_to_remove.push_back(e->dst());
AddControlOutputs(*e->dst(), &control_outputs);
}
}
std::vector<std::vector<std::pair<Node*, int>>> data_outputs(num_replicas *
num_outputs);
DataTypeVector output_types(num_replicas * num_outputs);
for (int i = 0; i < num_outputs; ++i) {
std::vector<std::vector<const Edge*>> replica_out_edges(num_replicas);
TF_RET_CHECK(replicated_outputs[i] != nullptr);
for (const Edge* e : replicated_outputs[i]->out_edges()) {
TF_RET_CHECK(!e->IsControlEdge());
replica_out_edges[e->src_output()].push_back(e);
}
for (int replica = 0; replica < num_replicas; ++replica) {
const int pos = replica * num_outputs + i;
for (const Edge* edge : replica_out_edges[replica]) {
data_outputs[pos].push_back({edge->dst(), edge->dst_input()});
}
output_types[pos] = replicated_outputs[i]->input_type(0);
}
}
NodeDef def;
def.set_name(replicate->name());
def.set_op("_TPUReplicate");
MergeDebugInfo(NodeDebugInfo(replicate->def()), &def);
NameAttrList computation;
computation.set_name(replicate->type_string());
AddNodeAttr("computation", computation, &def);
for (const auto& attr : replicate->attrs()) {
def.mutable_attr()->insert(attr);
}
AddNodeAttr("Tinputs", replicated_input_types, &def);
AddNodeAttr("Tbroadcast_inputs", broadcast_input_types, &def);
AddNodeAttr("NumVariables", num_variables, &def);
AddNodeAttr("Tguaranteed_constants", guaranteed_constant_types, &def);
AddNodeAttr("output_types", output_types, &def);
AddNodeAttr(TPUREPLICATE_MIRRORED_VAR_INDICES_ATTR,
mirrored_variable_indices, &def);
AddNodeAttr("num_distributed_variables", num_distributed_vars, &def);
for (Node* node : nodes_to_remove) {
VLOG(2) << "Deleting node " << node->DebugString();
control_inputs.erase(node);
control_outputs.erase(node);
graph->RemoveNode(node);
}
TF_ASSIGN_OR_RETURN(Node * tpu_replicate, graph->AddNode(def));
for (int i = 0; i < data_inputs.size(); ++i) {
graph->AddEdge(data_inputs[i].first, data_inputs[i].second, tpu_replicate,
i);
}
for (Node* n : control_inputs) {
graph->AddControlEdge(n, tpu_replicate);
}
for (int i = 0; i < data_outputs.size(); ++i) {
for (const auto& successor : data_outputs[i]) {
graph->AddEdge(tpu_replicate, i, successor.first, successor.second);
}
}
for (Node* n : control_outputs) {
graph->AddControlEdge(tpu_replicate, n);
}
}
return absl::OkStatus();
}
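// Pass entry point: encapsulates clusters into functions, then lowers them
// to _TPUReplicate ops, dumping the graph at each stage when VLOG(1) is on.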
Status EncapsulateTPUComputationsPass::Run(
const GraphOptimizationPassOptions& options) {
VLOG(1) << "EncapsulateTPUComputations(): "
<< DumpGraphToFile("encapsulate_tpu_computations_before",
**options.graph, options.flib_def);
TF_RETURN_IF_ERROR(Encapsulate(options.graph, options.flib_def));
VLOG(1) << "EncapsulateTPUComputations() half-way: "
<< DumpGraphToFile("encapsulate_tpu_computations_halfway",
**options.graph, options.flib_def);
TF_RETURN_IF_ERROR(BuildTPUReplicateOps(options.graph->get()));
VLOG(1) << "EncapsulateTPUComputations() finished: "
<< DumpGraphToFile("encapsulate_tpu_computations_after",
**options.graph, options.flib_def);
return absl::OkStatus();
}
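// For each XLA cluster, lifts outside-compilation-only args and moves
// head/tail outside compilation segments back to the host graph, then prunes
// unused XLA inputs/outputs and re-registers the rewritten function.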
Status ExtractOutsideCompilationPass::ProcessHeadTailOutsideCompilation(
const std::string& outside_compilation_attr_name, int* lifted_arg_count,
std::unordered_map<std::string, XlaClusterInfo>* clusters, Graph* g,
FunctionLibraryRuntime* flr, FunctionLibraryDefinition* fld) {
absl::node_hash_map<std::string, Node*> pivots;
std::string cluster_name;
for (Node* node : g->nodes()) {
if (TryGetNodeAttr(node->attrs(), kPivotForClusterAttr, &cluster_name)) {
pivots[cluster_name] = node;
}
}
for (auto& iter : *clusters) {
Node* pivot_node = pivots[iter.first];
std::string xla_func_name = iter.second.func_name_attrs.name();
std::unique_ptr<FunctionBody> xla_fbody;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*fld->Find(xla_func_name),
AttrSlice(&iter.second.func_name_attrs.attr()), fld, &xla_fbody));
Graph* xla_graph = xla_fbody->graph;
FixupSourceAndSinkEdges(xla_graph);
TF_RETURN_IF_ERROR(RemoveIdentityNodesForArgRetval(xla_graph));
bool rewritten;
TF_RETURN_IF_ERROR(LiftOutsideCompilationOnlyArgs(
xla_graph, flr, fld, lifted_arg_count, &rewritten));
TF_RETURN_IF_ERROR(MoveHeadOutsideCompilationToHost(
outside_compilation_attr_name, iter.second.func_name_attrs.name(),
iter.second.cluster_name, g, xla_graph, iter.second.node, pivot_node));
TF_RETURN_IF_ERROR(MoveTailOutsideCompilationToHost(
outside_compilation_attr_name, iter.second.func_name_attrs.name(),
iter.second.cluster_name, g, xla_graph, iter.second.node, pivot_node));
TF_RETURN_IF_ERROR(ReplaceArgUsedByOutsideCompilationWithPlaceholder(
outside_compilation_attr_name, xla_func_name, g, xla_graph,
iter.second.node));
TF_RETURN_IF_ERROR(RemoveEdgesBetweenArgAndRetval(
iter.second.func_name_attrs.name(), g, xla_graph, iter.second.node));
TF_RETURN_IF_ERROR(RemoveUnusedXlaInput(iter.second.func_name_attrs.name(),
g, xla_graph, iter.second.node));
TF_RETURN_IF_ERROR(RemoveUnusedXlaOutput(iter.second.func_name_attrs.name(),
g, xla_graph, iter.second.node));
FunctionDef replace_fdef;
TF_RETURN_IF_ERROR(
GraphToFunctionDef(*xla_graph, xla_func_name, &replace_fdef));
TF_RETURN_IF_ERROR(fld->ReplaceFunction(xla_func_name, replace_fdef));
FixupSourceAndSinkEdges(g);
}
return absl::OkStatus();
}
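// Collects _TPUReplicate clusters (with their host_compute_core mappings),
// processes head/tail outside compilation, and extracts the remaining
// outside compilation into the host graph.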
Status ExtractOutsideCompilationPass::Run(
const GraphOptimizationPassOptions& options) {
const auto* config =
(options.session_options ? &options.session_options->config : nullptr);
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(
new ProcessFunctionLibraryRuntime(
/*device_mgr=*/nullptr, options.session_options->env,
config, TF_GRAPH_DEF_VERSION, options.flib_def,
config ? config->graph_options().optimizer_options()
: OptimizerOptions()));
FunctionLibraryRuntime* flr =
pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
static std::map<std::string, std::string>* kNodeTypeToFunctionAttrMapping =
new std::map<std::string, std::string>{
{"_TPUReplicate", "computation"},
};
std::unordered_map<std::string, XlaClusterInfo> clusters;
int lifted_arg_count = 0;
for (Node* n : (*options.graph)->nodes()) {
auto iter = kNodeTypeToFunctionAttrMapping->find(n->type_string());
if (iter == kNodeTypeToFunctionAttrMapping->end()) {
continue;
}
std::string xla_cluster_name = n->name();
std::string func_attr = iter->second;
NameAttrList func;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), func_attr, &func));
std::vector<std::string> core_list;
TF_RETURN_IF_ERROR(
GetNodeAttr(n->attrs(), "host_compute_core", &core_list));
std::map<std::string, int> host_compute_core;
TF_RETURN_IF_ERROR(ParseHostComputeCoreList(core_list, &host_compute_core));
clusters.emplace(xla_cluster_name, XlaClusterInfo{xla_cluster_name, func, n,
host_compute_core});
}
TF_RETURN_IF_ERROR(ProcessHeadTailOutsideCompilation(
kOutsideCompilationAttr, &lifted_arg_count, &clusters,
options.graph->get(), flr, options.flib_def));
bool modified;
TF_RETURN_IF_ERROR(ExtractOutsideCompilation(
kTPUReplicateAttr, kOutsideCompilationAttr, clusters,
options.graph->get(), flr, options.flib_def, &modified));
if (modified) {
TF_RETURN_IF_ERROR(
PruneUnreachableFunctionsFromGraph(**options.graph, options.flib_def));
}
return absl::OkStatus();
}
} | #include "tensorflow/core/tpu/graph_rewrite/encapsulate_tpu_computations_pass.h"
#include <memory>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
std::unique_ptr<Graph> CreateGraph() {
auto g = std::make_unique<Graph>(OpRegistry::Global());
auto in0 = test::graph::Arg(g.get(), 0, DT_FLOAT);
auto in1 = test::graph::Arg(g.get(), 1, DT_FLOAT);
auto tmp = test::graph::Add(g.get(), in0, in1);
auto ret = test::graph::Retval(g.get(), 0, tmp);
g->AddControlEdge(in1, ret);
FixupSourceAndSinkEdges(g.get());
return g;
}
TEST(EncapsulateTPUComputationsPassTest, NonTPUGraph) {
auto g = CreateGraph();
GraphOptimizationPassOptions options;
options.graph = &g;
options.flib_def = g->mutable_flib_def();
EncapsulateTPUComputationsPass pass;
TF_ASSERT_OK(pass.Run(options));
int nodes_meeting_expectations = 0;
for (const auto* node : g->nodes()) {
if (!IsSource(node) && !IsSink(node)) {
ASSERT_TRUE(node->attrs().Find("_xla_inferred_shapes"));
++nodes_meeting_expectations;
}
}
EXPECT_EQ(nodes_meeting_expectations, 4);
}
TEST(EncapsulateTPUComputationsPassTest, SkipEncapsulationForNonTPUGraph) {
flags::Global().enable_skip_encapsulation_for_non_tpu_graphs.reset(true);
auto g = CreateGraph();
GraphOptimizationPassOptions options;
options.graph = &g;
options.flib_def = g->mutable_flib_def();
EncapsulateTPUComputationsPass pass;
TF_ASSERT_OK(pass.Run(options));
int nodes_meeting_expectations = 0;
for (const auto* node : g->nodes()) {
if (!IsSource(node) && !IsSink(node)) {
ASSERT_FALSE(node->attrs().Find("_xla_inferred_shapes"));
++nodes_meeting_expectations;
}
}
EXPECT_EQ(nodes_meeting_expectations, 4);
flags::Global().enable_skip_encapsulation_for_non_tpu_graphs.reset(false);
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tpu/graph_rewrite/encapsulate_tpu_computations_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tpu/graph_rewrite/encapsulate_tpu_computations_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
054d4ec8-7d02-492e-9705-84fd9efd0b3a | cpp | tensorflow/tensorflow | serialization_utils | tensorflow/core/data/serialization_utils.cc | tensorflow/core/data/serialization_utils_test.cc | #include "tensorflow/core/data/serialization_utils.h"
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/data/compression_utils.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/platform/stringpiece.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kDelimiter[] = "@@";
constexpr char kComponent[] = "component";
constexpr char kNumComponents[] = "num_components";
constexpr char kNumElements[] = "num_elements";
constexpr char kIsDataset[] = ".is_dataset";
constexpr char kIteratorVariantTypeName[] = "tensorflow::Iterator";
constexpr char kOutputNode[] = ".output_node";
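// Rebuilds a dataset tensor from a serialized graph: imports `graph_def`
// into a fresh graph (with a cloned function library) and evaluates
// `output_node` with GraphRunner, returning the single output in `result`.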
Status FromGraphDef(FunctionLibraryRuntime* flr, const GraphDef& graph_def,
const std::vector<std::pair<string, Tensor>>& input_list,
const string& output_node, Tensor* result) {
FunctionLibraryRuntime* cloned_flr = nullptr;
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr = nullptr;
std::unique_ptr<FunctionLibraryDefinition> lib_def = nullptr;
TF_RETURN_IF_ERROR(flr->Clone(&lib_def, &pflr, &cloned_flr, true));
TF_RETURN_IF_ERROR(AddToFunctionLibrary(lib_def.get(), graph_def.library()));
Graph graph(OpRegistry::Global());
TF_RETURN_IF_ERROR(ImportGraphDef({}, graph_def, &graph, nullptr));
std::vector<Tensor> outputs;
GraphRunner graph_runner(cloned_flr->device());
TF_RETURN_IF_ERROR(graph_runner.Run(&graph, cloned_flr, input_list,
{output_node}, &outputs));
*result = outputs[0];
return absl::OkStatus();
}
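// Collects the names of stateful ops in `graph_def` and in the functions of
// its library; used to emit a warning when serializing under POLICY_WARN.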
Status FindStatefulOps(const GraphDef& graph_def,
std::vector<string>* stateful_op_names) {
FunctionLibraryDefinition lib_def(OpRegistry::Global(), graph_def.library());
for (const auto& node : graph_def.node()) {
if (node.op() == FunctionLibraryDefinition::kRetOp) continue;
if (!IsNodeStateful(lib_def, node).ok()) {
stateful_op_names->push_back(node.op());
}
}
for (const auto& fdef : graph_def.library().function()) {
if (!fdef.signature().is_stateful()) continue;
for (const auto& node : fdef.node_def()) {
if (!IsNodeStateful(lib_def, node).ok()) {
stateful_op_names->push_back(
absl::StrCat(node.op(), " in function: ", fdef.signature().name()));
}
}
}
return absl::OkStatus();
}
}  // namespace
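// Restores a vector of elements written by WriteElementsToCheckpoint: reads
// the element count stored under `key_prefix`, then each element's component
// count and component tensors.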
Status ReadElementsFromCheckpoint(IteratorContext* ctx,
IteratorStateReader* reader,
StringPiece key_prefix,
std::vector<std::vector<Tensor>>* elements) {
int64_t num_elements;
TF_RETURN_IF_ERROR(
reader->ReadScalar(key_prefix, kNumElements, &num_elements));
DCHECK(elements->empty());
elements->reserve(num_elements);
for (int i = 0; i < num_elements; ++i) {
std::string element_prefix = absl::StrCat(key_prefix, "::", i);
int64_t num_components;
TF_RETURN_IF_ERROR(
reader->ReadScalar(element_prefix, kNumComponents, &num_components));
elements->emplace_back();
std::vector<Tensor>& element = elements->at(i);
element.reserve(num_components);
for (int j = 0; j < num_components; ++j) {
element.emplace_back();
TF_RETURN_IF_ERROR(reader->ReadTensor(
ctx->flr(), element_prefix, absl::StrCat(kComponent, "[", j, "]"),
&element.back()));
}
}
return absl::OkStatus();
}
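// Writes a single element (one vector of component tensors) under
// "<key_prefix>::<index>".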
Status WriteElement(IteratorStateWriter* writer, StringPiece key_prefix,
const std::vector<std::vector<Tensor>>& elements,
int64_t index) {
const std::vector<Tensor>& element = elements[index];
std::string element_prefix = absl::StrCat(key_prefix, "::", index);
TF_RETURN_IF_ERROR(
writer->WriteScalar(element_prefix, kNumComponents, element.size()));
for (int j = 0; j < element.size(); ++j) {
TF_RETURN_IF_ERROR(writer->WriteTensor(
element_prefix, absl::StrCat(kComponent, "[", j, "]"), element[j]));
}
return absl::OkStatus();
}
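// Writes all elements plus their count; the inverse of
// ReadElementsFromCheckpoint.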
Status WriteElementsToCheckpoint(
IteratorStateWriter* writer, StringPiece key_prefix,
const std::vector<std::vector<Tensor>>& elements) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(key_prefix, kNumElements, elements.size()));
for (int i = 0; i < elements.size(); ++i) {
TF_RETURN_IF_ERROR(WriteElement(writer, key_prefix, elements, i));
}
return absl::OkStatus();
}
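// Like WriteElementsToCheckpoint, but rewrites only the elements whose
// indices appear in `checkpoint_indices` (plus the element count), leaving
// previously written entries at the other indices untouched.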
Status UpdateCheckpointElements(
IteratorStateWriter* writer, StringPiece key_prefix,
const std::vector<std::vector<Tensor>>& elements,
const absl::flat_hash_set<int64_t>& checkpoint_indices) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(key_prefix, kNumElements, elements.size()));
for (int64_t i : checkpoint_indices) {
TF_RETURN_IF_ERROR(WriteElement(writer, key_prefix, elements, i));
}
return absl::OkStatus();
}
VariantTensorDataReader::VariantTensorDataReader(
const std::vector<const tensorflow::VariantTensorData*>& data) {
for (const auto& d : data) {
string metadata;
d->get_metadata(&metadata);
auto keys = str_util::Split(metadata, kDelimiter, str_util::SkipEmpty());
const string name = keys[0];
data_[name] = d;
map_[name] = std::map<string, size_t>();
for (size_t i = 1; i < keys.size(); ++i) {
map_[name][keys[i]] = i - 1;
}
}
}
Status VariantTensorDataReader::ReadScalar(StringPiece key,
int64_t* val) const {
string prefix;
TF_RETURN_IF_ERROR(ExtractIteratorPrefix(key, &prefix));
return ReadScalar(prefix, key, val);
}
Status VariantTensorDataReader::ReadScalar(StringPiece name, StringPiece key,
int64_t* val) const {
return ReadScalarInternal(name, key, val);
}
Status VariantTensorDataReader::ReadScalar(StringPiece key,
tstring* val) const {
string prefix;
TF_RETURN_IF_ERROR(ExtractIteratorPrefix(key, &prefix));
return ReadScalar(prefix, key, val);
}
Status VariantTensorDataReader::ReadScalar(StringPiece name, StringPiece key,
tstring* val) const {
return ReadScalarInternal(name, key, val);
}
Status VariantTensorDataReader::ReadTensor(StringPiece key, Tensor* val) const {
string prefix;
TF_RETURN_IF_ERROR(ExtractIteratorPrefix(key, &prefix));
return ReadTensor(prefix, key, val);
}
Status VariantTensorDataReader::ReadTensor(FunctionLibraryRuntime* flr,
StringPiece key, Tensor* val) const {
string prefix;
TF_RETURN_IF_ERROR(ExtractIteratorPrefix(key, &prefix));
return ReadTensorInternal(flr, prefix, key, val);
}
Status VariantTensorDataReader::ReadTensor(StringPiece name, StringPiece key,
Tensor* val) const {
return ReadTensor(nullptr, name, key, val);
}
Status VariantTensorDataReader::ReadTensor(FunctionLibraryRuntime* flr,
StringPiece name, StringPiece key,
Tensor* val) const {
return ReadTensorInternal(flr, name, key, val);
}
bool VariantTensorDataReader::Contains(StringPiece key) const {
string prefix;
if (!ExtractIteratorPrefix(key, &prefix).ok()) {
return false;
}
return Contains(prefix, key);
}
bool VariantTensorDataReader::Contains(StringPiece n, StringPiece key) const {
string name(n);
auto it = map_.find(name);
if (it == map_.end()) {
return false;
}
const auto& bucket = it->second;
return bucket.find(string(key)) != bucket.end();
}
template <typename T>
Status VariantTensorDataReader::ReadScalarInternal(StringPiece n,
StringPiece key,
T* val) const {
string name(n);
auto it = map_.find(name);
if (it == map_.end()) {
return errors::NotFound(name);
}
const auto& bucket = it->second;
auto key_it = bucket.find(string(key));
if (key_it == bucket.end()) {
return errors::NotFound(key);
}
*val = data_.at(name)->tensors(key_it->second).scalar<T>()();
return absl::OkStatus();
}
Status VariantTensorDataReader::ReadTensorInternal(FunctionLibraryRuntime* flr,
StringPiece n,
StringPiece key,
Tensor* val) const {
if (Contains(n, strings::StrCat(key, kIsDataset))) {
return ReadDatasetInternal(flr, n, key, val);
}
string name(n);
auto it = map_.find(name);
if (it == map_.end()) {
return errors::NotFound(name);
}
const auto& bucket = it->second;
auto key_it = bucket.find(string(key));
if (key_it == bucket.end()) {
return errors::NotFound(key);
}
*val = data_.at(name)->tensors(key_it->second);
return absl::OkStatus();
}
Status VariantTensorDataReader::ReadDatasetInternal(FunctionLibraryRuntime* flr,
StringPiece n,
StringPiece key,
Tensor* val) const {
if (flr == nullptr) {
return errors::Internal(
"Function library runtime is needed to restore a dataset.");
}
tstring output_node, serialized_graph_def;
TF_RETURN_IF_ERROR(
ReadScalar(n, strings::StrCat(key, kOutputNode), &output_node));
TF_RETURN_IF_ERROR(
ReadScalar(n, key, &serialized_graph_def));
GraphDef graph_def;
graph_def.ParseFromString(serialized_graph_def);
TF_RETURN_IF_ERROR(FromGraphDef(flr, graph_def, {}, output_node, val));
return absl::OkStatus();
}
std::map<string, Tensor> VariantTensorDataReader::ReadAllTensors() {
std::map<string, Tensor> result;
for (const auto& entry : map_) {
string key1 = entry.first;
for (const auto& inner : entry.second) {
string key2 = inner.first;
size_t index = inner.second;
result[absl::StrCat(key1, kDelimiter, key2)] =
data_[key1]->tensors(index);
}
}
return result;
}
Status VariantTensorDataWriter::WriteScalar(StringPiece key,
const int64_t val) {
string prefix;
TF_RETURN_IF_ERROR(ExtractIteratorPrefix(key, &prefix));
return WriteScalar(prefix, key, val);
}
Status VariantTensorDataWriter::WriteScalar(StringPiece name, StringPiece key,
const int64_t val) {
return WriteScalarInternal(name, key, val);
}
Status VariantTensorDataWriter::WriteScalar(StringPiece key,
const tstring& val) {
string prefix;
TF_RETURN_IF_ERROR(ExtractIteratorPrefix(key, &prefix));
return WriteScalar(prefix, key, val);
}
Status VariantTensorDataWriter::WriteScalar(StringPiece name, StringPiece key,
const tstring& val) {
return WriteScalarInternal(name, key, val);
}
Status VariantTensorDataWriter::WriteTensor(StringPiece key,
const Tensor& val) {
string prefix;
TF_RETURN_IF_ERROR(ExtractIteratorPrefix(key, &prefix));
return WriteTensor(prefix, key, val);
}
Status VariantTensorDataWriter::WriteTensor(StringPiece name, StringPiece key,
const Tensor& val) {
return WriteTensorInternal(name, key, val);
}
void VariantTensorDataWriter::MaybeFlush() {
if (is_flushed_) return;
for (auto& keys : keys_) {
const string name = keys.first;
string metadata = name;
for (size_t i = 0; i < keys_[name].size(); ++i) {
strings::StrAppend(&metadata, kDelimiter, keys_[name][i]);
}
data_[name]->set_metadata(metadata);
}
is_flushed_ = true;
}
void VariantTensorDataWriter::Reset() {
is_flushed_ = false;
data_.clear();
keys_.clear();
}
void VariantTensorDataWriter::ReleaseData(
std::vector<std::unique_ptr<VariantTensorData>>* variants) {
MaybeFlush();
for (auto& it : data_) {
variants->push_back(std::move(it.second));
}
Reset();
}
void VariantTensorDataWriter::GetData(
std::vector<const VariantTensorData*>* variants) {
MaybeFlush();
for (auto& it : data_) {
variants->push_back(it.second.get());
}
}
template <typename T>
Status VariantTensorDataWriter::WriteScalarInternal(StringPiece name,
StringPiece key,
const T& val) {
if (is_flushed_) {
return errors::FailedPrecondition(
"Cannot call WriteScalar after GetData or ReleaseData is called");
}
Tensor val_t = Tensor(DataTypeToEnum<T>::v(), TensorShape({}));
val_t.scalar<T>()() = val;
return WriteTensorInternal(name, key, val_t);
}
Status VariantTensorDataWriter::WriteTensorInternal(StringPiece n,
StringPiece key,
const Tensor& val) {
DatasetBase* dataset;
if (GetDatasetFromVariantTensor(val, &dataset).ok()) {
return WriteDatasetInternal(n, key, dataset);
}
if (is_flushed_) {
return errors::FailedPrecondition(
"Cannot call WriteTensor after GetData or ReleaseData is called");
}
DCHECK_EQ(key.find(kDelimiter), string::npos);
string name(n);
if (keys_.count(name) == 0) {
keys_[name] = std::vector<string>();
}
keys_[name].push_back(string(key));
if (data_.count(name) == 0) {
data_[name] = std::make_unique<VariantTensorData>();
data_[name]->set_type_name("tensorflow::Iterator");
}
*(data_[name]->add_tensors()) = val;
return absl::OkStatus();
}
Status VariantTensorDataWriter::WriteDatasetInternal(
StringPiece n, StringPiece key, const DatasetBase* dataset) {
GraphDef graph_def;
SerializationContext ctx((SerializationContext::Params()));
TF_RETURN_IF_ERROR(AsGraphDef(dataset, std::move(ctx), &graph_def));
string output_node;
for (const auto& node : graph_def.node()) {
if (node.op() == kRetvalOp) {
output_node = node.input(0);
break;
}
}
string result;
graph_def.SerializeToString(&result);
TF_RETURN_IF_ERROR(WriteScalar(n, strings::StrCat(key, kIsDataset), ""));
TF_RETURN_IF_ERROR(
WriteScalar(n, strings::StrCat(key, kOutputNode), output_node));
TF_RETURN_IF_ERROR(WriteScalar(n, key, result));
return absl::OkStatus();
}
std::string IteratorStateVariant::TypeName() {
return kIteratorVariantTypeName;
}
IteratorStateVariant::IteratorStateVariant(const IteratorStateVariant& other) {
if (other.data_) {
data_ = std::make_unique<VariantTensorData>(*other.data_);
}
}
Status IteratorStateVariant::InitializeFromVariantData(
std::unique_ptr<VariantTensorData> data) {
data_ = std::move(data);
return absl::OkStatus();
}
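// Serializes the held state, compressing the tensors into a single
// CompressedElement variant when possible; falls back to copying the raw
// data if compression fails.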
void IteratorStateVariant::Encode(VariantTensorData* data) const {
CompressedElement compressed_tensors;
Status s = CompressElement(data_->tensors(), &compressed_tensors);
if (!s.ok()) {
LOG(WARNING) << "Failed to compress iterator state variant: " << s;
*data = *data_;
return;
}
data->set_type_name(TypeName());
data->set_metadata(data_->metadata_string());
Tensor tensor(DT_VARIANT, TensorShape({}));
tensor.scalar<Variant>()() = std::move(compressed_tensors);
*data->add_tensors() = std::move(tensor);
}
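// Deserializes iterator state, transparently uncompressing a
// CompressedElement payload; payloads that are not compressed, or that fail
// to uncompress, are kept as-is.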
bool IteratorStateVariant::Decode(VariantTensorData data) {
if (data.type_name() != TypeName()) {
return false;
}
const CompressedElement* compressed = GetCompressedElement(data);
if (!compressed) {
data_ = std::make_unique<VariantTensorData>(std::move(data));
return true;
}
std::vector<Tensor> tensors;
Status s = UncompressElement(*compressed, &tensors);
if (!s.ok()) {
LOG(WARNING) << "Failed to uncompress iterator state variant: " << s;
data_ = std::make_unique<VariantTensorData>(std::move(data));
return true;
}
data_ = std::make_unique<VariantTensorData>();
data_->set_type_name(TypeName());
data_->set_metadata(std::move(data.metadata_string()));
for (auto& tensor : tensors) {
*data_->add_tensors() = std::move(tensor);
}
return true;
}
const CompressedElement* IteratorStateVariant::GetCompressedElement(
const VariantTensorData& data) {
bool should_uncompress =
data.tensors_size() == 1 &&
TensorShapeUtils::IsScalar(data.tensors(0).shape()) &&
data.tensors(0).dtype() == DT_VARIANT;
if (!should_uncompress) {
return nullptr;
}
const Variant& variant = data.tensors(0).scalar<Variant>()();
return variant.get<CompressedElement>();
}
std::string IteratorStateVariant::DebugString() const {
if (data_) {
return strings::StrCat("IteratorStateVariant<", data_->DebugString(), ">");
} else {
return strings::StrCat("IteratorStateVariant<empty>");
}
}
REGISTER_UNARY_VARIANT_DECODE_FUNCTION(IteratorStateVariant,
kIteratorVariantTypeName);
Status AsGraphDefForRewrite(OpKernelContext* ctx, const DatasetBase* input,
std::vector<std::pair<string, Tensor>>* input_list,
GraphDef* result, string* dataset_node) {
SerializationContext::Params params(ctx);
params.input_list = input_list;
params.external_state_policy = ExternalStatePolicy::POLICY_IGNORE;
params.is_graph_rewrite = true;
SerializationContext serialization_ctx(params);
TF_RETURN_IF_ERROR(AsGraphDef(input, std::move(serialization_ctx), result));
for (const auto& node : result->node()) {
if (node.op() == kRetvalOp) {
*dataset_node = node.input(0);
}
}
return absl::OkStatus();
}
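// Serializes `dataset` into a GraphDef whose "dataset" _Retval node yields
// the dataset variant, honoring the context's external state policy
// (fail, warn, or ignore).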
Status AsGraphDef(const DatasetBase* dataset,
SerializationContext&& serialization_ctx,
GraphDef* graph_def) {
if (serialization_ctx.external_state_policy() ==
ExternalStatePolicy::POLICY_FAIL) {
TF_RETURN_IF_ERROR(dataset->CheckExternalState());
}
if (serialization_ctx.external_state_policy() ==
ExternalStatePolicy::POLICY_WARN) {
std::vector<string> stateful_op_names;
TF_RETURN_IF_ERROR(FindStatefulOps(*graph_def, &stateful_op_names));
if (!stateful_op_names.empty()) {
LOG(WARNING) << "We found the following stateful ops in the dataset "
"construction graph whose state would not be "
"serialized and might "
"cause subtle bugs: "
<< absl::StrJoin(stateful_op_names, ", ");
}
}
GraphDefBuilder b;
DatasetBase::DatasetGraphDefBuilder db(&b);
Node* output_node = nullptr;
TF_RETURN_IF_ERROR(
db.AddInputDataset(&serialization_ctx, dataset, &output_node));
ops::UnaryOp(std::string(kRetvalOp), output_node,
b.opts()
.WithName("dataset")
.WithAttr("T", DT_VARIANT)
.WithAttr("index", 0));
TF_RETURN_IF_ERROR(b.ToGraphDef(graph_def));
return absl::OkStatus();
}
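// Parses a serialized iterator checkpoint and returns, per checkpoint key,
// the total byte size of the stored tensor. An illustrative (hypothetical)
// use, e.g. for sizing a checkpoint before persisting it:
//
//   auto stats = CheckpointStats(checkpoint_bytes);
//   if (stats.ok()) {
//     for (const auto& [key, bytes] : *stats) { /* inspect sizes */ }
//   }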
absl::StatusOr<absl::flat_hash_map<std::string, int64_t>> CheckpointStats(
const std::string& checkpoint_bytes) {
TensorProto proto;
if (!ParseProtoUnlimited(&proto, checkpoint_bytes)) {
return absl::InvalidArgumentError(
"Failed to parse checkpoint bytes into proto.");
}
Tensor t;
if (!t.FromProto(proto)) {
return absl::InvalidArgumentError(
"Failed to parse checkpoint tensor from proto.");
}
auto variant = t.scalar<Variant>()();
auto* w = variant.get<IteratorStateVariant>();
if (!w) {
return absl::InvalidArgumentError(
"Failed to access IteratorStateVariant inside checkpoint tensor");
}
const VariantTensorData* data = w->GetData();
auto reader = std::make_unique<VariantTensorDataReader>(
std::vector<const VariantTensorData*>{data});
absl::flat_hash_map<std::string, int64_t> stats;
for (const auto& [key, tensor] : reader->ReadAllTensors()) {
stats[key] = tensor.TotalBytes();
}
return stats;
}
}
} | #include "tensorflow/core/data/serialization_utils.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/test_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/work_sharder.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
namespace {
string full_name(string key) { return FullName("Iterator:", key); }
TEST(SerializationUtilsTest, CheckpointElementsRoundTrip) {
std::vector<std::vector<Tensor>> elements;
elements.push_back(CreateTensors<int32>(TensorShape({3}), {{1, 2, 3}}));
elements.push_back(CreateTensors<int32>(TensorShape({2}), {{4, 5}}));
VariantTensorDataWriter writer;
tstring test_prefix = full_name("test_prefix");
TF_ASSERT_OK(WriteElementsToCheckpoint(&writer, test_prefix, elements));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
std::vector<std::vector<Tensor>> read_elements;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<TestContext> ctx,
TestContext::Create());
TF_ASSERT_OK(ReadElementsFromCheckpoint(ctx->iter_ctx(), &reader, test_prefix,
&read_elements));
ASSERT_EQ(elements.size(), read_elements.size());
for (int i = 0; i < elements.size(); ++i) {
std::vector<Tensor>& original = elements[i];
std::vector<Tensor>& read = read_elements[i];
ASSERT_EQ(original.size(), read.size());
for (int j = 0; j < original.size(); ++j) {
EXPECT_EQ(original[j].NumElements(), read[j].NumElements());
EXPECT_EQ(original[j].flat<int32>()(0), read[j].flat<int32>()(0));
}
}
}
TEST(SerializationUtilsTest, VariantTensorDataRoundtrip) {
VariantTensorDataWriter writer;
TF_ASSERT_OK(writer.WriteScalar(full_name("Int64"), 24));
Tensor input_tensor(DT_FLOAT, {1});
input_tensor.flat<float>()(0) = 2.0f;
TF_ASSERT_OK(writer.WriteTensor(full_name("Tensor"), input_tensor));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
int64_t val_int64;
TF_ASSERT_OK(reader.ReadScalar(full_name("Int64"), &val_int64));
EXPECT_EQ(val_int64, 24);
Tensor val_tensor;
TF_ASSERT_OK(reader.ReadTensor(full_name("Tensor"), &val_tensor));
EXPECT_EQ(input_tensor.NumElements(), val_tensor.NumElements());
EXPECT_EQ(input_tensor.flat<float>()(0), val_tensor.flat<float>()(0));
}
TEST(SerializationUtilsTest, VariantTensorDataNonExistentKey) {
VariantTensorData data;
strings::StrAppend(&data.metadata_, "key1", "@@");
data.tensors_.push_back(Tensor(DT_INT64, {1}));
std::vector<const VariantTensorData*> reader_data;
reader_data.push_back(&data);
VariantTensorDataReader reader(reader_data);
int64_t val_int64;
tstring val_string;
Tensor val_tensor;
EXPECT_EQ(error::NOT_FOUND,
reader.ReadScalar(full_name("NonExistentKey"), &val_int64).code());
EXPECT_EQ(error::NOT_FOUND,
reader.ReadScalar(full_name("NonExistentKey"), &val_string).code());
EXPECT_EQ(error::NOT_FOUND,
reader.ReadTensor(full_name("NonExistentKey"), &val_tensor).code());
}
TEST(SerializationUtilsTest, VariantTensorDataRoundtripIteratorName) {
VariantTensorDataWriter writer;
TF_ASSERT_OK(writer.WriteScalar("Iterator", "Int64", 24));
Tensor input_tensor(DT_FLOAT, {1});
input_tensor.flat<float>()(0) = 2.0f;
TF_ASSERT_OK(writer.WriteTensor("Iterator", "Tensor", input_tensor));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
int64_t val_int64;
TF_ASSERT_OK(reader.ReadScalar("Iterator", "Int64", &val_int64));
EXPECT_EQ(val_int64, 24);
Tensor val_tensor;
TF_ASSERT_OK(reader.ReadTensor("Iterator", "Tensor", &val_tensor));
EXPECT_EQ(input_tensor.NumElements(), val_tensor.NumElements());
EXPECT_EQ(input_tensor.flat<float>()(0), val_tensor.flat<float>()(0));
}
TEST(SerializationUtilsTest, VariantTensorDataNonExistentKeyIteratorName) {
VariantTensorData data;
strings::StrAppend(&data.metadata_, "key1", "@@");
data.tensors_.push_back(Tensor(DT_INT64, {1}));
std::vector<const VariantTensorData*> reader_data;
reader_data.push_back(&data);
VariantTensorDataReader reader(reader_data);
int64_t val_int64;
tstring val_string;
Tensor val_tensor;
EXPECT_EQ(error::NOT_FOUND,
reader.ReadScalar("Iterator", "NonExistentKey", &val_int64).code());
EXPECT_EQ(
error::NOT_FOUND,
reader.ReadScalar("Iterator", "NonExistentKey", &val_string).code());
EXPECT_EQ(
error::NOT_FOUND,
reader.ReadTensor("Iterator", "NonExistentKey", &val_tensor).code());
}
TEST(SerializationUtilsTest, VariantTensorDataWriteAfterFlushing) {
VariantTensorDataWriter writer;
TF_ASSERT_OK(writer.WriteScalar(full_name("Int64"), 24));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
Tensor input_tensor(DT_FLOAT, {1});
input_tensor.flat<float>()(0) = 2.0f;
EXPECT_EQ(error::FAILED_PRECONDITION,
writer.WriteTensor(full_name("Tensor"), input_tensor).code());
}
class ParameterizedIteratorStateVariantTest
: public DatasetOpsTestBase,
public ::testing::WithParamInterface<std::vector<Tensor>> {
protected:
VariantTensorData GetVariantTensorData() const {
std::vector<Tensor> tensors = GetParam();
VariantTensorData data;
data.set_type_name(IteratorStateVariant::TypeName());
for (Tensor& tensor : tensors) {
*data.add_tensors() = std::move(tensor);
}
return data;
}
absl::StatusOr<VariantTensorData> EncodeAndDecode(
const VariantTensorData& data) const {
IteratorStateVariant encoder;
TF_RETURN_IF_ERROR(encoder.InitializeFromVariantData(
std::make_unique<VariantTensorData>(data)));
VariantTensorData encoded_data;
encoder.Encode(&encoded_data);
IteratorStateVariant decoder;
decoder.Decode(encoded_data);
return *decoder.GetData();
}
absl::StatusOr<VariantTensorData> DecodeUncompressed(
const VariantTensorData& data) const {
IteratorStateVariant decoder;
decoder.Decode(data);
return *decoder.GetData();
}
};
class ParameterizedCheckpointIndicesTest
: public DatasetOpsTestBase,
public ::testing::WithParamInterface<absl::flat_hash_set<int64_t>> {
protected:
absl::flat_hash_set<int64_t> GetCheckpointIndices() const {
absl::flat_hash_set<int64_t> checkpoint_indices = GetParam();
return checkpoint_indices;
}
};
std::vector<std::vector<Tensor>> TestCases() {
return {
CreateTensors<int64_t>(TensorShape{1}, {{1}}),
CreateTensors<int64_t>(TensorShape{1}, {{1}, {2}}),
CreateTensors<tstring>(TensorShape{1}, {{"a"}, {"b"}}),
{CreateTensor<tstring>(TensorShape{1}, {"a"}),
CreateTensor<int64_t>(TensorShape{1}, {1})},
{},
{CreateTensor<int64_t>(TensorShape{128, 128}),
CreateTensor<int64_t>(TensorShape{64, 2})},
};
}
std::vector<absl::flat_hash_set<int64_t>> CheckpointIndicesTestCases() {
return {
{},
{0},
{0, 1},
{0, 1, 2},
{1, 3, 4},
{1, 2, 3, 4},
{0, 1, 2, 3, 4},
};
}
TEST_P(ParameterizedIteratorStateVariantTest, EncodeAndDecode) {
VariantTensorData data = GetVariantTensorData();
TF_ASSERT_OK_AND_ASSIGN(VariantTensorData result, EncodeAndDecode(data));
EXPECT_EQ(result.type_name(), data.type_name());
for (int i = 0; i < result.tensors_size(); ++i) {
test::ExpectEqual(result.tensors(i), data.tensors(i));
}
}
TEST_P(ParameterizedIteratorStateVariantTest, DecodeUncompressed) {
VariantTensorData data = GetVariantTensorData();
TF_ASSERT_OK_AND_ASSIGN(VariantTensorData result, DecodeUncompressed(data));
EXPECT_EQ(result.type_name(), data.type_name());
for (int i = 0; i < result.tensors_size(); ++i) {
test::ExpectEqual(result.tensors(i), data.tensors(i));
}
}
TEST_P(ParameterizedCheckpointIndicesTest,
CheckpointElementsRoundTripUsingIndices) {
std::vector<std::vector<Tensor>> elements;
elements.push_back(CreateTensors<int32>(TensorShape({3}), {{1, 2, 3}}));
elements.push_back(CreateTensors<int32>(TensorShape({2}), {{4, 5}}));
elements.push_back(
CreateTensors<int32>(TensorShape({5}), {{6, 7, 8, 9, 10}}));
elements.push_back(
CreateTensors<int32>(TensorShape({4}), {{11, 12, 13, 14}}));
elements.push_back(CreateTensors<int32>(TensorShape({2}), {{15, 16}}));
VariantTensorDataWriter writer;
tstring test_prefix = full_name("test_prefix");
TF_ASSERT_OK(WriteElementsToCheckpoint(&writer, test_prefix, elements));
for (auto index : GetCheckpointIndices()) {
elements.at(index) = CreateTensors<int32>(TensorShape({1}), {{1}});
}
TF_ASSERT_OK(UpdateCheckpointElements(&writer, test_prefix, elements,
GetCheckpointIndices()));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
std::vector<std::vector<Tensor>> read_elements;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<TestContext> ctx,
TestContext::Create());
TF_ASSERT_OK(ReadElementsFromCheckpoint(ctx->iter_ctx(), &reader, test_prefix,
&read_elements));
ASSERT_EQ(elements.size(), read_elements.size());
for (int index = 0; index < elements.size(); ++index) {
std::vector<Tensor>& original = elements[index];
std::vector<Tensor>& read = read_elements[index];
ASSERT_EQ(original.size(), read.size());
for (int j = 0; j < original.size(); ++j) {
EXPECT_EQ(original[j].NumElements(), read[j].NumElements());
EXPECT_EQ(original[j].flat<int32>()(0), read[j].flat<int32>()(0));
}
}
}
INSTANTIATE_TEST_SUITE_P(Instantiation, ParameterizedIteratorStateVariantTest,
::testing::ValuesIn(TestCases()));
INSTANTIATE_TEST_SUITE_P(Instantiation, ParameterizedCheckpointIndicesTest,
::testing::ValuesIn(CheckpointIndicesTestCases()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/serialization_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/serialization_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3867fb7c-4631-4b9d-84a9-d1c6629febeb | cpp | tensorflow/tensorflow | rewrite_utils | tensorflow/core/data/rewrite_utils.cc | tensorflow/core/data/rewrite_utils_test.cc | #include "tensorflow/core/data/rewrite_utils.h"
#include "tensorflow/core/platform/refcount.h"
#if !defined(IS_MOBILE_PLATFORM)
#include <algorithm>
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/hash_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/grappler_item_builder.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/optimizers/meta_optimizer.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/proto_serialization.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/device_properties.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kOptimizerName[] = "tf_data_meta_optimizer";
constexpr char kOptimizers[] = "optimizers";
constexpr char kOptimizerConfigs[] = "optimizer_configs";
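// Grappler pruning drops function nodes that are not reachable from a fetch
// node. Appending an Identity "fake sink" to every function output keeps the
// outputs alive during optimization; RemoveFakeSinks strips them afterwards.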
void AddFakeSinks(FunctionDef* function_def) {
int counter = 0;
for (const auto& output : function_def->signature().output_arg()) {
NodeDef* node = function_def->add_node_def();
tensorflow::grappler::function_utils::SetUniqueFunctionNodeName(
strings::StrCat("FakeSink", counter++), function_def, node);
node->set_op("Identity");
node->add_input(function_def->ret().at(output.name()));
(*node->mutable_attr())["T"].set_type(output.type());
(*function_def->mutable_ret())[output.name()] =
strings::StrCat(node->name(), ":output:0");
}
}
void RemoveFakeSinks(FunctionDef* function_def) {
std::map<std::string, std::string> identity_map;
for (const auto& node : function_def->node_def()) {
if (node.op() == "Identity" && node.input_size() == 1) {
identity_map[node.name()] = node.input(0);
}
}
for (const auto& output_arg : function_def->signature().output_arg()) {
const std::string& tensor = function_def->ret().at(output_arg.name());
const std::string& output_node = tensor.substr(0, tensor.find(':'));
if (identity_map.find(output_node) != identity_map.end()) {
(*function_def->mutable_ret())[output_arg.name()] =
identity_map.at(output_node);
}
}
}
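// Runs the tf.data meta optimizer over `graph_def` on a virtual cluster,
// using the RewriterConfig produced by `config_factory`, then removes the
// fake sinks that were added to the function library.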
Status ApplyRewrites(OpKernelContext* ctx,
const std::function<RewriterConfig(void)> config_factory,
GraphDef* graph_def, string* dataset_node) {
std::unique_ptr<tensorflow::grappler::GrapplerItem> grappler_item =
      GetGrapplerItem(graph_def, dataset_node, /*add_fake_sinks=*/true);
std::unordered_map<std::string, tensorflow::DeviceProperties> device_map;
tensorflow::grappler::VirtualCluster cluster(device_map);
tensorflow::ConfigProto config;
*config.mutable_graph_options()->mutable_rewrite_options() = config_factory();
TF_RETURN_IF_ERROR(tensorflow::grappler::RunMetaOptimizer(
std::move(*grappler_item), config, ctx->device(), &cluster, graph_def));
for (auto& function_def : *graph_def->mutable_library()->mutable_function()) {
RemoveFakeSinks(&function_def);
}
return absl::OkStatus();
}
}
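// Routes all requested tf.data optimizations through the single
// "tf_data_meta_optimizer" custom optimizer; names that are not registered
// as custom graph optimizers are skipped with a log message.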
RewriterConfig CreateRewriterConfig(
const absl::flat_hash_set<tstring>& optimizations,
const absl::flat_hash_set<tstring>& optimizations_configs) {
RewriterConfig rewriter_config;
rewriter_config.add_optimizers(kOptimizerName);
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::ONE);
rewriter_config.set_fail_on_optimizer_errors(true);
auto custom_optimizer = rewriter_config.add_custom_optimizers();
custom_optimizer->set_name(kOptimizerName);
auto* custom_optimizations_list =
(*custom_optimizer->mutable_parameter_map())[kOptimizers].mutable_list();
const auto& registered_optimizers =
grappler::CustomGraphOptimizerRegistry::GetRegisteredOptimizers();
for (const auto& optimization : optimizations) {
if (std::find(registered_optimizers.begin(), registered_optimizers.end(),
optimization) != registered_optimizers.end()) {
custom_optimizations_list->add_s(optimization.data(),
optimization.size());
} else {
VLOG(1) << "Optimization " << optimization << " is not registered.";
}
}
auto* config_list =
(*custom_optimizer->mutable_parameter_map())[kOptimizerConfigs]
.mutable_list();
for (const auto& config : optimizations_configs) {
config_list->add_s(config.data(), config.size());
}
return rewriter_config;
}
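// Serializes `input` to a GraphDef, applies the configured rewrites, and
// re-imports and runs the rewritten graph to obtain the new dataset.
// Optionally records a fingerprint of the rewritten graph asynchronously.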
Status RewriteDataset(OpKernelContext* ctx, const DatasetBase* input,
std::function<RewriterConfig(void)> config_factory,
bool record_fingerprint,
core::RefCountPtr<DatasetBase>* rewritten_input) {
std::vector<std::pair<string, Tensor>> input_list;
GraphDef graph_def;
string output_node;
TF_RETURN_IF_ERROR(
AsGraphDefForRewrite(ctx, input, &input_list, &graph_def, &output_node));
VLOG(3) << "Before graph rewrites: " << graph_def.DebugString();
TF_RETURN_IF_ERROR(
ApplyRewrites(ctx, config_factory, &graph_def, &output_node));
VLOG(3) << "After graph rewrites: " << graph_def.DebugString();
FunctionLibraryRuntime* flr = nullptr;
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr = nullptr;
std::unique_ptr<FunctionLibraryDefinition> lib_def = nullptr;
TF_RETURN_IF_ERROR(
      ctx->function_library()->Clone(&lib_def, &pflr, &flr,
                                     /*skip_flib_def=*/true));
TF_RETURN_IF_ERROR(AddToFunctionLibrary(lib_def.get(), graph_def.library()));
Graph graph(OpRegistry::Global());
TF_RETURN_IF_ERROR(ImportGraphDef({}, graph_def, &graph, nullptr));
std::vector<Tensor> outputs;
GraphRunner graph_runner(flr->device());
TF_RETURN_IF_ERROR(
graph_runner.Run(&graph, flr, input_list, {output_node}, &outputs));
DatasetBase* rewritten_dataset;
TF_RETURN_IF_ERROR(
GetDatasetFromVariantTensor(outputs[0], &rewritten_dataset));
rewritten_dataset->Ref();
rewritten_input->reset(rewritten_dataset);
if (record_fingerprint) {
(*ctx->runner())([graph_def = std::move(graph_def),
lib_def = lib_def.release(),
input_list = std::move(input_list),
output_node = std::move(output_node)]() {
std::unique_ptr<FunctionLibraryDefinition> lib_def_owner(lib_def);
const NodeDef* node_def = nullptr;
for (const auto& node : graph_def.node()) {
if (node.name() == output_node) {
node_def = &node;
break;
}
}
if (node_def == nullptr) {
VLOG(3) << "Failed to find node: " << output_node;
return;
}
uint64 hash = 0;
Status s = HashNode(graph_def, *node_def, *lib_def, &hash);
if (!s.ok()) {
VLOG(3) << "Failed to hash graph: " << s;
return;
}
for (const auto& pair : input_list) {
hash = Hash64CombineUnordered(hash, Hash64(pair.first));
uint64 tensor_hash = 0;
Status s = HashTensor(pair.second, &tensor_hash);
if (s.ok()) {
hash = Hash64CombineUnordered(hash, tensor_hash);
} else {
VLOG(3) << "Failed to hash tensor: " << s;
}
}
string graph_hash =
strings::StrCat(strings::Hex(hash, strings::kZeroPad16));
metrics::RecordTFDataFingerprint(graph_hash);
});
}
return absl::OkStatus();
}
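// Wraps the dataset graph in a MetaGraphDef whose "train_op" collection
// fetches an appended Identity sink, so Grappler treats the dataset output
// as the node to preserve. `dataset_node` is updated to the sink's name.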
std::unique_ptr<tensorflow::grappler::GrapplerItem> GetGrapplerItem(
GraphDef* graph_def, std::string* dataset_node, bool add_fake_sinks,
bool apply_optimizations) {
NodeDef* node = graph_def->mutable_node()->Add();
tensorflow::grappler::graph_utils::SetUniqueGraphNodeName("Sink", graph_def,
node);
node->set_op("Identity");
node->add_input(*dataset_node);
(*node->mutable_attr())["T"].set_type(DT_VARIANT);
*dataset_node = node->name();
if (add_fake_sinks) {
for (auto& function_def :
*graph_def->mutable_library()->mutable_function()) {
AddFakeSinks(&function_def);
}
}
MetaGraphDef meta_graph_def;
(*meta_graph_def.mutable_graph_def()) = *graph_def;
CollectionDef collection_def;
auto node_list = collection_def.mutable_node_list();
node_list->add_value(*dataset_node);
(*meta_graph_def.mutable_collection_def())["train_op"] = collection_def;
tensorflow::grappler::ItemConfig item_config;
item_config.apply_optimizations = apply_optimizations;
std::unique_ptr<tensorflow::grappler::GrapplerItem> grappler_item =
tensorflow::grappler::GrapplerItemFromMetaGraphDef(
"graph", meta_graph_def, item_config);
grappler_item->optimization_options().optimize_function_library = false;
return grappler_item;
}
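// The selected set is the union of explicitly enabled optimizations, default
// optimizations not explicitly disabled, and registered experiments that are
// not explicitly disabled.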
absl::flat_hash_set<tstring> SelectOptimizations(
const absl::flat_hash_set<string>& experiments,
const absl::flat_hash_set<tstring>& optimizations_enabled,
const absl::flat_hash_set<tstring>& optimizations_disabled,
const absl::flat_hash_set<tstring>& optimizations_default) {
absl::flat_hash_set<tstring> optimizations;
optimizations.insert(optimizations_enabled.begin(),
optimizations_enabled.end());
for (const auto& optimization : optimizations_default) {
if (!optimizations_disabled.contains(optimization)) {
optimizations.insert(optimization);
}
}
const auto& registered_optimizers =
grappler::CustomGraphOptimizerRegistry::GetRegisteredOptimizers();
for (const auto& experiment : experiments) {
if (std::find(registered_optimizers.begin(), registered_optimizers.end(),
experiment) != registered_optimizers.end() &&
!optimizations_disabled.contains(experiment)) {
optimizations.insert(experiment);
}
}
return optimizations;
}
absl::StatusOr<std::string> GetDatasetNode(const GraphDef& graph_def) {
for (const auto& node : graph_def.node()) {
if (node.op() == kRetvalOp) {
return node.input(0);
}
}
return errors::NotFound(
absl::Substitute("Dataset node for graph is not found:\n$0",
graph_def.ShortDebugString()));
}
absl::StatusOr<NodeDef> GetDatasetNodeDef(const GraphDef& graph_def) {
TF_ASSIGN_OR_RETURN(std::string dataset_node_name, GetDatasetNode(graph_def));
for (const auto& node : graph_def.node()) {
if (node.name() == dataset_node_name) {
return node;
}
}
return errors::NotFound(
absl::Substitute("Dataset node for graph is not found:\n$0",
graph_def.ShortDebugString()));
}
}
}
#endif | #include "tensorflow/core/data/rewrite_utils.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::test::AsScalar;
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
using ::testing::ElementsAre;
NodeDef GetMapNode(absl::string_view name, absl::string_view input_node_name,
absl::string_view function_name) {
return NDef(
name, "MapDataset", {std::string(input_node_name)},
{{"f", FunctionDefHelper::FunctionRef(std::string(function_name))},
{"Targuments", {}},
{"output_shapes", absl::Span<const TensorShape>{TensorShape()}},
{"output_types", absl::Span<const DataType>{DT_INT64}}});
}
FunctionDef XTimesX() {
return FunctionDefHelper::Create(
"XTimesX",
{"x: int64"},
{"y: int64"},
{},
{{{"y"}, "Mul", {"x", "x"}, {{"T", DT_INT64}}}},
{{"y", "y:z:0"}});
}
GraphDef GetRangeSquareDatasetDef(const int64_t range) {
return GDef(
{NDef("start", "Const", {},
{{"value", AsScalar<int64_t>(0)}, {"dtype", DT_INT64}}),
NDef("stop", "Const", {},
{{"value", AsScalar<int64_t>(range)}, {"dtype", DT_INT64}}),
NDef("step", "Const", {},
{{"value", AsScalar<int64_t>(1)}, {"dtype", DT_INT64}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{TensorShape()}},
{"output_types", absl::Span<const DataType>{DT_INT64}}}),
GetMapNode("map", "range", "XTimesX"),
NDef("dataset", "_Retval", {"map"},
{{"T", DT_VARIANT}, {"index", 0}})},
{XTimesX()});
}
TEST(GraphUtilTest, GetFetchNode) {
GraphDef graph = GetRangeSquareDatasetDef(10);
TF_ASSERT_OK_AND_ASSIGN(std::string dataset_node, GetDatasetNode(graph));
std::unique_ptr<tensorflow::grappler::GrapplerItem> grappler_item =
      GetGrapplerItem(&graph, &dataset_node, /*add_fake_sinks=*/false);
EXPECT_THAT(grappler_item->fetch, ElementsAre("Sink"));
}
TEST(GraphUtilTest, GetFetchNodeDef) {
GraphDef graph = GetRangeSquareDatasetDef(10);
TF_ASSERT_OK_AND_ASSIGN(NodeDef dataset_nodedef, GetDatasetNodeDef(graph));
std::string dataset_node = dataset_nodedef.name();
std::unique_ptr<tensorflow::grappler::GrapplerItem> grappler_item =
      GetGrapplerItem(&graph, &dataset_node, /*add_fake_sinks=*/false);
EXPECT_THAT(grappler_item->fetch, ElementsAre("Sink"));
}
struct SelectOptimizationsTestCase {
absl::flat_hash_set<string> experiments;
absl::flat_hash_set<tstring> optimizations_enabled;
absl::flat_hash_set<tstring> optimizations_disabled;
absl::flat_hash_set<tstring> optimizations_default;
std::vector<string> expected;
};
class SelectOptimizationsTest
: public ::testing::TestWithParam<SelectOptimizationsTestCase> {};
TEST_P(SelectOptimizationsTest, DatasetUtils) {
const SelectOptimizationsTestCase test_case = GetParam();
auto optimizations = SelectOptimizations(
test_case.experiments, test_case.optimizations_enabled,
test_case.optimizations_disabled, test_case.optimizations_default);
EXPECT_THAT(std::vector<string>(optimizations.begin(), optimizations.end()),
::testing::UnorderedElementsAreArray(test_case.expected));
}
INSTANTIATE_TEST_SUITE_P(
    Test, SelectOptimizationsTest,
    ::testing::Values(
        SelectOptimizationsTestCase{
            /*experiments=*/{}, /*optimizations_enabled=*/{},
            /*optimizations_disabled=*/{}, /*optimizations_default=*/{},
            /*expected=*/{}},
        SelectOptimizationsTestCase{
            /*experiments=*/{"map_and_batch_fusion"},
            /*optimizations_enabled=*/{"bar"},
            /*optimizations_disabled=*/{}, /*optimizations_default=*/{"baz"},
            /*expected=*/{"map_and_batch_fusion", "bar", "baz"}},
        SelectOptimizationsTestCase{
            /*experiments=*/{"this_is_not_an_optimization"},
            /*optimizations_enabled=*/{"bar"},
            /*optimizations_disabled=*/{}, /*optimizations_default=*/{"baz"},
            /*expected=*/{"bar", "baz"}},
        SelectOptimizationsTestCase{/*experiments=*/{},
                                    /*optimizations_enabled=*/{"foo"},
                                    /*optimizations_disabled=*/{"baz"},
                                    /*optimizations_default=*/{"bar", "baz"},
                                    /*expected=*/{"foo", "bar"}},
        SelectOptimizationsTestCase{
            /*experiments=*/{"foo"}, /*optimizations_enabled=*/{"bar"},
            /*optimizations_disabled=*/{"foo"},
            /*optimizations_default=*/{"baz"},
            /*expected=*/{"bar", "baz"}}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/rewrite_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/rewrite_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
16f36227-4093-4bab-99de-c06d8b586284 | cpp | tensorflow/tensorflow | compression_utils | tensorflow/core/data/compression_utils.cc | tensorflow/core/data/compression_utils_test.cc | #include "tensorflow/core/data/compression_utils.h"
#include <limits>
#include <string>
#include <vector>
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/snappy.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace data {
namespace {
constexpr int kCompressedElementVersion = 0;
}
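// Fixed-size list of (pointer, length) pieces used to scatter/gather all
// tensor components through a single snappy call.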
class Iov {
public:
explicit Iov(size_t size) : iov_(size), idx_(0), num_bytes_(0) {}
void Add(void* base, size_t len) {
iov_[idx_].iov_base = base;
iov_[idx_].iov_len = len;
num_bytes_ += len;
++idx_;
}
iovec* Data() { return iov_.data(); }
size_t NumBytes() const { return num_bytes_; }
size_t NumPieces() const { return iov_.size(); }
private:
std::vector<struct iovec> iov_;
size_t idx_;
size_t num_bytes_;
};
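// Gathers the element's components into an iovec list: memcpy-able tensors
// contribute their raw buffer, string tensors contribute each string, and
// all other dtypes are serialized as TensorProtos into one scratch buffer.
// The concatenation is compressed with a single snappy call, and per-piece
// byte counts are recorded so decompression can rebuild the layout.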
Status CompressElement(const std::vector<Tensor>& element,
CompressedElement* out) {
size_t num_string_tensors = 0;
size_t num_string_tensor_strings = 0;
std::vector<TensorProto> nonmemcpyable_components;
size_t total_nonmemcpyable_size = 0;
for (const auto& component : element) {
if (component.dtype() == DT_STRING) {
++num_string_tensors;
num_string_tensor_strings += component.NumElements();
} else if (!DataTypeCanUseMemcpy(component.dtype())) {
nonmemcpyable_components.emplace_back();
component.AsProtoTensorContent(&nonmemcpyable_components.back());
total_nonmemcpyable_size +=
nonmemcpyable_components.back().ByteSizeLong();
}
}
Iov iov{element.size() + num_string_tensor_strings - num_string_tensors};
tstring nonmemcpyable;
nonmemcpyable.resize_uninitialized(total_nonmemcpyable_size);
char* nonmemcpyable_pos = nonmemcpyable.mdata();
int nonmemcpyable_component_index = 0;
for (int i = 0; i < element.size(); ++i) {
const auto& component = element[i];
CompressedComponentMetadata* metadata =
out->mutable_component_metadata()->Add();
metadata->set_dtype(component.dtype());
component.shape().AsProto(metadata->mutable_tensor_shape());
if (DataTypeCanUseMemcpy(component.dtype())) {
const TensorBuffer* buffer = DMAHelper::buffer(&component);
if (buffer) {
iov.Add(buffer->data(), buffer->size());
metadata->add_uncompressed_bytes(buffer->size());
}
} else if (component.dtype() == DT_STRING) {
const auto& flats = component.unaligned_flat<tstring>();
      for (int j = 0; j < flats.size(); ++j) {
        iov.Add(const_cast<char*>(flats.data()[j].data()),
                flats.data()[j].size());
        metadata->add_uncompressed_bytes(flats.data()[j].size());
}
} else {
TensorProto& proto =
nonmemcpyable_components[nonmemcpyable_component_index++];
proto.SerializeToArray(nonmemcpyable_pos, proto.ByteSizeLong());
iov.Add(nonmemcpyable_pos, proto.ByteSizeLong());
nonmemcpyable_pos += proto.ByteSizeLong();
metadata->add_uncompressed_bytes(proto.ByteSizeLong());
}
}
if (iov.NumBytes() > kuint32max) {
return errors::OutOfRange("Encountered dataset element of size ",
iov.NumBytes(),
", exceeding the 4GB Snappy limit.");
}
if (!port::Snappy_CompressFromIOVec(iov.Data(), iov.NumBytes(),
out->mutable_data())) {
return errors::Internal("Failed to compress using snappy.");
}
out->set_version(kCompressedElementVersion);
VLOG(3) << "Compressed element from " << iov.NumBytes() << " bytes to "
<< out->data().size() << " bytes";
return absl::OkStatus();
}
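// Reverses CompressElement: destination tensors and string elements are
// allocated up front from the recorded metadata, snappy decompresses
// directly into them, and the remaining components are parsed back out of
// the scratch buffer as TensorProtos.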
Status UncompressElement(const CompressedElement& compressed,
std::vector<Tensor>* out) {
if (compressed.version() != kCompressedElementVersion) {
return errors::Internal("Unsupported compressed element version: ",
compressed.version());
}
int num_components = compressed.component_metadata_size();
out->clear();
out->reserve(num_components);
size_t num_string_tensors = 0;
size_t num_string_tensor_strings = 0;
size_t total_nonmemcpyable_size = 0;
for (const auto& metadata : compressed.component_metadata()) {
if (metadata.dtype() == DT_STRING) {
++num_string_tensors;
num_string_tensor_strings += metadata.uncompressed_bytes_size();
} else if (!DataTypeCanUseMemcpy(metadata.dtype())) {
total_nonmemcpyable_size += metadata.uncompressed_bytes(0);
}
}
Iov iov{num_components + num_string_tensor_strings - num_string_tensors};
tstring nonmemcpyable;
nonmemcpyable.resize_uninitialized(total_nonmemcpyable_size);
char* nonmemcpyable_pos = nonmemcpyable.mdata();
for (const auto& metadata : compressed.component_metadata()) {
if (DataTypeCanUseMemcpy(metadata.dtype())) {
out->emplace_back(metadata.dtype(), metadata.tensor_shape());
TensorBuffer* buffer = DMAHelper::buffer(&out->back());
if (buffer) {
iov.Add(buffer->data(), metadata.uncompressed_bytes(0));
}
} else if (metadata.dtype() == DT_STRING) {
out->emplace_back(metadata.dtype(), metadata.tensor_shape());
const auto& flats = out->back().unaligned_flat<tstring>();
for (int i = 0; i < metadata.uncompressed_bytes_size(); ++i) {
flats.data()[i].resize(metadata.uncompressed_bytes(i));
iov.Add(flats.data()[i].mdata(), metadata.uncompressed_bytes(i));
}
} else {
out->emplace_back();
iov.Add(nonmemcpyable_pos, metadata.uncompressed_bytes(0));
nonmemcpyable_pos += metadata.uncompressed_bytes(0);
}
}
const std::string& compressed_data = compressed.data();
size_t uncompressed_size;
if (!port::Snappy_GetUncompressedLength(
compressed_data.data(), compressed_data.size(), &uncompressed_size)) {
return errors::Internal(
"Could not get snappy uncompressed length. Compressed data size: ",
compressed_data.size());
}
if (uncompressed_size != static_cast<size_t>(iov.NumBytes())) {
return errors::Internal(
"Uncompressed size mismatch. Snappy expects ", uncompressed_size,
" whereas the tensor metadata suggests ", iov.NumBytes());
}
if (!port::Snappy_UncompressToIOVec(compressed_data.data(),
compressed_data.size(), iov.Data(),
iov.NumPieces())) {
return errors::Internal("Failed to perform snappy decompression.");
}
nonmemcpyable_pos = nonmemcpyable.mdata();
for (int i = 0; i < num_components; ++i) {
const CompressedComponentMetadata& metadata =
compressed.component_metadata(i);
if (!DataTypeCanUseMemcpy(metadata.dtype()) &&
metadata.dtype() != DT_STRING) {
TensorProto tp;
if (!tp.ParseFromString(
{nonmemcpyable_pos,
static_cast<size_t>(metadata.uncompressed_bytes(0))})) {
return errors::Internal("Could not parse TensorProto");
}
if (!out->at(i).FromProto(tp)) {
return errors::Internal("Could not parse Tensor");
}
nonmemcpyable_pos += metadata.uncompressed_bytes(0);
}
}
return absl::OkStatus();
}
REGISTER_UNARY_VARIANT_DECODE_FUNCTION(CompressedElement,
"tensorflow.data.CompressedElement");
}
} | #include "tensorflow/core/data/compression_utils.h"
#include <string>
#include <vector>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
TEST(CompressionUtilsTest, Exceeds4GB) {
std::vector<Tensor> element = {
CreateTensor<int64_t>(TensorShape{1024, 1024, 513})};
CompressedElement compressed;
EXPECT_THAT(CompressElement(element, &compressed),
StatusIs(error::OUT_OF_RANGE,
HasSubstr("exceeding the 4GB Snappy limit")));
}
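// Elements used for the round-trip tests: numeric and string tensors, mixed
// dtypes, an empty element, an empty tensor, large tensors, and nested
// variant tensors.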
std::vector<std::vector<Tensor>> TestCases() {
return {
CreateTensors<int64_t>(TensorShape{1}, {{1}}),
CreateTensors<int64_t>(TensorShape{1}, {{1}, {2}}),
CreateTensors<tstring>(TensorShape{1}, {{"a"}, {"b"}}),
{CreateTensor<tstring>(TensorShape{1, 2}, {"abc", "xyz"}),
CreateTensor<tstring>(TensorShape{2, 1}, {"ijk", "mnk"})},
{CreateTensor<tstring>(TensorShape{1}, {"a"}),
CreateTensor<int64_t>(TensorShape{1}, {1})},
{},
{CreateTensor<int64_t>(TensorShape{1, 0})},
{CreateTensor<int64_t>(TensorShape{128, 128}),
CreateTensor<int64_t>(TensorShape{64, 2})},
{
DatasetOpsTestBase::CreateTestVariantTensor(
{CreateTensor<int64_t>(TensorShape{3, 1}, {1, 2, 3}),
CreateTensor<tstring>(TensorShape{}, {"abc"})}),
DatasetOpsTestBase::CreateTestVariantTensor(
{CreateTensor<int64_t>(TensorShape{3, 1}, {10, 11, 12}),
CreateTensor<tstring>(TensorShape{}, {"xyz"})}),
},
};
}
class ParameterizedCompressionUtilsTest
: public DatasetOpsTestBase,
public ::testing::WithParamInterface<std::vector<Tensor>> {};
TEST_P(ParameterizedCompressionUtilsTest, RoundTrip) {
std::vector<Tensor> element = GetParam();
CompressedElement compressed;
TF_ASSERT_OK(CompressElement(element, &compressed));
std::vector<Tensor> round_trip_element;
TF_ASSERT_OK(UncompressElement(compressed, &round_trip_element));
  TF_EXPECT_OK(
      ExpectEqual(element, round_trip_element, /*compare_order=*/true));
}
TEST_P(ParameterizedCompressionUtilsTest, CompressedElementVersion) {
std::vector<Tensor> element = GetParam();
CompressedElement compressed;
TF_ASSERT_OK(CompressElement(element, &compressed));
EXPECT_EQ(0, compressed.version());
}
TEST_P(ParameterizedCompressionUtilsTest, VersionMismatch) {
std::vector<Tensor> element = GetParam();
CompressedElement compressed;
TF_ASSERT_OK(CompressElement(element, &compressed));
compressed.set_version(1);
std::vector<Tensor> round_trip_element;
EXPECT_THAT(UncompressElement(compressed, &round_trip_element),
StatusIs(error::INTERNAL));
}
INSTANTIATE_TEST_SUITE_P(Instantiation, ParameterizedCompressionUtilsTest,
::testing::ValuesIn(TestCases()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/compression_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/compression_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ce3f8a43-50fd-4413-8f1e-68a60566350c | cpp | tensorflow/tensorflow | name_utils | tensorflow/compiler/mlir/utils/name_utils.cc | tensorflow/core/data/name_utils_test.cc | #include "tensorflow/compiler/mlir/utils/name_utils.h"
#include <cctype>
#include <string>
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/Location.h"
#include "mlir/Support/LLVM.h"
namespace mlir {
namespace {
bool IsLegalChar(char c, bool first_char) {
if (isalpha(c)) return true;
if (isdigit(c)) return true;
if (c == '.') return true;
if (c == '_') return true;
if (first_char) return false;
if (c == '/') return true;
if (c == '-') return true;
return false;
}
}
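// Replaces characters that are not legal in a TensorFlow node name with '.';
// the first character is held to a stricter rule (no '/' or '-').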
void LegalizeNodeName(std::string& name) {
if (name.empty()) return;
  if (!IsLegalChar(name[0], /*first_char=*/true)) name[0] = '.';
  for (char& c : llvm::drop_begin(name, 1))
    if (!IsLegalChar(c, /*first_char=*/false)) c = '.';
}
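// Derives a node name from an MLIR location by flattening NameLoc,
// CallSiteLoc, and FusedLoc nesting. Individual names are joined with ';';
// an empty string is returned if no location carries a usable name.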
std::string GetNameFromLoc(Location loc) {
llvm::SmallVector<llvm::StringRef, 8> loc_names;
llvm::SmallVector<Location, 8> locs;
locs.push_back(loc);
bool names_is_nonempty = false;
while (!locs.empty()) {
Location curr_loc = locs.pop_back_val();
if (auto name_loc = mlir::dyn_cast<NameLoc>(curr_loc)) {
auto name = name_loc.getName().strref().split('@').first;
if (!name.ends_with(":")) {
loc_names.push_back(name);
if (!name.empty()) names_is_nonempty = true;
}
continue;
} else if (auto call_loc = mlir::dyn_cast<CallSiteLoc>(curr_loc)) {
locs.push_back(call_loc.getCallee());
continue;
} else if (auto fused_loc = mlir::dyn_cast<FusedLoc>(curr_loc)) {
auto reversed_fused_locs = llvm::reverse(fused_loc.getLocations());
locs.append(reversed_fused_locs.begin(), reversed_fused_locs.end());
continue;
}
loc_names.push_back(llvm::StringRef());
}
if (names_is_nonempty)
return llvm::join(loc_names.begin(), loc_names.end(), ";");
return "";
}
} | #include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
TEST(DeviceNameUtils, ArgsToString) {
EXPECT_EQ(name_utils::ArgsToString({}), "");
EXPECT_EQ(name_utils::ArgsToString({"a"}), "(a)");
EXPECT_EQ(name_utils::ArgsToString({"1", "2", "3"}), "(1, 2, 3)");
}
TEST(NameUtilsTest, DatasetDebugString) {
EXPECT_EQ(name_utils::DatasetDebugString("Concatenate"),
"ConcatenateDatasetOp::Dataset");
name_utils::DatasetDebugStringParams range_params;
range_params.set_args(0, 10, 3);
EXPECT_EQ(name_utils::DatasetDebugString("Range", range_params),
"RangeDatasetOp(0, 10, 3)::Dataset");
name_utils::DatasetDebugStringParams shuffle_params;
shuffle_params.dataset_prefix = "FixedSeed";
shuffle_params.set_args(10, 1, 2);
EXPECT_EQ(name_utils::DatasetDebugString("Shuffle", shuffle_params),
"ShuffleDatasetOp(10, 1, 2)::FixedSeedDataset");
name_utils::DatasetDebugStringParams parallel_interleave_params;
parallel_interleave_params.op_version = 2;
EXPECT_EQ(name_utils::DatasetDebugString("ParallelInterleave",
parallel_interleave_params),
"ParallelInterleaveDatasetV2Op::Dataset");
}
TEST(NameUtilsTest, OpName) {
EXPECT_EQ(name_utils::OpName("Range"), "RangeDataset");
EXPECT_EQ(name_utils::OpName("Concatenate", name_utils::OpNameParams()),
"ConcatenateDataset");
name_utils::OpNameParams params;
params.op_version = 2;
EXPECT_EQ(name_utils::OpName("ParallelInterleave", params),
"ParallelInterleaveDatasetV2");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/utils/name_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/name_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c035c168-6335-49bb-a102-f0b941fe3711 | cpp | tensorflow/tensorflow | hash_utils | tensorflow/core/data/hash_utils.cc | tensorflow/core/data/hash_utils_test.cc | #include "tensorflow/core/data/hash_utils.h"
#include <array>
#include <memory>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/proto_serialization.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/regexp.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
namespace data {
namespace {
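// Ops whose seed inputs are skipped during hashing, so that graphs that
// differ only in random seeds still hash identically.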
constexpr std::array<const char*, 3> kOpsWithSeed = {
"AnonymousRandomSeedGenerator",
"ShuffleDataset",
"ShuffleAndRepeatDataset"
};
constexpr char kSeedInputName[] = "seed";
constexpr char kSeed2InputName[] = "seed2";
constexpr char kSeedGeneratorInputName[] = "seed_generator";
template <std::size_t SIZE>
bool IsNodeOfType(const NodeDef& node,
const std::array<const char*, SIZE>& op_types) {
for (const auto& type : op_types) {
if (MatchesAnyVersion(type, node.op())) {
return true;
}
}
return false;
}
Status GetSink(const GraphDef& graph_def, const NodeDef** sink) {
  *sink = nullptr;
  for (auto& node : graph_def.node()) {
    if (node.op() == kRetvalOp) {
      *sink = &node;
      break;
    }
  }
  if (*sink == nullptr) {
    return errors::Internal("Cannot find sink node for dataset graph.");
  }
  return absl::OkStatus();
}
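// Sets `*result` to true iff input `i` of `node` is a seed, seed2, or
// seed_generator argument of one of the ops in kOpsWithSeed.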
Status ShouldIgnoreInput(const NodeDef& node, int i, bool* result) {
*result = false;
if (IsNodeOfType(node, kOpsWithSeed)) {
const OpRegistrationData* reg;
auto status = OpRegistry::Global()->LookUp(node.op(), ®);
if (status.ok()) {
if (reg->op_def.input_arg_size() > i) {
const std::string input_arg_name = reg->op_def.input_arg(i).name();
if (input_arg_name == kSeedInputName ||
input_arg_name == kSeed2InputName ||
input_arg_name == kSeedGeneratorInputName) {
VLOG(2) << "Ignoring arg: " << input_arg_name
<< " from node: " << node.name();
*result = true;
return absl::OkStatus();
}
}
} else if (errors::IsNotFound(status)) {
LOG(WARNING) << "Cannot find " << node.op()
<< " in global op registry, so cannot determine which "
"inputs are seeds.";
} else {
return status;
}
}
return absl::OkStatus();
}
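// Splits an input string of the form "node:src_output" into the node name
// and the suffix; a leading '^' marks a control input.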
Status ParseInputNodeName(absl::string_view input_name,
absl::string_view* node_name,
absl::string_view* suffix, bool* is_control_input) {
if (input_name[0] == '^') {
*node_name = input_name.substr(1);
*is_control_input = true;
return absl::OkStatus();
}
std::pair<absl::string_view, absl::string_view> node_spec =
absl::StrSplit(input_name, absl::MaxSplits(':', 1));
*node_name = node_spec.first;
*suffix = node_spec.second;
*is_control_input = false;
return absl::OkStatus();
}
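// Hashes (and checks equality of) the subgraph rooted at a given node.
// Node, function, and attribute hashes are memoized in caches shared with
// nested hashers so that function bodies are only hashed once; edges that
// would form a cycle are hashed without recursing into the upstream node.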
class GraphHasher {
using NodeCache = absl::flat_hash_map<const NodeDef*, uint64>;
using FunctionCache = absl::flat_hash_map<const FunctionDef*, uint64>;
using AttrCache =
absl::flat_hash_map<std::pair<const NodeDef*, bool>, uint64>;
public:
explicit GraphHasher(const GraphDef* graph, const NodeDef* root,
const FunctionLibraryDefinition* flib)
: graph_(graph), root_(root), flib_(flib) {
node_cache_ = std::make_shared<NodeCache>();
function_cache_ = std::make_shared<FunctionCache>();
attr_cache_ = std::make_shared<AttrCache>();
}
explicit GraphHasher(const GraphDef* graph, const NodeDef* root,
const FunctionLibraryDefinition* flib,
std::shared_ptr<NodeCache> node_cache,
std::shared_ptr<FunctionCache> function_cache,
std::shared_ptr<AttrCache> attr_cache)
: graph_(graph),
root_(root),
flib_(flib),
node_cache_(node_cache),
function_cache_(function_cache),
attr_cache_(attr_cache) {}
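  // Indexes nodes by name and BFS-traverses the graph from the root,
  // recording each node's data/control inputs and the cycle-forming edges.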
Status Init() {
absl::flat_hash_map<absl::string_view, const NodeDef*> node_def_by_name;
node_def_by_name.reserve(graph_->node_size());
for (const auto& node : graph_->node()) {
auto result = node_def_by_name.emplace(node.name(), &node);
if (TF_PREDICT_FALSE(!result.second)) {
auto node_name_formatter =
[](std::string* out,
const decltype(node_def_by_name)::value_type& item) {
absl::StrAppend(out, "'", item.first, "'");
};
return errors::Internal(
"Encountered graph with duplicate node name '", node.name(),
"' in [", absl::StrJoin(node_def_by_name, ",", node_name_formatter),
"]");
}
}
absl::flat_hash_set<absl::string_view> visited;
std::queue<const NodeDef*> bfs_queue;
bfs_queue.push(root_);
while (!bfs_queue.empty()) {
const NodeDef* node = bfs_queue.front();
bfs_queue.pop();
if (visited.contains(node->name())) {
continue;
}
visited.insert(node->name());
NodeRep node_rep;
for (int i = 0; i < node->input_size(); ++i) {
DCHECK_GT(node->input(i).length(), 0);
bool should_ignore_input = false;
TF_RETURN_IF_ERROR(ShouldIgnoreInput(*node, i, &should_ignore_input));
if (should_ignore_input) continue;
absl::string_view node_name, suffix;
bool is_control_input;
TF_RETURN_IF_ERROR(ParseInputNodeName(node->input(i), &node_name,
&suffix, &is_control_input));
auto* input_node = gtl::FindPtrOrNull(node_def_by_name, node_name);
if (input_node == nullptr) {
return errors::Internal("Graph node [", node->name(), "] has input [",
node_name, "] that doesn't exist in graph");
}
if (visited.contains(node_name)) {
EdgeRep cycle_edge(node, input_node);
cycle_forming_edges_.insert(cycle_edge.GetHash());
continue;
}
if (is_control_input) {
node_rep.node_control_inputs.push_back(input_node);
} else {
node_rep.node_inputs.push_back(std::make_pair(input_node, suffix));
bfs_queue.push(input_node);
}
}
nodes_[node] = node_rep;
}
return absl::OkStatus();
}
Status HashRoot(uint64* hash) { return HashNode(root_, hash); }
Status CheckEqual(GraphHasher* that) {
return CheckNodesEqual(root_, that, that->root_);
}
private:
Status HashNode(const NodeDef* node, uint64* hash) {
auto it = node_cache_->find(node);
if (it != node_cache_->end()) {
*hash = it->second;
return absl::OkStatus();
}
NodeRep* node_rep = gtl::FindOrNull(nodes_, node);
if (node_rep == nullptr) {
return errors::InvalidArgument("Could not find node: ", node->name());
}
uint64 non_input_hash;
TF_RETURN_IF_ERROR(
      HashNodeNonInput(node, /*hash_functions=*/true, &non_input_hash));
uint64 control_inputs_hash;
TF_RETURN_IF_ERROR(
HashControlInputs(node_rep->node_control_inputs, &control_inputs_hash));
uint64 inputs_hash = 0;
for (const auto& input : node_rep->node_inputs) {
uint64 node_hash = 0;
EdgeRep edge(node, input.first);
if (cycle_forming_edges_.contains(edge.GetHash())) {
TF_RETURN_IF_ERROR(
          HashNodeNonInput(input.first, /*hash_functions=*/true, &node_hash));
} else {
TF_RETURN_IF_ERROR(HashNode(input.first, &node_hash));
}
inputs_hash = Hash64Combine(
inputs_hash, Hash64Combine(node_hash, Hash64(input.second.data(),
input.second.size())));
}
*hash = Hash64Combine(non_input_hash,
Hash64Combine(control_inputs_hash, inputs_hash));
auto result = node_cache_->emplace(node, *hash);
if (!result.second) {
return errors::Internal(absl::StrCat("Computed the hash for node ",
node->DebugString(), " twice!"));
}
return absl::OkStatus();
}
Status CheckNodesEqual(const NodeDef* this_node, GraphHasher* that,
const NodeDef* that_node) {
Status s = CheckNodesEqualHelper(this_node, that, that_node);
if (!s.ok()) {
return errors::FailedPrecondition("Nodes ", this_node->name(), " and ",
that_node->name(),
" are not the same:\n", s);
}
return s;
}
Status CheckNodesEqualHelper(const NodeDef* this_node, GraphHasher* that,
const NodeDef* that_node) {
TF_RETURN_IF_ERROR(CheckNodesEqualNonInput(this_node, that, that_node,
                                               /*compare_functions=*/true));
TF_RETURN_IF_ERROR(
CheckControlInputsEqual(nodes_[this_node].node_control_inputs, that,
that->nodes_[that_node].node_control_inputs));
auto& this_node_inputs = nodes_[this_node].node_inputs;
auto& that_node_inputs = that->nodes_[that_node].node_inputs;
if (this_node_inputs.size() != that_node_inputs.size()) {
return errors::FailedPrecondition(
"Nodes have different numbers of node inputs: ",
this_node_inputs.size(), " vs ", that_node_inputs.size());
}
for (int i = 0; i < this_node_inputs.size(); ++i) {
const NodeDef* this_input = this_node_inputs[i].first;
const NodeDef* that_input = that_node_inputs[i].first;
if (is_cycle_forming_edge(this_node, this_input)) {
TF_RETURN_IF_ERROR(CheckNodesEqualNonInput(this_input, that, that_input,
                                                   /*compare_functions=*/true));
} else {
TF_RETURN_IF_ERROR(CheckNodesEqual(this_input, that, that_input));
}
absl::string_view this_input_suffix = this_node_inputs[i].second;
absl::string_view that_input_suffix = that_node_inputs[i].second;
if (this_input_suffix != that_input_suffix) {
return errors::FailedPrecondition(
"Node inputs ", this_input->name(), " and ", that_input->name(),
" have different suffixes: ", this_input_suffix, " vs ",
that_input_suffix);
}
}
return absl::OkStatus();
}
Status HashNodeNonInput(const NodeDef* node, bool hash_functions,
uint64* hash) {
auto iter = attr_cache_->find(std::make_pair(node, hash_functions));
if (iter != attr_cache_->end()) {
*hash = iter->second;
return absl::OkStatus();
}
uint64 attrs_hash = 0;
const OpRegistrationData* reg;
TF_RETURN_IF_ERROR(flib_->LookUp(node->op(), ®));
uint64 op_hash = 0;
if (reg->is_function_op) {
if (hash_functions) {
TF_RETURN_IF_ERROR(HashFunction(node->op(), node->attr(), &op_hash));
}
} else {
op_hash = Hash64(node->op());
}
for (const auto& attr : reg->op_def.attr()) {
const auto& attr_key = attr.name();
if (DatasetOpKernel::IsDatasetOp(reg->op_def) && attr_key == "metadata")
continue;
auto node_attr_iter = node->attr().find(attr_key);
if (node_attr_iter == node->attr().end()) {
continue;
}
const auto& attr_value = node_attr_iter->second;
if (attr_key == kColocationAttrName ||
attr_key == kColocationGroupPrefix) {
continue;
}
uint64 attr_hash = 0;
TF_RETURN_IF_ERROR(
HashAttr(attr_key, attr_value, hash_functions, &attr_hash));
attrs_hash = Hash64Combine(attrs_hash, attr_hash);
}
uint64 device_hash = Hash64(node->device());
*hash = Hash64Combine(op_hash, Hash64Combine(attrs_hash, device_hash));
auto result =
attr_cache_->emplace(std::make_pair(node, hash_functions), *hash);
if (!result.second) {
      return errors::Internal(absl::StrCat(
          "Computed the hash for non-input node: ", node->DebugString(),
          " and hash function bool: ", hash_functions, " twice!"));
}
return absl::OkStatus();
}
Status CheckNodesEqualNonInput(const NodeDef* this_node, GraphHasher* that,
const NodeDef* that_node,
bool compare_functions) {
const OpRegistrationData* reg;
TF_RETURN_IF_ERROR(flib_->LookUp(this_node->op(), ®));
if (reg->is_function_op) {
if (compare_functions) {
TF_RETURN_IF_ERROR(
CheckFunctionsEqual(this_node->op(), this_node->attr(), that,
that_node->op(), that_node->attr()));
}
} else {
if (this_node->op() != that_node->op()) {
return errors::FailedPrecondition(
"ops for nodes ", this_node->name(), " and ", that_node->name(),
" are different: ", this_node->op(), " != ", that_node->op());
}
}
for (const auto& attr : reg->op_def.attr()) {
const auto& attr_key = attr.name();
const bool this_has_attr = this_node->attr().contains(attr_key);
const bool that_has_attr = that_node->attr().contains(attr_key);
if (this_has_attr != that_has_attr) {
return errors::FailedPrecondition(
"attr with key ", attr_key, " is different for nodes ",
this_node->name(), " and ", that_node->name(),
". Present in former: ", this_has_attr,
". Present in latter: ", that_has_attr);
}
if (!this_has_attr) {
continue;
}
if (attr_key == kColocationAttrName ||
attr_key == kColocationGroupPrefix) {
continue;
}
const auto& this_attr = this_node->attr().at(attr_key);
const auto& that_attr = that_node->attr().at(attr_key);
TF_RETURN_IF_ERROR(CheckAttrsEqual(attr_key, this_attr, that, that_attr,
compare_functions));
}
if (this_node->device() != that_node->device()) {
return errors::FailedPrecondition(
"Devices are different for nodes ", this_node->name(), " and ",
that_node->name(), ": ", this_node->device(), " vs ",
that_node->device());
}
return absl::OkStatus();
}
Status HashAttr(const std::string& attr_name, const AttrValue& attr_value,
bool hash_functions, uint64* hash) {
uint64 value_hash = 0;
if (attr_value.has_func()) {
if (hash_functions) {
TF_RETURN_IF_ERROR(HashFunction(attr_value.func(), &value_hash));
}
} else if (attr_value.has_list() && attr_value.list().func_size() > 0) {
if (hash_functions) {
for (auto& func : attr_value.list().func()) {
uint64 func_hash;
TF_RETURN_IF_ERROR(HashFunction(func, &func_hash));
value_hash = Hash64Combine(value_hash, func_hash);
}
}
} else {
value_hash = DeterministicProtoHash64(attr_value);
}
*hash = Hash64Combine(Hash64(attr_name), value_hash);
return absl::OkStatus();
}
Status CheckAttrsEqual(const std::string& attr_name,
const AttrValue& this_attr, GraphHasher* that,
const AttrValue& that_attr, bool compare_functions) {
if (this_attr.has_func() != that_attr.has_func()) {
return errors::FailedPrecondition(
"AttrValues are of different types: ", this_attr.DebugString(),
" vs ", that_attr.DebugString());
}
if (this_attr.has_func()) {
if (compare_functions) {
TF_RETURN_IF_ERROR(
CheckFunctionsEqual(this_attr.func(), that, that_attr.func()));
}
return absl::OkStatus();
}
if (this_attr.has_list() != that_attr.has_list()) {
return errors::FailedPrecondition(
"AttrValues are of different types: ", this_attr.DebugString(),
" vs ", that_attr.DebugString());
}
if (this_attr.has_list()) {
if (this_attr.list().func_size() != that_attr.list().func_size()) {
return errors::FailedPrecondition(
"AttrValues have func lists of different sizes: ",
this_attr.DebugString(), " vs ", that_attr.DebugString());
}
if (compare_functions) {
for (int i = 0; i < this_attr.list().func_size(); ++i) {
TF_RETURN_IF_ERROR(CheckFunctionsEqual(this_attr.list().func(i), that,
that_attr.list().func(i)));
}
}
return absl::OkStatus();
}
uint64 this_hash, that_hash;
TF_RETURN_IF_ERROR(
        HashAttr(attr_name, this_attr, /*hash_functions=*/true, &this_hash));
TF_RETURN_IF_ERROR(that->HashAttr(attr_name, that_attr,
                                      /*hash_functions=*/true, &that_hash));
if (this_hash != that_hash) {
return errors::FailedPrecondition(
"AttrValues are different: ", this_attr.DebugString(), " vs ",
that_attr.DebugString());
}
return absl::OkStatus();
}
Status HashFunction(const NameAttrList& func, uint64* hash) {
return HashFunction(func.name(), func.attr(), hash);
}
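  // Instantiates the function body and hashes the subgraph feeding each
  // return node, combining with an order-independent hash of the control
  // returns. Results are memoized in function_cache_.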
Status HashFunction(const std::string& name, const AttrValueMap& attrs,
uint64* hash) {
const FunctionDef* fdef = flib_->Find(name);
auto it = function_cache_->find(fdef);
if (it != function_cache_->end()) {
*hash = it->second;
return absl::OkStatus();
}
std::unique_ptr<FunctionBody> fbody;
TF_RETURN_IF_ERROR(
FunctionDefToBodyHelper(*fdef, AttrSlice(&attrs), flib_, &fbody));
GraphDef graph_def = fbody->graph->ToGraphDefDebug();
uint64 ret_nodes_hash = 0;
for (const auto& ret_node : fbody->ret_nodes) {
uint64 ret_node_hash = 0;
GraphHasher hasher(&graph_def, &ret_node->def(), flib_, node_cache_,
function_cache_, attr_cache_);
TF_RETURN_IF_ERROR(hasher.Init());
TF_RETURN_IF_ERROR(hasher.HashRoot(&ret_node_hash));
ret_nodes_hash = Hash64Combine(ret_nodes_hash, ret_node_hash);
}
std::vector<const NodeDef*> control_rets;
control_rets.reserve(fbody->control_ret_nodes.size());
for (const auto& control_ret_node : fbody->control_ret_nodes) {
control_rets.push_back(&control_ret_node->def());
}
uint64 control_ret_nodes_hash = 0;
TF_RETURN_IF_ERROR(
HashControlInputs(control_rets, &control_ret_nodes_hash));
*hash = Hash64Combine(ret_nodes_hash, control_ret_nodes_hash);
auto result = function_cache_->emplace(fdef, *hash);
if (!result.second) {
return errors::Internal(
absl::StrCat("Computed the hash for function ", name, " twice!"));
}
return absl::OkStatus();
}
Status CheckFunctionsEqual(const NameAttrList& this_func, GraphHasher* that,
const NameAttrList& that_func) {
return CheckFunctionsEqual(this_func.name(), this_func.attr(), that,
that_func.name(), that_func.attr());
}
Status CheckFunctionsEqual(const std::string& this_name,
const AttrValueMap& this_attrs, GraphHasher* that,
const std::string& that_name,
const AttrValueMap& that_attrs) {
Status s = CheckFunctionsEqualHelper(this_name, this_attrs, that, that_name,
that_attrs);
if (!s.ok()) {
return errors::FailedPrecondition("Functions ", this_name, " and ",
that_name, " are not the same:\n", s);
}
return s;
}
Status CheckFunctionsEqualHelper(const std::string& this_name,
const AttrValueMap& this_attrs,
GraphHasher* that,
const std::string& that_name,
const AttrValueMap& that_attrs) {
const FunctionDef* this_fdef = flib_->Find(this_name);
const FunctionDef* that_fdef = that->flib_->Find(that_name);
std::unique_ptr<FunctionBody> this_fbody;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*this_fdef, AttrSlice(&this_attrs), flib_, &this_fbody));
GraphDef this_graph_def = this_fbody->graph->ToGraphDefDebug();
std::unique_ptr<FunctionBody> that_fbody;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*that_fdef, AttrSlice(&that_attrs), that->flib_, &that_fbody));
GraphDef that_graph_def = that_fbody->graph->ToGraphDefDebug();
if (this_fbody->ret_nodes.size() != that_fbody->ret_nodes.size()) {
return errors::FailedPrecondition(
"Different numbers of ret nodes for functions ", this_name, " and ",
that_name, ": ", this_fbody->ret_nodes.size(), " vs ",
that_fbody->ret_nodes.size());
}
for (int i = 0; i < this_fbody->ret_nodes.size(); ++i) {
const NodeDef* this_root = &this_fbody->ret_nodes[i]->def();
const NodeDef* that_root = &that_fbody->ret_nodes[i]->def();
GraphHasher this_hasher(&this_graph_def, this_root, flib_, node_cache_,
function_cache_, attr_cache_);
TF_RETURN_IF_ERROR(this_hasher.Init());
GraphHasher that_hasher(&that_graph_def, that_root, that->flib_,
node_cache_, function_cache_, attr_cache_);
TF_RETURN_IF_ERROR(that_hasher.Init());
TF_RETURN_IF_ERROR(this_hasher.CheckEqual(&that_hasher));
}
std::vector<const NodeDef*> this_control_rets;
this_control_rets.reserve(this_fbody->control_ret_nodes.size());
for (const auto& control_ret_node : this_fbody->control_ret_nodes) {
this_control_rets.push_back(&control_ret_node->def());
}
std::vector<const NodeDef*> that_control_rets;
that_control_rets.reserve(that_fbody->control_ret_nodes.size());
for (const auto& control_ret_node : that_fbody->control_ret_nodes) {
that_control_rets.push_back(&control_ret_node->def());
}
TF_RETURN_IF_ERROR(
CheckControlInputsEqual(this_control_rets, that, that_control_rets));
return absl::OkStatus();
}
Status HashControlInputs(const std::vector<const NodeDef*>& inputs,
uint64* hash) {
*hash = 0;
for (const NodeDef* input : inputs) {
uint64 node_hash = 0;
TF_RETURN_IF_ERROR(
          HashNodeNonInput(input, /*hash_functions=*/false, &node_hash));
*hash = Hash64CombineUnordered(*hash, node_hash);
}
return absl::OkStatus();
}
Status CheckControlInputsEqual(
const std::vector<const NodeDef*>& this_inputs, GraphHasher* that,
const std::vector<const NodeDef*>& that_inputs) {
absl::flat_hash_map<uint64, const NodeDef*> this_hashes;
for (const NodeDef* input : this_inputs) {
uint64 node_hash = 0;
TF_RETURN_IF_ERROR(
          HashNodeNonInput(input, /*hash_functions=*/false, &node_hash));
this_hashes[node_hash] = input;
}
absl::flat_hash_map<uint64, const NodeDef*> that_hashes;
for (const NodeDef* input : that_inputs) {
uint64 node_hash = 0;
TF_RETURN_IF_ERROR(
          HashNodeNonInput(input, /*hash_functions=*/false, &node_hash));
auto this_iter = this_hashes.find(node_hash);
if (this_iter != this_hashes.end()) {
this_hashes.erase(this_iter);
} else {
that_hashes[node_hash] = input;
}
}
if (!this_hashes.empty()) {
auto formatter = [](string* out,
const decltype(this_hashes)::value_type& item) {
out->append(item.second->name());
};
return errors::FailedPrecondition(
"Control dependencies are different. One node has dependencies [",
absl::StrJoin(this_hashes, ", ", formatter),
"], which don't match any of the other node's dependencies [",
absl::StrJoin(that_hashes, ", ", formatter), "]");
}
return absl::OkStatus();
}
private:
bool is_cycle_forming_edge(const NodeDef* start, const NodeDef* end) {
EdgeRep edge(start, end);
return cycle_forming_edges_.contains(edge.GetHash());
}
struct NodeRep {
std::vector<const NodeDef*> node_control_inputs;
std::vector<std::pair<const NodeDef*, absl::string_view>> node_inputs;
};
struct EdgeRep {
const NodeDef* start_node;
const NodeDef* end_node;
EdgeRep(const NodeDef* start, const NodeDef* end)
: start_node(start), end_node(end) {}
uint64 GetHash() {
return Hash64Combine(absl::Hash<const NodeDef*>()(start_node),
absl::Hash<const NodeDef*>()(end_node));
}
};
const GraphDef* const graph_;
const NodeDef* const root_;
const FunctionLibraryDefinition* const flib_;
absl::flat_hash_set<uint64> cycle_forming_edges_;
absl::flat_hash_map<const NodeDef*, NodeRep> nodes_;
std::shared_ptr<NodeCache> node_cache_;
std::shared_ptr<FunctionCache> function_cache_;
std::shared_ptr<AttrCache> attr_cache_;
};
}
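// Combines the dtype, shape dimensions, and contents of `tensor`. Strings
// are hashed element-wise; DT_RESOURCE and DT_VARIANT are unsupported.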
Status HashTensor(const Tensor& tensor, uint64* hash) {
const tstring* s = nullptr;
*hash = Hash64Combine(0, tensor.dtype());
for (int i = 0; i < tensor.shape().dims(); ++i) {
*hash = Hash64Combine(*hash, tensor.shape().dim_size(i));
}
switch (tensor.dtype()) {
case DT_RESOURCE:
case DT_VARIANT:
return errors::Unimplemented("Hashing ", DataTypeString(tensor.dtype()),
" is not supported.");
case DT_STRING:
s = tensor.flat<tstring>().data();
for (int i = 0; i < tensor.NumElements(); ++i, ++s) {
*hash = Hash64Combine(*hash, Hash64(s->data(), s->size()));
}
break;
default:
*hash = Hash64(tensor.tensor_data().data(), tensor.tensor_data().size());
}
return absl::OkStatus();
}
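// Convenience overload that builds the FunctionLibraryDefinition from the
// graph's own function library.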
Status HashNode(const GraphDef& graph, const NodeDef& node, uint64* hash) {
const FunctionLibraryDefinition flib_def(OpRegistry::Global(),
graph.library());
return HashNode(graph, node, flib_def, hash);
}
Status HashNode(const GraphDef& graph, const NodeDef& node,
const FunctionLibraryDefinition& flib_def, uint64* hash) {
GraphHasher hasher(&graph, &node, &flib_def);
TF_RETURN_IF_ERROR(hasher.Init());
return hasher.HashRoot(hash);
}
Status HashGraph(const GraphDef& graph_def, uint64* hash) {
const NodeDef* sink = nullptr;
TF_RETURN_IF_ERROR(GetSink(graph_def, &sink));
return HashNode(graph_def, *sink, hash);
}
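// The equality checks mirror the hashing traversal and return a
// FailedPrecondition error describing the first mismatch found.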
Status CheckGraphsEqual(const GraphDef& a, const GraphDef& b) {
const NodeDef* sink_a;
TF_RETURN_IF_ERROR(GetSink(a, &sink_a));
const NodeDef* sink_b;
TF_RETURN_IF_ERROR(GetSink(b, &sink_b));
return CheckSubgraphsEqual(a, sink_a, b, sink_b);
}
Status CheckSubgraphsEqual(const GraphDef& a, const NodeDef* node_a,
const GraphDef& b, const NodeDef* node_b) {
const FunctionLibraryDefinition flib_def_a(OpRegistry::Global(), a.library());
GraphHasher hasher_a(&a, node_a, &flib_def_a);
TF_RETURN_IF_ERROR(hasher_a.Init());
const FunctionLibraryDefinition flib_def_b(OpRegistry::Global(), b.library());
GraphHasher hasher_b(&b, node_b, &flib_def_b);
TF_RETURN_IF_ERROR(hasher_b.Init());
return hasher_a.CheckEqual(&hasher_b);
}
}
} | #include "tensorflow/core/data/hash_utils.h"
#include <utility>
#include <vector>
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::ContainsRegex;
class DatasetHashUtilsTest : public ::testing::Test {
protected:
uint64 GetHash(const FunctionDefLibrary& library, const FunctionDef& fn) {
GraphDef graph_def;
*graph_def.mutable_library() = library;
NodeDef* node = graph_def.add_node();
node->set_op("RemoteCall");
NameAttrList func;
func.set_name(fn.signature().name());
AddNodeAttr("f", func, node);
uint64 hash = 0;
TF_CHECK_OK(HashNode(graph_def, *node, &hash));
return hash;
}
Status CheckEqual(const FunctionDefLibrary& library, const FunctionDef& fn1,
const FunctionDef& fn2) {
GraphDef graph_def;
*graph_def.mutable_library() = library;
NodeDef* node1 = graph_def.add_node();
node1->set_name("RemoteCall");
node1->set_op("RemoteCall");
NameAttrList func1;
func1.set_name(fn1.signature().name());
AddNodeAttr("f", func1, node1);
NodeDef* node2 = graph_def.add_node();
node1->set_name("RemoteCall2");
node2->set_op("RemoteCall");
NameAttrList func2;
func2.set_name(fn2.signature().name());
AddNodeAttr("f", func2, node2);
return CheckSubgraphsEqual(graph_def, node1, graph_def, node2);
}
uint64 GetHash(const GraphDef& graph, const NodeDef& node) {
uint64 hash = 0;
TF_CHECK_OK(HashNode(graph, node, &hash));
return hash;
}
uint64 GetHash(const Tensor& tensor) {
uint64 hash = 0;
TF_CHECK_OK(HashTensor(tensor, &hash));
return hash;
}
};
TEST_F(DatasetHashUtilsTest, HashFunctionSameFunctionDifferentNames) {
FunctionDefLibrary fl;
FunctionDef* f1 = fl.add_function();
*f1 = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
FunctionDef* f2 = fl.add_function();
*f2 = FunctionDefHelper::Create(
"AddAndMul2", {"input: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"input", "input"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"input", "input"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
EXPECT_EQ(GetHash(fl, *f1), GetHash(fl, *f2));
TF_EXPECT_OK(CheckEqual(fl, *f1, *f2));
}
TEST_F(DatasetHashUtilsTest, HashFunctionDifferentFunctions) {
FunctionDefLibrary fl;
FunctionDef* f1 = fl.add_function();
*f1 = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
FunctionDef* f2 = fl.add_function();
*f2 = FunctionDefHelper::Create(
"AddAndAdd", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
EXPECT_NE(GetHash(fl, *f1), GetHash(fl, *f2));
Status s = CheckEqual(fl, *f1, *f2);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(s.message(), ContainsRegex("Add"));
}
TEST_F(DatasetHashUtilsTest, HashFunctionDifferentInternalNodeNames) {
FunctionDefLibrary fl;
FunctionDef* f1 = fl.add_function();
*f1 = FunctionDefHelper::Create(
"AddAndMul", {"i: float", "j: float", "k: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "j"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"add:z:0", "k"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "ret"}});
FunctionDef* f2 = fl.add_function();
*f2 = FunctionDefHelper::Create(
"AddAndMul2", {"a: float", "b: float", "c: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"a", "b"}, {{"T", DT_FLOAT}}},
{{"mul"}, "Mul", {"add:z:0", "c"}, {{"T", DT_FLOAT}}}},
{{"o", "mul:z:0"}},
{{"must_execute", "mul"}});
EXPECT_EQ(GetHash(fl, *f1), GetHash(fl, *f2));
TF_EXPECT_OK(CheckEqual(fl, *f1, *f2));
}
TEST_F(DatasetHashUtilsTest, HashGraphWithMultipleCycles) {
uint64 hash = 0;
for (int i = 0; i < 1000; ++i) {
GraphDef g;
NodeDef* output_node = g.add_node();
TF_CHECK_OK(NodeDefBuilder("O", "Add")
.Input("A", 0, DT_FLOAT)
.Input("D", 0, DT_FLOAT)
.Finalize(output_node));
TF_CHECK_OK(NodeDefBuilder("A", "Abs")
.Input("B", 0, DT_FLOAT)
.Finalize(g.add_node()));
TF_CHECK_OK(NodeDefBuilder("B", "Add")
.Input("C", 0, DT_FLOAT)
.Input("D", 0, DT_FLOAT)
.Finalize(g.add_node()));
TF_CHECK_OK(NodeDefBuilder("C", "Ceil")
.Input("A", 0, DT_FLOAT)
.Finalize(g.add_node()));
TF_CHECK_OK(NodeDefBuilder("D", "Cos")
.Input("E", 0, DT_FLOAT)
.Finalize(g.add_node()));
TF_CHECK_OK(NodeDefBuilder("E", "Floor")
.Input("B", 0, DT_FLOAT)
.Finalize(g.add_node()));
uint64 t = GetHash(g, *output_node);
if (hash == 0) {
hash = t;
} else {
EXPECT_EQ(t, hash);
}
}
}
TEST_F(DatasetHashUtilsTest, HashNodeSameGraphDifferentNames) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n3));
NodeDef* n4 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_3/node_7", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n4));
NodeDef* n5 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_4/node_9", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n5));
NodeDef* n6 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_5/node_11", "Add")
.Device("CPU:0")
.Input(n4->name(), 0, DT_INT32)
.Input(n5->name(), 0, DT_INT32)
.Finalize(n6));
uint64 hash1 = GetHash(gd, *n3);
uint64 hash2 = GetHash(gd, *n6);
EXPECT_EQ(hash1, hash2);
TF_EXPECT_OK(CheckSubgraphsEqual(gd, n3, gd, n6));
}
TEST_F(DatasetHashUtilsTest, HashNodeDifferentGraphs) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n3));
NodeDef* n4 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_4", "Mul")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n4));
uint64 hash1 = GetHash(gd, *n3);
uint64 hash2 = GetHash(gd, *n4);
EXPECT_NE(hash1, hash2);
Status s = CheckSubgraphsEqual(gd, n3, gd, n4);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(s.message(), ContainsRegex("Add"));
EXPECT_THAT(s.message(), ContainsRegex("Mul"));
}
TEST_F(DatasetHashUtilsTest, HashSameGraphDifferentSeeds) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* seed = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/seed", "Const")
.Attr("value", 123)
.Device("CPU:0")
.Finalize(seed));
NodeDef* seed2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/seed2", "Const")
.Attr("value", 456)
.Device("CPU:0")
.Finalize(seed2));
NodeDef* range_ds = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/range", "RangeDataset")
.Input(n1->name(), 0, DT_INT64)
.Input(n1->name(), 0, DT_INT64)
.Input(n1->name(), 0, DT_INT64)
.Device("CPU:0")
.Finalize(range_ds));
NodeDef* shuffle_ds = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/shuffle", "ShuffleDataset")
.Input(range_ds->name(), 0, DT_VARIANT)
.Input(n1->name(), 0, DT_INT64)
.Input(seed->name(), 0, DT_INT64)
.Input(seed2->name(), 0, DT_INT64)
.Device("CPU:0")
.Finalize(shuffle_ds));
NodeDef* different_seed = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/different_seed", "Const")
.Attr("value", 789)
.Device("CPU:0")
.Finalize(different_seed));
NodeDef* different_seed2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/different_seed2", "Const")
.Attr("value", 654)
.Device("CPU:0")
.Finalize(different_seed2));
NodeDef* range_ds_2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/range_2", "RangeDataset")
.Input(n1->name(), 0, DT_INT64)
.Input(n1->name(), 0, DT_INT64)
.Input(n1->name(), 0, DT_INT64)
.Device("CPU:0")
.Finalize(range_ds_2));
NodeDef* shuffle_ds_2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/shuffle_2", "ShuffleDataset")
.Input(range_ds_2->name(), 0, DT_VARIANT)
.Input(n1->name(), 0, DT_INT64)
.Input(different_seed->name(), 0, DT_INT64)
.Input(different_seed2->name(), 0, DT_INT64)
.Device("CPU:0")
.Finalize(shuffle_ds_2));
uint64 hash1 = GetHash(gd, *shuffle_ds);
uint64 hash2 = GetHash(gd, *shuffle_ds_2);
EXPECT_EQ(hash1, hash2);
TF_EXPECT_OK(CheckSubgraphsEqual(gd, shuffle_ds, gd, shuffle_ds_2));
}
TEST_F(DatasetHashUtilsTest, HashNodeSameGraphDifferentColocationNames) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Attr("_class", {"graph_1/node_2"})
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n3));
NodeDef* n4 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_3/node_7", "Const")
.Attr("value", 1)
.Attr("_class", {"graph_3/node_9"})
.Device("CPU:0")
.Finalize(n4));
NodeDef* n5 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_4/node_9", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n5));
NodeDef* n6 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_5/node_11", "Add")
.Device("CPU:0")
                  .Input(n4->name(), 0, DT_INT32)
                  .Input(n5->name(), 0, DT_INT32)
.Finalize(n6));
uint64 hash1 = GetHash(gd, *n3);
uint64 hash2 = GetHash(gd, *n6);
EXPECT_EQ(hash1, hash2);
TF_EXPECT_OK(CheckSubgraphsEqual(gd, n3, gd, n6));
}
TEST_F(DatasetHashUtilsTest, HashNodeReversedOrder) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n3));
NodeDef* n4 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_4", "Add")
.Device("CPU:0")
.Input(n2->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Finalize(n4));
uint64 hash1 = GetHash(gd, *n3);
uint64 hash2 = GetHash(gd, *n4);
EXPECT_NE(hash1, hash2);
Status s = CheckSubgraphsEqual(gd, n3, gd, n4);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(s.message(), ContainsRegex("AttrValues are different"));
}
TEST_F(DatasetHashUtilsTest, HashNodeInputPortChanged) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n3));
NodeDef* n4 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_4", "Add")
.Device("CPU:0")
.Input(n1->name(), 1, DT_INT32)
.Input(n2->name(), 2, DT_INT32)
.Finalize(n4));
uint64 hash1 = GetHash(gd, *n3);
uint64 hash2 = GetHash(gd, *n4);
EXPECT_NE(hash1, hash2);
Status s = CheckSubgraphsEqual(gd, n3, gd, n4);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(s.message(), ContainsRegex("Node inputs"));
}
TEST_F(DatasetHashUtilsTest, HashNodeSameFunctionDifferentNames) {
GraphDef gd;
FunctionDefLibrary* fl1 = gd.mutable_library();
FunctionDef* f1 = fl1->add_function();
*f1 = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
FunctionDef* f2 = fl1->add_function();
*f2 = FunctionDefHelper::Create(
"AddAndMul2", {"input: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"input", "input"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"input", "input"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
AttrValue a1;
NameAttrList* nal1 = a1.mutable_func();
nal1->set_name("AddAndMul");
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
std::vector<NodeDefBuilder::NodeOut> func_inputs;
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a1)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
AttrValue a2;
NameAttrList* nal2 = a2.mutable_func();
nal2->set_name("AddAndMul2");
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a2)
.Device("CPU:0")
.Finalize(n3));
uint64 hash1 = GetHash(gd, *n2);
uint64 hash2 = GetHash(gd, *n3);
EXPECT_EQ(hash1, hash2);
TF_EXPECT_OK(CheckSubgraphsEqual(gd, n2, gd, n3));
}
TEST_F(DatasetHashUtilsTest, HashNodeSameFunctionListsDifferentNames) {
GraphDef gd;
FunctionDefLibrary* fl1 = gd.mutable_library();
FunctionDef* f1 = fl1->add_function();
*f1 = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
FunctionDef* f2 = fl1->add_function();
*f2 = FunctionDefHelper::Create(
"AddAndMul2", {"input: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"input", "input"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"input", "input"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
AttrValue a1;
AttrValue_ListValue* list1 = a1.mutable_list();
NameAttrList* nal1 = list1->add_func();
nal1->set_name("AddAndMul");
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
std::vector<NodeDefBuilder::NodeOut> func_inputs;
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a1)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
AttrValue a2;
AttrValue_ListValue* list2 = a2.mutable_list();
NameAttrList* nal2 = list2->add_func();
nal2->set_name("AddAndMul2");
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a2)
.Device("CPU:0")
.Finalize(n3));
uint64 hash1 = GetHash(gd, *n2);
uint64 hash2 = GetHash(gd, *n3);
EXPECT_EQ(hash1, hash2);
TF_EXPECT_OK(CheckSubgraphsEqual(gd, n2, gd, n3));
}
TEST_F(DatasetHashUtilsTest, HashNodeSameFunctionsOps) {
GraphDef gd;
FunctionDefLibrary* fl1 = gd.mutable_library();
FunctionDef* f1 = fl1->add_function();
FunctionDef func = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
*f1 = func;
FunctionDef* f2 = fl1->add_function();
func = FunctionDefHelper::Create(
"AddAndMul2", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
*f2 = func;
FunctionLibraryDefinition flib(OpRegistry::Global(), gd.library());
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "AddAndMul", &flib)
.Input(n1->name(), 0, DT_FLOAT)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "AddAndMul2", &flib)
.Input(n1->name(), 0, DT_FLOAT)
.Device("CPU:0")
.Finalize(n3));
uint64 hash1 = GetHash(gd, *n2);
uint64 hash2 = GetHash(gd, *n3);
EXPECT_EQ(hash1, hash2);
TF_EXPECT_OK(CheckSubgraphsEqual(gd, n2, gd, n3));
}
TEST_F(DatasetHashUtilsTest, HashNodeDifferentFunctionsOps) {
GraphDef gd;
FunctionDefLibrary* fl1 = gd.mutable_library();
FunctionDef* f1 = fl1->add_function();
FunctionDef func = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
*f1 = func;
FunctionDef* f2 = fl1->add_function();
func = FunctionDefHelper::Create(
"AddAndMul2", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "ret"}});
*f2 = func;
FunctionLibraryDefinition flib(OpRegistry::Global(), gd.library());
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "AddAndMul", &flib)
.Input(n1->name(), 0, DT_FLOAT)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "AddAndMul2", &flib)
.Input(n1->name(), 0, DT_FLOAT)
.Device("CPU:0")
.Finalize(n3));
uint64 hash1 = GetHash(gd, *n2);
uint64 hash2 = GetHash(gd, *n3);
EXPECT_NE(hash1, hash2);
Status s = CheckSubgraphsEqual(gd, n2, gd, n3);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(
s.message(),
ContainsRegex("Functions AddAndMul and AddAndMul2 are not the same"));
}
TEST_F(DatasetHashUtilsTest, HashNodeDifferentFunctions) {
GraphDef gd;
FunctionDefLibrary* fl1 = gd.mutable_library();
FunctionDef* f1 = fl1->add_function();
FunctionDef func = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
*f1 = func;
FunctionDef* f2 = fl1->add_function();
func = FunctionDefHelper::Create(
"AddAndMul2", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "ret"}});
*f2 = func;
AttrValue a1;
NameAttrList* nal1 = a1.mutable_func();
nal1->set_name("AddAndMul");
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
std::vector<NodeDefBuilder::NodeOut> func_inputs;
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a1)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
AttrValue a2;
NameAttrList* nal2 = a2.mutable_func();
nal2->set_name("AddAndMul2");
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a2)
.Device("CPU:0")
.Finalize(n3));
uint64 hash1 = GetHash(gd, *n2);
uint64 hash2 = GetHash(gd, *n3);
EXPECT_NE(hash1, hash2);
Status s = CheckSubgraphsEqual(gd, n2, gd, n3);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(
s.message(),
ContainsRegex("Functions AddAndMul and AddAndMul2 are not the same"));
}
TEST_F(DatasetHashUtilsTest, HashNodeDifferentFunctionLists) {
GraphDef gd;
FunctionDefLibrary* fl1 = gd.mutable_library();
FunctionDef* f1 = fl1->add_function();
FunctionDef func = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
*f1 = func;
FunctionDef* f2 = fl1->add_function();
func = FunctionDefHelper::Create(
"AddAndMul2", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "ret"}});
*f2 = func;
AttrValue a1;
AttrValue_ListValue* list1 = a1.mutable_list();
NameAttrList* nal1 = list1->add_func();
nal1->set_name("AddAndMul");
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
std::vector<NodeDefBuilder::NodeOut> func_inputs;
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a1)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
AttrValue a2;
AttrValue_ListValue* list2 = a2.mutable_list();
NameAttrList* nal2 = list2->add_func();
nal2->set_name("AddAndMul2");
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a2)
.Device("CPU:0")
.Finalize(n3));
uint64 hash1 = GetHash(gd, *n2);
uint64 hash2 = GetHash(gd, *n3);
EXPECT_NE(hash1, hash2);
Status s = CheckSubgraphsEqual(gd, n2, gd, n3);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(
s.message(),
ContainsRegex("Functions AddAndMul and AddAndMul2 are not the same"));
}
TEST_F(DatasetHashUtilsTest, HashNodeDifferentControlInputs) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Const")
.Attr("value", 10)
.Device("CPU:0")
.Finalize(n3));
NodeDef* n4 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_4", "Identity")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.ControlInput(n2->name())
.Finalize(n4));
NodeDef* n5 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_5", "Identity")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.ControlInput(n3->name())
.Finalize(n5));
uint64 hash1 = GetHash(gd, *n4);
uint64 hash2 = GetHash(gd, *n5);
EXPECT_NE(hash1, hash2);
Status s = CheckSubgraphsEqual(gd, n4, gd, n5);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(s.message(), ContainsRegex("Control dependencies are different"));
}
TEST_F(DatasetHashUtilsTest, HashNodeControlInputDifferentOrdering) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Const")
.Attr("value", 10)
.Device("CPU:0")
.Finalize(n3));
NodeDef* n4 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_4", "Identity")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.ControlInput(n2->name())
.ControlInput(n3->name())
.Finalize(n4));
NodeDef* n5 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_5", "Identity")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.ControlInput(n3->name())
.ControlInput(n2->name())
.Finalize(n5));
uint64 hash1 = GetHash(gd, *n4);
uint64 hash2 = GetHash(gd, *n5);
EXPECT_EQ(hash1, hash2);
TF_EXPECT_OK(CheckSubgraphsEqual(gd, n4, gd, n5));
}
TEST_F(DatasetHashUtilsTest, HashNodeDifferentGraphSamePartialGraph) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n3));
uint64 hash1 = GetHash(gd, *n1);
n3->Clear();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Mul")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n3));
uint64 hash2 = GetHash(gd, *n1);
EXPECT_EQ(hash1, hash2);
}
TEST_F(DatasetHashUtilsTest, HashNodeWithManyControlDependencies) {
GraphDef gd;
NodeDef* n;
for (int i = 0; i < 1000; ++i) {
n = gd.add_node();
NodeDefBuilder ndb(absl::StrCat("graph_1/node_", i), "Const");
ndb.Attr("value", 1);
ndb.Device("CPU:0");
for (int j = 0; j < i; ++j) {
ndb.ControlInput(absl::StrCat("graph_1/node_", j));
}
TF_CHECK_OK(ndb.Finalize(n));
}
GetHash(gd, *n);
}
TEST_F(DatasetHashUtilsTest, HashFunctionsWithControlDependencyLoop) {
GraphDef gd;
FunctionDefLibrary* fl1 = gd.mutable_library();
FunctionDef* f1 = fl1->add_function();
AttrValue a1;
NameAttrList* nal1 = a1.mutable_func();
nal1->set_name("AddAndMul");
std::pair<string, FunctionDefHelper::AttrValueWrapper> func_attr = {
"body", FunctionDefHelper::AttrValueWrapper(*nal1)};
FunctionDef func = FunctionDefHelper::Create(
"AddAndMul",
{"i: float", "j: int32"},
{"o: float"},
{},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}, {"ret"}},
{{"for"}, "For", {"j", "j", "j"}, {func_attr, {"T", DT_FLOAT}}, {"ret"}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
*f1 = func;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
std::vector<NodeDefBuilder::NodeOut> func_inputs;
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.ControlInput("graph_1/node_2")
.Attr("body", a1)
.Device("CPU:0")
.Finalize(n2));
GetHash(gd, *n2);
}
TEST_F(DatasetHashUtilsTest, HashNodeWithControlDependencyLoop) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.ControlInput("graph_1/node_2")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.ControlInput("graph_1/node_1")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.ControlInput("graph_1/node_1")
.ControlInput("graph_1/node_2")
.Finalize(n3));
GetHash(gd, *n3);
}
TEST_F(DatasetHashUtilsTest, HashNodeWithControlDependencyLoopDifferentNames) {
GraphDef gd1;
NodeDef* n1 = gd1.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.ControlInput("graph_1/node_2")
.Finalize(n1));
NodeDef* n2 = gd1.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.ControlInput("graph_1/node_1")
.Finalize(n2));
NodeDef* n3 = gd1.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.ControlInput("graph_1/node_1")
.ControlInput("graph_1/node_2")
.Finalize(n3));
GraphDef gd2;
NodeDef* n4 = gd2.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_4", "Const")
.Attr("value", 1)
.Device("CPU:0")
.ControlInput("graph_1/node_5")
.Finalize(n4));
NodeDef* n5 = gd2.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_5", "Const")
.Attr("value", 2)
.Device("CPU:0")
.ControlInput("graph_1/node_4")
.Finalize(n5));
NodeDef* n6 = gd2.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_6", "Add")
.Device("CPU:0")
.Input(n4->name(), 0, DT_INT32)
.Input(n5->name(), 0, DT_INT32)
.ControlInput("graph_1/node_4")
.ControlInput("graph_1/node_5")
.Finalize(n6));
EXPECT_EQ(GetHash(gd1, *n3), GetHash(gd2, *n6));
}
TEST_F(DatasetHashUtilsTest, HashInt32Tensor) {
Tensor s1(42);
Tensor s2(42);
Tensor s3(43);
EXPECT_EQ(GetHash(s1), GetHash(s2));
EXPECT_NE(GetHash(s1), GetHash(s3));
Tensor v1(DT_INT32, TensorShape({2}));
v1.vec<int32>()(0) = 0;
v1.vec<int32>()(1) = 1;
Tensor v2(DT_INT32, TensorShape({2}));
v2.vec<int32>()(0) = 0;
v2.vec<int32>()(1) = 1;
Tensor v3(DT_INT32, TensorShape({2}));
v3.vec<int32>()(0) = 0;
v3.vec<int32>()(1) = 2;
EXPECT_EQ(GetHash(v1), GetHash(v2));
EXPECT_NE(GetHash(v1), GetHash(v3));
}
TEST_F(DatasetHashUtilsTest, HashStringTensor) {
Tensor s1("hello");
Tensor s2("hello");
Tensor s3("world");
EXPECT_EQ(GetHash(s1), GetHash(s2));
EXPECT_NE(GetHash(s1), GetHash(s3));
Tensor v1(DT_STRING, TensorShape({2}));
v1.vec<tstring>()(0) = "hello";
v1.vec<tstring>()(1) = "world";
Tensor v2(DT_STRING, TensorShape({2}));
v2.vec<tstring>()(0) = "hello";
v2.vec<tstring>()(1) = "world";
Tensor v3(DT_STRING, TensorShape({2}));
v3.vec<tstring>()(0) = "hello";
v3.vec<tstring>()(1) = "universe";
EXPECT_EQ(GetHash(v1), GetHash(v2));
EXPECT_NE(GetHash(v1), GetHash(v3));
}
static void BM_ParallelFunctionCallsGraph(benchmark::State& state) {
GraphDef graph_def;
FunctionDefLibrary* fl = graph_def.mutable_library();
FunctionDef* fd = fl->add_function();
*fd = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
NodeDef* input = graph_def.add_node();
input->set_name("InputPlaceholder");
input->set_op("Placeholder");
AddNodeAttr("dtype", DT_FLOAT, input);
NodeDef* target = graph_def.add_node();
target->set_name("Target");
target->set_op("NoOp");
ConfigProto config_pb;
config_pb.mutable_device_count()->insert({"CPU", 1});
config_pb.mutable_device_count()->insert({"GPU", 1});
config_pb.set_allow_soft_placement(true);
for (int i = 0; i < 100; ++i) {
NodeDef* node = graph_def.add_node();
node->set_name(absl::StrCat("PartitionedCall_", i));
node->set_op("PartitionedCall");
*node->add_input() = input->name();
AddNodeAttr("Tin", DT_FLOAT, node);
AddNodeAttr("Tout", DT_FLOAT, node);
AddNodeAttr("config", "", node);
AddNodeAttr("config_proto", config_pb.SerializeAsString(), node);
NameAttrList func;
func.set_name(fd->signature().name());
AddNodeAttr("f", func, node);
*target->add_input() = absl::StrCat("^", node->name());
}
uint64 hash_value;
for (auto _ : state) {
TF_CHECK_OK(HashNode(graph_def, *target, &hash_value));
}
}
BENCHMARK(BM_ParallelFunctionCallsGraph);
static void BM_ChainedFunctionCallsGraph(benchmark::State& state) {
GraphDef graph_def;
FunctionDefLibrary* fl = graph_def.mutable_library();
FunctionDef* fd = fl->add_function();
*fd = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
NodeDef* input = graph_def.add_node();
input->set_name("InputPlaceholder");
input->set_op("Placeholder");
AddNodeAttr("dtype", DT_FLOAT, input);
ConfigProto config_pb;
config_pb.mutable_device_count()->insert({"CPU", 1});
config_pb.mutable_device_count()->insert({"GPU", 1});
config_pb.set_allow_soft_placement(true);
for (int i = 0; i < 100; ++i) {
NodeDef* node = graph_def.add_node();
node->set_name(absl::StrCat("PartitionedCall_", i));
node->set_op("PartitionedCall");
if (i > 0) {
*node->add_input() = absl::StrCat("PartitionedCall_", i - 1);
} else {
*node->add_input() = input->name();
}
AddNodeAttr("Tin", DT_FLOAT, node);
AddNodeAttr("Tout", DT_FLOAT, node);
AddNodeAttr("config", "", node);
AddNodeAttr("config_proto", config_pb.SerializeAsString(), node);
NameAttrList func;
func.set_name(fd->signature().name());
AddNodeAttr("f", func, node);
}
const NodeDef& target = graph_def.node(graph_def.node_size() - 1);
uint64 hash_value;
for (auto _ : state) {
TF_CHECK_OK(HashNode(graph_def, target, &hash_value));
}
}
BENCHMARK(BM_ChainedFunctionCallsGraph);
static void BM_ComposedFunctionCallsGraph(benchmark::State& state) {
GraphDef graph_def;
FunctionDefLibrary* fl = graph_def.mutable_library();
FunctionDef* fd = fl->add_function();
*fd = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
ConfigProto config_pb;
config_pb.mutable_device_count()->insert({"CPU", 1});
config_pb.mutable_device_count()->insert({"GPU", 1});
config_pb.set_allow_soft_placement(true);
for (int i = 0; i < 99; ++i) {
NameAttrList func;
func.set_name(fd->signature().name());
FunctionDef* fd = fl->add_function();
*fd = FunctionDefHelper::Create(
absl::StrCat("F_", i),
{"i: float"},
{"o: float"},
{},
{
{
{"inner_call"},
"PartitionedCall",
{"i"},
{{"Ti", DT_FLOAT},
{"Tout", DT_FLOAT},
{"config", ""},
{"config_proto", config_pb.SerializeAsString()},
{"f", func}},
},
},
{{"o", "inner_call:o:0"}},
{{"must_execute", "inner_call"}});
}
NodeDef* input = graph_def.add_node();
input->set_name("InputPlaceholder");
input->set_op("Placeholder");
AddNodeAttr("dtype", DT_FLOAT, input);
NodeDef* node = graph_def.add_node();
node->set_name("PartitionedCall_start");
node->set_op("PartitionedCall");
*node->add_input() = input->name();
AddNodeAttr("Tin", DT_FLOAT, node);
AddNodeAttr("Tout", DT_FLOAT, node);
AddNodeAttr("config", "", node);
AddNodeAttr("config_proto", config_pb.SerializeAsString(), node);
NameAttrList func;
func.set_name(fd->signature().name());
AddNodeAttr("f", func, node);
const NodeDef& target = graph_def.node(graph_def.node_size() - 1);
uint64 hash_value;
for (auto _ : state) {
TF_CHECK_OK(HashNode(graph_def, target, &hash_value));
}
}
BENCHMARK(BM_ComposedFunctionCallsGraph);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/hash_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/hash_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
16d598a3-12f1-4153-9343-8f010008dc25 | cpp | tensorflow/tensorflow | metric_utils | tensorflow/core/data/metric_utils.cc | tensorflow/core/data/metric_utils_test.cc | #include "tensorflow/core/data/metric_utils.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>
#include "absl/time/time.h"
#include "tensorflow/core/data/utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
namespace tensorflow {
namespace data {
namespace {
uint64_t safe_sub(uint64_t x, uint64_t y) { return x >= y ? x - y : 0; }
}
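// safe_sub is a saturating subtraction that guards against clock skew between
// timestamps taken at different points; e.g. safe_sub(5, 7) is 0 rather than
// a wrapped-around uint64_t value.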
IteratorMetricsCollector::IteratorMetricsCollector(
const std::string& device_type, const Env& env)
: device_type_(device_type), env_(env) {}
absl::Time IteratorMetricsCollector::RecordStart() {
const uint64_t start_time_us = env_.NowMicros();
if (!ShouldCollectMetrics()) {
return absl::FromUnixMicros(start_time_us);
}
mutex_lock l(mu_);
if (end_time_us_ == 0) {
end_time_us_ = start_time_us;
}
uint64_t gap_time_us = 0;
if (num_active_calls_ == 0) {
first_start_time_us_ = start_time_us;
gap_time_us = safe_sub(start_time_us, end_time_us_);
}
metrics::RecordTFDataIteratorGap(gap_time_us);
num_active_calls_++;
return absl::FromUnixMicros(start_time_us);
}
void IteratorMetricsCollector::RecordStop(absl::Time start_time,
const std::vector<Tensor>& output) {
if (!ShouldCollectMetrics()) {
return;
}
const uint64_t end_time_us = env_.NowMicros();
const int64_t latency_micros =
safe_sub(end_time_us, absl::ToUnixMicros(start_time));
AddLatencySample(latency_micros);
IncrementThroughput(GetTotalBytes(output));
mutex_lock l(mu_);
metrics::RecordTFDataIteratorLifetime(safe_sub(end_time_us, end_time_us_));
end_time_us_ = std::max(end_time_us_, end_time_us);
num_active_calls_--;
if (num_active_calls_ == 0) {
metrics::RecordTFDataIteratorBusy(
safe_sub(end_time_us_, first_start_time_us_));
}
}
bool IteratorMetricsCollector::ShouldCollectMetrics() const {
return device_type_ == DEVICE_CPU;
}
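// Usage sketch (hypothetical caller; the real call sites live in the iterator
// implementation): bracket each GetNext call with RecordStart/RecordStop so
// latency, lifetime, and busy-time metrics are exported.
//
//   IteratorMetricsCollector collector(DEVICE_CPU, *Env::Default());
//   absl::Time start = collector.RecordStart();
//   std::vector<Tensor> output = GetNextFromIterator();  // hypothetical
//   collector.RecordStop(start, output);
//
// Metrics are collected only for CPU devices; see ShouldCollectMetrics above.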
}
} | #include "tensorflow/core/data/metric_utils.h"
#include <cstdint>
#include "absl/memory/memory.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/lib/monitoring/test_utils.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
using tensorflow::monitoring::testing::CellReader;
using tensorflow::monitoring::testing::Histogram;
TEST(MetricUtilsTest, CollectMetrics) {
CellReader<Histogram> latency("/tensorflow/data/getnext_duration");
CellReader<int64_t> iterator_lifetime("/tensorflow/data/iterator_lifetime");
CellReader<int64_t> iterator_busy("/tensorflow/data/iterator_busy");
EXPECT_FLOAT_EQ(latency.Delta().num(), 0.0);
EXPECT_EQ(iterator_lifetime.Delta(), 0);
EXPECT_EQ(iterator_busy.Delta(), 0);
IteratorMetricsCollector metrics_collector(DEVICE_CPU, *Env::Default());
absl::Time start_time = metrics_collector.RecordStart();
absl::SleepFor(absl::Seconds(1));
metrics_collector.RecordStop(start_time, {});
Histogram latency_histogram = latency.Delta();
EXPECT_FLOAT_EQ(latency_histogram.num(), 1.0);
EXPECT_GT(latency_histogram.sum(), 0.0);
EXPECT_GT(iterator_lifetime.Delta(), 0);
EXPECT_GT(iterator_busy.Delta(), 0);
}
TEST(MetricUtilsTest, ShouldNotCollectMetrics) {
CellReader<Histogram> latency("/tensorflow/data/getnext_duration");
CellReader<int64_t> iterator_lifetime("/tensorflow/data/iterator_lifetime");
CellReader<int64_t> iterator_busy("/tensorflow/data/iterator_busy");
EXPECT_FLOAT_EQ(latency.Delta().num(), 0.0);
EXPECT_EQ(iterator_lifetime.Delta(), 0);
EXPECT_EQ(iterator_busy.Delta(), 0);
IteratorMetricsCollector metrics_collector(DEVICE_TPU, *Env::Default());
absl::Time start_time = metrics_collector.RecordStart();
absl::SleepFor(absl::Seconds(1));
metrics_collector.RecordStop(start_time, {});
EXPECT_FLOAT_EQ(latency.Delta().num(), 0.0);
EXPECT_EQ(iterator_lifetime.Delta(), 0);
EXPECT_EQ(iterator_busy.Delta(), 0);
}
TEST(MetricUtilsTest, ConcurrentThreads) {
CellReader<Histogram> latency("/tensorflow/data/getnext_duration");
CellReader<int64_t> iterator_lifetime("/tensorflow/data/iterator_lifetime");
CellReader<int64_t> iterator_busy("/tensorflow/data/iterator_busy");
EXPECT_FLOAT_EQ(latency.Delta().num(), 0.0);
EXPECT_EQ(iterator_lifetime.Delta(), 0);
EXPECT_EQ(iterator_busy.Delta(), 0);
IteratorMetricsCollector metrics_collector(DEVICE_CPU, *Env::Default());
absl::Time start_time = metrics_collector.RecordStart();
auto thread = absl::WrapUnique(Env::Default()->StartThread(
{}, "Concurrent metric collection thread",
[&metrics_collector]() {
absl::Time concurrent_start_time = metrics_collector.RecordStart();
absl::SleepFor(absl::Seconds(1));
metrics_collector.RecordStop(concurrent_start_time, {});
}));
absl::SleepFor(absl::Seconds(1));
metrics_collector.RecordStop(start_time, {});
thread.reset();
Histogram latency_histogram = latency.Delta();
EXPECT_FLOAT_EQ(latency_histogram.num(), 2.0);
EXPECT_GT(latency_histogram.sum(),
absl::ToInt64Microseconds(absl::Seconds(2)));
EXPECT_GE(iterator_lifetime.Delta(),
absl::ToInt64Microseconds(absl::Seconds(1)));
EXPECT_LT(iterator_lifetime.Delta(),
absl::ToInt64Microseconds(absl::Seconds(1.5)));
EXPECT_GE(iterator_busy.Delta(), absl::ToInt64Microseconds(absl::Seconds(1)));
EXPECT_LT(iterator_busy.Delta(),
absl::ToInt64Microseconds(absl::Seconds(1.5)));
}
TEST(MetricUtilsTest, OverlappingThreads) {
CellReader<Histogram> latency("/tensorflow/data/getnext_duration");
CellReader<int64_t> iterator_lifetime("/tensorflow/data/iterator_lifetime");
CellReader<int64_t> iterator_busy("/tensorflow/data/iterator_busy");
EXPECT_FLOAT_EQ(latency.Delta().num(), 0.0);
EXPECT_EQ(iterator_lifetime.Delta(), 0);
EXPECT_EQ(iterator_busy.Delta(), 0);
IteratorMetricsCollector metrics_collector(DEVICE_CPU, *Env::Default());
absl::Time start_time = metrics_collector.RecordStart();
absl::SleepFor(absl::Seconds(0.5));
auto thread = absl::WrapUnique(Env::Default()->StartThread(
{}, "Concurrent metric collection thread",
[&metrics_collector]() {
absl::Time concurrent_start_time = metrics_collector.RecordStart();
absl::SleepFor(absl::Seconds(2));
metrics_collector.RecordStop(concurrent_start_time, {});
}));
absl::SleepFor(absl::Seconds(0.5));
metrics_collector.RecordStop(start_time, {});
absl::SleepFor(absl::Seconds(1.5));
thread.reset();
Histogram latency_histogram = latency.Delta();
EXPECT_FLOAT_EQ(latency_histogram.num(), 2.0);
EXPECT_GT(latency_histogram.sum(),
absl::ToInt64Microseconds(absl::Seconds(3)));
EXPECT_GE(iterator_lifetime.Delta(),
absl::ToInt64Microseconds(absl::Seconds(2.5)));
EXPECT_LT(iterator_lifetime.Delta(),
absl::ToInt64Microseconds(absl::Seconds(2.9)));
EXPECT_GE(iterator_busy.Delta(),
absl::ToInt64Microseconds(absl::Seconds(2.5)));
EXPECT_LT(iterator_busy.Delta(),
absl::ToInt64Microseconds(absl::Seconds(2.9)));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/metric_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/metric_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2700df4d-b4a5-4d9a-94cd-6f37b3920e1d | cpp | tensorflow/tensorflow | dataset_utils | tensorflow/core/data/dataset_utils.cc | tensorflow/core/data/dataset_utils_test.cc | #include "tensorflow/core/data/dataset_utils.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <memory>
#include <queue>
#include <random>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/proto_serialization.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/host_info.h"
#include "tensorflow/core/platform/regexp.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/util/determinism.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kOutputSize[] = "output_size";
constexpr char kCode[] = "code";
constexpr char kExperimentOptAll[] = "all";
constexpr char kExperimentOptOutAllExceptOptIn[] = "all_except_opt_in";
constexpr char kMessage[] = "msg";
constexpr char kOutput[] = "output";
static mutex* get_dataset_experiment_registry_lock() {
static mutex dataset_experiment_registry_lock(LINKER_INITIALIZED);
return &dataset_experiment_registry_lock;
}
static absl::flat_hash_map<string,
DatasetExperimentRegistry::ExperimentSelector>*
get_dataset_experiments() {
static absl::flat_hash_map<
string, DatasetExperimentRegistry::ExperimentSelector>* experiments =
new absl::flat_hash_map<string,
DatasetExperimentRegistry::ExperimentSelector>;
return experiments;
}
constexpr char kMapAndBatchFusionOpt[] = "map_and_batch_fusion";
constexpr char kNoopEliminationOpt[] = "noop_elimination";
constexpr char kMapParallelizationOpt[] = "map_parallelization";
constexpr char kShuffleAndRepeatFusionOpt[] = "shuffle_and_repeat_fusion";
constexpr char kFilterFusionOpt[] = "filter_fusion";
constexpr char kMapAndFilterFusionOpt[] = "map_and_filter_fusion";
constexpr char kMapFusionOpt[] = "map_fusion";
constexpr char kParallelBatchOpt[] = "parallel_batch";
constexpr char kAutotuneBufferSizesOpt[] = "autotune_buffer_sizes";
constexpr char kDisablePrefetchLegacyAutotuneOpt[] =
"disable_prefetch_legacy_autotune";
constexpr char kMakeSloppyOpt[] = "make_sloppy";
constexpr char kBatchParallelizationOpt[] = "batch_parallelization";
constexpr char kEnableGradientDescentOpt[] = "enable_gradient_descent";
constexpr char kInjectPrefetchOpt[] = "inject_prefetch";
constexpr char kSeqInterleavePrefetchOpt[] = "seq_interleave_prefetch";
constexpr char kInjectIoPrefetchEligibleOpt[] = "inject_io_prefetch_eligible";
constexpr char kInjectIoPrefetchOpt[] = "inject_io_prefetch";
constexpr char kAutotuneOpt[] = "autotune";
constexpr char kSlackOpt[] = "slack";
constexpr char kSlackPeriodOpt[] = "slack_period";
constexpr char kMakeDeterministicOpt[] = "make_deterministic";
constexpr char kFilterParallelizationOpt[] = "filter_parallelization";
constexpr char kWarmStartOpt[] = "warm_start";
void DefaultOptimizationGraphRewrites(
const Options& options, absl::flat_hash_set<tstring>* optimization_enabled,
absl::flat_hash_set<tstring>* optimization_disabled,
absl::flat_hash_set<tstring>* optimization_default) {
const auto& optimization_options = options.optimization_options();
if (optimization_options.optional_apply_default_optimizations_case() !=
OptimizationOptions::kApplyDefaultOptimizations ||
optimization_options.apply_default_optimizations()) {
if (optimization_options.optional_map_and_batch_fusion_case() !=
OptimizationOptions::kMapAndBatchFusion) {
optimization_default->insert(kMapAndBatchFusionOpt);
}
if (optimization_options.optional_noop_elimination_case() !=
OptimizationOptions::kNoopElimination) {
optimization_default->insert(kNoopEliminationOpt);
}
if (optimization_options.optional_map_parallelization_case() !=
OptimizationOptions::kMapParallelization) {
optimization_default->insert(kMapParallelizationOpt);
}
if (optimization_options.optional_shuffle_and_repeat_fusion_case() !=
OptimizationOptions::kShuffleAndRepeatFusion) {
optimization_default->insert(kShuffleAndRepeatFusionOpt);
}
if (optimization_options.optional_parallel_batch_case() !=
OptimizationOptions::kParallelBatch) {
optimization_default->insert(kParallelBatchOpt);
}
if (optimization_options.optional_inject_prefetch_case() !=
OptimizationOptions::kInjectPrefetch) {
optimization_default->insert(kInjectPrefetchOpt);
}
}
if (OpDeterminismRequired()) {
optimization_enabled->insert(kMakeDeterministicOpt);
}
if (optimization_options.optional_filter_fusion_case() ==
OptimizationOptions::kFilterFusion) {
if (optimization_options.filter_fusion()) {
optimization_enabled->insert(kFilterFusionOpt);
} else {
optimization_disabled->insert(kFilterFusionOpt);
}
}
if (optimization_options.optional_map_and_batch_fusion_case() ==
OptimizationOptions::kMapAndBatchFusion) {
if (optimization_options.map_and_batch_fusion()) {
optimization_enabled->insert(kMapAndBatchFusionOpt);
} else {
optimization_disabled->insert(kMapAndBatchFusionOpt);
}
}
if (optimization_options.optional_map_and_filter_fusion_case() ==
OptimizationOptions::kMapAndFilterFusion) {
if (optimization_options.map_and_filter_fusion()) {
optimization_enabled->insert(kMapAndFilterFusionOpt);
} else {
optimization_disabled->insert(kMapAndFilterFusionOpt);
}
}
if (optimization_options.optional_map_parallelization_case() ==
OptimizationOptions::kMapParallelization) {
if (optimization_options.map_parallelization()) {
optimization_enabled->insert(kMapParallelizationOpt);
} else {
optimization_disabled->insert(kMapParallelizationOpt);
}
}
if (optimization_options.optional_filter_parallelization_case() ==
OptimizationOptions::kFilterParallelization) {
if (optimization_options.filter_parallelization()) {
optimization_enabled->insert(kFilterParallelizationOpt);
} else {
optimization_disabled->insert(kFilterParallelizationOpt);
}
}
if (optimization_options.optional_map_fusion_case() ==
OptimizationOptions::kMapFusion) {
if (optimization_options.map_fusion()) {
optimization_enabled->insert(kMapFusionOpt);
} else {
optimization_disabled->insert(kMapFusionOpt);
}
}
if (optimization_options.optional_noop_elimination_case() ==
OptimizationOptions::kNoopElimination) {
if (optimization_options.noop_elimination()) {
optimization_enabled->insert(kNoopEliminationOpt);
} else {
optimization_disabled->insert(kNoopEliminationOpt);
}
}
if (optimization_options.optional_parallel_batch_case() ==
OptimizationOptions::kParallelBatch) {
if (optimization_options.parallel_batch()) {
optimization_enabled->insert(kParallelBatchOpt);
} else {
optimization_disabled->insert(kParallelBatchOpt);
}
}
if (optimization_options.optional_shuffle_and_repeat_fusion_case() ==
OptimizationOptions::kShuffleAndRepeatFusion) {
if (optimization_options.shuffle_and_repeat_fusion()) {
optimization_enabled->insert(kShuffleAndRepeatFusionOpt);
} else {
optimization_disabled->insert(kShuffleAndRepeatFusionOpt);
}
}
if (optimization_options.optional_inject_prefetch_case() ==
OptimizationOptions::kInjectPrefetch) {
if (optimization_options.inject_prefetch()) {
optimization_enabled->insert(kInjectPrefetchOpt);
} else {
optimization_disabled->insert(kInjectPrefetchOpt);
}
}
if (optimization_options.optional_seq_interleave_prefetch_case() ==
OptimizationOptions::kSeqInterleavePrefetch) {
if (optimization_options.seq_interleave_prefetch()) {
optimization_enabled->insert(kSeqInterleavePrefetchOpt);
} else {
optimization_disabled->insert(kSeqInterleavePrefetchOpt);
}
}
}
bool IsOpAllowlisted(const OpDef* op_def) {
return (op_def->output_arg_size() == 1 &&
op_def->output_arg(0).type() == DT_VARIANT &&
(absl::EndsWith(op_def->name(), "Dataset") ||
absl::EndsWith(op_def->name(), "DatasetV2"))) ||
AllowlistedStatefulOpRegistry::Global()->Contains(op_def->name());
}
}
std::pair<int64_t, int64_t> MaybeOverrideSeeds(
std::pair<int64_t, int64_t> seeds) {
if (seeds.first == 0 && seeds.second == 0) {
return {random::New64(), random::New64()};
}
return seeds;
}
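// For example, MaybeOverrideSeeds({0, 0}) returns a freshly generated random
// seed pair, while MaybeOverrideSeeds({1, 2}) returns {1, 2} unchanged,
// matching the tf.data convention that (0, 0) requests nondeterministic
// seeding.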
Status VerifyTypeMatch(const DataType& expected, const DataType& received,
int index) {
if (expected != received) {
return errors::InvalidArgument("Data type mismatch at component ", index,
": expected ", DataTypeString(expected),
" but got ", DataTypeString(received), ".");
}
return absl::OkStatus();
}
Status VerifyTypesMatch(const DataTypeVector& expected,
const DataTypeVector& received) {
if (expected.size() != received.size()) {
return errors::InvalidArgument(
"Number of components does not match: expected ", expected.size(),
" types but got ", received.size(), ".");
}
for (size_t i = 0; i < expected.size(); ++i) {
TF_RETURN_IF_ERROR(VerifyTypeMatch(expected[i], received[i], i));
}
return absl::OkStatus();
}
Status VerifyTypesMatch(const DataTypeVector& expected,
const std::vector<Tensor>& received) {
if (expected.size() != received.size()) {
return errors::InvalidArgument(
"Number of components does not match: expected ", expected.size(),
" types but got ", received.size(), ".");
}
for (size_t i = 0; i < expected.size(); ++i) {
TF_RETURN_IF_ERROR(VerifyTypeMatch(expected[i], received[i].dtype(), i));
}
return absl::OkStatus();
}
Status VerifyShapeCompatible(const PartialTensorShape& expected,
const PartialTensorShape& received, int index) {
if (!expected.IsCompatibleWith(received)) {
return errors::InvalidArgument("Incompatible shapes at component ", index,
": expected ", expected.DebugString(),
" but got ", received.DebugString(), ".");
}
return absl::OkStatus();
}
Status VerifyShapesCompatible(const std::vector<PartialTensorShape>& expected,
const std::vector<PartialTensorShape>& received) {
if (expected.size() != received.size()) {
return errors::InvalidArgument(
"Number of components does not match: expected ", expected.size(),
" shapes but got ", received.size(), ".");
}
for (size_t i = 0; i < expected.size(); ++i) {
TF_RETURN_IF_ERROR(VerifyShapeCompatible(expected[i], received[i], i));
}
return absl::OkStatus();
}
Status VerifyShapesCompatible(const std::vector<PartialTensorShape>& expected,
const std::vector<Tensor>& received) {
if (expected.size() != received.size()) {
return errors::InvalidArgument(
"Number of components does not match: expected ", expected.size(),
" shapes but got ", received.size(), ".");
}
for (size_t i = 0; i < expected.size(); ++i) {
TF_RETURN_IF_ERROR(
VerifyShapeCompatible(expected[i], received[i].shape(), i));
}
return absl::OkStatus();
}
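// For example, an expected shape of [?, 2] is compatible with a received
// tensor of shape [3, 2] but not with shape [3, 4]; the error identifies the
// first mismatching component index.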
Status AddToFunctionLibrary(FunctionLibraryDefinition* base,
const FunctionLibraryDefinition& to_add) {
for (const auto& fn : to_add.ListFunctionNames()) {
if (auto found = base->Find(fn)) {
if (!OpDefEqual(found->signature(), to_add.Find(fn)->signature())) {
return errors::InvalidArgument("Cannot add function '", fn,
"' because a different function with "
"the same signature already exists.");
}
TF_RETURN_IF_ERROR(base->RemoveFunction(fn));
}
}
return base->AddLibrary(to_add);
}
Status AddToFunctionLibrary(FunctionLibraryDefinition* base,
const FunctionDefLibrary& to_add) {
for (const auto& fd : to_add.function()) {
if (auto found = base->Find(fd.signature().name())) {
if (!OpDefEqual(found->signature(), fd.signature())) {
return errors::InvalidArgument("Cannot add function '",
fd.signature().name(),
"' because a different function with "
"the same signature already exists.");
}
TF_RETURN_IF_ERROR(base->RemoveFunction(fd.signature().name()));
}
}
return base->AddLibrary(to_add);
}
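// In both overloads, a function already present in `base` with an identical
// signature is replaced by the incoming definition, while a name collision
// with a different signature is reported as InvalidArgument instead of being
// silently overwritten.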
Status IsFunctionStateful(const FunctionLibraryDefinition& library,
const FunctionDef& function_def) {
if (!function_def.signature().is_stateful()) {
return absl::OkStatus();
}
for (const NodeDef& node_def : function_def.node_def()) {
TF_RETURN_IF_ERROR(IsNodeStateful(library, node_def));
}
return absl::OkStatus();
}
Status IsNodeStateful(const FunctionLibraryDefinition& library,
const NodeDef& node) {
const OpDef* op_def;
if (!OpRegistry::Global()->LookUpOpDef(node.op(), &op_def).ok() ||
IsOpAllowlisted(op_def) || !op_def->is_stateful() ||
op_def->name() == "Assert") {
return absl::OkStatus();
}
if (op_def->name() == "If") {
const FunctionDef* then_func =
library.Find(node.attr().at("then_branch").func().name());
const FunctionDef* else_func =
library.Find(node.attr().at("else_branch").func().name());
if (then_func != nullptr) {
TF_RETURN_IF_ERROR(IsFunctionStateful(library, *then_func));
}
if (else_func != nullptr) {
TF_RETURN_IF_ERROR(IsFunctionStateful(library, *else_func));
}
return absl::OkStatus();
}
if (op_def->name() == "While") {
const FunctionDef* cond_func =
library.Find(node.attr().at("cond").func().name());
const FunctionDef* body_func =
library.Find(node.attr().at("body").func().name());
if (cond_func != nullptr) {
TF_RETURN_IF_ERROR(IsFunctionStateful(library, *cond_func));
}
if (body_func != nullptr) {
TF_RETURN_IF_ERROR(IsFunctionStateful(library, *body_func));
}
return absl::OkStatus();
}
return errors::FailedPrecondition(op_def->name(), " is stateful.");
}
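// "If" and "While" are not treated as stateful themselves; instead their
// branch ("then_branch"/"else_branch") and loop ("cond"/"body") functions are
// checked recursively. Allowlisted dataset-producing ops and "Assert" are
// always considered stateless here.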
std::function<void(std::function<void()>)> RunnerWithMaxParallelism(
std::function<void(std::function<void()>)> runner, int max_parallelism) {
return std::bind(
[max_parallelism](
const std::function<void(std::function<void()>)>& runner,
std::function<void()> fn) {
std::function<void()> scoped_fn = std::bind(
[max_parallelism](const std::function<void()>& fn) {
ScopedPerThreadMaxParallelism scope(max_parallelism);
fn();
},
std::move(fn));
runner(std::move(scoped_fn));
},
std::move(runner), std::placeholders::_1);
}
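// Usage sketch (hypothetical executor; any std::function-compatible runner
// works): the returned wrapper caps intra-op parallelism for everything it
// schedules.
//
//   auto runner = RunnerWithMaxParallelism(
//       [](std::function<void()> fn) { fn(); },  // inline executor
//       /*max_parallelism=*/2);
//   runner([] { /* runs under ScopedPerThreadMaxParallelism(2) */ });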
Status DeterminismPolicy::FromString(const std::string& s,
DeterminismPolicy* out) {
DeterminismPolicy::Type type;
if (s == DeterminismPolicy::kDeterministic) {
type = DeterminismPolicy::Type::kDeterministic;
} else if (s == DeterminismPolicy::kNondeterministic) {
type = DeterminismPolicy::Type::kNondeterministic;
} else if (s == DeterminismPolicy::kDefault) {
type = DeterminismPolicy::Type::kDefault;
} else {
return errors::InvalidArgument("Unrecognized determinism policy: ", s);
}
*out = DeterminismPolicy(type);
return absl::OkStatus();
}
DeterminismPolicy::DeterminismPolicy(bool is_deterministic) {
if (is_deterministic) {
determinism_ = DeterminismPolicy::Type::kDeterministic;
} else {
determinism_ = DeterminismPolicy::Type::kNondeterministic;
}
}
std::string DeterminismPolicy::String() const {
switch (determinism_) {
case DeterminismPolicy::Type::kDeterministic:
return DeterminismPolicy::kDeterministic;
case DeterminismPolicy::Type::kNondeterministic:
return DeterminismPolicy::kNondeterministic;
case DeterminismPolicy::Type::kDefault:
return DeterminismPolicy::kDefault;
default:
LOG(ERROR) << "Unrecognized determinism value";
return "Unrecognized";
}
}
bool MatchesAnyVersion(StringPiece op_prefix, StringPiece op_to_match) {
if (!absl::StartsWith(op_to_match, op_prefix)) {
return false;
}
if (op_to_match.length() == op_prefix.length()) {
return true;
}
size_t index = op_to_match.length() - 1;
while (isdigit(op_to_match[index])) {
index--;
}
return (op_to_match[index] == 'V') && (op_prefix.length() == index);
}
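// Worked examples: MatchesAnyVersion("BatchDataset", "BatchDataset") and
// MatchesAnyVersion("BatchDataset", "BatchDatasetV2") both return true, while
// MatchesAnyVersion("BatchDataset", "BatchDatasetFoo") returns false because
// anything after the prefix must be "V" followed by digits.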
absl::flat_hash_set<string> GetExperiments() {
return GetExperiments(tsl::port::JobName(), tsl::port::TaskId(),
[](const tstring& str) { return Hash64(str); });
}
absl::flat_hash_set<string> GetExperiments(
const string& job_name, int64_t task_id,
std::function<uint64_t(const string&)> hash_func) {
absl::flat_hash_set<string> experiments;
if (job_name.empty() || task_id < 0) {
return experiments;
}
const char* opt_ins_raw_cs = std::getenv("TF_DATA_EXPERIMENT_OPT_IN");
const char* opt_outs_raw_cs = std::getenv("TF_DATA_EXPERIMENT_OPT_OUT");
string opt_ins_raw;
if (opt_ins_raw_cs != nullptr) {
opt_ins_raw = string(opt_ins_raw_cs);
}
string opt_outs_raw;
if (opt_outs_raw_cs != nullptr) {
opt_outs_raw = string(opt_outs_raw_cs);
}
auto live_experiments = DatasetExperimentRegistry::Experiments();
for (const auto& [experiment, unused] : live_experiments) {
metrics::RecordTFDataExperimentLive(experiment);
}
absl::flat_hash_set<string> opt_outs;
if (opt_outs_raw == kExperimentOptAll) {
for (const auto& pair : live_experiments) {
opt_outs.insert(pair.first);
}
metrics::RecordTFDataExperimentOptOut(kExperimentOptAll);
} else {
for (const auto& experiment :
str_util::Split(opt_outs_raw, ',', str_util::SkipEmpty())) {
opt_outs.insert(experiment);
metrics::RecordTFDataExperimentOptOut(experiment);
}
}
if (opt_ins_raw == kExperimentOptAll) {
for (const auto& pair : live_experiments) {
auto experiment = pair.first;
if (!opt_outs.contains(experiment)) {
experiments.insert(experiment);
}
}
metrics::RecordTFDataExperimentOptIn(kExperimentOptAll);
} else {
for (const auto& experiment :
str_util::Split(opt_ins_raw, ',', str_util::SkipEmpty())) {
if (!opt_outs.contains(experiment)) {
experiments.insert(experiment);
}
metrics::RecordTFDataExperimentOptIn(experiment);
}
}
if (opt_outs_raw == kExperimentOptOutAllExceptOptIn) {
metrics::RecordTFDataExperimentOptOut(kExperimentOptOutAllExceptOptIn);
return experiments;
}
for (const auto& [experiment_name, experiment_selector] : live_experiments) {
uint64_t name_hash = hash_func(strings::StrCat(job_name, experiment_name));
std::mt19937_64 rng{name_hash};
std::bernoulli_distribution d{0.5};
bool evens = d(rng);
if (experiment_selector.job_selector(name_hash) &&
experiment_selector.task_selector(task_id, evens) &&
!opt_outs.contains(experiment_name)) {
experiments.insert(experiment_name);
}
}
return experiments;
}
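// Logs the active experiment set at most once every ten minutes and records
// one metric entry per experiment.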
void LogAndRecordExperiments(const absl::flat_hash_set<string>& experiments) {
if (!experiments.empty()) {
constexpr float TEN_MINUTES = 60.0 * 10.0;
LOG_EVERY_N_SEC(INFO, TEN_MINUTES)
<< "The input pipeline is subject to the following tf.data experiments:"
<< " " << absl::StrJoin(experiments, ", ") << ". "
<< "See `go/tf-data-experiments` for more details.";
}
for (auto& experiment : experiments) {
metrics::RecordTFDataExperiment(experiment);
}
}
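// Splits graph-rewrite optimizations into enabled/disabled/default sets,
// starting from the defaults and then layering on the determinism
// ("make_sloppy"), slack, and warm-start options.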
void GetOptimizations(const Options& options,
absl::flat_hash_set<tstring>* optimizations_enabled,
absl::flat_hash_set<tstring>* optimizations_disabled,
absl::flat_hash_set<tstring>* optimizations_default) {
DefaultOptimizationGraphRewrites(options, optimizations_enabled,
optimizations_disabled,
optimizations_default);
if (!OpDeterminismRequired() &&
options.optional_deterministic_case() == Options::kDeterministic &&
!options.deterministic()) {
optimizations_enabled->insert(kMakeSloppyOpt);
}
if (options.optional_slack_case() == Options::kSlack) {
if (options.slack()) {
optimizations_enabled->insert(kSlackOpt);
} else {
optimizations_disabled->insert(kSlackOpt);
}
}
if (options.optional_warm_start_case() == Options::kWarmStart) {
if (options.warm_start()) {
optimizations_enabled->insert(kWarmStartOpt);
} else {
optimizations_disabled->insert(kWarmStartOpt);
}
}
}
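// Returns tensor.SubSlice(index), deep-copying it first if the slice is not
// aligned.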
Tensor MaybeCopySubSlice(const Tensor& tensor, int64 index) {
Tensor slice = tensor.SubSlice(index);
if (slice.IsAligned()) {
return slice;
} else {
return tensorflow::tensor::DeepCopy(slice);
}
}
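// Clears the device assignment of every node in every function of `library`.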
void StripDevicePlacement(FunctionDefLibrary* library) {
for (auto& function : (*library->mutable_function())) {
for (auto& node : (*function.mutable_node_def())) {
if (!node.device().empty()) {
*node.mutable_device() = "";
}
}
}
}
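// Copies the first `num_elements` rows of `value` into `output`, dispatching
// on dtype via HANDLE_TYPE so the chip-wise copy uses the correct element
// type.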
Status CopyPartialBatch(int64_t num_elements, const Tensor& value,
Tensor* output) {
switch (value.dtype()) {
#define HANDLE_TYPE(type) \
case DataTypeToEnum<type>::value: { \
auto output_t = output->flat_outer_dims<type>(); \
auto value_t = value.flat_outer_dims<type>(); \
    for (int64_t i = 0; i < num_elements; i++) {                    \
output_t.template chip<0>(i) = value_t.template chip<0>(i); \
} \
    return absl::OkStatus();                                        \
}
TF_CALL_DATASET_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
default:
return errors::InvalidArgument("Unsupported data type: ",
DataTypeString(value.dtype()));
}
return absl::OkStatus();
}
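// Restores a checkpointed batch: each component tensor is read back, and
// components that were saved as a partial batch are copied into a
// freshly-allocated tensor padded to `batch_size`.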
Status ReadBatch(IteratorContext* ctx, IteratorStateReader* reader,
int64_t batch_size, const string& iterator_prefix,
const string& batch_prefix, std::vector<Tensor>* batch) {
int64_t output_size;
TF_RETURN_IF_ERROR(reader->ReadScalar(
FullName(iterator_prefix,
strings::StrCat(batch_prefix, "_", kOutputSize)),
&output_size));
batch->reserve(output_size);
for (int i = 0; i < output_size; i++) {
Tensor t;
TF_RETURN_IF_ERROR(
reader->ReadTensor(ctx->flr(), FullName(iterator_prefix, batch_prefix),
strings::StrCat(kOutput, "_", i), &t));
if (t.dim_size(0) < batch_size) {
TensorShape component_shape(t.shape());
component_shape.set_dim(0, batch_size);
AllocatorAttributes attr;
attr.set_gpu_compatible(true);
Tensor new_t(ctx->allocator(attr), t.dtype(), component_shape);
TF_RETURN_IF_ERROR(CopyPartialBatch(t.dim_size(0), t, &new_t));
batch->emplace_back(std::move(new_t));
} else {
batch->emplace_back(std::move(t));
}
}
return absl::OkStatus();
}
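// Checkpoints a batch; when only `num_elements` < `batch_size` rows are
// valid, just that prefix of each component is written.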
Status WriteBatch(int64_t batch_size, int64_t num_elements,
const string& iterator_prefix, const string& batch_prefix,
IteratorStateWriter* writer, std::vector<Tensor>* batch) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
FullName(iterator_prefix,
strings::StrCat(batch_prefix, "_", kOutputSize)),
batch->size()));
for (int i = 0; i < batch->size(); i++) {
if (num_elements < batch_size) {
TF_RETURN_IF_ERROR(
writer->WriteTensor(FullName(iterator_prefix, batch_prefix),
strings::StrCat(kOutput, "_", i),
(*batch)[i].Slice(0, num_elements)));
} else {
TF_RETURN_IF_ERROR(
writer->WriteTensor(FullName(iterator_prefix, batch_prefix),
strings::StrCat(kOutput, "_", i), (*batch)[i]));
}
}
return absl::OkStatus();
}
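// Reads back a Status saved by WriteStatus: the code always, the message
// only when the code is non-OK.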
Status ReadStatus(const string& iterator_prefix, const string& prefix,
IteratorStateReader* reader, Status* status) {
int64_t code_int;
TF_RETURN_IF_ERROR(reader->ReadScalar(
FullName(iterator_prefix, strings::StrCat(prefix, "_", kCode)),
&code_int));
absl::StatusCode code = static_cast<absl::StatusCode>(code_int);
if (code != absl::StatusCode::kOk) {
tstring error_message;
TF_RETURN_IF_ERROR(reader->ReadScalar(
FullName(iterator_prefix, strings::StrCat(prefix, "_", kMessage)),
&error_message));
*status = Status(code, error_message);
} else {
*status = absl::OkStatus();
}
return absl::OkStatus();
}
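// Saves a Status as its numeric code, plus the message for non-OK statuses.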
Status WriteStatus(const string& iterator_prefix, const string& prefix,
const Status& status, IteratorStateWriter* writer) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
FullName(iterator_prefix, strings::StrCat(prefix, "_", kCode)),
static_cast<int64_t>(status.code())));
if (!status.ok()) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
FullName(iterator_prefix, strings::StrCat(prefix, "_", kMessage)),
std::string(status.message())));
}
return absl::OkStatus();
}
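// Translates a produced batch plus its Status into iterator output:
// OutOfRange errors and empty batches become end-of-sequence, other errors
// propagate, and a short final batch is either dropped (drop_remainder) or
// trimmed to `num_elements` rows per component.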
Status ProcessBatch(int64_t batch_size, int64_t num_elements,
bool drop_remainder, const Status& status,
IteratorContext* ctx, std::vector<Tensor>* output,
bool* end_of_sequence, std::vector<Tensor>* batch) {
if (num_elements == 0) {
if (status.ok() || absl::IsOutOfRange(status)) {
*end_of_sequence = true;
return absl::OkStatus();
} else {
*end_of_sequence = false;
return status;
}
}
if (!status.ok() && !absl::IsOutOfRange(status)) {
*end_of_sequence = false;
return status;
}
if (num_elements < batch_size) {
if (drop_remainder) {
*end_of_sequence = true;
return absl::OkStatus();
}
for (size_t i = 0; i < batch->size(); ++i) {
TensorShape component_shape((*batch)[i].shape());
component_shape.set_dim(0, num_elements);
AllocatorAttributes attr;
attr.set_gpu_compatible(true);
output->emplace_back(ctx->allocator(attr), (*batch)[i].dtype(),
component_shape);
if (!output->back().IsInitialized()) {
return errors::ResourceExhausted(
"Failed to allocate memory for the batch of component ", i);
}
TF_RETURN_IF_ERROR(
CopyPartialBatch(num_elements, (*batch)[i], &output->back()));
}
} else {
*output = std::move(*batch);
}
*end_of_sequence = false;
return absl::OkStatus();
}
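// Batches `batch_elements` into `out_tensors`, one output per tuple
// component. Per-element copies run on the runner threadpool when
// `parallel_copy` is set and the component totals at least 1 MiB; mismatched
// element shapes produce an InvalidArgument error.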
Status CopyBatch(AnyContext ctx,
std::vector<std::vector<Tensor>>&& batch_elements,
bool parallel_copy, std::vector<Tensor>* out_tensors) {
const size_t num_tuple_components = batch_elements.at(0).size();
out_tensors->reserve(num_tuple_components);
const int64_t num_batch_elements = batch_elements.size();
for (size_t component_index = 0; component_index < num_tuple_components;
++component_index) {
const Tensor& first_element = batch_elements.at(0)[component_index];
TensorShape first_element_shape(first_element.shape());
TensorShape batch_component_shape({num_batch_elements});
batch_component_shape.AppendShape(first_element_shape);
out_tensors->emplace_back(ctx.allocator, first_element.dtype(),
batch_component_shape);
if (!out_tensors->back().IsInitialized()) {
return errors::ResourceExhausted(
"Failed to allocate memory for the batch of component ",
component_index);
}
}
for (size_t component_index = 0; component_index < num_tuple_components;
++component_index) {
Tensor& batch_component = out_tensors->at(component_index);
const Tensor& first_element = batch_elements.at(0)[component_index];
TensorShape first_element_shape(first_element.shape());
auto copy_element_fn = [component_index, &batch_elements, &batch_component,
&first_element_shape](int index) {
if (batch_elements.at(index)[component_index].shape() !=
first_element_shape) {
return errors::InvalidArgument(
"Cannot batch tensors with different shapes in component ",
component_index, ". First element had shape ",
first_element_shape.DebugString(), " and element ", index,
" had shape ",
batch_elements.at(index)[component_index].shape().DebugString(),
".");
}
return batch_util::CopyElementToSlice(
std::move(batch_elements.at(index)[component_index]),
&batch_component, index);
};
const auto total_bytes =
first_element.AllocatedBytes() * num_batch_elements;
if (parallel_copy && total_bytes >= (1 << 20)) {
Status status;
mutex status_mu;
const auto num_threads = ctx.runner_threadpool_size;
const auto slice_size = num_batch_elements / num_threads;
int64_t offset = 0;
BlockingCounter counter(num_threads);
for (size_t i = 0; i < num_threads; ++i) {
int64_t length = slice_size;
if (i < num_batch_elements % num_threads) ++length;
(*ctx.runner)([offset, length, &status, &status_mu, &counter,
©_element_fn]() {
Status s;
for (size_t j = offset; j < offset + length; ++j) {
s.Update(copy_element_fn(j));
}
{
mutex_lock l(status_mu);
status.Update(s);
}
counter.DecrementCount();
});
offset += length;
}
counter.Wait();
TF_RETURN_IF_ERROR(status);
} else {
for (size_t i = 0; i < num_batch_elements; ++i) {
TF_RETURN_IF_ERROR(copy_element_fn(i));
}
}
}
return absl::OkStatus();
}
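// Emits rewrite configs that set the autotune attribute of each
// autotune-gated optimization to true or false, plus a slack-period config
// (scaled by the number of devices) when slack is enabled.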
absl::flat_hash_set<tstring> CreateGraphRewriteConfigs(const Options& options) {
absl::flat_hash_set<tstring> configs;
const auto& autotune_options = options.autotune_options();
std::array<tstring, 11> autotune_only_optimizations = {
kAutotuneBufferSizesOpt,
kBatchParallelizationOpt,
kDisablePrefetchLegacyAutotuneOpt,
kEnableGradientDescentOpt,
kFilterParallelizationOpt,
kMapParallelizationOpt,
kMapFusionOpt,
kSeqInterleavePrefetchOpt,
kInjectPrefetchOpt,
kInjectIoPrefetchEligibleOpt,
kInjectIoPrefetchOpt};
if (autotune_options.optional_enabled_case() == AutotuneOptions::kEnabled &&
!autotune_options.enabled()) {
for (const auto& optimization : autotune_only_optimizations) {
configs.insert(
absl::StrCat(optimization.data(), ":", kAutotuneOpt, ":false"));
}
} else {
for (const auto& optimization : autotune_only_optimizations) {
configs.insert(
absl::StrCat(optimization.data(), ":", kAutotuneOpt, ":true"));
}
}
if (options.slack()) {
int num_devices = 1;
if (options.distribute_options().optional_num_devices_case() ==
DistributeOptions::kNumDevices) {
num_devices = options.distribute_options().num_devices();
}
configs.insert(
absl::StrCat(kSlackOpt, ":", kSlackPeriodOpt, ":", num_devices));
}
return configs;
}
bool ShouldConfigureMaxIntraOpParallelism(const Options& options) {
return options.threading_options().optional_max_intra_op_parallelism_case() ==
ThreadingOptions::kMaxIntraOpParallelism;
}
bool ShouldUsePrivateThreadPool(const Options& options) {
return options.threading_options().optional_private_threadpool_size_case() ==
ThreadingOptions::kPrivateThreadpoolSize;
}
bool ShouldUseAutotuning(const Options& options) {
return options.autotune_options().optional_enabled_case() !=
AutotuneOptions::kEnabled ||
options.autotune_options().enabled();
}
bool ShouldApplyOptimizations(
const Options& options,
const absl::flat_hash_set<tstring>& optimizations_enabled,
const absl::flat_hash_set<tstring>& optimizations_default) {
return (options.optimization_options()
.optional_apply_default_optimizations_case() !=
OptimizationOptions::kApplyDefaultOptimizations ||
options.optimization_options().apply_default_optimizations() ||
!optimizations_enabled.empty() || !optimizations_default.empty());
}
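// Initial parallelism for autotuning: the configured
// autotune_options().initial_parallelism() when positive, else 16, capped by
// the runner threadpool size.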
int64 GetAutotuneDefaultParallelism(IteratorContext* ctx) {
int64_t initial_parallelism = 16;
if (ctx->options()) {
int64_t initial_parallelism_option =
ctx->options()->autotune_options().initial_parallelism();
if (initial_parallelism_option > 0) {
initial_parallelism = initial_parallelism_option;
}
}
int64_t runner_threadpool_size = ctx->runner_threadpool_size();
int64_t value = std::min(initial_parallelism, runner_threadpool_size);
return value;
}
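// Copies `ctx` without its split providers so nested iterators do not
// re-split the input; returns `ctx` as-is when there are none.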
IteratorContext MakeNestedIteratorContext(IteratorContext* ctx) {
if (ctx->split_providers().empty()) {
return *ctx;
}
IteratorContext::Params params(ctx);
params.split_providers.clear();
return IteratorContext(std::move(params));
}
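// Registers an experiment's job/task selectors under the global registry
// lock; Experiments() below returns a copy of the registry.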
void DatasetExperimentRegistry::Register(const string& experiment,
JobSelector job_selector,
TaskSelector task_selector) {
mutex_lock l(*get_dataset_experiment_registry_lock());
get_dataset_experiments()->insert(
std::make_pair(experiment, DatasetExperimentRegistry::ExperimentSelector{
job_selector, task_selector}));
}
absl::flat_hash_map<string, DatasetExperimentRegistry::ExperimentSelector>
DatasetExperimentRegistry::Experiments() {
mutex_lock l(*get_dataset_experiment_registry_lock());
return *get_dataset_experiments();
}
bool AllTasks(int64_t unused_task_id, bool unused_evens) { return true; }
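// Partitions tasks by bit 1 of the task id; `evens` selects the tasks with
// the bit clear, otherwise those with it set.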
bool IndependentHostTasks(int64_t task_id, bool evens) {
int64_t lhs = task_id & 0x2;
int64_t rhs = 0x2;
return evens ? lhs != rhs : lhs == rhs;
}
namespace {
REGISTER_DATASET_EXPERIMENT("noop_task_level", RandomJobSamplePercentage<50>,
IndependentHostTasks);
REGISTER_DATASET_EXPERIMENT("noop_job_level", RandomJobSamplePercentage<50>,
AllTasks);
REGISTER_DATASET_EXPERIMENT("allow_small_function_optimizations",
RandomJobSamplePercentage<0>, AllTasks);
REGISTER_DATASET_EXPERIMENT("autotune_buffer_optimization",
RandomJobSamplePercentage<0>, IndependentHostTasks);
REGISTER_DATASET_EXPERIMENT(kFilterParallelizationOpt,
RandomJobSamplePercentage<0>, AllTasks);
REGISTER_DATASET_EXPERIMENT("min_outer_interleave_parallelism",
RandomJobSamplePercentage<0>, AllTasks);
REGISTER_DATASET_EXPERIMENT("reduce_interleave_prefetch",
RandomJobSamplePercentage<0>, AllTasks);
REGISTER_DATASET_EXPERIMENT("serialize_input_cycle_length",
RandomJobSamplePercentage<0>, AllTasks);
REGISTER_DATASET_EXPERIMENT("stage_based_autotune",
RandomJobSamplePercentage<0>, IndependentHostTasks);
REGISTER_DATASET_EXPERIMENT("stage_based_autotune_v2",
RandomJobSamplePercentage<0>, IndependentHostTasks);
REGISTER_DATASET_EXPERIMENT("data_transfer", RandomJobSamplePercentage<0>,
AllTasks);
REGISTER_DATASET_EXPERIMENT("file_locality", RandomJobSamplePercentage<0>,
AllTasks);
REGISTER_DATASET_EXPERIMENT("file_locality_v2", RandomJobSamplePercentage<0>,
AllTasks);
REGISTER_DATASET_EXPERIMENT("no_compression", RandomJobSamplePercentage<0>,
AllTasks);
REGISTER_DATASET_EXPERIMENT("no_compression_v2", RandomJobSamplePercentage<0>,
AllTasks);
REGISTER_DATASET_EXPERIMENT("inject_io_prefetch", RandomJobSamplePercentage<0>,
AllTasks);
REGISTER_DATASET_EXPERIMENT("map_fusion", RandomJobSamplePercentage<0>,
IndependentHostTasks);
}
}
} | #include "tensorflow/core/data/dataset_utils.h"
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "xla/tsl/util/determinism_test_util.h"
#include "tensorflow/core/data/compression_utils.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/data/test_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/util/work_sharder.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
TEST(DatasetUtilsTest, MatchesAnyVersion) {
EXPECT_TRUE(MatchesAnyVersion("BatchDataset", "BatchDataset"));
EXPECT_TRUE(MatchesAnyVersion("BatchDataset", "BatchDatasetV2"));
EXPECT_TRUE(MatchesAnyVersion("BatchDataset", "BatchDatasetV3"));
EXPECT_FALSE(MatchesAnyVersion("BatchDataset", "BatchDatasetXV3"));
EXPECT_FALSE(MatchesAnyVersion("BatchDataset", "BatchV2Dataset"));
EXPECT_FALSE(MatchesAnyVersion("BatchDataset", "PaddedBatchDataset"));
}
TEST(DatasetUtilsTest, AddToFunctionLibrary) {
auto make_fn_a = [](const string& fn_name) {
return FunctionDefHelper::Create(
fn_name,
{"arg: int64"},
{"ret: int64"},
{},
{{{"node"}, "Identity", {"arg"}, {{"T", DT_INT64}}}},
{{"ret", "node:output:0"}});
};
auto make_fn_b = [](const string& fn_name) {
return FunctionDefHelper::Create(
fn_name,
{"arg: int64"},
{"ret: int64"},
{},
{{{"node"}, "Identity", {"arg"}, {{"T", DT_INT64}}},
{{"node2"}, "Identity", {"node:output:0"}, {{"T", DT_INT64}}}},
{{"ret", "node2:output:0"}});
};
FunctionDefLibrary fdef_base;
*fdef_base.add_function() = make_fn_a("0");
*fdef_base.add_function() = make_fn_a("1");
*fdef_base.add_function() = make_fn_a("2");
FunctionDefLibrary fdef_to_add;
*fdef_to_add.add_function() = make_fn_b("0");
*fdef_to_add.add_function() = make_fn_a("1");
*fdef_to_add.add_function() = make_fn_b("3");
FunctionLibraryDefinition flib_0(OpRegistry::Global(), fdef_base);
TF_ASSERT_OK(AddToFunctionLibrary(&flib_0, fdef_to_add));
FunctionLibraryDefinition flib_1(OpRegistry::Global(), fdef_base);
FunctionLibraryDefinition flib_to_add(OpRegistry::Global(), fdef_to_add);
TF_ASSERT_OK(AddToFunctionLibrary(&flib_1, flib_to_add));
for (const auto& flib : {flib_0, flib_1}) {
EXPECT_TRUE(FunctionDefsEqual(*flib.Find("0"), make_fn_b("0")));
EXPECT_TRUE(FunctionDefsEqual(*flib.Find("1"), make_fn_a("1")));
EXPECT_TRUE(FunctionDefsEqual(*flib.Find("2"), make_fn_a("2")));
EXPECT_TRUE(FunctionDefsEqual(*flib.Find("3"), make_fn_b("3")));
}
}
TEST(DatasetUtilsTest, AddToFunctionLibraryWithConflictingSignatures) {
FunctionDefLibrary fdef_base;
*fdef_base.add_function() = FunctionDefHelper::Create(
"0",
{"arg: int64"},
{"ret: int64"},
{},
{},
{{"ret", "arg"}});
FunctionDefLibrary fdef_to_add;
*fdef_to_add.add_function() = FunctionDefHelper::Create(
"0",
{"arg: int64"},
{"ret: int64", "ret2: int64"},
{},
{},
{{"ret", "arg"}, {"ret2", "arg"}});
FunctionLibraryDefinition flib_0(OpRegistry::Global(), fdef_base);
Status s = AddToFunctionLibrary(&flib_0, fdef_to_add);
EXPECT_EQ(error::Code::INVALID_ARGUMENT, s.code());
EXPECT_EQ(
"Cannot add function '0' because a different function with the same "
"signature already exists.",
s.message());
FunctionLibraryDefinition flib_1(OpRegistry::Global(), fdef_base);
FunctionLibraryDefinition flib_to_add(OpRegistry::Global(), fdef_to_add);
s = AddToFunctionLibrary(&flib_1, flib_to_add);
EXPECT_EQ(error::Code::INVALID_ARGUMENT, s.code());
EXPECT_EQ(
"Cannot add function '0' because a different function with the same "
"signature already exists.",
s.message());
}
TEST(DatasetUtilsTest, StripDevicePlacement) {
FunctionDefLibrary flib;
*flib.add_function() = FunctionDefHelper::Create(
"0",
{"arg: int64"},
{"ret: int64"},
{},
{{{"node"},
"Identity",
{"arg"},
{{"T", DT_INT64}},
{},
"device:CPU:0"}},
{{"ret", "arg"}});
EXPECT_EQ(flib.function(0).node_def(0).device(), "device:CPU:0");
StripDevicePlacement(&flib);
EXPECT_EQ(flib.function(0).node_def(0).device(), "");
}
TEST(DatasetUtilsTest, RunnerWithMaxParallelism) {
auto runner =
RunnerWithMaxParallelism([](const std::function<void()> fn) { fn(); }, 2);
auto fn = []() { ASSERT_EQ(GetPerThreadMaxParallelism(), 2); };
runner(fn);
}
TEST(DatasetUtilsTest, ParseDeterminismPolicy) {
DeterminismPolicy determinism;
TF_ASSERT_OK(DeterminismPolicy::FromString("true", &determinism));
EXPECT_TRUE(determinism.IsDeterministic());
TF_ASSERT_OK(DeterminismPolicy::FromString("false", &determinism));
EXPECT_TRUE(determinism.IsNondeterministic());
TF_ASSERT_OK(DeterminismPolicy::FromString("default", &determinism));
EXPECT_TRUE(determinism.IsDefault());
}
TEST(DatasetUtilsTest, DeterminismString) {
for (auto s : {"true", "false", "default"}) {
DeterminismPolicy determinism;
TF_ASSERT_OK(DeterminismPolicy::FromString(s, &determinism));
    EXPECT_EQ(s, determinism.String());
}
}
TEST(DatasetUtilsTest, BoolConstructor) {
EXPECT_TRUE(DeterminismPolicy(true).IsDeterministic());
EXPECT_FALSE(DeterminismPolicy(true).IsNondeterministic());
EXPECT_FALSE(DeterminismPolicy(true).IsDefault());
EXPECT_TRUE(DeterminismPolicy(false).IsNondeterministic());
EXPECT_FALSE(DeterminismPolicy(false).IsDeterministic());
EXPECT_FALSE(DeterminismPolicy(false).IsDefault());
}
class TestSplitProvider : public SplitProvider {
public:
Status GetNext(Tensor* split, bool* end_of_splits) override {
return absl::OkStatus();
}
Status Reset() override { return absl::OkStatus(); }
Status Save(std::function<std::string(std::string)> key_name_fn,
IteratorStateWriter* writer) override {
return absl::OkStatus();
}
Status Restore(std::function<std::string(std::string)> key_name_fn,
IteratorStateReader* reader) override {
return absl::OkStatus();
}
};
TEST(DatasetUtilsTest, MakeNestedIteratorContext) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<TestContext> test_ctx,
TestContext::Create());
IteratorContext::Params params(test_ctx->op_ctx());
params.split_providers.push_back(std::make_unique<TestSplitProvider>());
IteratorContext iter_ctx(params);
IteratorContext nested_ctx = MakeNestedIteratorContext(&iter_ctx);
EXPECT_FALSE(iter_ctx.split_providers().empty());
EXPECT_TRUE(nested_ctx.split_providers().empty());
}
REGISTER_DATASET_EXPERIMENT("test_only_experiment_0",
RandomJobSamplePercentage<0>, AllTasks);
REGISTER_DATASET_EXPERIMENT("test_only_experiment_1",
RandomJobSamplePercentage<1>, AllTasks);
REGISTER_DATASET_EXPERIMENT("test_only_experiment_5",
RandomJobSamplePercentage<5>, AllTasks);
REGISTER_DATASET_EXPERIMENT("test_only_experiment_10",
RandomJobSamplePercentage<10>, AllTasks);
REGISTER_DATASET_EXPERIMENT("test_only_experiment_50",
RandomJobSamplePercentage<50>, AllTasks);
REGISTER_DATASET_EXPERIMENT("test_only_experiment_99",
RandomJobSamplePercentage<99>, AllTasks);
REGISTER_DATASET_EXPERIMENT("test_only_experiment_100",
RandomJobSamplePercentage<100>, AllTasks);
REGISTER_DATASET_EXPERIMENT("test_only_task_experiment_100",
RandomJobSamplePercentage<100>,
IndependentHostTasks);
struct GetExperimentsHashTestCase {
uint64 hash;
std::vector<string> expected_in;
std::vector<string> expected_out;
};
class GetExperimentsHashTest
: public ::testing::TestWithParam<GetExperimentsHashTestCase> {};
TEST_P(GetExperimentsHashTest, DatasetUtils) {
const GetExperimentsHashTestCase test_case = GetParam();
uint64 hash_result = test_case.hash;
const std::string job_name = "job";
const int64_t task_id = 0;
auto hash_func = [hash_result](const string& str) { return hash_result; };
auto experiments = GetExperiments(job_name, task_id, hash_func);
absl::flat_hash_set<string> experiment_set(experiments.begin(),
experiments.end());
for (const auto& experiment : test_case.expected_in) {
EXPECT_TRUE(experiment_set.find(experiment) != experiment_set.end())
<< "experiment=" << experiment << " hash=" << hash_result;
}
for (const auto& experiment : test_case.expected_out) {
EXPECT_TRUE(experiment_set.find(experiment) == experiment_set.end())
<< "experiment=" << experiment << " hash=" << hash_result;
}
}
INSTANTIATE_TEST_SUITE_P(
Test, GetExperimentsHashTest,
::testing::Values<GetExperimentsHashTestCase>(
GetExperimentsHashTestCase{
0,
{"test_only_experiment_1", "test_only_experiment_5",
"test_only_experiment_10", "test_only_experiment_50",
"test_only_experiment_99", "test_only_experiment_100"},
{"test_only_experiment_0"},
},
GetExperimentsHashTestCase{
5,
{"test_only_experiment_10", "test_only_experiment_50",
"test_only_experiment_99", "test_only_experiment_100"},
{
"test_only_experiment_0",
"test_only_experiment_1",
"test_only_experiment_5",
},
},
GetExperimentsHashTestCase{
95,
{"test_only_experiment_99", "test_only_experiment_100"},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50"},
},
GetExperimentsHashTestCase{
99,
{"test_only_experiment_100"},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_experiment_99"},
},
GetExperimentsHashTestCase{
100,
{"test_only_experiment_1", "test_only_experiment_5",
"test_only_experiment_10", "test_only_experiment_50",
"test_only_experiment_99", "test_only_experiment_100"},
{"test_only_experiment_0"},
},
GetExperimentsHashTestCase{
105,
{"test_only_experiment_10", "test_only_experiment_50",
"test_only_experiment_99", "test_only_experiment_100"},
{
"test_only_experiment_0",
"test_only_experiment_1",
"test_only_experiment_5",
},
},
GetExperimentsHashTestCase{
195,
{"test_only_experiment_99", "test_only_experiment_100"},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50"},
}));
struct GetExperimentsOptTestCase {
std::vector<string> opt_ins;
std::vector<string> opt_outs;
std::vector<string> expected_in;
std::vector<string> expected_out;
};
class GetExperimentsOptTest
: public ::testing::TestWithParam<GetExperimentsOptTestCase> {};
TEST_P(GetExperimentsOptTest, DatasetUtils) {
const GetExperimentsOptTestCase test_case = GetParam();
auto opt_ins = test_case.opt_ins;
auto opt_outs = test_case.opt_outs;
if (!opt_ins.empty()) {
setenv("TF_DATA_EXPERIMENT_OPT_IN", absl::StrJoin(opt_ins, ",").c_str(), 1);
}
if (!opt_outs.empty()) {
setenv("TF_DATA_EXPERIMENT_OPT_OUT", absl::StrJoin(opt_outs, ",").c_str(),
1);
}
const std::string job_name = "job";
const int64_t task_id = 0;
auto hash_func = [](const string& str) { return 0; };
auto experiments = GetExperiments(job_name, task_id, hash_func);
absl::flat_hash_set<string> experiment_set(experiments.begin(),
experiments.end());
for (const auto& experiment : test_case.expected_in) {
EXPECT_TRUE(experiment_set.find(experiment) != experiment_set.end())
<< "experiment=" << experiment << " opt_ins={"
<< absl::StrJoin(opt_ins, ",") << "} opt_outs={"
<< absl::StrJoin(opt_outs, ",") << "}";
}
for (const auto& experiment : test_case.expected_out) {
EXPECT_TRUE(experiment_set.find(experiment) == experiment_set.end())
<< "experiment=" << experiment << " opt_ins={"
<< absl::StrJoin(opt_ins, ",") << "} opt_outs={"
<< absl::StrJoin(opt_outs, ",") << "}";
}
if (!opt_ins.empty()) {
unsetenv("TF_DATA_EXPERIMENT_OPT_IN");
}
if (!opt_outs.empty()) {
unsetenv("TF_DATA_EXPERIMENT_OPT_OUT");
}
}
INSTANTIATE_TEST_SUITE_P(
Test, GetExperimentsOptTest,
::testing::Values<GetExperimentsOptTestCase>(
GetExperimentsOptTestCase{
{"all"},
{"all"},
{},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_experiment_99",
"test_only_experiment_100"}},
GetExperimentsOptTestCase{
{"all"},
{},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_experiment_99",
"test_only_experiment_100"},
{}},
GetExperimentsOptTestCase{
{"all"},
{"test_only_experiment_1", "test_only_experiment_99"},
{"test_only_experiment_0", "test_only_experiment_5",
"test_only_experiment_10", "test_only_experiment_50",
"test_only_experiment_100"},
{"test_only_experiment_1", "test_only_experiment_99"}},
GetExperimentsOptTestCase{
{},
{"all"},
{},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_experiment_99",
"test_only_experiment_100"}},
GetExperimentsOptTestCase{
{},
{},
{"test_only_experiment_1", "test_only_experiment_5",
"test_only_experiment_10", "test_only_experiment_50",
"test_only_experiment_99", "test_only_experiment_100"},
{"test_only_experiment_0"}},
GetExperimentsOptTestCase{
{},
{"test_only_experiment_1", "test_only_experiment_99"},
{"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_experiment_100"},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_99"}},
GetExperimentsOptTestCase{
{"test_only_experiment_0", "test_only_experiment_100"},
{"all"},
{},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_experiment_99",
"test_only_experiment_100"}},
GetExperimentsOptTestCase{
{"test_only_experiment_0", "test_only_experiment_100"},
{"all_except_opt_in"},
{"test_only_experiment_0", "test_only_experiment_100"},
{"test_only_experiment_1", "test_only_experiment_5",
"test_only_experiment_10", "test_only_experiment_50",
"test_only_experiment_99"}},
GetExperimentsOptTestCase{
{"test_only_experiment_0", "test_only_experiment_100"},
{},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_experiment_99",
"test_only_experiment_100"},
{}},
GetExperimentsOptTestCase{
{"test_only_experiment_0", "test_only_experiment_100"},
{"test_only_experiment_1", "test_only_experiment_99"},
{"test_only_experiment_0", "test_only_experiment_5",
"test_only_experiment_10", "test_only_experiment_50",
"test_only_experiment_100"},
{"test_only_experiment_1", "test_only_experiment_99"}}));
struct GetExperimentsJobNameTestCase {
uint64_t hash;
string job_name;
int64_t task_id;
std::vector<string> expected_in;
std::vector<string> expected_out;
};
class GetExperimentsJobNameTest
: public ::testing::TestWithParam<GetExperimentsJobNameTestCase> {};
TEST_P(GetExperimentsJobNameTest, DatasetUtils) {
const GetExperimentsJobNameTestCase test_case = GetParam();
auto job_name = test_case.job_name;
auto task_id = test_case.task_id;
uint64 hash_result = test_case.hash;
auto hash_func = [hash_result](const string& str) { return hash_result; };
auto experiments = GetExperiments(job_name, task_id, hash_func);
absl::flat_hash_set<string> experiment_set(experiments.begin(),
experiments.end());
for (const auto& experiment : test_case.expected_in) {
EXPECT_TRUE(experiment_set.find(experiment) != experiment_set.end())
<< "experiment=" << experiment << " job_name=" << job_name;
}
for (const auto& experiment : test_case.expected_out) {
EXPECT_TRUE(experiment_set.find(experiment) == experiment_set.end())
<< "experiment=" << experiment << " job_name=" << job_name;
}
}
INSTANTIATE_TEST_SUITE_P(
Test, GetExperimentsJobNameTest,
::testing::Values(
GetExperimentsJobNameTestCase{
0,
"",
0,
{},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_experiment_99",
"test_only_experiment_100", "test_only_task_experiment_100"}},
GetExperimentsJobNameTestCase{
0,
"",
-1,
{},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_experiment_99",
"test_only_experiment_100", "test_only_task_experiment_100"}},
GetExperimentsJobNameTestCase{
0,
"",
2,
{},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_experiment_99",
"test_only_experiment_100", "test_only_task_experiment_100"}},
GetExperimentsJobNameTestCase{
0,
"job_name",
-1,
{},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_experiment_99",
"test_only_experiment_100", "test_only_task_experiment_100"}},
GetExperimentsJobNameTestCase{
0,
"job_name",
0,
{"test_only_experiment_1", "test_only_experiment_5",
"test_only_experiment_10", "test_only_experiment_50",
"test_only_experiment_99", "test_only_experiment_100",
"test_only_task_experiment_100"},
{"test_only_experiment_0"}},
GetExperimentsJobNameTestCase{
0,
"job_name",
1,
{"test_only_experiment_1", "test_only_experiment_5",
"test_only_experiment_10", "test_only_experiment_50",
"test_only_experiment_99", "test_only_experiment_100",
"test_only_task_experiment_100"},
{"test_only_experiment_0"}},
GetExperimentsJobNameTestCase{
0,
"job_name",
2,
{"test_only_experiment_1", "test_only_experiment_5",
"test_only_experiment_10", "test_only_experiment_50",
"test_only_experiment_99", "test_only_experiment_100"},
{"test_only_experiment_0", "test_only_task_experiment_100"}},
GetExperimentsJobNameTestCase{
95,
"job_name",
1,
{"test_only_experiment_99", "test_only_experiment_100"},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_task_experiment_100"}},
GetExperimentsJobNameTestCase{
95,
"job_name",
2,
{"test_only_experiment_99", "test_only_experiment_100",
"test_only_task_experiment_100"},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50"}}));
struct GetOptimizationsTestCase {
Options options;
std::vector<string> expected_enabled;
std::vector<string> expected_disabled;
std::vector<string> expected_default;
};
GetOptimizationsTestCase GetOptimizationTestCase1() {
return {
Options(),
{},
{},
{"noop_elimination", "map_and_batch_fusion", "shuffle_and_repeat_fusion",
"map_parallelization", "parallel_batch", "inject_prefetch"}};
}
GetOptimizationsTestCase GetOptimizationTestCase2() {
Options options;
options.mutable_optimization_options()->set_apply_default_optimizations(
false);
  return {options, {}, {}, {}};
}
GetOptimizationsTestCase GetOptimizationTestCase3() {
Options options;
options.set_deterministic(false);
options.mutable_optimization_options()->set_map_and_batch_fusion(true);
options.mutable_optimization_options()->set_map_parallelization(false);
options.mutable_optimization_options()->set_parallel_batch(false);
return {options,
{"make_sloppy", "map_and_batch_fusion"},
{"parallel_batch", "map_parallelization"},
{"noop_elimination", "shuffle_and_repeat_fusion", "inject_prefetch"}};
}
GetOptimizationsTestCase GetOptimizationTestCase4() {
Options options;
options.set_deterministic(false);
options.mutable_optimization_options()->set_filter_fusion(true);
options.mutable_optimization_options()->set_filter_parallelization(true);
options.mutable_optimization_options()->set_map_and_batch_fusion(true);
options.mutable_optimization_options()->set_map_and_filter_fusion(true);
options.mutable_optimization_options()->set_map_fusion(true);
options.mutable_optimization_options()->set_map_parallelization(true);
options.mutable_optimization_options()->set_noop_elimination(true);
options.mutable_optimization_options()->set_parallel_batch(true);
options.mutable_optimization_options()->set_shuffle_and_repeat_fusion(true);
options.mutable_optimization_options()->set_inject_prefetch(true);
options.mutable_optimization_options()->set_seq_interleave_prefetch(true);
options.set_slack(true);
return {options,
{"filter_fusion", "filter_parallelization", "make_sloppy",
"map_and_batch_fusion", "map_and_filter_fusion", "map_fusion",
"map_parallelization", "noop_elimination", "parallel_batch",
"shuffle_and_repeat_fusion", "slack", "inject_prefetch",
"seq_interleave_prefetch"},
{},
{}};
}
class GetOptimizationsTest
: public ::testing::TestWithParam<GetOptimizationsTestCase> {};
TEST_P(GetOptimizationsTest, DatasetUtils) {
const GetOptimizationsTestCase test_case = GetParam();
auto options = test_case.options;
absl::flat_hash_set<tstring> actual_enabled, actual_disabled, actual_default;
GetOptimizations(options, &actual_enabled, &actual_disabled, &actual_default);
EXPECT_THAT(std::vector<string>(actual_enabled.begin(), actual_enabled.end()),
::testing::UnorderedElementsAreArray(test_case.expected_enabled));
EXPECT_THAT(
std::vector<string>(actual_disabled.begin(), actual_disabled.end()),
::testing::UnorderedElementsAreArray(test_case.expected_disabled));
EXPECT_THAT(std::vector<string>(actual_default.begin(), actual_default.end()),
::testing::UnorderedElementsAreArray(test_case.expected_default));
}
INSTANTIATE_TEST_SUITE_P(Test, GetOptimizationsTest,
::testing::Values(GetOptimizationTestCase1(),
GetOptimizationTestCase2(),
GetOptimizationTestCase3(),
GetOptimizationTestCase4()));
TEST(DeterministicOpsTest, GetOptimizations) {
#if !defined(__APPLE__)
tsl::test::DeterministicOpsScope det_scope;
Options options;
options.set_deterministic(false);
absl::flat_hash_set<tstring> actual_enabled, actual_disabled, actual_default;
GetOptimizations(options, &actual_enabled, &actual_disabled, &actual_default);
EXPECT_THAT(std::vector<string>(actual_enabled.begin(), actual_enabled.end()),
::testing::UnorderedElementsAreArray({"make_deterministic"}));
EXPECT_EQ(actual_disabled.size(), 0);
#endif
}
REGISTER_DATASET_EXPERIMENT("test_only_experiment",
RandomJobSamplePercentage<42>, AllTasks);
TEST(DatasetUtilsTest, DatasetExperimentRegistry) {
auto experiments = DatasetExperimentRegistry::Experiments();
EXPECT_TRUE(experiments.find("test_only_experiment") != experiments.end());
EXPECT_TRUE(experiments.find("non_existing_experiment") == experiments.end());
}
TEST(DatasetUtilsTest, CountBytes) {
std::vector<Tensor> uncompressed = {
CreateTensor<int64_t>(TensorShape{128, 2}),
CreateTensor<int64_t>(TensorShape{64, 4})};
EXPECT_EQ(GetAllocatedBytes(uncompressed), 4096);
EXPECT_EQ(GetTotalBytes(uncompressed), 4096);
CompressedElement compressed_element;
TF_ASSERT_OK(CompressElement(uncompressed, &compressed_element));
std::vector<Tensor> compressed{{DT_VARIANT, TensorShape({})}};
compressed.front().scalar<Variant>()() = compressed_element;
EXPECT_EQ(GetAllocatedBytes(compressed), compressed_element.ByteSizeLong());
EXPECT_EQ(GetTotalBytes(compressed), compressed_element.ByteSizeLong());
}
TEST_F(DatasetOpsTestBase, TestVariantEqualityChecking) {
Tensor scalar_0{DT_VARIANT, TensorShape({})};
scalar_0.scalar<Variant>()() = TestVariant({CreateTensor<int64_t>({}, {0})});
TF_EXPECT_OK(ExpectEqual(scalar_0, scalar_0));
Tensor scalar_1{DT_VARIANT, TensorShape({})};
scalar_1.scalar<Variant>()() = TestVariant({CreateTensor<int64_t>({}, {1})});
EXPECT_THAT(ExpectEqual(scalar_0, scalar_1),
StatusIs(tsl::error::INTERNAL, HasSubstr("aren't equal")));
Tensor nonscalar{DT_VARIANT, TensorShape({2})};
EXPECT_THAT(ExpectEqual(nonscalar, nonscalar),
StatusIs(tsl::error::INTERNAL, HasSubstr("must be scalars")));
Tensor unsupported{DT_VARIANT, TensorShape({})};
unsupported.scalar<Variant>()() = 0;
EXPECT_THAT(ExpectEqual(unsupported, unsupported),
StatusIs(tsl::error::INTERNAL, HasSubstr("types must be")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/dataset_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/dataset_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fcc9cb62-4002-4ced-aa88-1c3431b4b183 | cpp | tensorflow/tensorflow | unbounded_thread_pool | tensorflow/core/data/unbounded_thread_pool.cc | tensorflow/core/data/unbounded_thread_pool_test.cc | #include "tensorflow/core/data/unbounded_thread_pool.h"
#include <functional>
#include <memory>
#include <utility>
#include "absl/memory/memory.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/resource.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
namespace tensorflow {
namespace data {
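// A Thread handle for work scheduled on the shared UnboundedWorkQueue; its
// destructor blocks until the work item signals `done_`, mimicking a join.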
class UnboundedThreadPool::LogicalThreadWrapper : public Thread {
public:
explicit LogicalThreadWrapper(std::shared_ptr<Notification> done)
: done_(std::move(done)) {}
~LogicalThreadWrapper() override {
done_->WaitForNotification();
}
private:
std::shared_ptr<Notification> done_;
};
class UnboundedThreadPool::LogicalThreadFactory : public ThreadFactory {
public:
explicit LogicalThreadFactory(UnboundedThreadPool* pool) : pool_(pool) {}
std::unique_ptr<Thread> StartThread(const string& name,
std::function<void()> fn) override {
auto done = std::make_shared<Notification>();
pool_->ScheduleOnWorkQueue(std::move(fn), done);
return std::make_unique<LogicalThreadWrapper>(std::move(done));
}
private:
UnboundedThreadPool* const pool_;
};
std::shared_ptr<ThreadFactory> UnboundedThreadPool::get_thread_factory() {
return std::make_shared<LogicalThreadFactory>(this);
}
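// Tags the scheduled work with the tf.data resource tag before handing it to
// the work queue; fire-and-forget, with no completion notification.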
void UnboundedThreadPool::Schedule(std::function<void()> fn) {
auto tagged_fn = [fn = std::move(fn)]() {
tensorflow::ResourceTagger tag(kTFDataResourceTag, "ThreadPool");
fn();
};
ScheduleOnWorkQueue(std::move(tagged_fn), nullptr);
}
int UnboundedThreadPool::NumThreads() const { return -1; }
int UnboundedThreadPool::CurrentThreadId() const { return -1; }
namespace {
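// Trampoline run on the work queue: invokes `fn` and notifies `done` (when
// present) so any LogicalThreadWrapper waiting on it can destruct.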
void WorkQueueFunc(const std::function<void()>& fn,
std::shared_ptr<Notification> done) {
fn();
if (done) {
done->Notify();
}
}
}
void UnboundedThreadPool::ScheduleOnWorkQueue(
std::function<void()> fn, std::shared_ptr<Notification> done) {
unbounded_work_queue_.Schedule(
std::bind(&WorkQueueFunc, std::move(fn), std::move(done)));
}
}
} | #include "tensorflow/core/data/unbounded_thread_pool.h"
#include <atomic>
#include <memory>
#include <vector>
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
TEST(UnboundedThreadPool, ConcurrentThreadCreation) {
UnboundedThreadPool pool(Env::Default(), "test");
auto thread_factory = pool.get_thread_factory();
std::vector<std::unique_ptr<Thread>> threads;
const int kNumThreadsToCreate = 10;
std::atomic<int> i(0);
for (int j = 0; j < kNumThreadsToCreate; ++j) {
threads.push_back(thread_factory->StartThread("", [=, &i,
&thread_factory]() {
std::vector<std::unique_ptr<Thread>> nested_threads;
for (int k = 0; k < kNumThreadsToCreate; ++k) {
nested_threads.push_back(
thread_factory->StartThread("", [&i]() { ++i; }));
}
nested_threads.clear();
}));
}
threads.clear();
EXPECT_EQ(i, kNumThreadsToCreate * kNumThreadsToCreate);
}
TEST(UnboundedThreadPool, MultipleBlockingThreads) {
UnboundedThreadPool pool(Env::Default(), "test");
auto thread_factory = pool.get_thread_factory();
std::vector<std::unique_ptr<Thread>> threads;
std::vector<int> round_sizes = {5, 10, 15, 20};
for (const int round_size : round_sizes) {
Notification n;
BlockingCounter bc(round_size);
for (int j = 0; j < round_size; ++j) {
threads.push_back(thread_factory->StartThread("", [&bc, &n]() {
bc.DecrementCount();
n.WaitForNotification();
}));
}
bc.Wait();
n.Notify();
threads.clear();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/unbounded_thread_pool.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/unbounded_thread_pool_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
37abeb31-83e0-4027-b89c-e603f6e35271 | cpp | tensorflow/tensorflow | standalone | tensorflow/core/data/standalone.cc | tensorflow/core/data/standalone_test.cc | #include "tensorflow/core/data/standalone.h"
#include <algorithm>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/root_dataset.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/data/tf_data_memory_logger.h"
#include "tensorflow/core/data/tfdataz_metrics.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_handle_cache.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/version.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/refcount.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
namespace standalone {
namespace {
OpKernelContext::Params CreateParams(
ProcessFunctionLibraryRuntime* pflr, DeviceMgr* device_mgr,
std::function<void(std::function<void()>)>* runner) {
OpKernelContext::Params params;
params.function_library = pflr->GetFLR("/device:CPU:0");
params.device = device_mgr->ListDevices()[0];
params.runner = runner;
return params;
}
}
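// For dataset-backed iterators, registers tfdataz metrics collection and
// starts the iterator memory logger.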
Iterator::Iterator(IteratorBase* iterator, IteratorContext* ctx,
SerializationContext* serialization_ctx)
: iterator_(iterator), ctx_(ctx), serialization_ctx_(serialization_ctx) {
if (DatasetBaseIterator* dataset_iterator =
dynamic_cast<DatasetBaseIterator*>(iterator_.get())) {
tf_dataz_metrics_collector_ = std::make_shared<TfDatazMetricsCollector>(
*Env::Default(), dataset_iterator, ctx_->model());
TfDatazMetricsRegistry::Register(tf_dataz_metrics_collector_);
EnsureIteratorMemoryLoggerStarted();
}
}
Iterator::~Iterator() {
if (tf_dataz_metrics_collector_) {
TfDatazMetricsRegistry::Deregister(tf_dataz_metrics_collector_);
}
}
Status Iterator::GetNext(std::vector<Tensor>* outputs, bool* end_of_input) {
return iterator_->GetNext(ctx_.get(), outputs, end_of_input);
}
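// Serializes iterator state into DT_VARIANT tensors, one
// IteratorStateVariant per chunk of data released by the writer.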
absl::StatusOr<std::vector<Tensor>> Iterator::Save() {
VariantTensorDataWriter writer;
TF_RETURN_IF_ERROR(iterator_->Save(serialization_ctx_.get(), &writer));
std::vector<std::unique_ptr<VariantTensorData>> data;
writer.ReleaseData(&data);
std::vector<Tensor> serialized;
for (size_t i = 0; i < data.size(); ++i) {
Tensor tensor(DT_VARIANT, TensorShape({1}));
IteratorStateVariant variant;
TF_RETURN_IF_ERROR(variant.InitializeFromVariantData(std::move(data[i])));
tensor.vec<Variant>()(0) = std::move(variant);
serialized.push_back(std::move(tensor));
}
return serialized;
}
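// Restores state saved by Save(); every tensor must carry an
// IteratorStateVariant, otherwise an Internal error is returned.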
Status Iterator::Restore(const std::vector<Tensor>& saved_iterator) {
std::vector<const VariantTensorData*> data;
data.reserve(saved_iterator.size());
for (int i = 0; i < saved_iterator.size(); ++i) {
auto saved_vec = saved_iterator[i].vec<Variant>();
auto* variant = saved_vec(0).get<IteratorStateVariant>();
if (!variant) {
return errors::Internal(
"Cannot initialize an iterator from tensor ",
saved_vec(0).DebugString(),
". Expected a variant tensor of type IteratorStateVariant.");
}
data.push_back(variant->GetData());
}
VariantTensorDataReader reader(data);
return iterator_->Restore(ctx_.get(), &reader);
}
std::shared_ptr<model::Model> Iterator::model() const { return ctx_->model(); }
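// Builds a standalone Dataset from a GraphDef: imports the graph onto a
// private CPU device, finds the "_Retval" fetch node, runs the graph to
// obtain the dataset variant, then finalizes the dataset and takes
// references on both the original and finalized versions.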
Status Dataset::FromGraph(Params params, const GraphDef& graph_def,
std::unique_ptr<Dataset>* result) {
Graph graph(OpRegistry::Global());
TF_RETURN_IF_ERROR(ImportGraphDef({}, graph_def, &graph, nullptr));
auto device_mgr = std::make_unique<StaticDeviceMgr>(DeviceFactory::NewDevice(
"CPU", params.session_options, "/job:localhost/replica:0/task:0"));
Device* device = device_mgr->ListDevices()[0];
auto flib_def = std::make_unique<FunctionLibraryDefinition>(
OpRegistry::Global(), graph_def.library());
auto pflr = std::make_unique<ProcessFunctionLibraryRuntime>(
device_mgr.get(), Env::Default(), nullptr,
TF_GRAPH_DEF_VERSION, flib_def.get(), OptimizerOptions{},
nullptr, nullptr,
nullptr,
Rendezvous::Factory{[](const int64_t, const DeviceMgr* device_mgr,
tsl::core::RefCountPtr<Rendezvous>* r) {
*r = tsl::core::RefCountPtr<Rendezvous>(
new IntraProcessRendezvous(device_mgr));
return absl::OkStatus();
}});
string fetch_node = "";
for (const auto& node : graph_def.node()) {
if (node.op() == "_Retval") {
fetch_node = node.input(0);
}
}
if (fetch_node.empty()) {
return errors::NotFound("Failed to find a _Retval op in the given dataset");
}
std::vector<Tensor> outputs;
GraphRunner graph_runner(device);
TF_RETURN_IF_ERROR(graph_runner.Run(&graph, pflr->GetFLR("/device:CPU:0"), {},
{fetch_node}, &outputs));
data::DatasetBase* dataset;
TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(outputs[0], &dataset));
data::DatasetBase* finalized_dataset;
std::unique_ptr<thread::ThreadPool> pool(
NewThreadPoolFromSessionOptions(params.session_options));
std::function<void(std::function<void()>)> runner =
[&pool](std::function<void()> c) { pool->Schedule(std::move(c)); };
OpKernelContext::Params op_params =
CreateParams(pflr.get(), device_mgr.get(), &runner);
OpKernelContext ctx(&op_params, 0);
TF_RETURN_IF_ERROR(data::FinalizeDataset(&ctx, dataset, &finalized_dataset));
core::ScopedUnref unref(finalized_dataset);
*result = absl::WrapUnique(new Dataset(
finalized_dataset, dataset, device_mgr.release(), pflr.release(),
flib_def.release(), pool.release(), std::move(runner)));
return absl::OkStatus();
}
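// Creates an iterator over the finalized dataset, wiring in the standalone
// cancellation manager, function handle cache, resource manager, unbounded
// thread pool, the given split providers, and a model when autotuning is on.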
Status Dataset::MakeIterator(
std::vector<std::unique_ptr<SplitProvider>> split_providers,
std::unique_ptr<Iterator>* result) {
std::unique_ptr<IteratorContext> ctx;
OpKernelContext::Params op_params =
CreateParams(pflr_.get(), device_mgr_.get(), &runner_);
OpKernelContext op_ctx(&op_params, 0);
IteratorContext::Params params(&op_ctx);
params.cancellation_manager = &cancellation_manager_;
params.function_handle_cache = function_handle_cache_.get();
params.resource_mgr = &resource_mgr_;
std::move(split_providers.begin(), split_providers.end(),
std::back_inserter(params.split_providers));
params.thread_factory = unbounded_thread_pool_.get_thread_factory();
params.thread_pool = &unbounded_thread_pool_;
if (ShouldUseAutotuning(finalized_dataset_->options())) {
params.model = std::make_shared<model::Model>();
}
params.run_mode = RunMode::STANDALONE;
ctx = std::make_unique<IteratorContext>(std::move(params));
SerializationContext::Params serialization_params(&op_ctx);
auto serialization_ctx =
std::make_unique<SerializationContext>(std::move(serialization_params));
std::unique_ptr<IteratorBase> iterator;
TF_RETURN_IF_ERROR(finalized_dataset_->MakeIterator(
ctx.get(), nullptr, "Iterator", &iterator));
*result = absl::WrapUnique(new Iterator(iterator.release(), ctx.release(),
serialization_ctx.release()));
return absl::OkStatus();
}
Status Dataset::MakeIterator(std::unique_ptr<Iterator>* result) {
return MakeIterator({}, result);
}
Status Dataset::MakeSplitProviders(
std::vector<std::unique_ptr<SplitProvider>>* result) {
return finalized_dataset_->MakeSplitProviders(result);
}
const DatasetBase* Dataset::Get() const { return finalized_dataset_; }
Dataset::Dataset(DatasetBase* finalized_dataset, DatasetBase* original_dataset,
DeviceMgr* device_mgr, ProcessFunctionLibraryRuntime* pflr,
FunctionLibraryDefinition* flib_def, thread::ThreadPool* pool,
std::function<void(std::function<void()>)> runner)
: finalized_dataset_(finalized_dataset),
original_dataset_(original_dataset),
device_mgr_(device_mgr),
flib_def_(flib_def),
pflr_(pflr),
interop_threadpool_(pool),
runner_(std::move(runner)),
unbounded_thread_pool_(Env::Default(), "tf_data_standalone") {
finalized_dataset_->Ref();
original_dataset_->Ref();
function_handle_cache_ =
std::make_unique<FunctionHandleCache>(pflr_->GetFLR("/device:CPU:0"));
}
Dataset::~Dataset() {
finalized_dataset_->Unref();
original_dataset_->Unref();
}
}
}
} | #include "tensorflow/core/data/standalone.h"
#include <memory>
#include <optional>
#include <vector>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace standalone {
namespace {
constexpr const char* const kRangeGraphProto = R"pb(
node {
name: "Const/_0"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT64 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT64
tensor_shape {}
int64_val: 0
}
}
}
}
node {
name: "Const/_1"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT64 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT64
tensor_shape {}
int64_val: 10
}
}
}
}
node {
name: "Const/_2"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT64 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT64
tensor_shape {}
int64_val: 1
}
}
}
}
node {
name: "RangeDataset/_3"
op: "RangeDataset"
input: "Const/_0"
input: "Const/_1"
input: "Const/_2"
attr {
key: "output_shapes"
value { list { shape {} } }
}
attr {
key: "output_types"
value { list { type: DT_INT64 } }
}
}
node {
name: "dataset"
op: "_Retval"
input: "RangeDataset/_3"
attr {
key: "T"
value { type: DT_VARIANT }
}
attr {
key: "index"
value { i: 0 }
}
}
library {}
versions { producer: 96 }
)pb";
constexpr const char* const kMapGraphProto = R"pb(
node {
name: "Const/_0"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT64 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT64
tensor_shape {}
int64_val: 0
}
}
}
}
node {
name: "Const/_1"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT64 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT64
tensor_shape {}
int64_val: 10
}
}
}
}
node {
name: "Const/_2"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT64 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT64
tensor_shape {}
int64_val: 1
}
}
}
}
node {
name: "RangeDataset/_3"
op: "RangeDataset"
input: "Const/_0"
input: "Const/_1"
input: "Const/_2"
attr {
key: "output_shapes"
value { list { shape {} } }
}
attr {
key: "output_types"
value { list { type: DT_INT64 } }
}
}
node {
name: "MapDataset/_4"
op: "MapDataset"
input: "RangeDataset/_3"
attr {
key: "Targuments"
value { list {} }
}
attr {
key: "f"
value { func { name: "__inference_Dataset_map_<lambda>_67" } }
}
attr {
key: "output_shapes"
value { list { shape {} } }
}
attr {
key: "output_types"
value { list { type: DT_INT64 } }
}
attr {
key: "preserve_cardinality"
value { b: false }
}
attr {
key: "use_inter_op_parallelism"
value { b: true }
}
}
node {
name: "dataset"
op: "_Retval"
input: "MapDataset/_4"
attr {
key: "T"
value { type: DT_VARIANT }
}
attr {
key: "index"
value { i: 0 }
}
}
library {
function {
signature {
name: "__inference_Dataset_map_<lambda>_67"
input_arg { name: "args_0" type: DT_INT64 }
output_arg { name: "identity" type: DT_INT64 }
}
node_def {
name: "mul"
op: "Mul"
input: "args_0"
input: "args_0"
attr {
key: "T"
value { type: DT_INT64 }
}
}
node_def {
name: "Identity"
op: "Identity"
input: "mul:z:0"
attr {
key: "T"
value { type: DT_INT64 }
}
}
ret { key: "identity" value: "Identity:output:0" }
arg_attr {
key: 0
value {
attr {
key: "_user_specified_name"
value { s: "args_0" }
}
}
}
}
}
versions { producer: 96 min_consumer: 12 }
)pb";
constexpr const char* const kMapGraphNoAutotuneProto = R"pb(
node {
name: "Const/_0"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT64 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT64
tensor_shape {}
int64_val: 0
}
}
}
}
node {
name: "Const/_1"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT64 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT64
tensor_shape {}
int64_val: 10
}
}
}
}
node {
name: "Const/_2"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT64 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT64
tensor_shape {}
int64_val: 1
}
}
}
}
node {
name: "RangeDataset/_3"
op: "RangeDataset"
input: "Const/_0"
input: "Const/_1"
input: "Const/_2"
attr {
key: "metadata"
value { s: "\n\017RangeDataset:13" }
}
attr {
key: "output_shapes"
value { list { shape {} } }
}
attr {
key: "output_types"
value { list { type: DT_INT64 } }
}
attr {
key: "replicate_on_split"
value { b: false }
}
experimental_type {
type_id: TFT_PRODUCT
args {
type_id: TFT_DATASET
args {
type_id: TFT_PRODUCT
args {
type_id: TFT_TENSOR
args { type_id: TFT_INT64 }
}
}
}
}
}
node {
name: "MapDataset/_4"
op: "MapDataset"
input: "RangeDataset/_3"
attr {
key: "Targuments"
value { list {} }
}
attr {
key: "f"
value { func { name: "__inference_Dataset_map_lambda_74" } }
}
attr {
key: "metadata"
value { s: "\n\rMapDataset:14" }
}
attr {
key: "output_shapes"
value { list { shape {} } }
}
attr {
key: "output_types"
value { list { type: DT_INT64 } }
}
attr {
key: "preserve_cardinality"
value { b: true }
}
attr {
key: "use_inter_op_parallelism"
value { b: true }
}
experimental_type {
type_id: TFT_PRODUCT
args {
type_id: TFT_DATASET
args {
type_id: TFT_PRODUCT
args {
type_id: TFT_TENSOR
args { type_id: TFT_INT64 }
}
}
}
}
}
node {
name: "OptionsDataset/_5"
op: "OptionsDataset"
input: "MapDataset/_4"
attr {
key: "metadata"
value { s: "\n\021OptionsDataset:15" }
}
attr {
key: "output_shapes"
value { list { shape {} } }
}
attr {
key: "output_types"
value { list { type: DT_INT64 } }
}
attr {
key: "serialized_options"
value { s: "\022\000\032\003\240\001\000*\000:\002\010\000" }
}
experimental_type {
type_id: TFT_PRODUCT
args {
type_id: TFT_DATASET
args {
type_id: TFT_PRODUCT
args {
type_id: TFT_TENSOR
args { type_id: TFT_INT64 }
}
}
}
}
}
node {
name: "dataset"
op: "_Retval"
input: "OptionsDataset/_5"
attr {
key: "T"
value { type: DT_VARIANT }
}
attr {
key: "index"
value { i: 0 }
}
}
library {
function {
signature {
name: "__inference_Dataset_map_lambda_74"
input_arg { name: "args_0" type: DT_INT64 }
output_arg { name: "identity" type: DT_INT64 }
}
node_def {
name: "mul"
op: "Mul"
input: "args_0"
input: "args_0"
attr {
key: "T"
value { type: DT_INT64 }
}
}
node_def {
name: "Identity"
op: "Identity"
input: "mul:z:0"
attr {
key: "T"
value { type: DT_INT64 }
}
}
ret { key: "identity" value: "Identity:output:0" }
attr {
key: "_construction_context"
value { s: "kEagerRuntime" }
}
attr {
key: "_tf_data_function"
value { b: true }
}
arg_attr {
key: 0
value {
attr {
key: "_output_shapes"
value { list { shape {} } }
}
attr {
key: "_user_specified_name"
value { s: "args_0" }
}
}
}
}
}
versions { producer: 1594 }
)pb";
TEST(Scalar, Standalone) {
struct TestCase {
string graph_string;
std::vector<int64_t> expected_outputs;
};
auto test_cases = {
TestCase{kRangeGraphProto, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
TestCase{kMapGraphProto, {0, 1, 4, 9, 16, 25, 36, 49, 64, 81}},
};
  for (const auto& test_case : test_cases) {
    GraphDef graph_def;
    ASSERT_TRUE(protobuf::TextFormat::ParseFromString(test_case.graph_string,
                                                      &graph_def));
std::unique_ptr<Dataset> dataset;
TF_EXPECT_OK(Dataset::FromGraph({}, graph_def, &dataset));
std::unique_ptr<Iterator> iterator;
TF_EXPECT_OK(dataset->MakeIterator(&iterator));
EXPECT_DOUBLE_EQ(iterator->model()->ComputeSnapshotProcessingTimeNsec(), 0);
bool end_of_input = false;
for (int num_outputs = 0; !end_of_input; ++num_outputs) {
std::vector<tensorflow::Tensor> outputs;
TF_EXPECT_OK(iterator->GetNext(&outputs, &end_of_input));
if (!end_of_input) {
EXPECT_EQ(outputs[0].scalar<int64_t>()(),
test_case.expected_outputs[num_outputs]);
} else {
EXPECT_EQ(test_case.expected_outputs.size(), num_outputs);
}
}
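    // Give the model time to take a snapshot before reading its processing
    // time below.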
absl::SleepFor(absl::Seconds(1));
EXPECT_GT(iterator->model()->ComputeSnapshotProcessingTimeNsec(), 0);
}
}
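// With autotuning disabled via dataset options, no model should be created
// either before or after iterating.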
TEST(NoAutotune, Standalone) {
std::vector<int64_t> expected_outputs({0, 1, 4, 9, 16, 25, 36, 49, 64, 81});
GraphDef graph_def;
  ASSERT_TRUE(protobuf::TextFormat::ParseFromString(kMapGraphNoAutotuneProto,
                                                    &graph_def));
std::unique_ptr<Dataset> dataset;
TF_EXPECT_OK(Dataset::FromGraph({}, graph_def, &dataset));
std::unique_ptr<Iterator> iterator;
TF_EXPECT_OK(dataset->MakeIterator(&iterator));
EXPECT_EQ(iterator->model(), nullptr);
bool end_of_input = false;
for (int num_outputs = 0; !end_of_input; ++num_outputs) {
std::vector<tensorflow::Tensor> outputs;
TF_EXPECT_OK(iterator->GetNext(&outputs, &end_of_input));
if (!end_of_input) {
EXPECT_EQ(outputs[0].scalar<int64_t>()(), expected_outputs[num_outputs]);
} else {
EXPECT_EQ(expected_outputs.size(), num_outputs);
}
}
absl::SleepFor(absl::Seconds(1));
EXPECT_EQ(iterator->model(), nullptr);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/standalone.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/standalone_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ba3ccac4-9de1-481a-a94c-43c91831dbcf | cpp | tensorflow/tensorflow | tfdataz_metrics | tensorflow/core/data/tfdataz_metrics.cc | tensorflow/core/data/tfdataz_metrics_test.cc | #include "tensorflow/core/data/tfdataz_metrics.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/time/time.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
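// ApproximateLatencyEstimator keeps cumulative latency sums and counts in a
// ring buffer with one slot per minute, so trailing-window averages can be
// computed by differencing cumulative values.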
ApproximateLatencyEstimator::ApproximateLatencyEstimator(const Env& env)
: env_(env),
last_updated_time_mins_(0),
latency_value_counter_(0),
latency_count_counter_(0),
next_slot_(0) {
for (int i = 0; i < kSlots; ++i) {
latency_value_[i] = 0;
latency_count_[i] = 0;
}
}
void ApproximateLatencyEstimator::AddLatency(const int64_t latency_usec)
TF_LOCKS_EXCLUDED(mu_) {
UpdateRingBuffer();
mutex_lock l(mu_);
latency_value_counter_ += latency_usec;
latency_count_counter_ += 1;
}
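// Advances the ring buffer by the number of elapsed minutes (capped at
// kSlots), stamping the current cumulative counters into each skipped slot.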
void ApproximateLatencyEstimator::UpdateRingBuffer() TF_LOCKS_EXCLUDED(mu_) {
int64_t now_minutes =
absl::ToInt64Minutes(absl::Microseconds(env_.NowMicros()));
mutex_lock l(mu_);
int64_t elapsed_minutes = now_minutes - last_updated_time_mins_;
int64_t minutes_to_update = std::min(elapsed_minutes, kSlots);
for (int i = 0; i < minutes_to_update; ++i) {
latency_value_[next_slot_] = latency_value_counter_;
latency_count_[next_slot_] = latency_count_counter_;
IncrementNextSlot();
}
last_updated_time_mins_ = now_minutes;
}
void ApproximateLatencyEstimator::IncrementNextSlot()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
next_slot_ = (next_slot_ + 1) % kSlots;
}
int ApproximateLatencyEstimator::PrevSlot(int steps)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return (next_slot_ - steps + kSlots) % kSlots;
}
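// Average latency over the trailing window: the difference in cumulative
// latency divided by the difference in cumulative count since the slot
// `duration` minutes back.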
absl::Duration ApproximateLatencyEstimator::GetAverageLatency(Duration duration)
TF_LOCKS_EXCLUDED(mu_) {
UpdateRingBuffer();
mutex_lock l(mu_);
double interval_latency =
static_cast<double>(latency_value_counter_ -
latency_value_[PrevSlot(static_cast<int>(duration))]);
double interval_count =
static_cast<double>(latency_count_counter_ -
latency_count_[PrevSlot(static_cast<int>(duration))]);
if (interval_count == 0) {
return absl::ZeroDuration();
}
return absl::Duration(absl::Microseconds(interval_latency)) / interval_count;
}
TfDatazMetricsCollector::TfDatazMetricsCollector(
const Env& env, DatasetBaseIterator* iterator,
std::shared_ptr<model::Model> model)
: iterator_(iterator), model_(std::move(model)), latency_estimator_(env) {}
void TfDatazMetricsCollector::RecordGetNextLatency(
int64_t get_next_latency_usec) {
if (get_next_latency_usec > 0) {
latency_estimator_.AddLatency(get_next_latency_usec);
}
}
absl::Duration TfDatazMetricsCollector::GetAverageLatencyForLastOneMinute() {
return latency_estimator_.GetAverageLatency(
ApproximateLatencyEstimator::Duration::kMinute);
}
absl::Duration TfDatazMetricsCollector::GetAverageLatencyForLastFiveMinutes() {
return latency_estimator_.GetAverageLatency(
ApproximateLatencyEstimator::Duration::kFiveMinutes);
}
absl::Duration TfDatazMetricsCollector::GetAverageLatencyForLastSixtyMinutes() {
return latency_estimator_.GetAverageLatency(
ApproximateLatencyEstimator::Duration::kSixtyMinutes);
}
std::optional<std::string> TfDatazMetricsCollector::DatasetName() {
auto options = iterator_->dataset()->options();
if (options.has_dataset_name()) {
return std::make_optional(options.dataset_name());
}
return std::nullopt;
}
int64_t TfDatazMetricsCollector::GetIteratorTotalMemoryUsage() {
return iterator_->TotalBufferedBytes();
}
std::shared_ptr<model::Model> TfDatazMetricsCollector::GetModel() {
return model_;
}
namespace {
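// Global registry of live collectors; the lock is linker-initialized so the
// registry is safe to use during program initialization.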
static mutex* get_tfdataz_metrics_registry_lock() {
static mutex tfdataz_metrics_registry_lock(LINKER_INITIALIZED);
return &tfdataz_metrics_registry_lock;
}
using TfDatazMetricsCollectors =
absl::flat_hash_set<std::shared_ptr<TfDatazMetricsCollector>>;
TfDatazMetricsCollectors& tfdataz_metric_collectors() {
static auto& collectors = *new TfDatazMetricsCollectors();
return collectors;
}
}
void TfDatazMetricsRegistry::Register(
std::shared_ptr<TfDatazMetricsCollector> collector) {
mutex_lock l(*get_tfdataz_metrics_registry_lock());
tfdataz_metric_collectors().insert(collector);
}
void TfDatazMetricsRegistry::Deregister(
std::shared_ptr<TfDatazMetricsCollector> collector) {
mutex_lock l(*get_tfdataz_metrics_registry_lock());
tfdataz_metric_collectors().erase(collector);
}
absl::flat_hash_set<std::shared_ptr<TfDatazMetricsCollector>>
TfDatazMetricsRegistry::GetIteratorMetricCollectors() {
mutex_lock l(*get_tfdataz_metrics_registry_lock());
return tfdataz_metric_collectors();
}
}
} | #include "tensorflow/core/data/tfdataz_metrics.h"
#include <memory>
#include <utility>
#include "absl/time/time.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/fake_clock_env.h"
namespace tensorflow {
namespace data {
namespace {
static int64_t k1MinutesInMicros = absl::ToInt64Microseconds(absl::Minutes(1));
static int64_t k2MinutesInMicros = absl::ToInt64Microseconds(absl::Minutes(2));
static int64_t k5MinutesInMicros = absl::ToInt64Microseconds(absl::Minutes(5));
static int64_t k59MinutesInMicros =
absl::ToInt64Microseconds(absl::Minutes(59));
static int64_t k60MinutesInMicros =
absl::ToInt64Microseconds(absl::Minutes(60));
static int64_t k61MinutesInMicros =
absl::ToInt64Microseconds(absl::Minutes(61));
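// The fixture injects a FakeClockEnv so tests can advance time
// deterministically when exercising the per-minute ring buffer.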
class TfDatazMetricsTest : public ::testing::Test {
protected:
void SetUp() override {
env_ = std::make_unique<FakeClockEnv>(Env::Default());
tfdataz_metrics_ = std::make_unique<TfDatazMetricsCollector>(
*env_, iterator_.get(), nullptr);
}
void TearDown() override {
env_.reset();
tfdataz_metrics_.reset();
}
std::unique_ptr<DatasetBaseIterator> iterator_;
std::unique_ptr<FakeClockEnv> env_;
std::unique_ptr<TfDatazMetricsCollector> tfdataz_metrics_;
};
TEST_F(TfDatazMetricsTest, RecordGetNextLatency) {
tfdataz_metrics_->RecordGetNextLatency(1);
tfdataz_metrics_->RecordGetNextLatency(2);
tfdataz_metrics_->RecordGetNextLatency(3);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastOneMinute()),
2.0);
}
TEST_F(TfDatazMetricsTest, GetAverageLatencyForLastOneMinute) {
tfdataz_metrics_->RecordGetNextLatency(1);
env_->AdvanceByMicroseconds(k2MinutesInMicros);
tfdataz_metrics_->RecordGetNextLatency(2);
tfdataz_metrics_->RecordGetNextLatency(3);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastOneMinute()),
2.5);
}
TEST_F(TfDatazMetricsTest, GetAverageLatencyForLastFiveMinutes) {
tfdataz_metrics_->RecordGetNextLatency(1);
env_->AdvanceByMicroseconds(k5MinutesInMicros);
tfdataz_metrics_->RecordGetNextLatency(4);
tfdataz_metrics_->RecordGetNextLatency(5);
tfdataz_metrics_->RecordGetNextLatency(6);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastFiveMinutes()),
5.0);
}
TEST_F(TfDatazMetricsTest,
GetAverageLatencyForLastSixtyMinutesWithAdvanceBySixtyMinutes) {
tfdataz_metrics_->RecordGetNextLatency(1);
env_->AdvanceByMicroseconds(k60MinutesInMicros);
tfdataz_metrics_->RecordGetNextLatency(4);
tfdataz_metrics_->RecordGetNextLatency(5);
tfdataz_metrics_->RecordGetNextLatency(6);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastSixtyMinutes()),
5.0);
}
TEST_F(TfDatazMetricsTest,
GetAverageLatencyForLastSixtyMinutesWithAdvanceByFiftyNineMinutes) {
tfdataz_metrics_->RecordGetNextLatency(1);
env_->AdvanceByMicroseconds(k59MinutesInMicros);
tfdataz_metrics_->RecordGetNextLatency(4);
tfdataz_metrics_->RecordGetNextLatency(5);
tfdataz_metrics_->RecordGetNextLatency(6);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastSixtyMinutes()),
4.0);
}
TEST_F(TfDatazMetricsTest,
GetAverageLatencyForLastSixtyMinutesWithAdvanceBySixtyOneMinutes) {
tfdataz_metrics_->RecordGetNextLatency(1);
env_->AdvanceByMicroseconds(k61MinutesInMicros);
tfdataz_metrics_->RecordGetNextLatency(2);
tfdataz_metrics_->RecordGetNextLatency(3);
tfdataz_metrics_->RecordGetNextLatency(4);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastSixtyMinutes()),
3.0);
}
TEST_F(TfDatazMetricsTest, GetMultipleAverageLatencies) {
tfdataz_metrics_->RecordGetNextLatency(1);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastOneMinute()),
1.0);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastFiveMinutes()),
1.0);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastSixtyMinutes()),
1.0);
env_->AdvanceByMicroseconds(k1MinutesInMicros);
tfdataz_metrics_->RecordGetNextLatency(2);
tfdataz_metrics_->RecordGetNextLatency(3);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastOneMinute()),
2.5);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastFiveMinutes()),
2.0);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastSixtyMinutes()),
2.0);
env_->AdvanceByMicroseconds(k60MinutesInMicros);
tfdataz_metrics_->RecordGetNextLatency(4);
tfdataz_metrics_->RecordGetNextLatency(5);
tfdataz_metrics_->RecordGetNextLatency(6);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastOneMinute()),
5.0);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastFiveMinutes()),
5.0);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastSixtyMinutes()),
5.0);
}
TEST_F(TfDatazMetricsTest, GetAverageLatencyWithZeroGetNextCalls) {
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastOneMinute()),
0);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastFiveMinutes()),
0);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastSixtyMinutes()),
0);
}
class ScopedTfDataMetricsRegistration {
public:
explicit ScopedTfDataMetricsRegistration(
std::shared_ptr<TfDatazMetricsCollector> collector)
: collector_(std::move(collector)) {
TfDatazMetricsRegistry::Register(collector_);
}
~ScopedTfDataMetricsRegistration() {
TfDatazMetricsRegistry::Deregister(collector_);
}
void Deregister() { TfDatazMetricsRegistry::Deregister(collector_); }
private:
std::shared_ptr<TfDatazMetricsCollector> collector_;
};
TEST(TfDatazMetricsRegistryTest, Register) {
std::unique_ptr<DatasetBaseIterator> iterator;
auto collector_one = std::make_shared<TfDatazMetricsCollector>(
*Env::Default(), iterator.get(), nullptr);
auto collector_two = std::make_shared<TfDatazMetricsCollector>(
*Env::Default(), iterator.get(), nullptr);
ScopedTfDataMetricsRegistration scoped_registration_one(collector_one);
ScopedTfDataMetricsRegistration scoped_registration_two(collector_two);
EXPECT_EQ(TfDatazMetricsRegistry::GetIteratorMetricCollectors().size(), 2);
}
TEST(TfDatazMetricsRegistryTest, Deregister) {
std::unique_ptr<DatasetBaseIterator> iterator;
auto collector_one = std::make_shared<TfDatazMetricsCollector>(
*Env::Default(), iterator.get(), nullptr);
auto collector_two = std::make_shared<TfDatazMetricsCollector>(
*Env::Default(), iterator.get(), nullptr);
auto collector_three = std::make_shared<TfDatazMetricsCollector>(
*Env::Default(), iterator.get(), nullptr);
ScopedTfDataMetricsRegistration scoped_registration_one(collector_one);
ScopedTfDataMetricsRegistration scoped_registration_two(collector_two);
ScopedTfDataMetricsRegistration scoped_registration_three(collector_three);
EXPECT_EQ(TfDatazMetricsRegistry::GetIteratorMetricCollectors().size(), 3);
scoped_registration_one.Deregister();
EXPECT_EQ(TfDatazMetricsRegistry::GetIteratorMetricCollectors().size(), 2);
scoped_registration_two.Deregister();
scoped_registration_three.Deregister();
EXPECT_EQ(TfDatazMetricsRegistry::GetIteratorMetricCollectors().size(), 0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/tfdataz_metrics.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/tfdataz_metrics_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
61925c0b-7afa-459a-b403-0769cea80bda | cpp | tensorflow/tensorflow | snapshot_utils | tensorflow/core/data/snapshot_utils.cc | tensorflow/core/data/snapshot_utils_test.cc | #include "tensorflow/core/data/snapshot_utils.h"
#include <algorithm>
#include <climits>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "xla/tsl/lib/io/snappy/snappy_inputbuffer.h"
#include "xla/tsl/lib/io/snappy/snappy_outputbuffer.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/io/buffered_inputstream.h"
#include "tensorflow/core/lib/io/random_inputstream.h"
#include "tensorflow/core/lib/io/record_writer.h"
#include "tensorflow/core/lib/io/zlib_compression_options.h"
#include "tensorflow/core/lib/io/zlib_inputstream.h"
#include "tensorflow/core/lib/io/zlib_outputbuffer.h"
#include "tensorflow/core/platform/coding.h"
#include "tensorflow/core/platform/file_system.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/protobuf/snapshot.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
namespace snapshot_util {
namespace {
constexpr const char* const kOutputTypes = "output_types";
constexpr const char* const kOutputShapes = "output_shapes";
constexpr const char* const kCompression = "compression";
constexpr const char* const kVersion = "version";
constexpr const char* const kCurrentCheckpointID = "current_checkpoint_id";
constexpr const char* const kIndex = "index";
constexpr const char* const kStartIndex = "start_index";
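// Builds the error message for a failed TensorProto serialization, calling
// out protos that exceed the 2GB protobuf size limit.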
std::string ProtoSerializationErrorMessage(const TensorProto& proto,
const std::string& output_file) {
const auto proto_byte_size = proto.ByteSizeLong();
std::string error_message =
absl::StrCat("Failed to serialize tensor proto of ", proto_byte_size,
" bytes to file: ", output_file);
if (proto_byte_size > INT_MAX) {
absl::StrAppend(&error_message, ": exceeded maximum protobuf size of 2GB.");
}
return error_message;
}
}
constexpr const int64_t
CustomReader::kSnappyReaderInputBufferSizeBytes;
constexpr const int64_t
CustomReader::kSnappyReaderOutputBufferSizeBytes;
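// Helpers for the on-disk snapshot layout:
//   <path>/<hash>/<run_id>/<zero-padded shard id + suffix>/<checkpoint>.snapshot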
std::string HashDirectory(const std::string& path, uint64 hash) {
return io::JoinPath(
path, strings::Printf("%llu", static_cast<unsigned long long>(hash)));
}
std::string RunDirectory(const std::string& hash_directory, uint64 run_id) {
return RunDirectory(
hash_directory,
strings::Printf("%llu", static_cast<unsigned long long>(run_id)));
}
std::string RunDirectory(const std::string& hash_directory,
const std::string& run_id) {
return io::JoinPath(hash_directory, run_id);
}
std::string ShardDirectory(const std::string& run_directory, int64_t shard_id) {
return io::JoinPath(
run_directory,
strings::Printf("%08llu%s", static_cast<unsigned long long>(shard_id),
kShardDirectorySuffix));
}
std::string GetCheckpointFileName(const std::string& shard_directory,
uint64 checkpoint_id) {
return io::JoinPath(
shard_directory,
strings::Printf("%08llu.snapshot",
static_cast<unsigned long long>(checkpoint_id)));
}
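// Picks the writer implementation by snapshot format version: 1 -> custom
// record format, 2 -> TFRecord.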
Status Writer::Create(Env* env, const std::string& filename,
const std::string& compression_type, int version,
const DataTypeVector& dtypes,
std::unique_ptr<Writer>* out_writer) {
switch (version) {
case 1:
*out_writer =
std::make_unique<CustomWriter>(filename, compression_type, dtypes);
break;
case 2:
*out_writer =
std::make_unique<TFRecordWriter>(filename, compression_type);
break;
default:
return errors::InvalidArgument("Snapshot writer version: ", version,
" is not supported.");
}
return (*out_writer)->Initialize(env);
}
TFRecordWriter::TFRecordWriter(const std::string& filename,
const std::string& compression_type)
: filename_(filename), compression_type_(compression_type) {}
Status TFRecordWriter::Initialize(tensorflow::Env* env) {
TF_RETURN_IF_ERROR(env->NewAppendableFile(filename_, &dest_));
record_writer_ = std::make_unique<io::RecordWriter>(
dest_.get(), io::RecordWriterOptions::CreateRecordWriterOptions(
compression_type_));
return absl::OkStatus();
}
Status TFRecordWriter::WriteTensors(const std::vector<Tensor>& tensors) {
for (const auto& tensor : tensors) {
TensorProto proto;
tensor.AsProtoTensorContent(&proto);
#if defined(TF_CORD_SUPPORT)
auto* proto_buffer = new std::string();
if (!proto.SerializeToString(proto_buffer)) {
delete proto_buffer;
return errors::DataLoss(ProtoSerializationErrorMessage(proto, filename_));
}
absl::Cord proto_serialized = absl::MakeCordFromExternal(
*proto_buffer,
[proto_buffer](absl::string_view) { delete proto_buffer; });
TF_RETURN_IF_ERROR(record_writer_->WriteRecord(proto_serialized));
#else
std::string proto_serialized;
if (!proto.SerializeToString(&proto_serialized)) {
return errors::DataLoss(ProtoSerializationErrorMessage(proto, filename_));
}
TF_RETURN_IF_ERROR(record_writer_->WriteRecord(proto_serialized));
#endif
}
return absl::OkStatus();
}
Status TFRecordWriter::Sync() {
TF_RETURN_IF_ERROR(record_writer_->Flush());
return dest_->Flush();
}
Status TFRecordWriter::Close() {
if (record_writer_ != nullptr) {
TF_RETURN_IF_ERROR(Sync());
TF_RETURN_IF_ERROR(record_writer_->Close());
TF_RETURN_IF_ERROR(dest_->Close());
record_writer_ = nullptr;
dest_ = nullptr;
}
return absl::OkStatus();
}
TFRecordWriter::~TFRecordWriter() {
Status s = Close();
if (!s.ok()) {
LOG(ERROR) << "Failed to close snapshot file " << filename_ << ": " << s;
}
}
CustomWriter::CustomWriter(const std::string& filename,
const std::string& compression_type,
const DataTypeVector& dtypes)
: filename_(filename),
compression_type_(compression_type),
dtypes_(dtypes) {}
Status CustomWriter::Initialize(tensorflow::Env* env) {
TF_RETURN_IF_ERROR(env->NewAppendableFile(filename_, &dest_));
#if defined(IS_SLIM_BUILD)
if (compression_type_ != io::compression::kNone) {
LOG(ERROR) << "Compression is unsupported on mobile platforms. Turning "
<< "off compression.";
}
#else
if (compression_type_ == io::compression::kGzip) {
zlib_underlying_dest_.swap(dest_);
io::ZlibCompressionOptions zlib_options;
zlib_options = io::ZlibCompressionOptions::GZIP();
io::ZlibOutputBuffer* zlib_output_buffer = new io::ZlibOutputBuffer(
zlib_underlying_dest_.get(), zlib_options.input_buffer_size,
zlib_options.output_buffer_size, zlib_options);
TF_CHECK_OK(zlib_output_buffer->Init());
dest_.reset(zlib_output_buffer);
}
#endif
simple_tensor_mask_.reserve(dtypes_.size());
for (const auto& dtype : dtypes_) {
if (DataTypeCanUseMemcpy(dtype)) {
simple_tensor_mask_.push_back(true);
num_simple_++;
} else {
simple_tensor_mask_.push_back(false);
num_complex_++;
}
}
return absl::OkStatus();
}
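// For snappy compression, all tensors are packed into one flat buffer
// (memcpy for simple dtypes, serialized TensorProtos otherwise), compressed
// as a unit, and written as a metadata record followed by the compressed
// payload. Other compression types write a SnapshotRecord proto per element.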
Status CustomWriter::WriteTensors(const std::vector<Tensor>& tensors) {
if (compression_type_ != io::compression::kSnappy) {
experimental::SnapshotRecord record;
for (const auto& tensor : tensors) {
TensorProto* t = record.add_tensor();
tensor.AsProtoTensorContent(t);
}
#if defined(TF_CORD_SUPPORT)
auto record_buffer = new std::string();
record.SerializeToString(record_buffer);
absl::Cord record_serialized = absl::MakeCordFromExternal(
*record_buffer,
[record_buffer](absl::string_view) { delete record_buffer; });
return WriteRecord(record_serialized);
#else
return WriteRecord(record.SerializeAsString());
#endif
}
std::vector<const TensorBuffer*> tensor_buffers;
tensor_buffers.reserve(num_simple_);
std::vector<TensorProto> tensor_protos;
tensor_protos.reserve(num_complex_);
experimental::SnapshotTensorMetadata metadata;
int64_t total_size = 0;
for (int i = 0, end = tensors.size(); i < end; ++i) {
const Tensor& tensor = tensors[i];
experimental::TensorMetadata* tensor_metadata =
metadata.add_tensor_metadata();
tensor.shape().AsProto(tensor_metadata->mutable_tensor_shape());
int64_t size = 0;
if (simple_tensor_mask_[i]) {
auto tensor_buffer = DMAHelper::buffer(&tensor);
tensor_buffers.push_back(tensor_buffer);
size = tensor_buffer->size();
} else {
TensorProto proto;
tensor.AsProtoTensorContent(&proto);
size = proto.ByteSizeLong();
tensor_protos.push_back(std::move(proto));
}
tensor_metadata->set_tensor_size_bytes(size);
total_size += size;
}
std::vector<char> uncompressed(total_size);
char* position = uncompressed.data();
int buffer_index = 0;
int proto_index = 0;
for (int i = 0, end = tensors.size(); i < end; ++i) {
const auto& tensor_metadata = metadata.tensor_metadata(i);
if (simple_tensor_mask_[i]) {
memcpy(position, tensor_buffers[buffer_index]->data(),
tensor_metadata.tensor_size_bytes());
buffer_index++;
} else {
tensor_protos[proto_index].SerializeToArray(
position, tensor_metadata.tensor_size_bytes());
proto_index++;
}
position += tensor_metadata.tensor_size_bytes();
}
DCHECK_EQ(position, uncompressed.data() + total_size);
string output;
if (!tsl::port::Snappy_Compress(uncompressed.data(), total_size, &output)) {
return errors::Internal("Failed to compress using snappy.");
}
#if defined(TF_CORD_SUPPORT)
auto metadata_buffer = new std::string();
metadata.SerializeToString(metadata_buffer);
absl::Cord metadata_serialized = absl::MakeCordFromExternal(
*metadata_buffer,
[metadata_buffer](absl::string_view) { delete metadata_buffer; });
#else
std::string metadata_serialized = metadata.SerializeAsString();
#endif
TF_RETURN_IF_ERROR(WriteRecord(metadata_serialized));
TF_RETURN_IF_ERROR(WriteRecord(output));
return absl::OkStatus();
}
Status CustomWriter::Sync() { return dest_->Sync(); }
Status CustomWriter::Close() {
if (dest_ != nullptr) {
TF_RETURN_IF_ERROR(dest_->Close());
dest_ = nullptr;
}
if (zlib_underlying_dest_ != nullptr) {
TF_RETURN_IF_ERROR(zlib_underlying_dest_->Close());
zlib_underlying_dest_ = nullptr;
}
return absl::OkStatus();
}
CustomWriter::~CustomWriter() {
Status s = Close();
if (!s.ok()) {
LOG(ERROR) << "Could not finish writing file: " << s;
}
}
Status CustomWriter::WriteRecord(const StringPiece& data) {
char header[kHeaderSize];
core::EncodeFixed64(header, data.size());
TF_RETURN_IF_ERROR(dest_->Append(StringPiece(header, sizeof(header))));
return dest_->Append(data);
}
#if defined(TF_CORD_SUPPORT)
Status CustomWriter::WriteRecord(const absl::Cord& data) {
char header[kHeaderSize];
core::EncodeFixed64(header, data.size());
TF_RETURN_IF_ERROR(dest_->Append(StringPiece(header, sizeof(header))));
return dest_->Append(data);
}
#endif
Status Reader::Create(Env* env, const std::string& filename,
const string& compression_type, int version,
const DataTypeVector& dtypes,
std::unique_ptr<Reader>* out_reader) {
switch (version) {
case 0:
case 1:
*out_reader = std::make_unique<CustomReader>(filename, compression_type,
version, dtypes);
break;
case 2:
*out_reader =
std::make_unique<TFRecordReader>(filename, compression_type, dtypes);
break;
default:
return errors::InvalidArgument("Snapshot reader version: ", version,
" is not supported.");
}
return (*out_reader)->Initialize(env);
}
Status Reader::SkipRecords(int64_t num_records) {
for (int i = 0; i < num_records; ++i) {
std::vector<Tensor> unused_tensors;
TF_RETURN_IF_ERROR(ReadTensors(&unused_tensors));
}
return absl::OkStatus();
}
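// Dataset that reads the checkpoint files of a single shard directory in
// order, resuming at `start_index` within the first file.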
class Reader::Dataset : public DatasetBase {
public:
Dataset(DatasetContext&& ctx, const std::string& shard_dir,
const std::string& compression, const int64_t version,
const DataTypeVector& dtypes,
const std::vector<PartialTensorShape>& shapes,
const int64_t start_index)
: DatasetBase(std::move(ctx)),
shard_dir_(shard_dir),
compression_(compression),
version_(version),
dtypes_(dtypes),
shapes_(shapes),
start_index_(start_index) {}
const DataTypeVector& output_dtypes() const override { return dtypes_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return shapes_;
}
std::string DebugString() const override { return "SnapshotDatasetReader"; }
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** node) const override {
Node* shard_dir = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(shard_dir_, &shard_dir));
Node* start_index = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(start_index_, &start_index));
AttrValue compression;
b->BuildAttrValue(compression_, &compression);
AttrValue version;
b->BuildAttrValue(version_, &version);
return b->AddDataset(
this,
{std::make_pair(0, shard_dir), std::make_pair(1, start_index)},
{},
{{kCompression, compression}, {kVersion, version}},
true, node);
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(node_name(), prefix)});
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
start_index_(dataset()->start_index_) {}
Status Initialize(IteratorContext* ctx) override {
TF_RETURN_IF_ERROR(Reader::Create(
ctx->env(), GetCurrentFilename(), dataset()->compression_,
dataset()->version_, dataset()->dtypes_, &reader_));
return AdvanceToStartIndex(ctx);
}
protected:
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
*end_of_sequence = false;
Status s = reader_->ReadTensors(out_tensors);
if (!absl::IsOutOfRange(s)) {
start_index_++;
return s;
}
Status status = AdvanceToNextFile(ctx->env());
if (absl::IsNotFound(status)) {
*end_of_sequence = true;
return absl::OkStatus();
}
return status;
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(writer->WriteScalar(full_name(kCurrentCheckpointID),
current_checkpoint_id_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(full_name(kStartIndex), start_index_));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kCurrentCheckpointID),
¤t_checkpoint_id_));
TF_RETURN_IF_ERROR(
reader->ReadScalar(full_name(kStartIndex), &start_index_));
TF_RETURN_IF_ERROR(ctx->env()->FileExists(GetCurrentFilename()));
TF_RETURN_IF_ERROR(Reader::Create(
ctx->env(), GetCurrentFilename(), dataset()->compression_,
dataset()->version_, dataset()->dtypes_, &reader_));
return AdvanceToStartIndex(ctx);
}
private:
Status AdvanceToNextFile(Env* env) {
start_index_ = 0;
current_checkpoint_id_++;
TF_RETURN_IF_ERROR(env->FileExists(GetCurrentFilename()));
return Reader::Create(env, GetCurrentFilename(), dataset()->compression_,
dataset()->version_, dataset()->dtypes_, &reader_);
}
std::string GetCurrentFilename() {
return GetCheckpointFileName(dataset()->shard_dir_,
current_checkpoint_id_);
}
Status AdvanceToStartIndex(IteratorContext* ctx) {
for (int64_t i = 0; i < start_index_; ++i) {
std::vector<Tensor> unused;
TF_RETURN_IF_ERROR(reader_->ReadTensors(&unused));
}
return absl::OkStatus();
}
std::unique_ptr<Reader> reader_;
int64_t current_checkpoint_id_ = 0;
int64_t start_index_;
};
const tstring shard_dir_;
const std::string compression_;
const int64_t version_;
const DataTypeVector dtypes_;
const std::vector<PartialTensorShape> shapes_;
const int64_t start_index_;
};
Reader::DatasetOp::DatasetOp(OpKernelConstruction* ctx) : DatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kCompression, &compression_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kVersion, &version_));
}
void Reader::DatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase** output) {
tstring shard_dir;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, "shard_dir", &shard_dir));
int64_t start_index;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, "start_index", &start_index));
*output =
new Reader::Dataset(DatasetContext(ctx), shard_dir, compression_,
version_, output_types_, output_shapes_, start_index);
}
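// NestedDataset yields each wrapped per-shard dataset as a scalar DT_VARIANT
// element, so callers can flat-map or interleave over the shards.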
class Reader::NestedDataset : public DatasetBase {
public:
explicit NestedDataset(DatasetContext&& ctx,
std::vector<DatasetBase*> datasets)
: DatasetBase(std::move(ctx)), datasets_(datasets) {
dtypes_.push_back(DT_VARIANT);
absl::InlinedVector<int64_t, 1UL> element_dim_sizes;
element_dim_sizes.push_back(1);
partial_shapes_.emplace_back(element_dim_sizes);
}
const DataTypeVector& output_dtypes() const override { return dtypes_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return partial_shapes_;
}
std::string DebugString() const override {
return "SnapshotNestedDatasetReader";
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->clear();
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** node) const override {
std::vector<Node*> input_graph_nodes;
input_graph_nodes.reserve(datasets_.size());
for (const auto& dataset : datasets_) {
Node* input_node;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, dataset, &input_node));
input_graph_nodes.emplace_back(input_node);
}
TF_RETURN_IF_ERROR(
b->AddDataset(this, {},
{std::make_pair(0, input_graph_nodes)},
{}, node));
return absl::OkStatus();
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(node_name(), prefix)});
}
private:
std::vector<DatasetBase*> datasets_;
DataTypeVector dtypes_;
std::vector<PartialTensorShape> partial_shapes_;
class Iterator : public DatasetIterator<NestedDataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<NestedDataset>(params) {}
protected:
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
const int64_t num_datasets = dataset()->datasets_.size();
*end_of_sequence = num_datasets == index_;
if (!*end_of_sequence) {
Tensor tensor(DT_VARIANT, TensorShape({}));
TF_RETURN_IF_ERROR(
StoreDatasetInVariantTensor(dataset()->datasets_[index_], &tensor));
out_tensors->clear();
out_tensors->push_back(std::move(tensor));
index_++;
}
return absl::OkStatus();
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(writer->WriteScalar(full_name(kIndex), index_));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kIndex), &index_));
return absl::OkStatus();
}
private:
int64_t index_ = 0;
};
};
Reader::NestedDatasetOp::NestedDatasetOp(OpKernelConstruction* ctx)
: DatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
void Reader::NestedDatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase** output) {
std::vector<DatasetBase*> inputs;
for (size_t i = 0; i < ctx->num_inputs(); ++i) {
DatasetBase* input;
OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(i), &input));
inputs.push_back(input);
}
*output = new Reader::NestedDataset(DatasetContext(ctx), inputs);
(*output)->Initialize({});
}
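// Builds one reader dataset per shard directory. Elements were distributed
// round-robin across shards, so the global `start_index` is divided among
// shards (with the remainder going to the earliest shards) and the dataset
// list is rotated so reading resumes at the right shard.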
Status Reader::MakeNestedDataset(Env* env,
const std::vector<std::string>& shard_dirs,
const string& compression_type, int version,
const DataTypeVector& dtypes,
const std::vector<PartialTensorShape>& shapes,
const int64_t start_index,
DatasetBase** output) {
std::vector<DatasetBase*> datasets;
datasets.reserve(shard_dirs.size());
for (int64_t i = 0; i < shard_dirs.size(); ++i) {
int64_t dataset_start_index = start_index / shard_dirs.size();
if (start_index % shard_dirs.size() > datasets.size()) {
dataset_start_index++;
}
datasets.push_back(
new Dataset(DatasetContext(DatasetContext::Params(
{"SnapshotDatasetReader",
strings::StrCat("SnapshotDatasetReader/_", i)})),
shard_dirs.at(i), compression_type, version, dtypes, shapes,
dataset_start_index));
datasets.back()->Initialize({});
}
if (!shard_dirs.empty()) {
std::rotate(datasets.begin(),
datasets.begin() + (start_index % shard_dirs.size()),
datasets.end());
}
MakeNestedDataset(datasets, output);
return absl::OkStatus();
}
void Reader::MakeNestedDataset(const std::vector<DatasetBase*>& datasets,
DatasetBase** output) {
*output = new NestedDataset(
DatasetContext(DatasetContext::Params(
{"SnapshotNestedDatasetReader", "SnapshotNestedDatasetReader"})),
datasets);
(*output)->Initialize({});
}
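// Reads one serialized TensorProto per TFRecord, tracking the file offset
// and total bytes read.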
TFRecordReaderImpl::TFRecordReaderImpl(
const std::string& filename, const string& compression,
std::optional<int64_t> output_buffer_size)
: filename_(filename),
offset_(0),
bytes_read_(0),
compression_(compression),
output_buffer_size_(output_buffer_size) {}
Status TFRecordReaderImpl::Initialize(Env* env) {
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(filename_, &file_));
auto options = io::RecordReaderOptions::CreateRecordReaderOptions(
compression_);
#if !defined(IS_SLIM_BUILD)
if (output_buffer_size_.has_value()) {
options.snappy_options.output_buffer_size = *output_buffer_size_;
options.zlib_options.output_buffer_size = *output_buffer_size_;
}
#endif
record_reader_ = std::make_unique<io::RecordReader>(file_.get(), options);
bytes_read_ = 0;
return absl::OkStatus();
}
absl::StatusOr<Tensor> TFRecordReaderImpl::GetNext() {
tstring record;
TF_RETURN_IF_ERROR(record_reader_->ReadRecord(&offset_, &record));
bytes_read_ += record.size();
return Parse(record);
}
absl::StatusOr<std::vector<Tensor>> TFRecordReaderImpl::GetTensors() {
std::vector<Tensor> tensors;
while (true) {
absl::StatusOr<Tensor> tensor = GetNext();
if (absl::IsOutOfRange(tensor.status())) {
return tensors;
}
TF_RETURN_IF_ERROR(tensor.status());
tensors.push_back(std::move(*tensor));
}
return tensors;
}
absl::StatusOr<Tensor> TFRecordReaderImpl::Parse(const tstring& record) {
TensorProto proto;
if (!proto.ParseFromArray(record.data(), record.size())) {
return errors::DataLoss(
"Unable to parse tensor from stored proto in file: ", filename_,
", record ", offset_, ". Serialized proto: ", record);
}
Tensor tensor;
if (!tensor.FromProto(proto)) {
return errors::DataLoss(
"Unable to parse tensor from stored proto in file: ", filename_,
", record ", offset_, ". TensorProto: ", proto.ShortDebugString());
}
return tensor;
}
Status TFRecordReader::ReadTensors(std::vector<Tensor>* read_tensors) {
read_tensors->clear();
read_tensors->reserve(dtypes_.size());
for (int i = 0; i < dtypes_.size(); ++i) {
TF_ASSIGN_OR_RETURN(Tensor tensor, reader_impl_.GetNext());
read_tensors->push_back(std::move(tensor));
}
return absl::OkStatus();
}
CustomReader::CustomReader(const std::string& filename,
const string& compression_type, const int version,
const DataTypeVector& dtypes)
: filename_(filename),
compression_type_(compression_type),
version_(version),
dtypes_(dtypes) {}
Status CustomReader::Initialize(Env* env) {
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(filename_, &file_));
input_stream_ = std::make_unique<io::RandomAccessInputStream>(file_.get());
#if defined(IS_SLIM_BUILD)
if (compression_type_ != io::compression::kNone) {
LOG(ERROR) << "Compression is unsupported on mobile platforms. Turning "
<< "off compression.";
}
#else
if (compression_type_ == io::compression::kGzip) {
io::ZlibCompressionOptions zlib_options;
zlib_options = io::ZlibCompressionOptions::GZIP();
input_stream_ = std::make_unique<io::ZlibInputStream>(
input_stream_.release(), zlib_options.input_buffer_size,
zlib_options.output_buffer_size, zlib_options, true);
} else if (compression_type_ == io::compression::kSnappy) {
if (version_ == 0) {
input_stream_ = std::make_unique<tsl::io::SnappyInputBuffer>(
file_.get(), kSnappyReaderInputBufferSizeBytes,
kSnappyReaderOutputBufferSizeBytes);
} else {
input_stream_ =
std::make_unique<io::BufferedInputStream>(file_.get(), 64 << 20);
}
}
#endif
simple_tensor_mask_.reserve(dtypes_.size());
for (const auto& dtype : dtypes_) {
if (DataTypeCanUseMemcpy(dtype)) {
simple_tensor_mask_.push_back(true);
num_simple_++;
} else {
simple_tensor_mask_.push_back(false);
num_complex_++;
}
}
return absl::OkStatus();
}
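// Version 0 files (and any non-snappy stream) use the record-of-protos
// format; version 1 with snappy uses the metadata-plus-compressed-payload
// format produced by CustomWriter::WriteTensors.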
Status CustomReader::ReadTensors(std::vector<Tensor>* read_tensors) {
tsl::profiler::TraceMe activity(
[&]() { return absl::StrCat(kClassName, kSeparator, "ReadTensors"); },
tsl::profiler::TraceMeLevel::kInfo);
if (version_ == 0 || compression_type_ != io::compression::kSnappy) {
return ReadTensorsV0(read_tensors);
}
if (version_ != 1) {
return errors::InvalidArgument("Version: ", version_, " is not supported.");
}
if (compression_type_ != io::compression::kSnappy) {
return errors::InvalidArgument("Compression ", compression_type_,
" is not supported.");
}
experimental::SnapshotTensorMetadata metadata;
tstring metadata_str;
TF_RETURN_IF_ERROR(ReadRecord(&metadata_str));
if (!metadata.ParseFromArray(metadata_str.data(), metadata_str.size())) {
return errors::DataLoss("Could not parse SnapshotTensorMetadata");
}
read_tensors->reserve(metadata.tensor_metadata_size());
std::vector<Tensor> simple_tensors;
simple_tensors.reserve(num_simple_);
std::vector<std::pair<std::unique_ptr<char[]>, size_t>> tensor_proto_strs;
tensor_proto_strs.reserve(num_complex_);
TF_RETURN_IF_ERROR(
SnappyUncompress(&metadata, &simple_tensors, &tensor_proto_strs));
int simple_index = 0;
int complex_index = 0;
for (int i = 0, end = simple_tensor_mask_.size(); i < end; ++i) {
if (simple_tensor_mask_[i]) {
read_tensors->push_back(std::move(simple_tensors[simple_index]));
simple_index++;
} else {
auto tensor_proto_str = std::move(tensor_proto_strs[complex_index].first);
size_t tensor_proto_size = tensor_proto_strs[complex_index].second;
TensorProto tp;
if (!tp.ParseFromArray(tensor_proto_str.get(), tensor_proto_size)) {
return errors::Internal("Could not parse TensorProto");
}
Tensor t;
if (!t.FromProto(tp)) {
return errors::Internal("Could not parse Tensor");
}
read_tensors->push_back(std::move(t));
complex_index++;
}
}
return absl::OkStatus();
}
Status CustomReader::ReadTensorsV0(std::vector<Tensor>* read_tensors) {
experimental::SnapshotRecord record;
#if defined(PLATFORM_GOOGLE)
absl::Cord c;
TF_RETURN_IF_ERROR(ReadRecord(&c));
record.ParseFromCord(c);
#else
tstring record_bytes;
TF_RETURN_IF_ERROR(ReadRecord(&record_bytes));
record.ParseFromArray(record_bytes.data(), record_bytes.size());
#endif
read_tensors->reserve(record.tensor_size());
for (int i = 0; i < record.tensor_size(); ++i) {
read_tensors->emplace_back();
if (!read_tensors->back().FromProto(record.tensor(i))) {
return errors::DataLoss("Unable to parse tensor from proto.");
}
}
return absl::OkStatus();
}
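// Decompresses the payload in one Snappy_UncompressToIOVec call, writing
// simple tensors straight into their buffers and complex tensors into
// scratch buffers that the caller parses as TensorProtos.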
Status CustomReader::SnappyUncompress(
const experimental::SnapshotTensorMetadata* metadata,
std::vector<Tensor>* simple_tensors,
std::vector<std::pair<std::unique_ptr<char[]>, size_t>>*
tensor_proto_strs) {
tstring compressed;
TF_RETURN_IF_ERROR(ReadRecord(&compressed));
size_t size;
if (!tsl::port::Snappy_GetUncompressedLength(compressed.data(),
compressed.size(), &size)) {
return errors::Internal("Could not get snappy uncompressed length");
}
int num_tensors = metadata->tensor_metadata_size();
std::vector<tsl::iovec> iov(num_tensors);
int index = 0;
int64_t total_size = 0;
for (int i = 0, end = simple_tensor_mask_.size(); i < end; ++i) {
const auto& tensor_metadata = metadata->tensor_metadata(i);
if (simple_tensor_mask_[i]) {
TensorShape shape(tensor_metadata.tensor_shape());
Tensor simple_tensor(dtypes_[i], shape);
TensorBuffer* buffer = DMAHelper::buffer(&simple_tensor);
iov[index].iov_base = buffer->data();
iov[index].iov_len = buffer->size();
simple_tensors->push_back(std::move(simple_tensor));
} else {
auto tensor_proto_str =
std::make_unique<char[]>(tensor_metadata.tensor_size_bytes());
iov[index].iov_base = tensor_proto_str.get();
iov[index].iov_len = tensor_metadata.tensor_size_bytes();
tensor_proto_strs->push_back(std::make_pair(
std::move(tensor_proto_str), tensor_metadata.tensor_size_bytes()));
}
total_size += iov[index].iov_len;
index++;
}
const int64_t size_int = size;
if (size_int != total_size) {
return errors::Internal("Uncompressed size mismatch. Snappy expects ", size,
" whereas the tensor metadata suggests ",
total_size);
}
if (!tsl::port::Snappy_UncompressToIOVec(compressed.data(), compressed.size(),
iov.data(), num_tensors)) {
return errors::Internal("Failed to perform snappy decompression.");
}
return absl::OkStatus();
}
Status CustomReader::ReadRecord(tstring* record) {
tstring header;
TF_RETURN_IF_ERROR(input_stream_->ReadNBytes(kHeaderSize, &header));
uint64 length = core::DecodeFixed64(header.data());
return input_stream_->ReadNBytes(length, record);
}
#if defined(TF_CORD_SUPPORT)
Status CustomReader::ReadRecord(absl::Cord* record) {
tstring header;
TF_RETURN_IF_ERROR(input_stream_->ReadNBytes(kHeaderSize, &header));
uint64 length = core::DecodeFixed64(header.data());
if (compression_type_ == io::compression::kNone) {
return input_stream_->ReadNBytes(length, record);
} else {
auto tmp_str = new tstring();
TF_RETURN_IF_ERROR(input_stream_->ReadNBytes(length, tmp_str));
absl::string_view tmp_str_view(*tmp_str);
record->Append(absl::MakeCordFromExternal(
tmp_str_view, [tmp_str](absl::string_view) { delete tmp_str; }));
return absl::OkStatus();
}
}
#endif
Status WriteMetadataFile(Env* env, const string& dir,
const experimental::SnapshotMetadataRecord* metadata) {
string metadata_filename = io::JoinPath(dir, kMetadataFilename);
TF_RETURN_IF_ERROR(env->RecursivelyCreateDir(dir));
std::string tmp_filename =
absl::StrCat(metadata_filename, "-tmp-", random::New64());
TF_RETURN_IF_ERROR(WriteBinaryProto(env, tmp_filename, *metadata));
return env->RenameFile(tmp_filename, metadata_filename);
}
Status WriteMetadataFile(
Env* env, const string& dir,
const experimental::DistributedSnapshotMetadata* metadata) {
string metadata_filename = io::JoinPath(dir, kMetadataFilename);
TF_RETURN_IF_ERROR(env->RecursivelyCreateDir(dir));
std::string tmp_filename =
absl::StrCat(metadata_filename, "-tmp-", random::New64());
TF_RETURN_IF_ERROR(WriteBinaryProto(env, tmp_filename, *metadata));
return env->RenameFile(tmp_filename, metadata_filename);
}
Status ReadMetadataFile(Env* env, const string& dir,
experimental::SnapshotMetadataRecord* metadata,
bool* file_exists) {
string metadata_filename = io::JoinPath(dir, kMetadataFilename);
Status s = env->FileExists(metadata_filename);
*file_exists = s.ok();
if (*file_exists) {
return ReadBinaryProto(env, metadata_filename, metadata);
} else {
return absl::OkStatus();
}
}
Status ReadMetadataFile(Env* env, const string& dir,
experimental::DistributedSnapshotMetadata* metadata,
bool* file_exists) {
string metadata_filename = io::JoinPath(dir, kMetadataFilename);
Status s = env->FileExists(metadata_filename);
*file_exists = s.ok();
if (*file_exists) {
return ReadBinaryProto(env, metadata_filename, metadata);
} else {
return absl::OkStatus();
}
}
Status DumpDatasetGraph(Env* env, const std::string& path, uint64 hash,
const GraphDef* graph) {
std::string hash_hex =
strings::StrCat(strings::Hex(hash, strings::kZeroPad16));
std::string graph_file =
io::JoinPath(path, absl::StrCat(hash_hex, "-graph.pbtxt"));
LOG(INFO) << "Graph hash is " << hash_hex << ", writing to " << graph_file;
TF_RETURN_IF_ERROR(env->RecursivelyCreateDir(path));
return WriteTextProto(env, graph_file, *graph);
}
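// Resolves the effective mode: an explicit mode string wins; otherwise write
// when no metadata exists, read once the snapshot is finalized, and pass
// through while an unfinalized snapshot is still within its expiry window
// (taking over as writer once it expires).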
Status DetermineOpState(const std::string& mode_string, bool file_exists,
const experimental::SnapshotMetadataRecord* metadata,
const uint64 pending_snapshot_expiry_seconds,
Mode* mode) {
if (mode_string == kModeRead) {
if (!file_exists) {
return errors::NotFound("Metadata file does not exist.");
}
LOG(INFO) << "Overriding mode to reader.";
*mode = READER;
return absl::OkStatus();
}
if (mode_string == kModeWrite) {
LOG(INFO) << "Overriding mode to writer.";
*mode = WRITER;
return absl::OkStatus();
}
if (mode_string == kModePassthrough) {
LOG(INFO) << "Overriding mode to passthrough.";
*mode = PASSTHROUGH;
return absl::OkStatus();
}
if (!file_exists) {
*mode = WRITER;
return absl::OkStatus();
}
if (metadata->finalized()) {
*mode = READER;
return absl::OkStatus();
}
int64_t expiration_timer = static_cast<int64_t>(EnvTime::NowMicros()) -
pending_snapshot_expiry_seconds * 1000000;
if (metadata->creation_timestamp() >= expiration_timer) {
*mode = PASSTHROUGH;
return absl::OkStatus();
} else {
*mode = WRITER;
return absl::OkStatus();
}
}
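// The writer thread drains queued elements and writes them to the shard's
// checkpoint file until an end-of-sequence marker is consumed.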
AsyncWriter::AsyncWriter(Env* env, int64_t file_index,
const std::string& shard_directory,
uint64 checkpoint_id, const std::string& compression,
int64_t version, const DataTypeVector& output_types,
std::function<void(Status)> done) {
  thread_ = absl::WrapUnique(env->StartThread(
      ThreadOptions(), absl::StrCat("writer_thread_", file_index),
      // Capture `output_types` by value: the spawned thread can outlive this
      // constructor call, so capturing the reference parameter could dangle.
      [this, env, shard_directory, checkpoint_id, compression, version,
       output_types, done = std::move(done)] {
        done(WriterThread(env, shard_directory, checkpoint_id, compression,
                          version, output_types));
      }));
}
void AsyncWriter::Write(const std::vector<Tensor>& tensors) {
mutex_lock l(mu_);
ElementOrEOF element;
element.value = tensors;
deque_.push_back(std::move(element));
}
void AsyncWriter::SignalEOF() {
mutex_lock l(mu_);
ElementOrEOF be;
be.end_of_sequence = true;
deque_.push_back(std::move(be));
}
void AsyncWriter::Consume(ElementOrEOF* be) {
mutex_lock l(mu_);
mu_.Await(tensorflow::Condition(this, &AsyncWriter::ElementAvailable));
*be = deque_.front();
deque_.pop_front();
}
bool AsyncWriter::ElementAvailable() { return !deque_.empty(); }
Status AsyncWriter::WriterThread(Env* env, const std::string& shard_directory,
uint64 checkpoint_id,
const std::string& compression,
int64_t version, DataTypeVector output_types) {
std::unique_ptr<snapshot_util::Writer> writer;
TF_RETURN_IF_ERROR(env->RecursivelyCreateDir(shard_directory));
TF_RETURN_IF_ERROR(snapshot_util::Writer::Create(
env, GetCheckpointFileName(shard_directory, checkpoint_id), compression,
version, std::move(output_types), &writer));
while (true) {
ElementOrEOF be;
Consume(&be);
if (be.end_of_sequence) {
TF_RETURN_IF_ERROR(writer->Close());
break;
}
TF_RETURN_IF_ERROR(writer->WriteTensors(be.value));
}
return absl::OkStatus();
}
namespace {
REGISTER_KERNEL_BUILDER(Name("SnapshotDatasetReader").Device(DEVICE_CPU),
Reader::DatasetOp);
REGISTER_KERNEL_BUILDER(Name("SnapshotNestedDatasetReader").Device(DEVICE_CPU),
Reader::NestedDatasetOp);
}
}
}
} | #include "tensorflow/core/data/snapshot_utils.h"
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/compression.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace data {
namespace snapshot_util {
namespace {
using ::tensorflow::data::testing::EqualsProto;
using ::tensorflow::data::testing::LocalTempFilename;
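// Fills `dtypes`/`tensors` with ten scalar string tensors of 1KB each.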
void GenerateTensorVector(tensorflow::DataTypeVector& dtypes,
std::vector<Tensor>& tensors) {
std::string tensor_data(1024, 'a');
for (int i = 0; i < 10; ++i) {
Tensor t(tensor_data.data());
dtypes.push_back(t.dtype());
tensors.push_back(t);
}
}
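// Writes 100 element vectors with the given compression and format version,
// reads them back, and checks that the reread tensors serialize to identical
// protos.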
void SnapshotRoundTrip(std::string compression_type, int version) {
std::vector<Tensor> tensors;
tensorflow::DataTypeVector dtypes;
GenerateTensorVector(dtypes, tensors);
std::string filename;
EXPECT_TRUE(Env::Default()->LocalTempFilename(&filename));
std::unique_ptr<Writer> writer;
TF_ASSERT_OK(Writer::Create(tensorflow::Env::Default(), filename,
compression_type, version, dtypes, &writer));
for (int i = 0; i < 100; ++i) {
TF_ASSERT_OK(writer->WriteTensors(tensors));
}
TF_ASSERT_OK(writer->Close());
std::unique_ptr<Reader> reader;
TF_ASSERT_OK(Reader::Create(Env::Default(), filename, compression_type,
version, dtypes, &reader));
for (int i = 0; i < 100; ++i) {
std::vector<Tensor> read_tensors;
TF_ASSERT_OK(reader->ReadTensors(&read_tensors));
EXPECT_EQ(tensors.size(), read_tensors.size());
for (int j = 0; j < read_tensors.size(); ++j) {
TensorProto proto;
TensorProto read_proto;
tensors[j].AsProtoTensorContent(&proto);
read_tensors[j].AsProtoTensorContent(&read_proto);
std::string proto_serialized, read_proto_serialized;
proto.AppendToString(&proto_serialized);
read_proto.AppendToString(&read_proto_serialized);
EXPECT_EQ(proto_serialized, read_proto_serialized);
}
}
TF_ASSERT_OK(Env::Default()->DeleteFile(filename));
}
TEST(SnapshotUtilTest, CombinationRoundTripTest) {
SnapshotRoundTrip(io::compression::kNone, 1);
SnapshotRoundTrip(io::compression::kGzip, 1);
SnapshotRoundTrip(io::compression::kSnappy, 1);
SnapshotRoundTrip(io::compression::kNone, 2);
SnapshotRoundTrip(io::compression::kGzip, 2);
SnapshotRoundTrip(io::compression::kSnappy, 2);
}
TEST(SnapshotUtilTest, MetadataFileRoundTrip) {
experimental::DistributedSnapshotMetadata metadata_in;
metadata_in.set_compression(io::compression::kGzip);
std::string dir = LocalTempFilename();
TF_ASSERT_OK(WriteMetadataFile(Env::Default(), dir, &metadata_in));
experimental::DistributedSnapshotMetadata metadata_out;
bool file_exists;
TF_ASSERT_OK(
ReadMetadataFile(Env::Default(), dir, &metadata_out, &file_exists));
EXPECT_THAT(metadata_in, EqualsProto(metadata_out));
}
TEST(SnapshotUtilTest, MetadataFileDoesntExist) {
experimental::DistributedSnapshotMetadata metadata;
bool file_exists;
TF_ASSERT_OK(ReadMetadataFile(Env::Default(), LocalTempFilename(), &metadata,
&file_exists));
EXPECT_FALSE(file_exists);
}
void SnapshotReaderBenchmarkLoop(::testing::benchmark::State& state,
std::string compression_type, int version) {
tensorflow::DataTypeVector dtypes;
std::vector<Tensor> tensors;
GenerateTensorVector(dtypes, tensors);
std::string filename;
EXPECT_TRUE(Env::Default()->LocalTempFilename(&filename));
std::unique_ptr<Writer> writer;
TF_ASSERT_OK(Writer::Create(tensorflow::Env::Default(), filename,
compression_type, version, dtypes, &writer));
for (auto s : state) {
writer->WriteTensors(tensors).IgnoreError();
}
TF_ASSERT_OK(writer->Close());
std::unique_ptr<Reader> reader;
TF_ASSERT_OK(Reader::Create(Env::Default(), filename, compression_type,
version, dtypes, &reader));
for (auto s : state) {
std::vector<Tensor> read_tensors;
reader->ReadTensors(&read_tensors).IgnoreError();
}
TF_ASSERT_OK(Env::Default()->DeleteFile(filename));
}
void SnapshotCustomReaderNoneBenchmark(::testing::benchmark::State& state) {
SnapshotReaderBenchmarkLoop(state, io::compression::kNone, 1);
}
void SnapshotCustomReaderGzipBenchmark(::testing::benchmark::State& state) {
SnapshotReaderBenchmarkLoop(state, io::compression::kGzip, 1);
}
void SnapshotCustomReaderSnappyBenchmark(::testing::benchmark::State& state) {
SnapshotReaderBenchmarkLoop(state, io::compression::kSnappy, 1);
}
void SnapshotTFRecordReaderNoneBenchmark(::testing::benchmark::State& state) {
SnapshotReaderBenchmarkLoop(state, io::compression::kNone, 2);
}
void SnapshotTFRecordReaderGzipBenchmark(::testing::benchmark::State& state) {
SnapshotReaderBenchmarkLoop(state, io::compression::kGzip, 2);
}
BENCHMARK(SnapshotCustomReaderNoneBenchmark);
BENCHMARK(SnapshotCustomReaderGzipBenchmark);
BENCHMARK(SnapshotCustomReaderSnappyBenchmark);
BENCHMARK(SnapshotTFRecordReaderNoneBenchmark);
BENCHMARK(SnapshotTFRecordReaderGzipBenchmark);
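// Shared benchmark body for snapshot writers: times WriteTensors against a
// temporary file for the given compression type and writer version.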
void SnapshotWriterBenchmarkLoop(::testing::benchmark::State& state,
std::string compression_type, int version) {
tensorflow::DataTypeVector dtypes;
std::vector<Tensor> tensors;
GenerateTensorVector(dtypes, tensors);
std::string filename;
EXPECT_TRUE(Env::Default()->LocalTempFilename(&filename));
std::unique_ptr<Writer> writer;
TF_ASSERT_OK(Writer::Create(tensorflow::Env::Default(), filename,
compression_type, version, dtypes, &writer));
for (auto s : state) {
writer->WriteTensors(tensors).IgnoreError();
}
writer->Close().IgnoreError();
TF_ASSERT_OK(Env::Default()->DeleteFile(filename));
}
void SnapshotCustomWriterNoneBenchmark(::testing::benchmark::State& state) {
SnapshotWriterBenchmarkLoop(state, io::compression::kNone, 1);
}
void SnapshotCustomWriterGzipBenchmark(::testing::benchmark::State& state) {
SnapshotWriterBenchmarkLoop(state, io::compression::kGzip, 1);
}
void SnapshotCustomWriterSnappyBenchmark(::testing::benchmark::State& state) {
SnapshotWriterBenchmarkLoop(state, io::compression::kSnappy, 1);
}
void SnapshotTFRecordWriterNoneBenchmark(::testing::benchmark::State& state) {
SnapshotWriterBenchmarkLoop(state, io::compression::kNone, 2);
}
void SnapshotTFRecordWriterGzipBenchmark(::testing::benchmark::State& state) {
SnapshotWriterBenchmarkLoop(state, io::compression::kGzip, 2);
}
void SnapshotTFRecordWriterSnappyBenchmark(::testing::benchmark::State& state) {
SnapshotWriterBenchmarkLoop(state, io::compression::kSnappy, 2);
}
BENCHMARK(SnapshotCustomWriterNoneBenchmark);
BENCHMARK(SnapshotCustomWriterGzipBenchmark);
BENCHMARK(SnapshotCustomWriterSnappyBenchmark);
BENCHMARK(SnapshotTFRecordWriterNoneBenchmark);
BENCHMARK(SnapshotTFRecordWriterGzipBenchmark);
BENCHMARK(SnapshotTFRecordWriterSnappyBenchmark);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/snapshot_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/snapshot_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3207d09c-b623-41ab-a161-44bb10022704 | cpp | tensorflow/tensorflow | dispatcher_state | tensorflow/core/data/service/dispatcher_state.cc | tensorflow/core/data/service/dispatcher_state_test.cc | #include "tensorflow/core/data/service/dispatcher_state.h"
#include <algorithm>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/journal.h"
#include "tensorflow/core/data/service/journal.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
DispatcherState::DispatcherState()
: worker_index_resolver_(std::vector<std::string>{}) {}
DispatcherState::DispatcherState(
const experimental::DispatcherConfig& dispatcher_config)
: worker_index_resolver_(dispatcher_config.worker_addresses()) {}
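// Applies a single journaled update to the in-memory dispatcher state,
// dispatching on the update type. Replaying the journal through this method
// reconstructs the state after a dispatcher restart.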
Status DispatcherState::Apply(const Update& update) {
switch (update.update_type_case()) {
case Update::kRegisterDataset:
RegisterDataset(update.register_dataset());
break;
case Update::kRegisterWorker:
RegisterWorker(update.register_worker());
break;
case Update::kCreateJob:
CreateJob(update.create_job());
break;
case Update::kCreateIteration:
CreateIteration(update.create_iteration());
break;
case Update::kProduceSplit:
ProduceSplit(update.produce_split());
break;
case Update::kAcquireIterationClient:
AcquireIterationClient(update.acquire_iteration_client());
break;
case Update::kReleaseIterationClient:
ReleaseIterationClient(update.release_iteration_client());
break;
case Update::kGarbageCollectIteration:
GarbageCollectIteration(update.garbage_collect_iteration());
break;
case Update::kRemoveTask:
RemoveTask(update.remove_task());
break;
case Update::kCreatePendingTask:
CreatePendingTask(update.create_pending_task());
break;
case Update::kClientHeartbeat:
ClientHeartbeat(update.client_heartbeat());
break;
case Update::kCreateTask:
CreateTask(update.create_task());
break;
case Update::kFinishTask:
FinishTask(update.finish_task());
break;
case Update::kSnapshot:
Snapshot(update.snapshot());
break;
case Update::kCompressionDisabledAtRuntime:
CompressionDisabledAtRuntime(update.compression_disabled_at_runtime());
break;
case Update::UPDATE_TYPE_NOT_SET:
return errors::Internal("Update type not set.");
}
return absl::OkStatus();
}
void DispatcherState::RegisterDataset(
const RegisterDatasetUpdate& register_dataset) {
std::string dataset_id = register_dataset.dataset_id();
auto dataset =
std::make_shared<Dataset>(dataset_id, register_dataset.metadata());
DCHECK(!datasets_by_id_.contains(dataset_id));
datasets_by_id_[dataset_id] = dataset;
UpdateNextAvailableDatasetId();
}
void DispatcherState::RegisterWorker(
const RegisterWorkerUpdate& register_worker) {
std::string address = register_worker.worker_address();
DCHECK(!workers_.contains(address));
workers_[address] = std::make_shared<Worker>(register_worker);
tasks_by_worker_[address] =
absl::flat_hash_map<int64_t, std::shared_ptr<Task>>();
worker_index_resolver_.AddWorker(address);
}
void DispatcherState::CreateJob(const CreateJobUpdate& create_job) {
int64_t job_id = create_job.job_id();
std::string job_name = create_job.job_name();
std::optional<int64_t> num_consumers;
if (create_job.optional_num_consumers_case() ==
CreateJobUpdate::kNumConsumers) {
num_consumers = create_job.num_consumers();
}
auto job = std::make_shared<Job>(
job_id, create_job.dataset_id(), create_job.processing_mode_def(),
job_name, num_consumers, create_job.use_cross_trainer_cache(),
create_job.target_workers());
DCHECK(!jobs_by_id_.contains(job_id));
jobs_by_id_[job_id] = job;
DCHECK(!jobs_by_name_.contains(job_name));
jobs_by_name_[job_name] = job;
next_available_job_id_ = std::max(next_available_job_id_, job_id + 1);
}
Status DispatcherState::JobFromId(int64_t job_id,
std::shared_ptr<const Job>& job) const {
auto it = jobs_by_id_.find(job_id);
if (it == jobs_by_id_.end()) {
return errors::NotFound("Job with id ", job_id, " not found");
}
job = it->second;
return absl::OkStatus();
}
Status DispatcherState::JobByName(const std::string& job_name,
std::shared_ptr<const Job>& job) const {
auto it = jobs_by_name_.find(job_name);
if (it == jobs_by_name_.end()) {
return errors::NotFound("Job with name ", job_name, " not found");
}
job = it->second;
return absl::OkStatus();
}
void DispatcherState::CreateIteration(
const CreateIterationUpdate& create_iteration) {
int64_t iteration_id = create_iteration.iteration_id();
int64_t job_id = create_iteration.job_id();
DCHECK(jobs_by_id_.contains(job_id));
auto& job = jobs_by_id_[job_id];
DCHECK(job);
IterationKey iteration_key(job->job_name, create_iteration.repetition());
auto iteration = std::make_shared<Iteration>(
iteration_id, iteration_key, create_iteration.num_split_providers(), job);
DCHECK(!iterations_.contains(iteration_id));
iterations_[iteration_id] = iteration;
tasks_by_iteration_[iteration_id] = std::vector<std::shared_ptr<Task>>();
DCHECK(!iterations_by_key_.contains(iteration_key) ||
iterations_by_key_[iteration_key]->garbage_collected);
iterations_by_key_[iteration_key] = iteration;
next_available_iteration_id_ =
std::max(next_available_iteration_id_, iteration_id + 1);
}
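// Records split production for one split provider of an iteration: a
// "finished" update ends the current repetition (advancing the repetition
// count and resetting the split index); otherwise the split index advances.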
void DispatcherState::ProduceSplit(const ProduceSplitUpdate& produce_split) {
std::shared_ptr<Iteration> iteration =
iterations_[produce_split.iteration_id()];
DCHECK(iteration->distributed_epoch_state.has_value());
DistributedEpochState& state = iteration->distributed_epoch_state.value();
int64_t provider_index = produce_split.split_provider_index();
DCHECK_GE(produce_split.repetition(), state.repetitions[provider_index]);
state.repetitions[provider_index] = produce_split.repetition();
if (produce_split.finished()) {
state.repetitions[provider_index]++;
state.indices[provider_index] = 0;
return;
}
state.indices[provider_index]++;
}
void DispatcherState::AcquireIterationClient(
const AcquireIterationClientUpdate& acquire_iteration_client) {
int64_t iteration_client_id = acquire_iteration_client.iteration_client_id();
std::shared_ptr<Iteration>& iteration =
iterations_for_client_ids_[iteration_client_id];
DCHECK(!iteration);
iteration = iterations_[acquire_iteration_client.iteration_id()];
DCHECK(iteration);
iteration->num_clients++;
next_available_iteration_client_id_ =
std::max(next_available_iteration_client_id_, iteration_client_id + 1);
}
void DispatcherState::ReleaseIterationClient(
const ReleaseIterationClientUpdate& release_iteration_client) {
int64_t iteration_client_id = release_iteration_client.iteration_client_id();
std::shared_ptr<Iteration>& iteration =
iterations_for_client_ids_[iteration_client_id];
DCHECK(iteration);
iteration->num_clients--;
DCHECK_GE(iteration->num_clients, 0);
iteration->last_client_released_micros =
release_iteration_client.time_micros();
iterations_for_client_ids_.erase(iteration_client_id);
}
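// Garbage-collects an iteration: marks all of its tasks finished, detaches
// them from their workers, and flags the iteration as garbage collected.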
void DispatcherState::GarbageCollectIteration(
const GarbageCollectIterationUpdate& garbage_collect_iteration) {
int64_t iteration_id = garbage_collect_iteration.iteration_id();
for (auto& task : tasks_by_iteration_[iteration_id]) {
task->finished = true;
tasks_by_worker_[task->worker_address].erase(task->task_id);
}
iterations_[iteration_id]->finished = true;
iterations_[iteration_id]->garbage_collected = true;
}
void DispatcherState::RemoveTask(const RemoveTaskUpdate& remove_task) {
  // Hold a copy of the shared_ptr (not a reference into tasks_) so the task
  // stays alive for the logging below, after it is erased from tasks_.
  std::shared_ptr<Task> task = tasks_[remove_task.task_id()];
DCHECK(task);
task->removed = true;
auto& tasks_for_iteration =
tasks_by_iteration_[task->iteration->iteration_id];
for (auto it = tasks_for_iteration.begin(); it != tasks_for_iteration.end();
++it) {
if ((*it)->task_id == task->task_id) {
tasks_for_iteration.erase(it);
break;
}
}
tasks_by_worker_[task->worker_address].erase(task->task_id);
tasks_.erase(task->task_id);
VLOG(1) << "Removed task " << remove_task.task_id() << " from worker "
<< task->worker_address;
}
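// Creates a task in the pending state for round-robin reads; it is promoted
// to an active task in ClientHeartbeat once every consumer accepts it.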
void DispatcherState::CreatePendingTask(
const CreatePendingTaskUpdate& create_pending_task) {
int64_t task_id = create_pending_task.task_id();
auto& task = tasks_[task_id];
DCHECK_EQ(task, nullptr);
auto& iteration = iterations_[create_pending_task.iteration_id()];
DCHECK_NE(iteration, nullptr);
task = std::make_shared<Task>(create_pending_task, iteration);
iteration->pending_tasks.emplace(task, create_pending_task.starting_round());
tasks_by_worker_[create_pending_task.worker_address()][task->task_id] = task;
next_available_task_id_ = std::max(next_available_task_id_, task_id + 1);
}
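// Processes a round-robin client heartbeat against the front pending task:
// a rejection clears the ready consumers and retargets the round, while an
// acceptance from the last outstanding consumer promotes the task to active.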
void DispatcherState::ClientHeartbeat(
const ClientHeartbeatUpdate& client_heartbeat) {
int64_t iteration_client_id = client_heartbeat.iteration_client_id();
auto& iteration = iterations_for_client_ids_[iteration_client_id];
DCHECK(!iteration->pending_tasks.empty());
auto& task = iteration->pending_tasks.front();
if (client_heartbeat.has_task_rejected()) {
task.failures++;
task.ready_consumers.clear();
task.target_round = client_heartbeat.task_rejected().new_target_round();
}
if (client_heartbeat.task_accepted()) {
task.ready_consumers.insert(iteration_client_id);
if (task.ready_consumers.size() == iteration->job->num_consumers.value()) {
VLOG(1) << "Promoting task " << task.task->task_id
<< " from pending to active";
task.task->starting_round = task.target_round;
tasks_by_iteration_[iteration->iteration_id].push_back(task.task);
iteration->pending_tasks.pop();
}
}
}
void DispatcherState::CreateTask(const CreateTaskUpdate& create_task) {
int64_t task_id = create_task.task_id();
auto& task = tasks_[task_id];
DCHECK_EQ(task, nullptr);
auto& iteration = iterations_[create_task.iteration_id()];
DCHECK_NE(iteration, nullptr);
task = std::make_shared<Task>(create_task, iteration);
tasks_by_iteration_[create_task.iteration_id()].push_back(task);
tasks_by_worker_[create_task.worker_address()][task->task_id] = task;
next_available_task_id_ = std::max(next_available_task_id_, task_id + 1);
}
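// Marks a task as finished; the owning iteration is finished once all of its
// tasks are.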
void DispatcherState::FinishTask(const FinishTaskUpdate& finish_task) {
VLOG(2) << "Marking task " << finish_task.task_id() << " as finished";
int64_t task_id = finish_task.task_id();
auto& task = tasks_[task_id];
DCHECK(task != nullptr);
task->finished = true;
tasks_by_worker_[task->worker_address].erase(task->task_id);
bool all_finished = true;
for (const auto& task_for_iteration :
tasks_by_iteration_[task->iteration->iteration_id]) {
if (!task_for_iteration->finished) {
all_finished = false;
}
}
VLOG(3) << "Iteration " << task->iteration->iteration_id
<< " finished: " << all_finished;
iterations_[task->iteration->iteration_id]->finished = all_finished;
}
std::string DispatcherState::NextAvailableDatasetId() const {
return absl::StrCat(next_available_dataset_id_);
}
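// Advances the next available dataset ID past any IDs already taken, since
// clients may register datasets under explicit (non-sequential) IDs.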
void DispatcherState::UpdateNextAvailableDatasetId() {
while (datasets_by_id_.contains(absl::StrCat(next_available_dataset_id_))) {
++next_available_dataset_id_;
}
}
Status DispatcherState::DatasetFromId(
const std::string& id, std::shared_ptr<const Dataset>& dataset) const {
auto it = datasets_by_id_.find(id);
if (it == datasets_by_id_.end()) {
return errors::NotFound("Dataset id ", id, " not found");
}
dataset = it->second;
return absl::OkStatus();
}
Status DispatcherState::WorkerFromAddress(
const std::string& address, std::shared_ptr<const Worker>& worker) const {
auto it = workers_.find(address);
if (it == workers_.end()) {
return errors::NotFound("Worker with address ", address, " not found.");
}
worker = it->second;
return absl::OkStatus();
}
std::vector<std::shared_ptr<const DispatcherState::Worker>>
DispatcherState::ListWorkers() const {
std::vector<std::shared_ptr<const Worker>> workers;
workers.reserve(workers_.size());
for (const auto& it : workers_) {
workers.push_back(it.second);
}
return workers;
}
std::vector<std::shared_ptr<const DispatcherState::Iteration>>
DispatcherState::ListIterations() const {
std::vector<std::shared_ptr<const DispatcherState::Iteration>> iterations;
iterations.reserve(iterations_.size());
for (const auto& it : iterations_) {
iterations.push_back(it.second);
}
return iterations;
}
Status DispatcherState::IterationFromId(
int64_t id, std::shared_ptr<const Iteration>& iteration) const {
auto it = iterations_.find(id);
if (it == iterations_.end()) {
return errors::NotFound("Iteration id ", id, " not found");
}
iteration = it->second;
return absl::OkStatus();
}
Status DispatcherState::IterationByKey(
IterationKey iteration_key,
std::shared_ptr<const Iteration>& iteration) const {
auto it = iterations_by_key_.find(iteration_key);
if (it == iterations_by_key_.end()) {
return errors::NotFound("Iteration key ", iteration_key.DebugString(),
" not found");
}
iteration = it->second;
return absl::OkStatus();
}
int64_t DispatcherState::NextAvailableJobId() const {
return next_available_job_id_;
}
int64_t DispatcherState::NextAvailableIterationId() const {
return next_available_iteration_id_;
}
Status DispatcherState::IterationForIterationClientId(
int64_t iteration_client_id, std::shared_ptr<const Iteration>& iteration) {
iteration = iterations_for_client_ids_[iteration_client_id];
if (!iteration) {
return errors::NotFound("Iteration client id not found: ",
iteration_client_id);
}
return absl::OkStatus();
}
std::vector<int64_t> DispatcherState::ListActiveClientIds() {
std::vector<int64_t> ids;
for (const auto& it : iterations_for_client_ids_) {
if (it.second && !it.second->finished) {
ids.push_back(it.first);
}
}
return ids;
}
int64_t DispatcherState::NextAvailableIterationClientId() const {
return next_available_iteration_client_id_;
}
Status DispatcherState::TaskFromId(int64_t id,
std::shared_ptr<const Task>& task) const {
auto it = tasks_.find(id);
if (it == tasks_.end()) {
return errors::NotFound("Task ", id, " not found");
}
task = it->second;
return absl::OkStatus();
}
Status DispatcherState::TasksForIteration(
int64_t iteration_id,
std::vector<std::shared_ptr<const Task>>& tasks) const {
auto it = tasks_by_iteration_.find(iteration_id);
if (it == tasks_by_iteration_.end()) {
return errors::NotFound("Iteration ", iteration_id, " not found");
}
tasks.clear();
tasks.reserve(it->second.size());
for (const auto& task : it->second) {
tasks.push_back(task);
}
return absl::OkStatus();
}
Status DispatcherState::TasksForWorker(
absl::string_view worker_address,
std::vector<std::shared_ptr<const Task>>& tasks) const {
tasks.clear();
auto it = tasks_by_worker_.find(worker_address);
if (it == tasks_by_worker_.end()) {
return errors::NotFound("Worker ", worker_address, " not found");
}
const absl::flat_hash_map<int64_t, std::shared_ptr<Task>>& worker_tasks =
it->second;
tasks.reserve(worker_tasks.size());
for (const auto& task : worker_tasks) {
tasks.push_back(task.second);
}
return absl::OkStatus();
}
int64_t DispatcherState::NextAvailableTaskId() const {
return next_available_task_id_;
}
Status DispatcherState::ValidateWorker(absl::string_view worker_address) const {
return worker_index_resolver_.ValidateWorker(worker_address);
}
absl::StatusOr<int64_t> DispatcherState::GetWorkerIndex(
absl::string_view worker_address) const {
return worker_index_resolver_.GetWorkerIndex(worker_address);
}
void DispatcherState::Snapshot(const SnapshotUpdate& snapshot) {
snapshot_paths_.insert(snapshot.path());
}
void DispatcherState::CompressionDisabledAtRuntime(
const CompressionDisabledAtRuntimeUpdate& compression_disabled_at_runtime) {
compression_disabled_at_runtime_.insert({
compression_disabled_at_runtime.dataset_id(),
compression_disabled_at_runtime.compression_disabled(),
});
}
std::optional<bool> DispatcherState::CompressionDisabledAtRuntime(
const std::string& dataset_id) const {
if (auto it = compression_disabled_at_runtime_.find(dataset_id);
it != compression_disabled_at_runtime_.end()) {
return it->second;
}
return std::nullopt;
}
}
} | #include "tensorflow/core/data/service/dispatcher_state.h"
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/journal.pb.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace data {
namespace {
using Dataset = DispatcherState::Dataset;
using Worker = DispatcherState::Worker;
using IterationKey = DispatcherState::IterationKey;
using Job = DispatcherState::Job;
using Iteration = DispatcherState::Iteration;
using Task = DispatcherState::Task;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
using ::tsl::testing::StatusIs;
Status RegisterDataset(const std::string& dataset_id, DispatcherState& state) {
Update update;
RegisterDatasetUpdate* register_dataset = update.mutable_register_dataset();
register_dataset->set_dataset_id(dataset_id);
return state.Apply(update);
}
Status RegisterWorker(std::string worker_address, DispatcherState& state) {
Update update;
update.mutable_register_worker()->set_worker_address(worker_address);
return state.Apply(update);
}
Status CreateJob(int64_t job_id, const std::string& dataset_id,
const std::string& job_name, DispatcherState& state) {
Update update;
CreateJobUpdate* create_job = update.mutable_create_job();
create_job->set_job_id(job_id);
create_job->set_dataset_id(dataset_id);
create_job->set_job_name(job_name);
return state.Apply(update);
}
Status CreateIteration(int64_t iteration_id, const std::string& dataset_id,
const IterationKey& named_iteration_key,
DispatcherState& state) {
int64_t job_id = state.NextAvailableJobId();
TF_RETURN_IF_ERROR(
CreateJob(job_id, dataset_id, named_iteration_key.name, state));
Update update;
CreateIterationUpdate* create_iteration = update.mutable_create_iteration();
create_iteration->set_job_id(job_id);
create_iteration->set_iteration_id(iteration_id);
create_iteration->set_repetition(named_iteration_key.repetition);
return state.Apply(update);
}
Status CreateIteration(int64_t iteration_id, const std::string& dataset_id,
DispatcherState& state) {
IterationKey key(absl::StrCat(random::New64()), 0);
return CreateIteration(iteration_id, dataset_id, key, state);
}
Status AcquireIterationClientId(int64_t iteration_id,
int64_t iteration_client_id,
DispatcherState& state) {
Update update;
AcquireIterationClientUpdate* acquire_iteration_client =
update.mutable_acquire_iteration_client();
acquire_iteration_client->set_iteration_id(iteration_id);
acquire_iteration_client->set_iteration_client_id(iteration_client_id);
return state.Apply(update);
}
Status ReleaseIterationClientId(int64_t iteration_client_id,
int64_t release_time, DispatcherState& state) {
Update update;
ReleaseIterationClientUpdate* release_iteration_client =
update.mutable_release_iteration_client();
release_iteration_client->set_iteration_client_id(iteration_client_id);
release_iteration_client->set_time_micros(release_time);
return state.Apply(update);
}
Status CreateTask(int64_t task_id, int64_t iteration_id,
const std::string& worker_address, DispatcherState& state) {
Update update;
CreateTaskUpdate* create_task = update.mutable_create_task();
create_task->set_task_id(task_id);
create_task->set_iteration_id(iteration_id);
create_task->set_worker_address(worker_address);
return state.Apply(update);
}
Status FinishTask(int64_t task_id, DispatcherState& state) {
Update update;
FinishTaskUpdate* finish_task = update.mutable_finish_task();
finish_task->set_task_id(task_id);
return state.Apply(update);
}
Status Snapshot(const std::string& path, DispatcherState& state) {
Update update;
SnapshotUpdate* snapshot = update.mutable_snapshot();
snapshot->set_path(path);
return state.Apply(update);
}
}
TEST(DispatcherState, RegisterDataset) {
DispatcherState state;
std::string dataset_id = state.NextAvailableDatasetId();
int64_t dataset_id_int;
ASSERT_TRUE(absl::SimpleAtoi(dataset_id, &dataset_id_int));
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
EXPECT_EQ(state.NextAvailableDatasetId(), absl::StrCat(dataset_id_int + 1));
std::shared_ptr<const Dataset> dataset;
TF_EXPECT_OK(state.DatasetFromId(dataset_id, dataset));
EXPECT_TRUE(dataset->metadata.element_spec().empty());
EXPECT_EQ(dataset->metadata.compression(),
DataServiceMetadata::COMPRESSION_UNSPECIFIED);
}
TEST(DispatcherState, RegisterDatasetWithExplicitID) {
DispatcherState state;
TF_EXPECT_OK(RegisterDataset("dataset_id", state));
std::shared_ptr<const Dataset> dataset;
TF_EXPECT_OK(state.DatasetFromId("dataset_id", dataset));
EXPECT_EQ(dataset->dataset_id, "dataset_id");
}
TEST(DispatcherState, RegisterDatasetsWithDifferentIDs) {
DispatcherState state;
TF_EXPECT_OK(RegisterDataset("dataset_id1", state));
TF_EXPECT_OK(RegisterDataset("dataset_id2", state));
std::shared_ptr<const Dataset> dataset;
TF_EXPECT_OK(state.DatasetFromId("dataset_id1", dataset));
EXPECT_EQ(dataset->dataset_id, "dataset_id1");
TF_EXPECT_OK(state.DatasetFromId("dataset_id2", dataset));
EXPECT_EQ(dataset->dataset_id, "dataset_id2");
}
TEST(DispatcherState, RegisterDatasetCompression) {
DispatcherState state;
const std::string dataset_id = state.NextAvailableDatasetId();
Update update;
RegisterDatasetUpdate* register_dataset = update.mutable_register_dataset();
register_dataset->set_dataset_id(dataset_id);
register_dataset->mutable_metadata()->set_compression(
DataServiceMetadata::COMPRESSION_SNAPPY);
TF_ASSERT_OK(state.Apply(update));
{
std::shared_ptr<const Dataset> dataset;
TF_EXPECT_OK(state.DatasetFromId(dataset_id, dataset));
EXPECT_EQ(dataset->metadata.compression(),
DataServiceMetadata::COMPRESSION_SNAPPY);
}
}
TEST(DispatcherState, RegisterDatasetElementSpec) {
DispatcherState state;
const std::string dataset_id = state.NextAvailableDatasetId();
Update update;
RegisterDatasetUpdate* register_dataset = update.mutable_register_dataset();
register_dataset->set_dataset_id(dataset_id);
register_dataset->mutable_metadata()->set_element_spec(
"encoded_element_spec");
TF_ASSERT_OK(state.Apply(update));
{
std::shared_ptr<const Dataset> dataset;
TF_EXPECT_OK(state.DatasetFromId(dataset_id, dataset));
EXPECT_EQ(dataset->metadata.element_spec(), "encoded_element_spec");
}
}
TEST(DispatcherState, MissingDatasetId) {
DispatcherState state;
std::shared_ptr<const Dataset> dataset;
Status s = state.DatasetFromId("missing_dataset_id", dataset);
EXPECT_EQ(s.code(), error::NOT_FOUND);
}
TEST(DispatcherState, NextAvailableDatasetId) {
DispatcherState state;
std::string dataset_id = state.NextAvailableDatasetId();
int64_t dataset_id_int;
ASSERT_TRUE(absl::SimpleAtoi(dataset_id, &dataset_id_int));
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
EXPECT_NE(state.NextAvailableDatasetId(), dataset_id);
EXPECT_EQ(state.NextAvailableDatasetId(), absl::StrCat(dataset_id_int + 1));
EXPECT_EQ(state.NextAvailableDatasetId(), state.NextAvailableDatasetId());
}
TEST(DispatcherState, RegisterWorker) {
DispatcherState state;
std::string address = "test_worker_address";
TF_EXPECT_OK(RegisterWorker(address, state));
std::shared_ptr<const Worker> worker;
TF_EXPECT_OK(state.WorkerFromAddress(address, worker));
EXPECT_EQ(worker->address, address);
}
TEST(DispatcherState, RegisterWorkerInFixedWorkerSet) {
experimental::DispatcherConfig config;
config.add_worker_addresses("/worker/task/0");
config.add_worker_addresses("/worker/task/1");
config.add_worker_addresses("/worker/task/2");
DispatcherState state(config);
TF_EXPECT_OK(state.ValidateWorker("/worker/task/0:20000"));
TF_EXPECT_OK(state.ValidateWorker("/worker/task/1:20000"));
TF_EXPECT_OK(state.ValidateWorker("/worker/task/2:20000"));
TF_EXPECT_OK(RegisterWorker("/worker/task/0:20000", state));
TF_EXPECT_OK(RegisterWorker("/worker/task/1:20000", state));
TF_EXPECT_OK(RegisterWorker("/worker/task/2:20000", state));
std::shared_ptr<const Worker> worker;
TF_EXPECT_OK(state.WorkerFromAddress("/worker/task/0:20000", worker));
EXPECT_EQ(worker->address, "/worker/task/0:20000");
}
TEST(DispatcherState, RegisterInvalidWorkerInFixedWorkerSet) {
experimental::DispatcherConfig config;
config.add_worker_addresses("/worker/task/0");
config.add_worker_addresses("/worker/task/1");
config.add_worker_addresses("/worker/task/2");
DispatcherState state(config);
EXPECT_THAT(state.ValidateWorker("localhost:20000"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("The worker's address is not configured")));
TF_EXPECT_OK(RegisterWorker("localhost:20000", state));
std::shared_ptr<const Worker> worker;
EXPECT_THAT(state.WorkerFromAddress("/worker/task/0:20000", worker),
StatusIs(error::NOT_FOUND,
"Worker with address /worker/task/0:20000 not found."));
}
TEST(DispatcherState, ListWorkers) {
DispatcherState state;
std::string address_1 = "address_1";
std::string address_2 = "address_2";
{
std::vector<std::shared_ptr<const Worker>> workers = state.ListWorkers();
EXPECT_THAT(workers, IsEmpty());
}
TF_EXPECT_OK(RegisterWorker(address_1, state));
{
std::vector<std::shared_ptr<const Worker>> workers = state.ListWorkers();
EXPECT_THAT(workers, SizeIs(1));
}
TF_EXPECT_OK(RegisterWorker(address_2, state));
{
std::vector<std::shared_ptr<const Worker>> workers = state.ListWorkers();
EXPECT_THAT(workers, SizeIs(2));
}
}
TEST(DispatcherState, MissingWorker) {
DispatcherState state;
std::shared_ptr<const Worker> worker;
Status s = state.WorkerFromAddress("test_worker_address", worker);
EXPECT_EQ(s.code(), error::NOT_FOUND);
}
TEST(DispatcherState, UnknownUpdate) {
DispatcherState state;
Update update;
Status s = state.Apply(update);
EXPECT_EQ(s.code(), error::INTERNAL);
}
TEST(DispatcherState, JobName) {
DispatcherState state;
std::string dataset_id = state.NextAvailableDatasetId();
int64_t job_id = state.NextAvailableJobId();
std::string job_name = "test_name";
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateJob(job_id, dataset_id, job_name, state));
std::shared_ptr<const Job> job;
TF_EXPECT_OK(state.JobByName(job_name, job));
EXPECT_EQ(state.NextAvailableJobId(), job_id + 1);
EXPECT_EQ(job->dataset_id, dataset_id);
EXPECT_FALSE(job->use_cross_trainer_cache);
}
TEST(DispatcherState, JobData) {
DispatcherState state;
std::string dataset_id = state.NextAvailableDatasetId();
int64_t job_id = state.NextAvailableJobId();
int64_t num_consumers = 8;
bool use_cross_trainer_cache = true;
TF_ASSERT_OK(RegisterDataset(dataset_id, state));
Update update;
CreateJobUpdate* create_job = update.mutable_create_job();
create_job->set_job_id(job_id);
create_job->set_dataset_id(dataset_id);
create_job->set_num_consumers(num_consumers);
create_job->set_use_cross_trainer_cache(use_cross_trainer_cache);
TF_ASSERT_OK(state.Apply(update));
std::shared_ptr<const Job> job;
TF_ASSERT_OK(state.JobFromId(job_id, job));
EXPECT_EQ(job->num_consumers, num_consumers);
EXPECT_EQ(job->use_cross_trainer_cache, use_cross_trainer_cache);
}
TEST(DispatcherState, CrossTrainerCacheTask) {
DispatcherState state;
std::string dataset_id = state.NextAvailableDatasetId();
std::string worker_address = "test_worker_address";
TF_ASSERT_OK(RegisterDataset(dataset_id, state));
int64_t job_id = state.NextAvailableJobId();
Update job_update;
CreateJobUpdate* create_job = job_update.mutable_create_job();
create_job->set_job_id(job_id);
create_job->set_dataset_id(dataset_id);
create_job->set_use_cross_trainer_cache(true);
TF_ASSERT_OK(state.Apply(job_update));
int64_t iteration_id = state.NextAvailableIterationId();
Update iteration_update;
CreateIterationUpdate* create_iteration =
iteration_update.mutable_create_iteration();
create_iteration->set_job_id(job_id);
create_iteration->set_iteration_id(iteration_id);
TF_ASSERT_OK(state.Apply(iteration_update));
int64_t task_id = state.NextAvailableTaskId();
TF_EXPECT_OK(CreateTask(task_id, iteration_id, worker_address, state));
std::shared_ptr<const Task> task;
TF_EXPECT_OK(state.TaskFromId(task_id, task));
EXPECT_EQ(task->iteration->iteration_id, iteration_id);
EXPECT_EQ(task->task_id, task_id);
EXPECT_EQ(task->worker_address, worker_address);
EXPECT_TRUE(task->iteration->job->use_cross_trainer_cache);
}
TEST(DispatcherState, CreateTask) {
std::string dataset_id = "dataset_id";
int64_t iteration_id = 3;
std::string worker_address = "test_worker_address";
DispatcherState state;
int64_t task_id = state.NextAvailableTaskId();
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
TF_EXPECT_OK(CreateTask(task_id, iteration_id, worker_address, state));
EXPECT_EQ(state.NextAvailableTaskId(), task_id + 1);
{
std::shared_ptr<const Task> task;
TF_EXPECT_OK(state.TaskFromId(task_id, task));
EXPECT_EQ(task->iteration->iteration_id, iteration_id);
EXPECT_EQ(task->task_id, task_id);
EXPECT_EQ(task->worker_address, worker_address);
EXPECT_FALSE(task->iteration->job->use_cross_trainer_cache);
}
{
std::vector<std::shared_ptr<const Task>> tasks;
TF_EXPECT_OK(state.TasksForIteration(iteration_id, tasks));
EXPECT_THAT(tasks, SizeIs(1));
}
{
std::vector<std::shared_ptr<const Task>> tasks;
TF_EXPECT_OK(state.TasksForWorker(worker_address, tasks));
EXPECT_EQ(1, tasks.size());
}
}
TEST(DispatcherState, CreateTasksForSameIteration) {
std::string dataset_id = "dataset_id";
int64_t iteration_id = 3;
int64_t task_id_1 = 8;
int64_t task_id_2 = 9;
std::string worker_address = "test_worker_address";
DispatcherState state;
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
TF_EXPECT_OK(CreateTask(task_id_1, iteration_id, worker_address, state));
TF_EXPECT_OK(CreateTask(task_id_2, iteration_id, worker_address, state));
{
std::vector<std::shared_ptr<const Task>> tasks;
TF_EXPECT_OK(state.TasksForIteration(iteration_id, tasks));
EXPECT_THAT(tasks, SizeIs(2));
}
}
TEST(DispatcherState, CreateTasksForDifferentIterations) {
std::string dataset_id = "dataset_id";
int64_t iteration_id_1 = 3;
int64_t iteration_id_2 = 4;
int64_t task_id_1 = 8;
int64_t task_id_2 = 9;
std::string worker_address = "test_worker_address";
DispatcherState state;
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id_1, dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id_2, dataset_id, state));
TF_EXPECT_OK(CreateTask(task_id_1, iteration_id_1, worker_address, state));
TF_EXPECT_OK(CreateTask(task_id_2, iteration_id_2, worker_address, state));
{
std::vector<std::shared_ptr<const Task>> tasks;
TF_EXPECT_OK(state.TasksForIteration(iteration_id_1, tasks));
EXPECT_THAT(tasks, SizeIs(1));
}
{
std::vector<std::shared_ptr<const Task>> tasks;
TF_EXPECT_OK(state.TasksForIteration(iteration_id_2, tasks));
EXPECT_THAT(tasks, SizeIs(1));
}
}
TEST(DispatcherState, CreateTasksForSameWorker) {
std::string dataset_id = "dataset_id";
int64_t iteration_id = 3;
int64_t task_id_1 = 8;
int64_t task_id_2 = 9;
std::string worker_address = "test_worker_address";
DispatcherState state;
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
TF_EXPECT_OK(CreateTask(task_id_1, iteration_id, worker_address, state));
TF_EXPECT_OK(CreateTask(task_id_2, iteration_id, worker_address, state));
{
std::vector<std::shared_ptr<const Task>> tasks;
TF_EXPECT_OK(state.TasksForWorker(worker_address, tasks));
EXPECT_EQ(2, tasks.size());
}
}
TEST(DispatcherState, CreateTasksForDifferentWorkers) {
std::string dataset_id = "dataset_id";
int64_t iteration_id = 3;
int64_t task_id_1 = 8;
int64_t task_id_2 = 9;
std::string worker_address_1 = "test_worker_address_1";
std::string worker_address_2 = "test_worker_address_2";
DispatcherState state;
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
TF_EXPECT_OK(CreateTask(task_id_1, iteration_id, worker_address_1, state));
TF_EXPECT_OK(CreateTask(task_id_2, iteration_id, worker_address_2, state));
{
std::vector<std::shared_ptr<const Task>> tasks;
TF_EXPECT_OK(state.TasksForWorker(worker_address_1, tasks));
EXPECT_EQ(1, tasks.size());
}
{
std::vector<std::shared_ptr<const Task>> tasks;
TF_EXPECT_OK(state.TasksForWorker(worker_address_2, tasks));
EXPECT_EQ(1, tasks.size());
}
}
TEST(DispatcherState, GetTasksForWorkerEmpty) {
std::string worker_address = "test_worker_address";
DispatcherState state;
TF_EXPECT_OK(RegisterWorker(worker_address, state));
{
std::vector<std::shared_ptr<const Task>> tasks;
TF_EXPECT_OK(state.TasksForWorker(worker_address, tasks));
EXPECT_EQ(0, tasks.size());
}
}
TEST(DispatcherState, FinishTask) {
std::string dataset_id = "dataset_id";
int64_t iteration_id = 3;
int64_t task_id = 4;
std::string worker_address = "test_worker_address";
DispatcherState state;
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
TF_EXPECT_OK(CreateTask(task_id, iteration_id, worker_address, state));
TF_EXPECT_OK(FinishTask(task_id, state));
std::shared_ptr<const Task> task;
TF_EXPECT_OK(state.TaskFromId(task_id, task));
EXPECT_TRUE(task->finished);
std::shared_ptr<const Iteration> iteration;
TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration));
EXPECT_TRUE(iteration->finished);
}
TEST(DispatcherState, FinishMultiTaskIteration) {
std::string dataset_id = "dataset_id";
int64_t iteration_id = 3;
int64_t task_id_1 = 4;
int64_t task_id_2 = 5;
std::string worker_address = "test_worker_address";
DispatcherState state;
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
TF_EXPECT_OK(CreateTask(task_id_1, iteration_id, worker_address, state));
TF_EXPECT_OK(CreateTask(task_id_2, iteration_id, worker_address, state));
TF_EXPECT_OK(FinishTask(task_id_1, state));
{
std::shared_ptr<const Iteration> iteration;
TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration));
EXPECT_FALSE(iteration->finished);
}
TF_EXPECT_OK(FinishTask(task_id_2, state));
{
std::shared_ptr<const Iteration> iteration;
TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration));
EXPECT_TRUE(iteration->finished);
}
}
TEST(DispatcherState, AcquireIterationClientId) {
std::string dataset_id = "dataset_id";
int64_t iteration_id = 3;
int64_t iteration_client_id_1 = 1;
int64_t iteration_client_id_2 = 2;
DispatcherState state;
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
TF_EXPECT_OK(
AcquireIterationClientId(iteration_id, iteration_client_id_1, state));
{
std::shared_ptr<const Iteration> iteration;
TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration));
EXPECT_EQ(iteration->num_clients, 1);
TF_EXPECT_OK(
AcquireIterationClientId(iteration_id, iteration_client_id_2, state));
EXPECT_EQ(iteration->num_clients, 2);
}
{
std::shared_ptr<const Iteration> iteration;
TF_EXPECT_OK(
state.IterationForIterationClientId(iteration_client_id_1, iteration));
EXPECT_EQ(iteration->iteration_id, iteration_id);
}
{
std::shared_ptr<const Iteration> iteration;
TF_EXPECT_OK(
state.IterationForIterationClientId(iteration_client_id_2, iteration));
EXPECT_EQ(iteration->iteration_id, iteration_id);
}
}
TEST(DispatcherState, ReleaseIterationClientId) {
std::string dataset_id = "dataset_id";
int64_t iteration_id = 3;
int64_t iteration_client_id = 6;
int64_t release_time = 100;
DispatcherState state;
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
TF_EXPECT_OK(
AcquireIterationClientId(iteration_id, iteration_client_id, state));
TF_EXPECT_OK(
ReleaseIterationClientId(iteration_client_id, release_time, state));
std::shared_ptr<const Iteration> iteration;
TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration));
EXPECT_EQ(iteration->num_clients, 0);
Status s =
state.IterationForIterationClientId(iteration_client_id, iteration);
EXPECT_EQ(s.code(), error::NOT_FOUND);
}
TEST(DispatcherState, ListActiveClientsEmpty) {
std::string dataset_id = "dataset_id";
int64_t iteration_id = 3;
int64_t iteration_client_id = 6;
int64_t release_time = 100;
DispatcherState state;
EXPECT_THAT(state.ListActiveClientIds(), IsEmpty());
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
TF_EXPECT_OK(
AcquireIterationClientId(iteration_id, iteration_client_id, state));
TF_EXPECT_OK(
ReleaseIterationClientId(iteration_client_id, release_time, state));
EXPECT_THAT(state.ListActiveClientIds(), IsEmpty());
}
TEST(DispatcherState, ListActiveClients) {
std::string dataset_id = "dataset_id";
int64_t iteration_id = 3;
int64_t iteration_client_id_1 = 6;
int64_t iteration_client_id_2 = 7;
int64_t iteration_client_id_3 = 8;
int64_t release_time = 100;
DispatcherState state;
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
TF_EXPECT_OK(
AcquireIterationClientId(iteration_id, iteration_client_id_1, state));
TF_EXPECT_OK(
AcquireIterationClientId(iteration_id, iteration_client_id_2, state));
TF_EXPECT_OK(
ReleaseIterationClientId(iteration_client_id_2, release_time, state));
TF_EXPECT_OK(
AcquireIterationClientId(iteration_id, iteration_client_id_3, state));
EXPECT_THAT(state.ListActiveClientIds(), UnorderedElementsAre(6, 8));
}
TEST(DispatcherState, ListSnapshotPaths) {
DispatcherState state;
absl::flat_hash_set<std::string> snapshot_paths = {"p1", "p2"};
for (const auto& snapshot_path : snapshot_paths) {
TF_EXPECT_OK(Snapshot(snapshot_path, state));
}
EXPECT_EQ(state.ListSnapshotPaths(), snapshot_paths);
}
TEST(DispatcherState, GetNumberOfRegisteredWorkers) {
DispatcherState state;
std::string address_1 = "address_1";
std::string address_2 = "address_2";
EXPECT_EQ(state.GetNumberOfRegisteredWorkers(), 0);
TF_EXPECT_OK(RegisterWorker(address_1, state));
EXPECT_EQ(state.GetNumberOfRegisteredWorkers(), 1);
TF_EXPECT_OK(RegisterWorker(address_2, state));
EXPECT_EQ(state.GetNumberOfRegisteredWorkers(), 2);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/dispatcher_state.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/dispatcher_state_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
476923f3-b0e5-4116-ab25-59dc056d648d | cpp | tensorflow/tensorflow | grpc_dispatcher_impl | tensorflow/core/data/service/grpc_dispatcher_impl.cc | tensorflow/core/data/service/grpc_dispatcher_impl_test.cc | #include "tensorflow/core/data/service/grpc_dispatcher_impl.h"
#include "grpcpp/server_context.h"
#include "tensorflow/core/data/service/export.pb.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
using ::grpc::ServerBuilder;
using ::grpc::ServerContext;
GrpcDispatcherImpl::GrpcDispatcherImpl(
const experimental::DispatcherConfig& config, ServerBuilder& server_builder)
: impl_(config) {
server_builder.RegisterService(this);
VLOG(1) << "Registered data service dispatcher";
}
Status GrpcDispatcherImpl::Start() { return impl_.Start(); }
void GrpcDispatcherImpl::Stop() { impl_.Stop(); }
size_t GrpcDispatcherImpl::NumActiveIterations() {
return impl_.NumActiveIterations();
}
DispatcherStateExport GrpcDispatcherImpl::ExportState() const {
return impl_.ExportState();
}
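// Generates one gRPC handler per RPC, forwarding the request to the
// in-process implementation and converting its Status to grpc::Status.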
#define HANDLER(method) \
grpc::Status GrpcDispatcherImpl::method(ServerContext* context, \
const method##Request* request, \
method##Response* response) { \
return ToGrpcStatus(impl_.method(request, response)); \
}
HANDLER(WorkerHeartbeat);
HANDLER(WorkerUpdate);
HANDLER(GetDatasetDef);
HANDLER(GetSplit);
HANDLER(GetVersion);
HANDLER(GetOrRegisterDataset);
HANDLER(ReleaseIterationClient);
HANDLER(MaybeRemoveTask);
HANDLER(GetOrCreateJob);
HANDLER(GetOrCreateIteration);
HANDLER(ClientHeartbeat);
HANDLER(GetWorkers);
HANDLER(GetDataServiceMetadata);
HANDLER(GetDataServiceConfig);
HANDLER(Snapshot);
HANDLER(GetSnapshotSplit);
HANDLER(GetSnapshotStreams);
HANDLER(DisableCompressionAtRuntime);
#undef HANDLER
}
} | #include "tensorflow/core/data/service/grpc_dispatcher_impl.h"
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include "grpcpp/channel.h"
#include "grpcpp/client_context.h"
#include "grpcpp/create_channel.h"
#include "grpcpp/security/credentials.h"
#include "grpcpp/support/channel_arguments.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/credentials_factory.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/server_lib.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::grpc::Channel;
using ::grpc::ChannelArguments;
using ::grpc::ChannelCredentials;
using ::grpc::ClientContext;
constexpr const char kHostAddress[] = "localhost";
constexpr const char kProtocol[] = "grpc";
class GrpcDispatcherImplTest : public ::testing::Test {
protected:
void SetUp() override {
TF_ASSERT_OK(SetUpDispatcherServer());
TF_ASSERT_OK(SetUpDispatcherClientStub());
}
Status SetUpDispatcherServer() {
experimental::DispatcherConfig config;
config.set_protocol(kProtocol);
TF_RETURN_IF_ERROR(NewDispatchServer(config, dispatcher_server_));
return dispatcher_server_->Start();
}
Status SetUpDispatcherClientStub() {
std::shared_ptr<ChannelCredentials> credentials;
TF_RETURN_IF_ERROR(
CredentialsFactory::CreateClientCredentials(kProtocol, &credentials));
ChannelArguments args;
args.SetMaxReceiveMessageSize(std::numeric_limits<int32>::max());
args.SetInt(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, true);
std::shared_ptr<Channel> channel =
::grpc::CreateCustomChannel(GetDispatcherAddress(), credentials, args);
dispatcher_client_stub_ = DispatcherService::NewStub(channel);
return absl::OkStatus();
}
std::string GetDispatcherAddress() const {
return absl::StrCat(kHostAddress, ":", dispatcher_server_->BoundPort());
}
std::unique_ptr<DispatchGrpcDataServer> dispatcher_server_;
std::unique_ptr<DispatcherService::Stub> dispatcher_client_stub_;
};
TEST_F(GrpcDispatcherImplTest, GrpcTest) {
ClientContext ctx;
GetVersionRequest req;
GetVersionResponse resp;
TF_ASSERT_OK(
FromGrpcStatus(dispatcher_client_stub_->GetVersion(&ctx, req, &resp)));
EXPECT_EQ(resp.version(), kDataServiceVersion);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/grpc_dispatcher_impl.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/grpc_dispatcher_impl_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
785e640c-83bd-4859-b375-a19a3da0f900 | cpp | tensorflow/tensorflow | dataset_store | tensorflow/core/data/service/dataset_store.cc | tensorflow/core/data/service/dataset_store_test.cc | #include "tensorflow/core/data/service/dataset_store.h"
#include <memory>
#include <string>
#include "absl/memory/memory.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/utils.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/lib/io/record_writer.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/file_system.h"
#include "tensorflow/core/platform/path.h"
namespace tensorflow {
namespace data {
FileSystemDatasetStore::FileSystemDatasetStore(const std::string& datasets_dir)
: datasets_dir_(datasets_dir) {}
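// Stores each dataset definition as a file named by `key` under
// datasets_dir_.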
Status FileSystemDatasetStore::Put(const std::string& key,
const DatasetDef& dataset) {
std::string path_to_write = io::JoinPath(datasets_dir_, key);
TF_RETURN_IF_ERROR(WriteDatasetDef(path_to_write, dataset));
return absl::OkStatus();
}
Status FileSystemDatasetStore::Get(
const std::string& key, std::shared_ptr<const DatasetDef>& dataset_def) {
std::string path = io::JoinPath(datasets_dir_, key);
TF_RETURN_IF_ERROR(Env::Default()->FileExists(path));
DatasetDef def;
TF_RETURN_IF_ERROR(ReadDatasetDef(path, def));
dataset_def = std::make_shared<const DatasetDef>(def);
return absl::OkStatus();
}
Status MemoryDatasetStore::Put(const std::string& key,
const DatasetDef& dataset) {
auto& stored_dataset = datasets_[key];
stored_dataset = std::make_shared<const DatasetDef>(dataset);
return absl::OkStatus();
}
Status MemoryDatasetStore::Get(const std::string& key,
std::shared_ptr<const DatasetDef>& dataset_def) {
auto& stored_dataset = datasets_[key];
if (!stored_dataset) {
return errors::NotFound("Dataset with key ", key, " not found");
}
dataset_def = stored_dataset;
return absl::OkStatus();
}
}
} | #include "tensorflow/core/data/service/dataset_store.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
const char kFileSystem[] = "file_system";
const char kMemory[] = "memory";
std::string NewDatasetsDir() {
std::string dir = io::JoinPath(testing::TmpDir(), "datasets");
if (Env::Default()->FileExists(dir).ok()) {
int64_t undeleted_files;
int64_t undeleted_dirs;
CHECK(Env::Default()
->DeleteRecursively(dir, &undeleted_files, &undeleted_dirs)
.ok());
}
CHECK(Env::Default()->RecursivelyCreateDir(dir).ok());
return dir;
}
std::unique_ptr<DatasetStore> MakeStore(const std::string& type) {
if (type == kFileSystem) {
return std::make_unique<FileSystemDatasetStore>(NewDatasetsDir());
} else if (type == kMemory) {
return std::make_unique<MemoryDatasetStore>();
} else {
CHECK(false) << "unexpected type: " << type;
}
}
DatasetDef DatasetDefWithVersion(int32_t version) {
DatasetDef def;
def.mutable_graph()->set_version(version);
return def;
}
}
class DatasetStoreTest : public ::testing::Test,
public ::testing::WithParamInterface<std::string> {};
TEST_P(DatasetStoreTest, StoreAndGet) {
std::unique_ptr<DatasetStore> store = MakeStore(GetParam());
std::string key = "key";
DatasetDef dataset_def = DatasetDefWithVersion(1);
TF_ASSERT_OK(store->Put(key, dataset_def));
std::shared_ptr<const DatasetDef> result;
TF_ASSERT_OK(store->Get(key, result));
EXPECT_EQ(result->graph().version(), dataset_def.graph().version());
}
TEST_P(DatasetStoreTest, StoreAndGetMultiple) {
std::unique_ptr<DatasetStore> store = MakeStore(GetParam());
int64_t num_datasets = 10;
std::vector<std::string> keys;
for (int i = 0; i < num_datasets; ++i) {
std::string key = absl::StrCat("key", i);
DatasetDef dataset_def = DatasetDefWithVersion(i);
TF_ASSERT_OK(store->Put(key, dataset_def));
keys.push_back(key);
}
for (int i = 0; i < num_datasets; ++i) {
std::shared_ptr<const DatasetDef> result;
TF_ASSERT_OK(store->Get(keys[i], result));
EXPECT_EQ(result->graph().version(), i);
}
}
TEST_P(DatasetStoreTest, StoreAlreadyExists) {
std::unique_ptr<DatasetStore> store = MakeStore(GetParam());
int32_t version = 1;
DatasetDef dataset_def = DatasetDefWithVersion(version);
std::string key = "key";
TF_ASSERT_OK(store->Put(key, dataset_def));
TF_EXPECT_OK(store->Put(key, dataset_def));
std::shared_ptr<const DatasetDef> result;
TF_ASSERT_OK(store->Get(key, result));
EXPECT_EQ(result->graph().version(), version);
}
TEST_P(DatasetStoreTest, GetMissing) {
std::unique_ptr<DatasetStore> store = MakeStore(GetParam());
std::shared_ptr<const DatasetDef> result;
Status s = store->Get("missing", result);
EXPECT_EQ(s.code(), error::NOT_FOUND);
}
INSTANTIATE_TEST_SUITE_P(DatasetStoreTests, DatasetStoreTest,
::testing::Values(kFileSystem, kMemory));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/dataset_store.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/dataset_store_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1ce618e4-f93e-4209-bb33-5fcea4ce081e | cpp | tensorflow/tensorflow | worker_client | tensorflow/core/data/service/worker_client.cc | tensorflow/core/data/service/worker_client_test.cc | #include "tensorflow/core/data/service/worker_client.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "grpcpp/client_context.h"
#include "grpcpp/create_channel.h"
#include "grpcpp/security/credentials.h"
#include "grpcpp/support/channel_arguments.h"
#include "grpcpp/support/status.h"
#include "absl/container/flat_hash_set.h"
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/data/service/credentials_factory.h"
#include "tensorflow/core/data/service/data_transfer.h"
#include "tensorflow/core/data/service/grpc_util.h"
#include "tensorflow/core/data/service/worker.grpc.pb.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/data/service/worker_impl.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/dataset.pb.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/host_info.h"
namespace tensorflow {
namespace data {
absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>>
CreateDataServiceWorkerClient(
const std::string& dispatcher_protocol, const DataTransferServerInfo& info,
const DeviceBase::AcceleratorDeviceInfo* accelerator_device_info,
Allocator* allocator) {
auto client = std::make_unique<DataServiceWorkerClient>(
info.address(), dispatcher_protocol, info.protocol(),
accelerator_device_info, allocator);
TF_RETURN_IF_ERROR(client->Initialize());
TF_RETURN_WITH_CONTEXT_IF_ERROR(
client->CheckCompatibility(info.compatibility_info()),
"for data transfer protocol '", client->GetDataTransferProtocol(),
"', the compatibility check between the trainer worker and the ",
"tf.data service worker at ", info.address(), "failed");
return client;
}
Status DataServiceWorkerClient::GetElement(const GetElementRequest& req,
GetElementResult& result) {
TF_RETURN_IF_ERROR(EnsureInitialized());
return client_->GetElement(req, result);
}
Status DataServiceWorkerClient::EnsureInitialized() {
mutex_lock l(mu_);
if (client_) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(DataTransferClient::Build(
GetDataTransferProtocol(),
{protocol_, address_, accelerator_device_info_, allocator_}, &client_));
return absl::OkStatus();
}
std::string DataServiceWorkerClient::GetDataTransferProtocol() const {
if (ForceLocalProtocol(address_)) {
return kLocalTransferProtocol;
}
return transfer_protocol_;
}
void DataServiceWorkerClient::TryCancel() { client_->TryCancel(); }
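// Data transfer client that fetches elements over the WorkerService gRPC
// stub. Active client contexts are tracked so TryCancel can abort in-flight
// calls.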
class GrpcDataTransferClient : public DataTransferClient {
public:
GrpcDataTransferClient(std::shared_ptr<grpc::ChannelCredentials> credentials,
std::string address, Allocator* allocator)
: allocator_(allocator) {
VLOG(2) << "Create GrpcDataTransferClient for worker " << address << ".";
grpc::ChannelArguments args;
args.SetMaxReceiveMessageSize(-1);
auto channel = grpc::CreateCustomChannel(address, credentials, args);
stub_ = WorkerService::NewStub(channel);
}
Status GetElement(const GetElementRequest& req,
GetElementResult& result) override {
VLOG(3) << "GetElement for task " << req.task_id() << " from gRPC worker "
<< "server.";
{
mutex_lock l(mu_);
if (cancelled_) {
return errors::Cancelled("Client was cancelled.");
}
}
grpc::ClientContext ctx;
gtl::Cleanup<std::function<void()>> cleanup;
{
mutex_lock l(mu_);
active_contexts_.insert(&ctx);
cleanup = gtl::MakeCleanup([this, &ctx] {
mutex_lock l(mu_);
active_contexts_.erase(&ctx);
});
}
GetElementResponse resp;
int64_t start_time_us = env_->NowMicros();
grpc::Status s = stub_->GetElement(&ctx, req, &resp);
int64_t end_time_us = env_->NowMicros();
if (!s.ok()) {
return grpc_util::WrapError("Failed to get element", s);
}
metrics::RecordTFDataServiceGetElementDuration(kGrpcTransferProtocol,
end_time_us - start_time_us);
result.end_of_sequence = resp.end_of_sequence();
result.skip = resp.skip_task();
switch (resp.element_case()) {
case GetElementResponse::kCompressed: {
Tensor tensor(DT_VARIANT, TensorShape{});
tensor.scalar<Variant>()() = std::move(resp.compressed());
result.components.push_back(tensor);
break;
}
case GetElementResponse::kUncompressed:
for (const auto& component : resp.uncompressed().components()) {
result.components.emplace_back();
bool success =
allocator_ != nullptr
? result.components.back().FromProto(allocator_, component)
: result.components.back().FromProto(component);
if (!success) {
return errors::Internal("Failed to parse tensor.");
}
}
break;
case GetElementResponse::ELEMENT_NOT_SET:
break;
}
return absl::OkStatus();
}
void TryCancel() override {
VLOG(2) << "Cancel GrpcDataTransferClient.";
mutex_lock l(mu_);
cancelled_ = true;
for (const auto& ctx : active_contexts_) {
ctx->TryCancel();
}
}
private:
Allocator* const allocator_;
mutex mu_;
std::unique_ptr<WorkerService::Stub> stub_;
absl::flat_hash_set<::grpc::ClientContext*> active_contexts_
TF_GUARDED_BY(mu_);
bool cancelled_ TF_GUARDED_BY(mu_) = false;
};
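// Registers the gRPC transfer client under kGrpcTransferProtocol at static
// initialization time.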
class GrpcTransferClientRegistrar {
public:
GrpcTransferClientRegistrar() {
DataTransferClient::Register(
kGrpcTransferProtocol, [](DataTransferClient::Config config,
std::unique_ptr<DataTransferClient>* out) {
std::shared_ptr<grpc::ChannelCredentials> credentials;
TF_RETURN_IF_ERROR(CredentialsFactory::CreateClientCredentials(
config.protocol, &credentials));
*out = std::make_unique<GrpcDataTransferClient>(
credentials, config.address, config.allocator);
return absl::OkStatus();
});
}
};
static GrpcTransferClientRegistrar grpc_client_registrar;
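// DataTransferClient implementation that reads elements directly from a
// worker running in the same process, bypassing gRPC serialization.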
class LocalDataTransferClient : public DataTransferClient {
public:
explicit LocalDataTransferClient(absl::string_view worker_address)
: worker_address_(worker_address) {
VLOG(2) << "Create LocalDataTransferClient for worker " << worker_address_
<< ".";
}
Status GetElement(const GetElementRequest& req,
GetElementResult& result) override {
VLOG(3) << "GetElement for task " << req.task_id() << " from local worker.";
TF_RETURN_IF_ERROR(VerifyClientIsNotCancelled());
TF_ASSIGN_OR_RETURN(std::shared_ptr<DataServiceWorkerImpl> worker,
GetWorker(req));
int64_t start_time_us = env_->NowMicros();
Status s = worker->GetElementResult(&req, &result);
int64_t end_time_us = env_->NowMicros();
TF_RETURN_IF_ERROR(s);
metrics::RecordTFDataServiceGetElementDuration(kLocalTransferProtocol,
end_time_us - start_time_us);
return s;
}
void TryCancel() override {
VLOG(2) << "Cancel LocalDataTransferClient for worker " << worker_address_
<< ".";
mutex_lock l(mu_);
cancelled_ = true;
}
private:
Status VerifyClientIsNotCancelled() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
if (cancelled_) {
return errors::Cancelled(absl::Substitute(
"Client for worker $0 has been cancelled.", worker_address_));
}
return absl::OkStatus();
}
absl::StatusOr<std::shared_ptr<DataServiceWorkerImpl>> GetWorker(
const GetElementRequest& req) const {
std::shared_ptr<DataServiceWorkerImpl> worker =
LocalWorkers::Get(worker_address_);
if (!worker) {
return errors::Cancelled(absl::Substitute(
"Local worker at address $0 is no longer available; cancel request "
"for task $1.",
worker_address_, req.task_id()));
}
return worker;
}
const std::string worker_address_;
mutex mu_;
bool cancelled_ TF_GUARDED_BY(mu_) = false;
};
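// Registers the local transfer client under kLocalTransferProtocol at static
// initialization time.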
class LocalTransferClientRegistrar {
public:
LocalTransferClientRegistrar() {
DataTransferClient::Register(
kLocalTransferProtocol, [](DataTransferClient::Config config,
std::unique_ptr<DataTransferClient>* out) {
*out = std::make_unique<LocalDataTransferClient>(config.address);
return absl::OkStatus();
});
}
};
static LocalTransferClientRegistrar local_client_registrar;
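// Local reads are forced only when the process has a resolvable job UID and a
// worker is registered in-process at `worker_address`.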
bool ForceLocalProtocol(const std::string& worker_address) {
if (tsl::port::JobUid() == -1) {
return false;
}
return LocalWorkers::Get(worker_address) != nullptr;
}
}
} | #include "tensorflow/core/data/service/worker_client.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/substitute.h"
#include "absl/types/optional.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/data_transfer.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/dispatcher_client.h"
#include "tensorflow/core/data/service/test_cluster.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/data/service/worker_impl.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::data::testing::RangeSquareDataset;
using ::tensorflow::testing::StatusIs;
using ::testing::MatchesRegex;
constexpr const char kProtocol[] = "grpc";
constexpr const char kAltTransferProtocol[] = "alt";
class WorkerClientTest : public ::testing::TestWithParam<std::string> {
protected:
void SetUp() override { InitializeTestCluster(); }
void InitializeTestCluster(
std::optional<std::string> data_transfer_protocol = std::nullopt) {
test_cluster_ = std::make_unique<TestCluster>(1,
data_transfer_protocol);
TF_ASSERT_OK(test_cluster_->Initialize());
dispatcher_client_ = std::make_unique<DataServiceDispatcherClient>(
test_cluster_->DispatcherAddress(), kProtocol);
}
absl::StatusOr<std::string> RegisterDataset(const int64_t range) {
const auto dataset_def = RangeSquareDataset(range);
std::string dataset_id;
TF_RETURN_IF_ERROR(dispatcher_client_->RegisterDataset(
dataset_def, DataServiceMetadata(),
std::nullopt, dataset_id));
return dataset_id;
}
absl::StatusOr<int64_t> CreateIteration(const std::string& dataset_id) {
ProcessingModeDef processing_mode;
processing_mode.set_sharding_policy(ProcessingModeDef::OFF);
int64_t job_id = 0;
TF_RETURN_IF_ERROR(dispatcher_client_->GetOrCreateJob(
dataset_id, processing_mode, std::nullopt,
std::nullopt, false,
TARGET_WORKERS_AUTO, job_id));
int64_t iteration_client_id = 0;
TF_RETURN_IF_ERROR(dispatcher_client_->GetOrCreateIteration(
job_id, 0, iteration_client_id));
return iteration_client_id;
}
absl::StatusOr<int64_t> GetTaskToRead(const int64_t iteration_client_id) {
ClientHeartbeatRequest request;
ClientHeartbeatResponse response;
request.set_iteration_client_id(iteration_client_id);
TF_RETURN_IF_ERROR(dispatcher_client_->ClientHeartbeat(request, response));
if (response.task_info().empty()) {
return errors::NotFound(absl::Substitute(
"No task found for iteration $0.", iteration_client_id));
}
return response.task_info(0).task_id();
}
absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>> GetWorkerClient(
const std::string& data_transfer_protocol) {
DataTransferServerInfo info;
info.set_address(GetWorkerAddress());
info.set_protocol(data_transfer_protocol);
return CreateDataServiceWorkerClient(kProtocol, info,
nullptr,
nullptr);
}
absl::StatusOr<GetElementResult> GetElement(DataServiceWorkerClient& client,
const int64_t task_id) {
GetElementRequest request;
GetElementResult result;
request.set_task_id(task_id);
TF_RETURN_IF_ERROR(client.GetElement(request, result));
return result;
}
std::string GetDispatcherAddress() const {
return test_cluster_->DispatcherAddress();
}
std::string GetWorkerAddress() const {
return test_cluster_->WorkerAddress(0);
}
std::unique_ptr<TestCluster> test_cluster_;
std::unique_ptr<DataServiceDispatcherClient> dispatcher_client_;
};
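// Test-only data transfer server for the "alt" protocol; it serves elements
// through the GetElementT callback captured at registration.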
class AltDataTransferServer : public DataTransferServer {
public:
explicit AltDataTransferServer(DataTransferServer::GetElementT get_element)
: get_element_(get_element) {}
absl::Status GetElement(const GetElementRequest& req,
GetElementResult& result) {
return get_element_(&req, &result);
}
absl::Status Start(const experimental::WorkerConfig& config) override {
return absl::OkStatus();
}
int Port() const override { return -1; }
private:
DataTransferServer::GetElementT get_element_;
};
class AltDataTransferClient : public DataTransferClient {
public:
explicit AltDataTransferClient(std::shared_ptr<AltDataTransferServer> server)
: server_(server) {}
absl::Status GetElement(const GetElementRequest& req,
GetElementResult& result) override {
return server_->GetElement(req, result);
}
void TryCancel() override {}
private:
std::shared_ptr<AltDataTransferServer> server_;
};
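// Registers matching "alt" server and client factories so tests can exercise
// a non-gRPC transfer protocol end to end.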
class AltDataTransferRegistrar {
public:
AltDataTransferRegistrar() {
DataTransferServer::Register(
kAltTransferProtocol,
[this](DataTransferServer::GetElementT get_element,
std::shared_ptr<DataTransferServer>* server) {
server_ = std::make_shared<AltDataTransferServer>(get_element);
*server = server_;
return absl::OkStatus();
});
DataTransferClient::Register(
kAltTransferProtocol,
[this](DataTransferClient::Config config,
std::unique_ptr<DataTransferClient>* client) {
*client = std::make_unique<AltDataTransferClient>(server_);
return absl::OkStatus();
});
}
private:
std::shared_ptr<AltDataTransferServer> server_ = nullptr;
};
static AltDataTransferRegistrar alt_data_transfer_registrar;
class DataTransferProtocolWorkerClientTest : public WorkerClientTest {
protected:
void SetUp() override {
std::string data_transfer_protocol = GetParam();
InitializeTestCluster(data_transfer_protocol);
}
};
TEST_F(WorkerClientTest, LocalRead) {
const int64_t range = 5;
TF_ASSERT_OK_AND_ASSIGN(const std::string dataset_id, RegisterDataset(range));
TF_ASSERT_OK_AND_ASSIGN(const int64_t iteration_client_id,
CreateIteration(dataset_id));
TF_ASSERT_OK_AND_ASSIGN(const int64_t task_id,
GetTaskToRead(iteration_client_id));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<DataServiceWorkerClient> client,
GetWorkerClient(kLocalTransferProtocol));
for (int64_t i = 0; i < range; ++i) {
TF_ASSERT_OK_AND_ASSIGN(GetElementResult result,
GetElement(*client, task_id));
test::ExpectEqual(result.components[0], Tensor(int64_t{i * i}));
EXPECT_FALSE(result.end_of_sequence);
}
LocalWorkers::Remove(GetWorkerAddress());
EXPECT_THAT(GetElement(*client, task_id),
StatusIs(error::CANCELLED,
MatchesRegex("Local worker.*is no longer available.*")));
}
TEST_F(WorkerClientTest, LocalReadEmptyDataset) {
TF_ASSERT_OK_AND_ASSIGN(const std::string dataset_id,
RegisterDataset(0));
TF_ASSERT_OK_AND_ASSIGN(const int64_t iteration_client_id,
CreateIteration(dataset_id));
TF_ASSERT_OK_AND_ASSIGN(const int64_t task_id,
GetTaskToRead(iteration_client_id));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<DataServiceWorkerClient> client,
GetWorkerClient(kLocalTransferProtocol));
TF_ASSERT_OK_AND_ASSIGN(GetElementResult result,
GetElement(*client, task_id));
EXPECT_TRUE(result.end_of_sequence);
LocalWorkers::Remove(GetWorkerAddress());
EXPECT_THAT(GetElement(*client, task_id),
StatusIs(error::CANCELLED,
MatchesRegex("Local worker.*is no longer available.*")));
}
TEST_P(DataTransferProtocolWorkerClientTest, NetworkRead) {
std::string data_transfer_protocol = GetParam();
LocalWorkers::Remove(GetWorkerAddress());
const int64_t range = 5;
TF_ASSERT_OK_AND_ASSIGN(const std::string dataset_id, RegisterDataset(range));
TF_ASSERT_OK_AND_ASSIGN(const int64_t iteration_client_id,
CreateIteration(dataset_id));
TF_ASSERT_OK_AND_ASSIGN(const int64_t task_id,
GetTaskToRead(iteration_client_id));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<DataServiceWorkerClient> client,
GetWorkerClient(data_transfer_protocol));
for (int64_t i = 0; i < range; ++i) {
TF_ASSERT_OK_AND_ASSIGN(GetElementResult result,
GetElement(*client, task_id));
test::ExpectEqual(result.components[0], Tensor(int64_t{i * i}));
EXPECT_FALSE(result.end_of_sequence);
}
}
INSTANTIATE_TEST_SUITE_P(
NetworkProtocols, DataTransferProtocolWorkerClientTest,
::testing::Values(kGrpcTransferProtocol, kAltTransferProtocol),
[](const ::testing::TestParamInfo<
DataTransferProtocolWorkerClientTest::ParamType>& info) {
return info.param;
});
TEST_F(WorkerClientTest, LocalServerShutsDown) {
TF_ASSERT_OK_AND_ASSIGN(const std::string dataset_id,
RegisterDataset(5));
TF_ASSERT_OK_AND_ASSIGN(const int64_t iteration_client_id,
CreateIteration(dataset_id));
TF_ASSERT_OK_AND_ASSIGN(const int64_t task_id,
GetTaskToRead(iteration_client_id));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<DataServiceWorkerClient> client,
GetWorkerClient(kLocalTransferProtocol));
test_cluster_->StopWorkers();
EXPECT_THAT(GetElement(*client, task_id),
StatusIs(error::CANCELLED,
MatchesRegex("Local worker.*is no longer available.*")));
}
TEST_F(WorkerClientTest, CancelClient) {
TF_ASSERT_OK_AND_ASSIGN(const std::string dataset_id,
RegisterDataset(5));
TF_ASSERT_OK_AND_ASSIGN(const int64_t iteration_client_id,
CreateIteration(dataset_id));
TF_ASSERT_OK_AND_ASSIGN(const int64_t task_id,
GetTaskToRead(iteration_client_id));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<DataServiceWorkerClient> client,
GetWorkerClient(kLocalTransferProtocol));
client->TryCancel();
EXPECT_THAT(GetElement(*client, task_id),
StatusIs(error::CANCELLED,
MatchesRegex("Client for worker.*has been cancelled.")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/worker_client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/worker_client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
11c70d79-3753-4076-a1b8-1c481f3ba170 | cpp | tensorflow/tensorflow | common | tensorflow/compiler/mlir/lite/kernels/internal/common.cc | tensorflow/lite/core/c/common_test.cc | #include "tensorflow/compiler/mlir/lite/kernels/internal/common.h"
#include <cassert>
#include <cstdint>
#include <limits>
namespace tflite_migration {
#if TFLITE_SINGLE_ROUNDING
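// Multiplies x by a 31-bit fixed-point multiplier and applies the shift in a
// single round-to-nearest step.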
int32_t MultiplyByQuantizedMultiplier(int32_t x, int32_t quantized_multiplier,
int shift) {
TFLITE_DCHECK(quantized_multiplier >= 0);
TFLITE_DCHECK(shift >= -31 && shift <= 30);
const int64_t total_shift = 31 - shift;
const int64_t round = static_cast<int64_t>(1) << (total_shift - 1);
int64_t result = x * static_cast<int64_t>(quantized_multiplier) + round;
result = result >> total_shift;
TFLITE_DCHECK(result >= std::numeric_limits<int32_t>::min() &&
result <= std::numeric_limits<int32_t>::max());
return static_cast<int32_t>(result);
}
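// 48-bit input variant: the multiplier is first reduced to 16 bits so the
// product fits in 64 bits.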
int32_t MultiplyByQuantizedMultiplier(int64_t x, int32_t quantized_multiplier,
int shift) {
TFLITE_DCHECK(quantized_multiplier >= 0);
TFLITE_DCHECK(shift >= -31 && shift < 8);
TFLITE_DCHECK(x >= -(static_cast<int64_t>(1) << 47) &&
x < (static_cast<int64_t>(1) << 47));
const int32_t reduced_multiplier =
(quantized_multiplier < 0x7FFF0000)
? ((quantized_multiplier + (1 << 15)) >> 16)
: 0x7FFF;
const int64_t total_shift = 15 - shift;
const int64_t round = static_cast<int64_t>(1) << (total_shift - 1);
int64_t result = x * static_cast<int64_t>(reduced_multiplier) + round;
result = result >> total_shift;
TFLITE_DCHECK(result >= std::numeric_limits<int32_t>::min() &&
result <= std::numeric_limits<int32_t>::max());
return static_cast<int32_t>(result);
}
#else
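// Double-rounding fallback built on gemmlowp's saturating rounding doubling
// high multiplication.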
int32_t MultiplyByQuantizedMultiplier(int32_t x, int32_t quantized_multiplier,
int shift) {
using gemmlowp::RoundingDivideByPOT;
using gemmlowp::SaturatingRoundingDoublingHighMul;
int left_shift = shift > 0 ? shift : 0;
int right_shift = shift > 0 ? 0 : -shift;
return RoundingDivideByPOT(SaturatingRoundingDoublingHighMul(
x * (1 << left_shift), quantized_multiplier),
right_shift);
}
int32_t MultiplyByQuantizedMultiplier(int64_t x, int32_t quantized_multiplier,
int shift) {
assert(quantized_multiplier >= 0);
assert(shift >= -31 && shift < 8);
assert(x >= -(static_cast<int64_t>(1) << 47) &&
x < (static_cast<int64_t>(1) << 47));
int32_t reduced_multiplier = (quantized_multiplier < 0x7FFF0000)
? ((quantized_multiplier + (1 << 15)) >> 16)
: 0x7FFF;
int total_shift = 15 - shift;
x = (x * (int64_t)reduced_multiplier) + ((int64_t)1 << (total_shift - 1));
int32_t result = x >> total_shift;
return result;
}
#endif
} | #include "tensorflow/lite/core/c/common.h"
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/util.h"
namespace tflite {
using ::testing::ElementsAreArray;
TEST(IntArray, TestIntArrayCreate) {
TfLiteIntArray* a = TfLiteIntArrayCreate(0);
TfLiteIntArray* b = TfLiteIntArrayCreate(3);
TfLiteIntArrayFree(a);
TfLiteIntArrayFree(b);
}
TEST(IntArray, TestIntArrayCopy) {
TfLiteIntArray* a = TfLiteIntArrayCreate(2);
a->data[0] = 22;
a->data[1] = 24;
TfLiteIntArray* b = TfLiteIntArrayCopy(a);
ASSERT_NE(a, b);
ASSERT_EQ(a->size, b->size);
ASSERT_EQ(a->data[0], b->data[0]);
ASSERT_EQ(a->data[1], b->data[1]);
TfLiteIntArrayFree(a);
TfLiteIntArrayFree(b);
}
TEST(IntArray, TestIntArrayEqual) {
TfLiteIntArray* a = TfLiteIntArrayCreate(1);
a->data[0] = 1;
TfLiteIntArray* b = TfLiteIntArrayCreate(2);
b->data[0] = 5;
b->data[1] = 6;
TfLiteIntArray* c = TfLiteIntArrayCreate(2);
c->data[0] = 5;
c->data[1] = 6;
TfLiteIntArray* d = TfLiteIntArrayCreate(2);
d->data[0] = 6;
d->data[1] = 6;
EXPECT_FALSE(TfLiteIntArrayEqual(a, b));
EXPECT_TRUE(TfLiteIntArrayEqual(b, c));
EXPECT_TRUE(TfLiteIntArrayEqual(b, b));
EXPECT_FALSE(TfLiteIntArrayEqual(c, d));
EXPECT_FALSE(TfLiteIntArrayEqual(nullptr, a));
EXPECT_FALSE(TfLiteIntArrayEqual(a, nullptr));
EXPECT_TRUE(TfLiteIntArrayEqual(nullptr, nullptr));
TfLiteIntArrayFree(a);
TfLiteIntArrayFree(b);
TfLiteIntArrayFree(c);
TfLiteIntArrayFree(d);
}
TEST(FloatArray, TestFloatArrayCreate) {
TfLiteFloatArray* a = TfLiteFloatArrayCreate(0);
TfLiteFloatArray* b = TfLiteFloatArrayCreate(3);
TfLiteFloatArrayFree(a);
TfLiteFloatArrayFree(b);
}
TEST(FloatArray, TestFloatArrayCopy) {
TfLiteFloatArray* a = TfLiteFloatArrayCreate(2);
a->data[0] = 22.0;
a->data[1] = 24.0;
TfLiteFloatArray* b = TfLiteFloatArrayCopy(a);
ASSERT_NE(a, b);
ASSERT_EQ(a->size, b->size);
ASSERT_EQ(a->data[0], b->data[0]);
ASSERT_EQ(a->data[1], b->data[1]);
TfLiteFloatArrayFree(a);
TfLiteFloatArrayFree(b);
}
TEST(Types, TestTypeNames) {
auto type_name = [](TfLiteType t) {
return std::string(TfLiteTypeGetName(t));
};
EXPECT_EQ(type_name(kTfLiteNoType), "NOTYPE");
EXPECT_EQ(type_name(kTfLiteFloat64), "FLOAT64");
EXPECT_EQ(type_name(kTfLiteFloat32), "FLOAT32");
EXPECT_EQ(type_name(kTfLiteFloat16), "FLOAT16");
EXPECT_EQ(type_name(kTfLiteBFloat16), "BFLOAT16");
EXPECT_EQ(type_name(kTfLiteInt16), "INT16");
EXPECT_EQ(type_name(kTfLiteUInt16), "UINT16");
EXPECT_EQ(type_name(kTfLiteInt32), "INT32");
EXPECT_EQ(type_name(kTfLiteUInt32), "UINT32");
EXPECT_EQ(type_name(kTfLiteUInt8), "UINT8");
EXPECT_EQ(type_name(kTfLiteUInt64), "UINT64");
EXPECT_EQ(type_name(kTfLiteInt8), "INT8");
EXPECT_EQ(type_name(kTfLiteInt64), "INT64");
EXPECT_EQ(type_name(kTfLiteBool), "BOOL");
EXPECT_EQ(type_name(kTfLiteComplex64), "COMPLEX64");
EXPECT_EQ(type_name(kTfLiteComplex128), "COMPLEX128");
EXPECT_EQ(type_name(kTfLiteString), "STRING");
EXPECT_EQ(type_name(kTfLiteResource), "RESOURCE");
EXPECT_EQ(type_name(kTfLiteVariant), "VARIANT");
EXPECT_EQ(type_name(kTfLiteInt4), "INT4");
}
TEST(Quantization, TestQuantizationFree) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
t.dims = nullptr;
t.dims_signature = nullptr;
t.quantization.type = kTfLiteAffineQuantization;
t.sparsity = nullptr;
auto* params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
params->scale = TfLiteFloatArrayCreate(3);
params->zero_point = TfLiteIntArrayCreate(3);
t.quantization.params = reinterpret_cast<void*>(params);
TfLiteTensorFree(&t);
}
TEST(Sparsity, TestSparsityFree) {
TfLiteTensor t = {};
t.allocation_type = kTfLiteArenaRw;
t.dims = nullptr;
t.dims_signature = nullptr;
t.sparsity = static_cast<TfLiteSparsity*>(malloc(sizeof(TfLiteSparsity)));
t.sparsity->traversal_order = TfLiteIntArrayCreate(2);
t.sparsity->block_map = nullptr;
t.sparsity->dim_metadata = static_cast<TfLiteDimensionMetadata*>(
malloc(sizeof(TfLiteDimensionMetadata) * 2));
t.sparsity->dim_metadata_size = 2;
t.sparsity->dim_metadata[0].format = kTfLiteDimDense;
t.sparsity->dim_metadata[0].dense_size = 4;
t.sparsity->dim_metadata[1].format = kTfLiteDimSparseCSR;
t.sparsity->dim_metadata[1].array_segments = TfLiteIntArrayCreate(2);
t.sparsity->dim_metadata[1].array_indices = TfLiteIntArrayCreate(3);
TfLiteTensorFree(&t);
}
TEST(TensorCopy, TensorCopy_VALID) {
const int kNumElements = 32;
const int kBytes = sizeof(float) * kNumElements;
TfLiteTensor src;
TfLiteTensor dst;
TfLiteDelegate delegate;
memset(&delegate, 0, sizeof(delegate));
memset(&src, 0, sizeof(TfLiteTensor));
memset(&dst, 0, sizeof(TfLiteTensor));
src.data.raw = static_cast<char*>(malloc(kBytes));
for (int i = 0; i < kNumElements; ++i) {
src.data.f[i] = i;
}
dst.data.raw = static_cast<char*>(malloc(kBytes));
src.bytes = dst.bytes = kBytes;
src.delegate = &delegate;
src.data_is_stale = true;
src.allocation_type = kTfLiteDynamic;
src.type = kTfLiteFloat32;
src.dims = TfLiteIntArrayCreate(1);
src.dims->data[0] = 1;
src.dims_signature = TfLiteIntArrayCopy(src.dims);
src.buffer_handle = 5;
EXPECT_EQ(kTfLiteOk, TfLiteTensorCopy(&src, &dst));
EXPECT_EQ(dst.bytes, src.bytes);
EXPECT_EQ(dst.delegate, src.delegate);
EXPECT_EQ(dst.data_is_stale, src.data_is_stale);
EXPECT_EQ(dst.type, src.type);
EXPECT_EQ(1, TfLiteIntArrayEqual(dst.dims, src.dims));
EXPECT_EQ(dst.buffer_handle, src.buffer_handle);
for (int i = 0; i < kNumElements; ++i) {
EXPECT_EQ(dst.data.f[i], src.data.f[i]);
}
TfLiteTensorFree(&src);
free(dst.data.raw);
TfLiteTensorFree(&dst);
}
TEST(TensorCopy, TensorCopy_INVALID) {
TfLiteTensor src;
TfLiteTensor dst;
EXPECT_EQ(kTfLiteOk, TfLiteTensorCopy(&src, nullptr));
EXPECT_EQ(kTfLiteOk, TfLiteTensorCopy(nullptr, &dst));
src.bytes = 10;
dst.bytes = 12;
EXPECT_EQ(kTfLiteError, TfLiteTensorCopy(&src, &dst));
}
TEST(TestTensorRealloc, TensorReallocMoreBytesSucceeds) {
const TfLiteType t = kTfLiteFloat32;
const int num_elements = 4;
const int new_num_elements = 6;
const size_t bytes = sizeof(float) * num_elements;
const size_t new_bytes = sizeof(float) * new_num_elements;
float* data = (float*)malloc(bytes);
memset(data, 0, bytes);
TfLiteIntArray* dims = ConvertVectorToTfLiteIntArray({num_elements});
TfLiteTensor* tensor = (TfLiteTensor*)malloc(sizeof(TfLiteTensor));
tensor->sparsity = nullptr;
tensor->quantization.type = kTfLiteNoQuantization;
tensor->bytes = bytes;
tensor->type = t;
tensor->data.data = data;
tensor->allocation_type = kTfLiteDynamic;
tensor->dims = dims;
tensor->dims_signature = TfLiteIntArrayCopy(dims);
ASSERT_EQ(TfLiteTensorRealloc(new_bytes, tensor), kTfLiteOk);
EXPECT_EQ(tensor->bytes, new_bytes);
ASSERT_THAT(std::vector<int>(tensor->data.f, tensor->data.f + num_elements),
ElementsAreArray({0, 0, 0, 0}));
TfLiteTensorFree(tensor);
free(tensor);
}
TEST(TestTensorRealloc, TensorReallocLessBytesSucceeds) {
const TfLiteType t = kTfLiteFloat32;
const int num_elements = 4;
const int new_num_elements = 2;
const size_t bytes = sizeof(float) * num_elements;
const size_t new_bytes = sizeof(float) * new_num_elements;
float* data = (float*)malloc(bytes);
memset(data, 0, bytes);
TfLiteIntArray* dims = ConvertVectorToTfLiteIntArray({num_elements});
TfLiteTensor* tensor = (TfLiteTensor*)malloc(sizeof(TfLiteTensor));
tensor->sparsity = nullptr;
tensor->bytes = bytes;
tensor->type = t;
tensor->data.data = data;
tensor->allocation_type = kTfLiteDynamic;
tensor->dims = dims;
tensor->dims_signature = TfLiteIntArrayCopy(dims);
tensor->quantization.type = kTfLiteNoQuantization;
ASSERT_EQ(TfLiteTensorRealloc(new_bytes, tensor), kTfLiteOk);
EXPECT_EQ(tensor->bytes, new_bytes);
ASSERT_THAT(std::vector<int>(tensor->data.f, tensor->data.f + 2),
ElementsAreArray({0, 0}));
TfLiteTensorFree(tensor);
free(tensor);
}
TEST(TestTensorRealloc, TensorReallocNonDynamicNoChange) {
const TfLiteType t = kTfLiteFloat32;
const int num_elements = 4;
const int new_num_elements = 6;
const size_t bytes = sizeof(float) * num_elements;
const size_t new_bytes = sizeof(float) * new_num_elements;
float* data = (float*)malloc(bytes);
memset(data, 0, bytes);
TfLiteIntArray* dims = ConvertVectorToTfLiteIntArray({num_elements});
TfLiteTensor* tensor = (TfLiteTensor*)malloc(sizeof(TfLiteTensor));
tensor->sparsity = nullptr;
tensor->bytes = bytes;
tensor->type = t;
tensor->data.data = data;
tensor->allocation_type = kTfLiteArenaRw;
tensor->quantization.type = kTfLiteNoQuantization;
tensor->dims = dims;
tensor->dims_signature = TfLiteIntArrayCopy(dims);
EXPECT_EQ(TfLiteTensorRealloc(new_bytes, tensor), kTfLiteOk);
EXPECT_EQ(tensor->bytes, bytes);
EXPECT_THAT(std::vector<int>(tensor->data.i32, tensor->data.i32 + 4),
ElementsAreArray({0, 0, 0, 0}));
free(tensor->data.data);
TfLiteTensorFree(tensor);
free(tensor);
}
TEST(TestTensorRealloc, TensorReallocNumByte0) {
const TfLiteType t = kTfLiteFloat32;
const int num_elements = 4;
const int new_num_elements = 0;
const size_t bytes = sizeof(float) * num_elements;
const size_t new_bytes = sizeof(float) * new_num_elements;
float* data = (float*)malloc(bytes);
memset(data, 0, bytes);
TfLiteIntArray* dims = ConvertVectorToTfLiteIntArray({num_elements});
TfLiteTensor* tensor = (TfLiteTensor*)malloc(sizeof(TfLiteTensor));
tensor->sparsity = nullptr;
tensor->bytes = bytes;
tensor->type = t;
tensor->data.data = data;
tensor->allocation_type = kTfLiteDynamic;
tensor->quantization.type = kTfLiteNoQuantization;
tensor->dims = dims;
tensor->dims_signature = TfLiteIntArrayCopy(dims);
EXPECT_EQ(TfLiteTensorRealloc(new_bytes, tensor), kTfLiteOk);
EXPECT_EQ(tensor->bytes, 0);
TfLiteTensorFree(tensor);
free(tensor);
}
TEST(TestTensorRealloc, TensorReallocLargeBytesFails) {
const TfLiteType t = kTfLiteFloat32;
const int num_elements = 4;
const size_t bytes = sizeof(float) * num_elements;
float* data = (float*)malloc(bytes);
memset(data, 0, bytes);
TfLiteIntArray* dims = ConvertVectorToTfLiteIntArray({num_elements});
TfLiteTensor* tensor = (TfLiteTensor*)malloc(sizeof(TfLiteTensor));
tensor->sparsity = nullptr;
tensor->bytes = bytes;
tensor->type = t;
tensor->data.data = data;
tensor->allocation_type = kTfLiteDynamic;
tensor->dims = dims;
tensor->dims_signature = TfLiteIntArrayCopy(dims);
tensor->quantization.type = kTfLiteNoQuantization;
const size_t large_bytes = std::numeric_limits<size_t>::max() - 16;
EXPECT_EQ(TfLiteTensorRealloc(large_bytes, tensor), kTfLiteError);
TfLiteTensorFree(tensor);
free(data);
free(tensor);
}
TEST(TestTfLiteTensorGetAllocationStrategy, MemNoneIsAllocatedWithNone) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteTensorGetAllocationStrategy(&t),
kTfLiteAllocationStrategyNone);
}
TEST(TestTfLiteTensorGetAllocationStrategy, MmapRoIsAllocatedWithMMap) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteTensorGetAllocationStrategy(&t),
kTfLiteAllocationStrategyMMap);
}
TEST(TestTfLiteTensorGetAllocationStrategy, ArenaRwIsAllocatedWithArena) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteTensorGetAllocationStrategy(&t),
kTfLiteAllocationStrategyArena);
}
TEST(TestTfLiteTensorGetAllocationStrategy,
ArenaRwPersistentIsAllocatedWithArena) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteTensorGetAllocationStrategy(&t),
kTfLiteAllocationStrategyArena);
}
TEST(TestTfLiteTensorGetAllocationStrategy, DynamicIsAllocatedWithMalloc) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteTensorGetAllocationStrategy(&t),
kTfLiteAllocationStrategyMalloc);
}
TEST(TestTfLiteTensorGetAllocationStrategy,
PersistentRoIsAllocatedWithUnknown) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteTensorGetAllocationStrategy(&t),
kTfLiteAllocationStrategyUnknown);
}
TEST(TestTfLiteTensorGetAllocationStrategy, CustomIsAllocatedWithUnknown) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteTensorGetAllocationStrategy(&t),
kTfLiteAllocationStrategyUnknown);
}
TEST(TestTfLiteTensorGetAllocationStrategy, VariantObjectIsAllocatedWithNew) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteTensorGetAllocationStrategy(&t),
kTfLiteAllocationStrategyNew);
}
TEST(TestTfLiteTensorGetBufferAddressStability,
MemNoneBufferIsStableAcrossRuns) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteTensorGetBufferAddressStability(&t),
kTfLiteRunStabilityAcrossRuns);
}
TEST(TestTfLiteTensorGetBufferAddressStability,
MmapRoBufferIsStableAcrossRuns) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteTensorGetBufferAddressStability(&t),
kTfLiteRunStabilityAcrossRuns);
}
TEST(TestTfLiteTensorGetBufferAddressStability, ArenaRwBufferIsStableUnstable) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteTensorGetBufferAddressStability(&t),
kTfLiteRunStabilityUnstable);
}
TEST(TestTfLiteTensorGetBufferAddressStability,
ArenaRwPersistentBufferIsStableUnstable) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteTensorGetBufferAddressStability(&t),
kTfLiteRunStabilityUnstable);
}
TEST(TestTfLiteTensorGetBufferAddressStability,
DynamicBufferIsStableSingleRun) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteTensorGetBufferAddressStability(&t),
kTfLiteRunStabilitySingleRun);
}
TEST(TestTfLiteTensorGetBufferAddressStability,
PersistentRoBufferIsStableSingleRun) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteTensorGetBufferAddressStability(&t),
kTfLiteRunStabilitySingleRun);
}
TEST(TestTfLiteTensorGetBufferAddressStability, CustomBufferIsStableUnknown) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteTensorGetBufferAddressStability(&t),
kTfLiteRunStabilityUnknown);
}
TEST(TestTfLiteTensorGetBufferAddressStability,
VariantObjectBufferIsStableAcrossRuns) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteTensorGetBufferAddressStability(&t),
kTfLiteRunStabilityAcrossRuns);
}
TEST(TestTfLiteTensorGetDataStability, MemNoneDataIsStableAcrossRuns) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteTensorGetDataStability(&t), kTfLiteRunStabilityAcrossRuns);
}
TEST(TestTfLiteTensorGetDataStability, MmapRoDataIsStableAcrossRuns) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteTensorGetDataStability(&t), kTfLiteRunStabilityAcrossRuns);
}
TEST(TestTfLiteTensorGetDataStability, ArenaRwDataIsStableSingleRun) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteTensorGetDataStability(&t), kTfLiteRunStabilitySingleRun);
}
TEST(TestTfLiteTensorGetDataStability,
ArenaRwPersistentDataIsStableAcrossRuns) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteTensorGetDataStability(&t), kTfLiteRunStabilityAcrossRuns);
}
TEST(TestTfLiteTensorGetDataStability, DynamicDataIsStableSingleRun) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteTensorGetDataStability(&t), kTfLiteRunStabilitySingleRun);
}
TEST(TestTfLiteTensorGetDataStability, PersistentRoDataIsStableSingleRun) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteTensorGetDataStability(&t), kTfLiteRunStabilitySingleRun);
}
TEST(TestTfLiteTensorGetDataStability, CustomDataIsStableUnknown) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteTensorGetDataStability(&t), kTfLiteRunStabilityUnknown);
}
TEST(TestTfLiteTensorGetDataStability, VariantObjectDataIsStableSingleRun) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteTensorGetDataStability(&t), kTfLiteRunStabilitySingleRun);
}
TEST(TestTfLiteTensorGetDataKnownStep, MemNoneDataIsKnownAtInit) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteTensorGetDataKnownStep(&t), kTfLiteRunStepInit);
}
TEST(TestTfLiteTensorGetDataKnownStep, MmapRoDataIsKnownAtInit) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteTensorGetDataKnownStep(&t), kTfLiteRunStepInit);
}
TEST(TestTfLiteTensorGetDataKnownStep, ArenaRwDataIsKnownAtEval) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteTensorGetDataKnownStep(&t), kTfLiteRunStepEval);
}
TEST(TestTfLiteTensorGetDataKnownStep, ArenaRwPersistentDataIsKnownAtEval) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteTensorGetDataKnownStep(&t), kTfLiteRunStepEval);
}
TEST(TestTfLiteTensorGetDataKnownStep, DynamicDataIsKnownAtEval) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteTensorGetDataKnownStep(&t), kTfLiteRunStepEval);
}
TEST(TestTfLiteTensorGetDataKnownStep, PersistentRoDataIsKnownAtPrepare) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteTensorGetDataKnownStep(&t), kTfLiteRunStepPrepare);
}
TEST(TestTfLiteTensorGetDataKnownStep, CustomDataIsKnownAtUnknown) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteTensorGetDataKnownStep(&t), kTfLiteRunStepUnknown);
}
TEST(TestTfLiteTensorGetDataKnownStep, VariantObjectDataIsKnownAtEval) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteTensorGetDataKnownStep(&t), kTfLiteRunStepEval);
}
TEST(TestTfLiteTensorGetShapeKnownStep, MemNoneShapeIsKnownAtInit) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteTensorGetShapeKnownStep(&t), kTfLiteRunStepInit);
}
TEST(TestTfLiteTensorGetShapeKnownStep, MmapRoShapeIsKnownAtInit) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteTensorGetShapeKnownStep(&t), kTfLiteRunStepInit);
}
TEST(TestTfLiteTensorGetShapeKnownStep, ArenaRwShapeIsKnownAtPrepare) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteTensorGetShapeKnownStep(&t), kTfLiteRunStepPrepare);
}
TEST(TestTfLiteTensorGetShapeKnownStep,
ArenaRwPersistentShapeIsKnownAtPrepare) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteTensorGetShapeKnownStep(&t), kTfLiteRunStepPrepare);
}
TEST(TestTfLiteTensorGetShapeKnownStep, DynamicShapeIsKnownAtEval) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteTensorGetShapeKnownStep(&t), kTfLiteRunStepEval);
}
TEST(TestTfLiteTensorGetShapeKnownStep, PersistentRoShapeIsKnownAtPrepare) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteTensorGetShapeKnownStep(&t), kTfLiteRunStepPrepare);
}
TEST(TestTfLiteTensorGetShapeKnownStep, CustomShapeIsKnownAtUnknown) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteTensorGetShapeKnownStep(&t), kTfLiteRunStepUnknown);
}
TEST(TestTfLiteTensorGetShapeKnownStep, VariantObjectShapeIsKnownAtEval) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteTensorGetShapeKnownStep(&t), kTfLiteRunStepEval);
}
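// Payload for the variant-tensor tests below; `copied` records whether the
// copy constructor ran.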
struct Foo {
int data;
bool copied;
};
class VariantFoo : public AbstractVariantData<VariantFoo> {
public:
explicit VariantFoo(int number) : foo_data_(Foo{number, false}) {}
VariantFoo(const VariantFoo& other) {
foo_data_ = other.foo_data_;
foo_data_.copied = true;
}
int GetFooInt() { return foo_data_.data; }
bool GetFooCopied() { return foo_data_.copied; }
private:
Foo foo_data_;
};
class VariantFoo2 : public AbstractVariantData<VariantFoo2> {
public:
explicit VariantFoo2(int number, float float_number)
: foo_data_(Foo{number, false}), float_data_(float_number) {}
VariantFoo2(const VariantFoo2& other) {
foo_data_ = other.foo_data_;
foo_data_.copied = true;
float_data_ = other.float_data_;
}
int GetFooInt() { return foo_data_.data; }
bool GetFooCopied() { return foo_data_.copied; }
float GetFloatData() { return float_data_; }
private:
Foo foo_data_;
float float_data_;
};
TEST(TestTfLiteReallocWithObject, ConstructSingleParamVariant) {
TensorUniquePtr t = BuildTfLiteTensor();
t->type = kTfLiteVariant;
ASSERT_EQ((TfLiteTensorVariantRealloc<VariantFoo>(t.get(), 3)), kTfLiteOk);
ASSERT_EQ(reinterpret_cast<VariantFoo*>(t->data.data)->GetFooInt(), 3);
ASSERT_EQ(t->type, kTfLiteVariant);
ASSERT_EQ(t->allocation_type, kTfLiteVariantObject);
}
TEST(TestTfLiteReallocWithObject, ConstructMultiParamVariant) {
TensorUniquePtr t = BuildTfLiteTensor();
t->type = kTfLiteVariant;
ASSERT_EQ(
(TfLiteTensorVariantRealloc<VariantFoo2, int, float>(t.get(), 3, 1.0)),
kTfLiteOk);
VariantFoo2* data = reinterpret_cast<VariantFoo2*>(t->data.data);
ASSERT_EQ(data->GetFooInt(), 3);
ASSERT_EQ(data->GetFloatData(), 1.0);
ASSERT_EQ(t->type, kTfLiteVariant);
ASSERT_EQ(t->allocation_type, kTfLiteVariantObject);
}
TEST(TestTfLiteReallocWithObject,
ConstructSingleParamVariantWithAlreadyAllocated) {
TensorUniquePtr t = BuildTfLiteTensor();
t->type = kTfLiteVariant;
ASSERT_EQ((TfLiteTensorVariantRealloc<VariantFoo>(t.get(), 3)), kTfLiteOk);
void* before_address = t->data.data;
ASSERT_EQ((TfLiteTensorVariantRealloc<VariantFoo>(t.get(), 5)), kTfLiteOk);
EXPECT_EQ(t->data.data, before_address);
EXPECT_EQ(reinterpret_cast<VariantFoo*>(t->data.data)->GetFooInt(), 5);
EXPECT_EQ(t->type, kTfLiteVariant);
EXPECT_EQ(t->allocation_type, kTfLiteVariantObject);
}
TEST(TestTfLiteReallocWithObject,
     ConstructMultiParamVariantWithAlreadyAllocated) {
TensorUniquePtr t = BuildTfLiteTensor();
t->type = kTfLiteVariant;
ASSERT_EQ(
(TfLiteTensorVariantRealloc<VariantFoo2, int, float>(t.get(), 3, 1.0)),
kTfLiteOk);
void* before_address = t->data.data;
ASSERT_EQ(
(TfLiteTensorVariantRealloc<VariantFoo2, int, float>(t.get(), 5, 2.0)),
kTfLiteOk);
EXPECT_EQ(t->data.data, before_address);
VariantFoo2* data = reinterpret_cast<VariantFoo2*>(t->data.data);
EXPECT_EQ(data->GetFooInt(), 5);
EXPECT_EQ(data->GetFloatData(), 2.0);
EXPECT_EQ(t->type, kTfLiteVariant);
EXPECT_EQ(t->allocation_type, kTfLiteVariantObject);
}
TEST(TestTfLiteReallocWithObject, NonVariantTypeError) {
TensorUniquePtr t = BuildTfLiteTensor();
t->type = kTfLiteInt32;
ASSERT_EQ((TfLiteTensorVariantRealloc<VariantFoo>(t.get(), 3)), kTfLiteError);
}
TEST(TestVariantData, CopyVariantTensorCallsDerivedCopyCstor) {
TensorUniquePtr src_variant_tensor = BuildTfLiteTensor();
TensorUniquePtr dst_variant_tensor = BuildTfLiteTensor();
for (TfLiteTensor* tensor :
{src_variant_tensor.get(), dst_variant_tensor.get()}) {
tensor->dims = ConvertVectorToTfLiteIntArray({0});
tensor->allocation_type = kTfLiteVariantObject;
tensor->type = kTfLiteVariant;
}
ASSERT_EQ(
(TfLiteTensorVariantRealloc<VariantFoo>(src_variant_tensor.get(), 1)),
kTfLiteOk);
auto* src_variant_data =
reinterpret_cast<VariantFoo*>(src_variant_tensor->data.data);
EXPECT_EQ(src_variant_data->GetFooInt(), 1);
EXPECT_EQ(src_variant_data->GetFooCopied(), false);
ASSERT_EQ(
TfLiteTensorCopy(src_variant_tensor.get(), dst_variant_tensor.get()),
kTfLiteOk);
auto* dst_variant_data =
reinterpret_cast<VariantFoo*>(dst_variant_tensor->data.data);
EXPECT_EQ(dst_variant_data->GetFooInt(), 1);
EXPECT_EQ(dst_variant_data->GetFooCopied(), true);
}
TEST(TestVariantData, CopyVariantTensorCallsDerivedCopyCstorWithAllocation) {
TensorUniquePtr src_variant_tensor = BuildTfLiteTensor();
TensorUniquePtr dst_variant_tensor = BuildTfLiteTensor();
for (TfLiteTensor* tensor :
{src_variant_tensor.get(), dst_variant_tensor.get()}) {
tensor->dims = ConvertVectorToTfLiteIntArray({0});
tensor->allocation_type = kTfLiteVariantObject;
tensor->type = kTfLiteVariant;
}
ASSERT_EQ(
(TfLiteTensorVariantRealloc<VariantFoo>(src_variant_tensor.get(), 1)),
kTfLiteOk);
ASSERT_EQ(
(TfLiteTensorVariantRealloc<VariantFoo>(dst_variant_tensor.get(), 2)),
kTfLiteOk);
void* before_address = dst_variant_tensor->data.data;
ASSERT_EQ(
TfLiteTensorCopy(src_variant_tensor.get(), dst_variant_tensor.get()),
kTfLiteOk);
auto* dst_variant_data =
reinterpret_cast<VariantFoo*>(dst_variant_tensor->data.data);
EXPECT_EQ(dst_variant_data->GetFooInt(), 1);
EXPECT_EQ(dst_variant_tensor->data.data, before_address);
}
TEST(TestVariantData, CopyTensorToNonVariantObjectSetsAllocationType) {
TensorUniquePtr src_variant_tensor = BuildTfLiteTensor();
TensorUniquePtr dst_variant_tensor = BuildTfLiteTensor();
for (TfLiteTensor* tensor :
{src_variant_tensor.get(), dst_variant_tensor.get()}) {
tensor->dims = ConvertVectorToTfLiteIntArray({0});
tensor->type = kTfLiteVariant;
}
src_variant_tensor->allocation_type = kTfLiteVariantObject;
ASSERT_EQ(
(TfLiteTensorVariantRealloc<VariantFoo>(src_variant_tensor.get(), 1)),
kTfLiteOk);
ASSERT_EQ(
(TfLiteTensorVariantRealloc<VariantFoo>(dst_variant_tensor.get(), 2)),
kTfLiteOk);
void* before_address = dst_variant_tensor->data.data;
ASSERT_EQ(
TfLiteTensorCopy(src_variant_tensor.get(), dst_variant_tensor.get()),
kTfLiteOk);
ASSERT_EQ(dst_variant_tensor->allocation_type, kTfLiteVariantObject);
auto* dst_variant_data =
reinterpret_cast<VariantFoo*>(dst_variant_tensor->data.data);
EXPECT_EQ(dst_variant_data->GetFooInt(), 1);
EXPECT_EQ(dst_variant_tensor->data.data, before_address);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/kernels/internal/common.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/c/common_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
33f82346-9f4f-407e-9852-6d1bd1bf3883 | cpp | tensorflow/tensorflow | url | tensorflow/core/data/service/url.cc | tensorflow/core/data/service/url_test.cc | #include "tensorflow/core/data/service/url.h"
#include <string>
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/regexp.h"
namespace tensorflow {
namespace data {
URL::URL(absl::string_view url) { Parse(url); }
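// Splits `url` into host and port. The port may be numeric, a named port, or
// a dynamic placeholder such as %port% or %port_worker%; if nothing matches,
// the entire string becomes the host.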
void URL::Parse(absl::string_view url) {
absl::string_view regexp = "(.*):([a-zA-Z0-9_]+|%port(_[a-zA-Z0-9_]+)?%)";
if (!RE2::FullMatch(url, regexp, &host_, &port_)) {
host_ = std::string(url);
port_ = "";
}
}
}
} | #include "tensorflow/core/data/service/url.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
TEST(URLTest, ParseUrl) {
URL url("localhost");
EXPECT_EQ(url.host(), "localhost");
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseUrlWithProtocol) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseUrlWithPort) {
URL url("localhost:1234");
EXPECT_EQ(url.host(), "localhost");
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "1234");
}
TEST(URLTest, ParseUrlWithProtocolAndPort) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "1234");
}
TEST(URLTest, ParseUrlWithProtocolAndDynamicPort) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "%port%");
}
TEST(URLTest, ParseBorgAddress) {
URL url("/worker/task/0");
EXPECT_EQ(url.host(), "/worker/task/0");
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseBorgAddressWithCustomProtocol) {
URL url("worker:/worker/task/0");
EXPECT_EQ(url.host(), "worker:/worker/task/0");
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseBorgAddressWithNamedPort) {
URL url("/worker/task/0:worker");
EXPECT_EQ(url.host(), "/worker/task/0");
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "worker");
}
TEST(URLTest, ParseBorgAddressWithDynamicPort) {
URL url("/worker/task/0:%port%");
EXPECT_EQ(url.host(), "/worker/task/0");
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "%port%");
}
TEST(URLTest, ParseBorgAddressWithDynamicNamedPort) {
URL url("/worker/task/0:%port_worker%");
EXPECT_EQ(url.host(), "/worker/task/0");
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "%port_worker%");
}
TEST(URLTest, ParseIPv4Address) {
URL url("127.0.0.1");
EXPECT_EQ(url.host(), "127.0.0.1");
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseIPv4AddressWithPort) {
URL url("127.0.0.1:8000");
EXPECT_EQ(url.host(), "127.0.0.1");
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "8000");
}
TEST(URLTest, ParseIPv6Address) {
URL url("[::1]");
EXPECT_EQ(url.host(), "[::1]");
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseIPv6AddressWithProtocol) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseIPv6AddressWithPort) {
URL url("[::1]:23456");
EXPECT_EQ(url.host(), "[::1]");
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "23456");
}
TEST(URLTest, ParseIPv6AddressWithProtocolAndPort) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "23456");
}
TEST(URLTest, ParseIPv6AddressWithProtocolAndDynamicPort) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "%port_name%");
}
TEST(URLTest, ParseNonLocalIPv6Address) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseNonLocalIPv6AddressWithNamedPort) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "worker");
}
TEST(URLTest, ParseEmptyIPv6Address) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseEmptyAddress) {
URL url("");
EXPECT_EQ(url.host(), "");
EXPECT_FALSE(url.has_port());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/url.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/url_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d202f695-4abf-4358-aa4f-6952eaf62e6d | cpp | tensorflow/tensorflow | validate_utils | tensorflow/core/data/service/client/validate_utils.cc | tensorflow/core/data/service/client/validate_utils_test.cc | #include "tensorflow/core/data/service/client/validate_utils.h"
#include "tensorflow/core/data/service/client/common.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/worker_impl.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
namespace tensorflow {
namespace data {
namespace {
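// Rejects configurations that target local workers when none are available,
// or that combine local reads with coordinated reads.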
Status ValidateLocalWorkers(const DataServiceParams& data_service_params) {
if (data_service_params.target_workers != TARGET_WORKERS_LOCAL) {
return absl::OkStatus();
}
if (LocalWorkers::Empty()) {
if (IsStaticShard(data_service_params.processing_mode)) {
return errors::InvalidArgument(
"Static sharding policy <",
ProcessingModeDef::ShardingPolicy_Name(
data_service_params.processing_mode.sharding_policy()),
"> requires local tf.data workers, but no local worker is found. "
"You need to run local tf.data service workers in your training "
"workers. Static sharding also requires a fixed worker pool and "
"a list of worker addresses in the DispatcherConfig. See the "
"\"Processing Modes\" section in the module doc for details.");
}
return errors::InvalidArgument(
"Local reads require local tf.data workers, but no local worker "
"is found. You need to run local tf.data service workers in your "
"training workers.");
}
if (data_service_params.num_consumers.has_value()) {
return errors::InvalidArgument(
"Coordinated reads require non-local workers, but `target_workers` "
"is \"LOCAL\".");
}
return absl::OkStatus();
}
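// Cross-trainer caching requires a named job, an infinite dataset, a single
// repetition, and no coordinated reads.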
Status ValidateCrossTrainerCache(const DataServiceParams& data_service_params) {
if (!data_service_params.cross_trainer_cache_options.has_value()) {
return absl::OkStatus();
}
if (data_service_params.job_name.empty()) {
return errors::InvalidArgument(
"Cross-trainer caching requires named jobs. Got empty `job_name`.");
}
if (data_service_params.metadata.cardinality() >= 0) {
return errors::InvalidArgument(
"Cross-trainer caching requires the input dataset to be infinite. "
"Got input with cardinality ",
data_service_params.metadata.cardinality());
}
if (data_service_params.repetition > 1) {
return errors::InvalidArgument(
"Cross-trainer caching requires infinite datasets and disallows "
"multiple repetitions of the same dataset. Got repetition ",
data_service_params.repetition);
}
if (data_service_params.num_consumers.has_value()) {
return errors::InvalidArgument(
"Cross-trainer caching does not support coordinated reads. "
"Got number of coordinated consumers: ",
data_service_params.num_consumers.value());
}
return absl::OkStatus();
}
}
Status ValidateDataServiceParams(const DataServiceParams& data_service_params) {
TF_RETURN_IF_ERROR(ValidateLocalWorkers(data_service_params));
TF_RETURN_IF_ERROR(ValidateCrossTrainerCache(data_service_params));
return absl::OkStatus();
}
}
} | #include "tensorflow/core/data/service/client/validate_utils.h"
#include <memory>
#include "tensorflow/core/data/service/client/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/worker_impl.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
DataServiceParams GetDefaultParams() {
DataServiceParams params;
params.dataset_id = "dataset_id";
params.processing_mode.set_sharding_policy(ProcessingModeDef::OFF);
params.address = "localhost";
params.protocol = "grpc";
params.data_transfer_protocol = "grpc";
params.metadata.set_cardinality(kUnknownCardinality);
return params;
}
std::shared_ptr<DataServiceWorkerImpl> GetLocalWorker() {
experimental::WorkerConfig config;
config.set_protocol("grpc");
config.set_dispatcher_address("localhost");
config.set_worker_address("localhost");
return std::make_shared<DataServiceWorkerImpl>(config);
}
TEST(ValidateUtilsTest, DefaultParams) {
TF_EXPECT_OK(ValidateDataServiceParams(GetDefaultParams()));
}
TEST(ValidateUtilsTest, LocalWorkerSuccess) {
DataServiceParams params = GetDefaultParams();
LocalWorkers::Add("localhost", GetLocalWorker());
params.target_workers = TARGET_WORKERS_LOCAL;
TF_EXPECT_OK(ValidateDataServiceParams(params));
LocalWorkers::Remove("localhost");
}
TEST(ValidateUtilsTest, NoLocalWorker) {
DataServiceParams params = GetDefaultParams();
params.target_workers = TARGET_WORKERS_LOCAL;
EXPECT_THAT(
ValidateDataServiceParams(params),
StatusIs(
error::INVALID_ARGUMENT,
HasSubstr(
"Local reads require local tf.data workers, but no local worker "
"is found.")));
}
TEST(ValidateUtilsTest, NoLocalWorkerStaticSharding) {
DataServiceParams params = GetDefaultParams();
params.processing_mode.set_sharding_policy(ProcessingModeDef::FILE_OR_DATA);
params.target_workers = TARGET_WORKERS_LOCAL;
EXPECT_THAT(
ValidateDataServiceParams(params),
StatusIs(
error::INVALID_ARGUMENT,
HasSubstr(
"Static sharding policy <FILE_OR_DATA> requires local tf.data "
"workers, but no local worker is found.")));
}
TEST(ValidateUtilsTest, LocalReadDisallowsCoordinatedRead) {
DataServiceParams params = GetDefaultParams();
LocalWorkers::Add("localhost", GetLocalWorker());
params.num_consumers = 1;
params.consumer_index = 0;
params.target_workers = TARGET_WORKERS_LOCAL;
EXPECT_THAT(
ValidateDataServiceParams(params),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Coordinated reads require non-local workers, but "
"`target_workers` is \"LOCAL\".")));
LocalWorkers::Remove("localhost");
}
TEST(ValidateUtilsTest, CrossTrainerCacheSuccess) {
DataServiceParams params = GetDefaultParams();
params.job_name = "job_name";
params.repetition = 1;
params.metadata.set_cardinality(kInfiniteCardinality);
params.cross_trainer_cache_options.emplace();
params.cross_trainer_cache_options->set_trainer_id("trainer ID");
TF_EXPECT_OK(ValidateDataServiceParams(params));
}
TEST(ValidateUtilsTest, CrossTrainerCacheRequiresJobName) {
DataServiceParams params = GetDefaultParams();
params.repetition = 1;
params.metadata.set_cardinality(kInfiniteCardinality);
params.cross_trainer_cache_options.emplace();
params.cross_trainer_cache_options->set_trainer_id("trainer ID");
EXPECT_THAT(
ValidateDataServiceParams(params),
StatusIs(
error::INVALID_ARGUMENT,
"Cross-trainer caching requires named jobs. Got empty `job_name`."));
}
TEST(ValidateUtilsTest, CrossTrainerCacheRequiresInfiniteDataset) {
DataServiceParams params = GetDefaultParams();
params.job_name = "job_name";
params.repetition = 1;
params.metadata.set_cardinality(10);
params.cross_trainer_cache_options.emplace();
params.cross_trainer_cache_options->set_trainer_id("trainer ID");
EXPECT_THAT(ValidateDataServiceParams(params),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Cross-trainer caching requires the input "
"dataset to be infinite.")));
}
TEST(ValidateUtilsTest, CrossTrainerCacheDisallowsRepetition) {
DataServiceParams params = GetDefaultParams();
params.job_name = "job_name";
params.repetition = 5;
params.metadata.set_cardinality(kInfiniteCardinality);
params.cross_trainer_cache_options.emplace();
params.cross_trainer_cache_options->set_trainer_id("trainer ID");
EXPECT_THAT(
ValidateDataServiceParams(params),
StatusIs(
error::INVALID_ARGUMENT,
HasSubstr(
"Cross-trainer caching requires infinite datasets and disallows "
"multiple repetitions of the same dataset.")));
}
TEST(ValidateUtilsTest, CrossTrainerCacheDisallowsCoordinatedRead) {
DataServiceParams params = GetDefaultParams();
params.job_name = "job_name";
params.repetition = 1;
params.num_consumers = 1;
params.consumer_index = 0;
params.metadata.set_cardinality(kInfiniteCardinality);
params.cross_trainer_cache_options.emplace();
params.cross_trainer_cache_options->set_trainer_id("trainer ID");
EXPECT_THAT(
ValidateDataServiceParams(params),
StatusIs(
error::INVALID_ARGUMENT,
HasSubstr(
"Cross-trainer caching does not support coordinated reads.")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/client/validate_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/client/validate_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7992b87a-34e9-459f-b578-130df373c9f1 | cpp | tensorflow/tensorflow | split_provider | tensorflow/core/data/service/split_provider.cc | tensorflow/core/data/service/split_provider_test.cc | #include "tensorflow/core/data/service/split_provider.h"
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/dispatcher_client.h"
#include "tensorflow/core/data/service/grpc_util.h"
#include "tensorflow/core/data/standalone.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
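// Fetches the next split from the dispatcher, retrying transient failures
// until the configured timeout elapses.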
Status DataServiceSplitProvider::GetNext(Tensor* split, bool* end_of_splits)
TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
if (!dispatcher_) {
dispatcher_ =
std::make_unique<DataServiceDispatcherClient>(address_, protocol_);
}
TF_RETURN_IF_ERROR(grpc_util::Retry(
[this, split, end_of_splits]() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return dispatcher_->GetSplit(iteration_id_, repetition_,
split_provider_index_, *split,
*end_of_splits);
},
"get next split",
Env::Default()->NowMicros() +
(timeout_ms_ * EnvTime::kMillisToMicros)));
if (*end_of_splits) {
VLOG(1) << "Reached end of splits for iteration_id=" << iteration_id_
<< ", repetition=" << repetition_;
} else {
VLOG(1) << "Requested split: " << split->DebugString()
<< "; with iteration_id=" << iteration_id_
<< ", repetition=" << repetition_;
}
return absl::OkStatus();
}
Status DataServiceSplitProvider::Reset() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
repetition_++;
return absl::OkStatus();
}
Status DataServiceSplitProvider::Save(
std::function<std::string(std::string)> full_name,
IteratorStateWriter* writer) {
return errors::Unimplemented(
"Save is not implemented for DataServiceSplitProvider");
}
Status DataServiceSplitProvider::Restore(
std::function<std::string(std::string)> full_name,
IteratorStateReader* reader) {
return errors::Unimplemented(
"Restore is not implemented for DataServiceSplitProvider");
}
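// Builds a standalone dataset from the graph in `dataset_def` and returns its
// split providers.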
Status CreateSplitProviders(
const DatasetDef& dataset_def,
std::vector<std::unique_ptr<SplitProvider>>& split_providers) {
standalone::Dataset::Params params;
std::unique_ptr<standalone::Dataset> standalone_dataset;
TF_RETURN_IF_ERROR(standalone::Dataset::FromGraph(params, dataset_def.graph(),
&standalone_dataset));
TF_RETURN_IF_ERROR(standalone_dataset->MakeSplitProviders(&split_providers));
return absl::OkStatus();
}
}
} | #include "tensorflow/core/data/service/split_provider.h"
#include <array>
#include <cstdint>
#include <memory>
#include <tuple>
#include <vector>
#include "absl/strings/str_cat.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/framework/dataset.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::ElementsAre;
using ::testing::UnorderedElementsAre;
std::vector<int64_t> GetCardinalities(
const std::vector<std::unique_ptr<SplitProvider>>& split_providers) {
std::vector<int64_t> cardinalities;
for (const auto& split_provider : split_providers) {
cardinalities.push_back(split_provider->Cardinality());
}
return cardinalities;
}
TEST(SplitProviderTest, RangeCardinality) {
DatasetDef range_dataset = testing::RangeDataset(10);
std::vector<std::unique_ptr<SplitProvider>> split_providers;
TF_ASSERT_OK(CreateSplitProviders(range_dataset, split_providers));
EXPECT_THAT(GetCardinalities(split_providers), UnorderedElementsAre(10));
}
class RepeatedSplitProviderTest
: public ::testing::TestWithParam<std::tuple<int64_t, int64_t, int64_t>> {
public:
int64_t Range() const { return std::get<0>(GetParam()); }
int64_t RepeatCount() const { return std::get<1>(GetParam()); }
int64_t ExpectedCardinality() const { return std::get<2>(GetParam()); }
};
constexpr std::array<std::tuple<int64_t, int64_t, int64_t>, 5>
kRepeatedSplitProviderTestCases{{{9, 9, 81},
{9, 0, 0},
{9, -1, kInfiniteCardinality},
{0, -1, 0},
{-1, 1, 0}}};
TEST_P(RepeatedSplitProviderTest, RepeatedDatasetCardinality) {
TF_ASSERT_OK_AND_ASSIGN(
DatasetDef repeated_dataset,
testing::GetTestDataset(
"repeated_dataset",
{absl::StrCat(Range()), absl::StrCat(RepeatCount())}));
std::vector<std::unique_ptr<SplitProvider>> split_providers;
TF_ASSERT_OK(CreateSplitProviders(repeated_dataset, split_providers));
EXPECT_THAT(GetCardinalities(split_providers),
ElementsAre(ExpectedCardinality()));
}
INSTANTIATE_TEST_SUITE_P(MyGroup, RepeatedSplitProviderTest,
::testing::ValuesIn(kRepeatedSplitProviderTestCases));
TEST(SplitProviderTest, EnumerateCardinality) {
TF_ASSERT_OK_AND_ASSIGN(DatasetDef enumerate_dataset,
testing::GetTestDataset("enumerate_dataset"));
std::vector<std::unique_ptr<SplitProvider>> split_providers;
TF_ASSERT_OK(CreateSplitProviders(enumerate_dataset, split_providers));
EXPECT_THAT(GetCardinalities(split_providers),
UnorderedElementsAre(3, kInfiniteCardinality));
}
TEST(SplitProviderTest, ChooseFromDatasetsCardinality) {
TF_ASSERT_OK_AND_ASSIGN(DatasetDef sample_from_datasets,
testing::GetTestDataset("choose_from_datasets"));
std::vector<std::unique_ptr<SplitProvider>> split_providers;
TF_ASSERT_OK(CreateSplitProviders(sample_from_datasets, split_providers));
EXPECT_THAT(GetCardinalities(split_providers),
UnorderedElementsAre(5, 5, 5, kInfiniteCardinality));
}
TEST(SplitProviderTest, SampleFromDatasetsCardinality) {
TF_ASSERT_OK_AND_ASSIGN(DatasetDef sample_from_datasets,
testing::GetTestDataset("sample_from_datasets"));
std::vector<std::unique_ptr<SplitProvider>> split_providers;
TF_ASSERT_OK(CreateSplitProviders(sample_from_datasets, split_providers));
EXPECT_THAT(GetCardinalities(split_providers),
UnorderedElementsAre(5, 5, 5, kInfiniteCardinality));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/split_provider.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/split_provider_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4e37c532-87dd-48a8-b0b5-c412fc21335e | cpp | tensorflow/tensorflow | dispatcher_client | tensorflow/core/data/service/dispatcher_client.cc | tensorflow/core/data/service/dispatcher_client_test.cc | #include "tensorflow/core/data/service/dispatcher_client.h"
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "grpcpp/client_context.h"
#include "grpcpp/create_channel.h"
#include "grpcpp/security/credentials.h"
#include "grpcpp/support/channel_arguments.h"
#include "grpcpp/support/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/credentials_factory.h"
#include "tensorflow/core/data/service/dispatcher.grpc.pb.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/grpc_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace data {
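// Lazily creates the gRPC stub and verifies that the dispatcher reports the
// same tf.data service version as this client.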
Status DataServiceDispatcherClient::Initialize() {
mutex_lock l(mu_);
if (stub_) {
return absl::OkStatus();
}
std::shared_ptr<grpc::ChannelCredentials> credentials;
TF_RETURN_IF_ERROR(
CredentialsFactory::CreateClientCredentials(protocol_, &credentials));
grpc::ChannelArguments args;
args.SetMaxReceiveMessageSize(std::numeric_limits<int32>::max());
args.SetInt(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, true);
auto channel = grpc::CreateCustomChannel(address_, credentials, args);
stub_ = DispatcherService::NewStub(channel);
GetVersionRequest req;
GetVersionResponse resp;
grpc::ClientContext ctx;
grpc::Status s = stub_->GetVersion(&ctx, req, &resp);
if (!s.ok()) {
return grpc_util::WrapError(
absl::StrCat("Failed to get dispatcher version from dispatcher "
"running at ",
address_),
s);
}
if (resp.version() != kDataServiceVersion) {
return errors::FailedPrecondition(
"Version mismatch with tf.data service server. The server is running "
"version ",
resp.version(), ", while the client is running version ",
kDataServiceVersion,
". Please ensure that the client and server side are running the "
"same version of TensorFlow. If you're running an MPM binary, make "
"sure the server is running an up-to-date MPM.");
}
return absl::OkStatus();
}
absl::StatusOr<WorkerHeartbeatResponse>
DataServiceDispatcherClient::WorkerHeartbeat(
const WorkerHeartbeatRequest& request) {
WorkerHeartbeatResponse response;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->WorkerHeartbeat(&client_ctx, request, &response);
if (!status.ok()) {
return grpc_util::WrapError("Failed to perform worker heartbeat", status);
}
return response;
}
Status DataServiceDispatcherClient::WorkerUpdate(
const std::string& worker_address,
std::vector<TaskProgress>& task_progress) {
WorkerUpdateRequest req;
req.set_worker_address(worker_address);
for (const auto& update : task_progress) {
*(req.add_updates()) = update;
}
WorkerUpdateResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->WorkerUpdate(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError("Failed to send worker update", status);
}
return absl::OkStatus();
}
Status DataServiceDispatcherClient::GetDatasetDef(const std::string& dataset_id,
DatasetDef& dataset_def) {
GetDatasetDefRequest req;
req.set_dataset_id(dataset_id);
GetDatasetDefResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->GetDatasetDef(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError("Failed to get dataset def", status);
}
dataset_def = resp.dataset_def();
return absl::OkStatus();
}
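// Fetches the next split for the given iteration, repetition, and split
// provider. When `end_of_splits` is returned as true, `split` is left unset.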
Status DataServiceDispatcherClient::GetSplit(int64_t iteration_id,
int64_t repetition,
int64_t split_provider_index,
Tensor& split,
bool& end_of_splits) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetSplitRequest req;
req.set_iteration_id(iteration_id);
req.set_repetition(repetition);
req.set_split_provider_index(split_provider_index);
GetSplitResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->GetSplit(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError("Failed to get split", status);
}
end_of_splits = resp.end_of_splits();
if (!end_of_splits) {
if (!split.FromProto(resp.split())) {
return errors::Internal("Failed to parse split tensor proto");
}
}
return absl::OkStatus();
}
Status DataServiceDispatcherClient::Snapshot(
const DatasetDef& dataset, const std::string& path,
const experimental::DistributedSnapshotMetadata& metadata) {
TF_RETURN_IF_ERROR(EnsureInitialized());
SnapshotRequest req;
*req.mutable_dataset() = dataset;
req.set_path(path);
*req.mutable_metadata() = metadata;
SnapshotResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->Snapshot(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError("Failed to snapshot", status);
}
return absl::OkStatus();
}
Status DataServiceDispatcherClient::GetSnapshotSplit(
const std::string& worker_address, const std::string& base_path,
int64_t stream_index, int64_t source_index, int64_t repetition_index,
Tensor& split, int64_t& local_split_index, bool& end_of_splits) {
GetSnapshotSplitRequest req;
req.set_worker_address(worker_address);
req.set_base_path(base_path);
req.set_stream_index(stream_index);
req.set_source_index(source_index);
req.set_repetition_index(repetition_index);
GetSnapshotSplitResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->GetSnapshotSplit(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError("Failed to get snapshot split", status);
}
local_split_index = resp.local_split_index();
end_of_splits = resp.end_of_splits();
if (end_of_splits) {
return absl::OkStatus();
}
if (!split.FromProto(resp.split())) {
return errors::Internal("Failed to parse split tensor proto: ",
resp.split().DebugString());
}
return absl::OkStatus();
}
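// Registers `dataset` with the dispatcher and returns its id via `dataset_id`.
// If `requested_dataset_id` is set, the dispatcher is asked to use that id.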
Status DataServiceDispatcherClient::RegisterDataset(
const DatasetDef& dataset, const DataServiceMetadata& metadata,
const std::optional<std::string>& requested_dataset_id,
std::string& dataset_id) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetOrRegisterDatasetRequest req;
*req.mutable_dataset() = dataset;
*req.mutable_metadata() = metadata;
if (requested_dataset_id.has_value()) {
req.set_dataset_id(*requested_dataset_id);
}
GetOrRegisterDatasetResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->GetOrRegisterDataset(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError("Failed to register dataset", status);
}
dataset_id = resp.dataset_id();
return absl::OkStatus();
}
Status DataServiceDispatcherClient::GetOrCreateJob(
const std::string& dataset_id, const ProcessingModeDef& processing_mode,
const std::optional<std::string>& job_name,
std::optional<int64_t> num_consumers, bool use_cross_trainer_cache,
TargetWorkers target_workers, int64_t& job_id) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetOrCreateJobRequest req;
req.set_dataset_id(dataset_id);
*req.mutable_processing_mode_def() = processing_mode;
if (job_name.has_value()) {
req.set_job_name(job_name.value());
}
if (num_consumers.has_value()) {
req.set_num_consumers(num_consumers.value());
}
req.set_target_workers(target_workers);
req.set_use_cross_trainer_cache(use_cross_trainer_cache);
GetOrCreateJobResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->GetOrCreateJob(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError(
absl::StrCat("Failed to get or create job for dataset with id ",
dataset_id),
status);
}
job_id = resp.job_id();
return absl::OkStatus();
}
Status DataServiceDispatcherClient::GetOrCreateIteration(
int64_t job_id, int64_t repetition, int64_t& iteration_client_id) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetOrCreateIterationRequest req;
req.set_job_id(job_id);
req.set_repetition(repetition);
GetOrCreateIterationResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->GetOrCreateIteration(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError(
absl::StrCat("Failed to get or create iteration for job with id ",
job_id),
status);
}
iteration_client_id = resp.iteration_client_id();
return absl::OkStatus();
}
Status DataServiceDispatcherClient::ReleaseIterationClient(
int64_t iteration_client_id) {
TF_RETURN_IF_ERROR(EnsureInitialized());
ReleaseIterationClientRequest req;
req.set_iteration_client_id(iteration_client_id);
ReleaseIterationClientResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->ReleaseIterationClient(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError(
absl::StrCat("Failed to release iteration client with id ",
iteration_client_id),
status);
}
return absl::OkStatus();
}
Status DataServiceDispatcherClient::MaybeRemoveTask(int64_t task_id,
int64_t consumer_index,
int64_t round,
bool& removed) {
TF_RETURN_IF_ERROR(EnsureInitialized());
MaybeRemoveTaskRequest req;
req.set_task_id(task_id);
req.set_consumer_index(consumer_index);
req.set_round(round);
MaybeRemoveTaskResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->MaybeRemoveTask(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError("Failed to call MaybeRemoveTask", status);
}
removed = resp.removed();
return absl::OkStatus();
}
Status DataServiceDispatcherClient::ClientHeartbeat(
ClientHeartbeatRequest& req, ClientHeartbeatResponse& resp) {
TF_RETURN_IF_ERROR(EnsureInitialized());
grpc::ClientContext ctx;
grpc::Status s = stub_->ClientHeartbeat(&ctx, req, &resp);
if (!s.ok()) {
return grpc_util::WrapError("Failed to get tasks", s);
}
return absl::OkStatus();
}
Status DataServiceDispatcherClient::GetWorkers(
std::vector<WorkerInfo>& workers) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetWorkersRequest req;
GetWorkersResponse resp;
grpc::ClientContext ctx;
grpc::Status s = stub_->GetWorkers(&ctx, req, &resp);
if (!s.ok()) {
return grpc_util::WrapError("Failed to get workers", s);
}
workers.clear();
for (auto& worker : resp.workers()) {
workers.push_back(worker);
}
return absl::OkStatus();
}
Status DataServiceDispatcherClient::GetDataServiceMetadata(
const std::string& dataset_id, DataServiceMetadata& metadata) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetDataServiceMetadataRequest req;
req.set_dataset_id(dataset_id);
GetDataServiceMetadataResponse resp;
grpc::ClientContext ctx;
grpc::Status s = stub_->GetDataServiceMetadata(&ctx, req, &resp);
if (!s.ok()) {
return grpc_util::WrapError("Failed to get data service metadata", s);
}
metadata = resp.metadata();
return absl::OkStatus();
}
Status DataServiceDispatcherClient::GetDataServiceConfig(
DataServiceConfig& config) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetDataServiceConfigRequest request;
GetDataServiceConfigResponse response;
grpc::ClientContext ctx;
grpc::Status s = stub_->GetDataServiceConfig(&ctx, request, &response);
if (!s.ok()) {
return grpc_util::WrapError("Failed to get data service config", s);
}
config = response.config();
return absl::OkStatus();
}
Status DataServiceDispatcherClient::DisableCompressionAtRuntime(
const std::string& dataset_id, bool disable_compression_at_runtime,
DisableCompressionAtRuntimeResponse& response) {
TF_RETURN_IF_ERROR(EnsureInitialized());
grpc::ClientContext ctx;
DisableCompressionAtRuntimeRequest request;
request.set_dataset_id(dataset_id);
request.set_disable_compression_at_runtime(disable_compression_at_runtime);
grpc::Status s = stub_->DisableCompressionAtRuntime(&ctx, request, &response);
if (!s.ok()) {
return grpc_util::WrapError(
"Failed to get runtime compression disabling decision", s);
}
return absl::OkStatus();
}
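// Retries Initialize() with an effectively unbounded deadline (kint64max) so
// that a dispatcher that is still starting up does not fail the client.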
Status DataServiceDispatcherClient::EnsureInitialized() {
return grpc_util::Retry([this] { return Initialize(); },
"Initialize dispatcher client",
kint64max);
}
}
} | #include "tensorflow/core/data/service/dispatcher_client.h"
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <optional>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/data_transfer.h"
#include "tensorflow/core/data/service/dataset_store.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/service/test_cluster.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/protobuf/snapshot.pb.h"
#include "tensorflow/core/protobuf/struct.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/path.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::data::experimental::DistributedSnapshotMetadata;
using ::tensorflow::data::testing::CreateDummyDistributedSnapshotMetadata;
using ::tensorflow::data::testing::EqualsProto;
using ::tensorflow::data::testing::InfiniteDataset;
using ::tensorflow::data::testing::LocalTempFilename;
using ::tensorflow::data::testing::RangeDataset;
using ::tensorflow::testing::StatusIs;
using ::testing::AllOf;
using ::testing::ContainsRegex;
using ::testing::HasSubstr;
constexpr const char kProtocol[] = "grpc";
DataServiceMetadata GetDefaultMetadata() {
StructuredValue decoded_spec;
TensorShapeProto::Dim* dim =
decoded_spec.mutable_tensor_shape_value()->add_dim();
dim->set_size(1);
dim->set_name(absl::StrCat("dim"));
DataServiceMetadata metadata;
metadata.set_element_spec(decoded_spec.SerializeAsString());
metadata.set_compression(DataServiceMetadata::COMPRESSION_SNAPPY);
metadata.set_cardinality(kUnknownCardinality);
return metadata;
}
class DispatcherClientTest : public ::testing::Test {
protected:
absl::Status SetUpTfDataService(int64_t num_workers,
int64_t worker_max_concurrent_snapshots = 0) {
TestCluster::Config config;
config.num_workers = num_workers;
config.work_dir = tsl::io::JoinPath(tsl::testing::TmpDir(), "work_dir");
config.worker_max_concurrent_snapshots = worker_max_concurrent_snapshots;
test_cluster_ = std::make_unique<TestCluster>(config);
TF_RETURN_IF_ERROR(test_cluster_->Initialize());
dispatcher_client_ = std::make_unique<DataServiceDispatcherClient>(
test_cluster_->DispatcherAddress(), kProtocol);
return absl::OkStatus();
}
absl::StatusOr<std::string> RegisterDataset(
const DatasetDef& dataset, const DataServiceMetadata& metadata,
const std::optional<std::string>& requested_dataset_id = std::nullopt) {
std::string dataset_id;
TF_RETURN_IF_ERROR(dispatcher_client_->RegisterDataset(
dataset, metadata, requested_dataset_id, dataset_id));
return dataset_id;
}
absl::StatusOr<absl::flat_hash_set<std::string>> StartDummySnapshots(
int64_t num_snapshots) {
DistributedSnapshotMetadata metadata =
CreateDummyDistributedSnapshotMetadata();
absl::flat_hash_set<std::string> directories;
for (int64_t i = 0; i < num_snapshots; ++i) {
directories.insert(LocalTempFilename());
}
for (const auto& directory : directories) {
TF_RETURN_IF_ERROR(
dispatcher_client_->Snapshot(RangeDataset(10), directory, metadata));
}
return directories;
}
std::unique_ptr<TestCluster> test_cluster_;
std::unique_ptr<DataServiceDispatcherClient> dispatcher_client_;
};
TEST_F(DispatcherClientTest, GetDataServiceMetadata) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceMetadata metadata = GetDefaultMetadata();
metadata.set_cardinality(10);
TF_ASSERT_OK_AND_ASSIGN(const std::string dataset_id,
RegisterDataset(RangeDataset(10), metadata));
DataServiceMetadata result;
TF_ASSERT_OK(dispatcher_client_->GetDataServiceMetadata(dataset_id, result));
EXPECT_THAT(result, EqualsProto(metadata));
}
TEST_F(DispatcherClientTest, DatasetDoesNotExist) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceMetadata metadata = GetDefaultMetadata();
EXPECT_THAT(
      dispatcher_client_->GetDataServiceMetadata(/*dataset_id=*/"not-found",
                                                 metadata),
StatusIs(error::NOT_FOUND, HasSubstr("Dataset id not-found not found")));
}
TEST_F(DispatcherClientTest, SnapshotAlreadyStarted) {
TF_ASSERT_OK(SetUpTfDataService(1));
DistributedSnapshotMetadata metadata =
CreateDummyDistributedSnapshotMetadata();
std::string directory = LocalTempFilename();
TF_ASSERT_OK(
dispatcher_client_->Snapshot(RangeDataset(10), directory, metadata));
EXPECT_THAT(
dispatcher_client_->Snapshot(RangeDataset(10), directory, metadata),
StatusIs(error::ALREADY_EXISTS, HasSubstr("already started")));
}
TEST_F(DispatcherClientTest, GetDataServiceConfig) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceConfig config;
TF_ASSERT_OK(dispatcher_client_->GetDataServiceConfig(config));
EXPECT_EQ(config.deployment_mode(), DEPLOYMENT_MODE_COLOCATED);
}
TEST_F(DispatcherClientTest, SnapshotSkeletonWritten) {
TF_ASSERT_OK(SetUpTfDataService(1));
TF_ASSERT_OK_AND_ASSIGN(absl::flat_hash_set<std::string> paths,
StartDummySnapshots(3));
for (const auto& path : paths) {
TF_ASSERT_OK(Env::Default()->FileExists(CommittedChunksDirectory(path)));
TF_ASSERT_OK(Env::Default()->FileExists(StreamsDirectory(path)));
}
}
TEST_F(DispatcherClientTest, SnapshotMetadataAndDatasetDefWritten) {
TF_ASSERT_OK(SetUpTfDataService(1));
TF_ASSERT_OK_AND_ASSIGN(absl::flat_hash_set<std::string> paths,
StartDummySnapshots(3));
for (const auto& path : paths) {
TF_ASSERT_OK(
Env::Default()->FileExists(io::JoinPath(path, "snapshot.metadata")));
TF_ASSERT_OK(
Env::Default()->FileExists(io::JoinPath(path, "dataset_def.proto")));
}
}
TEST_F(DispatcherClientTest, SnapshotsInHeartbeat) {
  TF_ASSERT_OK(
      SetUpTfDataService(1, /*worker_max_concurrent_snapshots=*/3));
TF_ASSERT_OK_AND_ASSIGN(absl::flat_hash_set<std::string> paths,
StartDummySnapshots(3));
WorkerHeartbeatRequest worker_heartbeat_request;
worker_heartbeat_request.set_worker_address(test_cluster_->WorkerAddress(0));
for (int64_t i = 1; i <= 3; ++i) {
TF_ASSERT_OK_AND_ASSIGN(
WorkerHeartbeatResponse worker_heartbeat_response,
dispatcher_client_->WorkerHeartbeat(worker_heartbeat_request));
ASSERT_EQ(worker_heartbeat_response.snapshot_tasks_size(), i);
for (const auto& snapshot_task :
worker_heartbeat_response.snapshot_tasks()) {
ASSERT_TRUE(paths.count(snapshot_task.base_path()));
ASSERT_EQ(snapshot_task.stream_index(), 0);
}
}
}
TEST_F(DispatcherClientTest, GetSnapshotSplit) {
TF_ASSERT_OK(SetUpTfDataService(1));
TF_ASSERT_OK_AND_ASSIGN(absl::flat_hash_set<std::string> paths,
StartDummySnapshots(3));
WorkerHeartbeatRequest worker_heartbeat_request;
worker_heartbeat_request.set_worker_address(test_cluster_->WorkerAddress(0));
TF_ASSERT_OK_AND_ASSIGN(
WorkerHeartbeatResponse worker_heartbeat_response,
dispatcher_client_->WorkerHeartbeat(worker_heartbeat_request));
for (int64_t i = 0; i < 5; ++i) {
for (const auto& snapshot_task :
worker_heartbeat_response.snapshot_tasks()) {
GetSnapshotSplitRequest get_snapshot_split_request;
Tensor split;
int64_t local_split_index = 0;
bool end_of_splits = false;
      TF_ASSERT_OK(dispatcher_client_->GetSnapshotSplit(
          test_cluster_->WorkerAddress(0), snapshot_task.base_path(),
          snapshot_task.stream_index(), /*source_index=*/0,
          /*repetition_index=*/0, split, local_split_index, end_of_splits));
EXPECT_EQ(local_split_index, i);
EXPECT_FALSE(end_of_splits);
}
}
}
TEST_F(DispatcherClientTest, GetSnapshotSplitMultipleStreams) {
  TF_ASSERT_OK(
      SetUpTfDataService(3, /*worker_max_concurrent_snapshots=*/1));
TF_ASSERT_OK_AND_ASSIGN(absl::flat_hash_set<std::string> paths,
StartDummySnapshots(3));
absl::flat_hash_set<std::string> snapshots_in_progress;
for (int64_t i = 0; i < 3; ++i) {
WorkerHeartbeatRequest worker_heartbeat_request;
worker_heartbeat_request.set_worker_address(
test_cluster_->WorkerAddress(i));
TF_ASSERT_OK_AND_ASSIGN(
WorkerHeartbeatResponse worker_heartbeat_response,
dispatcher_client_->WorkerHeartbeat(worker_heartbeat_request));
EXPECT_EQ(worker_heartbeat_response.snapshot_tasks().size(), 1);
for (const auto& snapshot_task :
worker_heartbeat_response.snapshot_tasks()) {
snapshots_in_progress.insert(snapshot_task.base_path());
GetSnapshotSplitRequest get_snapshot_split_request;
Tensor split;
int64_t local_split_index = 0;
bool end_of_splits = false;
      TF_ASSERT_OK(dispatcher_client_->GetSnapshotSplit(
          test_cluster_->WorkerAddress(i), snapshot_task.base_path(),
          snapshot_task.stream_index(), /*source_index=*/0,
          /*repetition_index=*/0, split, local_split_index, end_of_splits));
EXPECT_EQ(local_split_index, 0);
EXPECT_FALSE(end_of_splits);
}
}
EXPECT_EQ(snapshots_in_progress, paths);
}
TEST_F(DispatcherClientTest, RegisterDatasetWithExplicitId) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceMetadata metadata = GetDefaultMetadata();
metadata.set_cardinality(10);
TF_ASSERT_OK_AND_ASSIGN(
const std::string dataset_id1,
      RegisterDataset(RangeDataset(10), metadata,
                      /*requested_dataset_id=*/"dataset_id"));
EXPECT_EQ(dataset_id1, "dataset_id");
TF_ASSERT_OK_AND_ASSIGN(
const std::string dataset_id2,
      RegisterDataset(RangeDataset(10), metadata,
                      /*requested_dataset_id=*/"dataset_id"));
EXPECT_EQ(dataset_id1, dataset_id2);
}
TEST_F(DispatcherClientTest, DatasetsDoNotMatch) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceMetadata metadata = GetDefaultMetadata();
metadata.set_cardinality(10);
TF_ASSERT_OK_AND_ASSIGN(
const std::string dataset_id1,
      RegisterDataset(RangeDataset(10), metadata,
                      /*requested_dataset_id=*/"dataset_id"));
EXPECT_EQ(dataset_id1, "dataset_id");
metadata.set_cardinality(kInfiniteCardinality);
EXPECT_THAT(
      RegisterDataset(InfiniteDataset(), metadata,
                      /*requested_dataset_id=*/"dataset_id"),
StatusIs(
error::INVALID_ARGUMENT,
HasSubstr(
"Datasets with the same ID should have the same structure")));
}
TEST_F(DispatcherClientTest, EnableCrossTrainerCache) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceMetadata metadata = GetDefaultMetadata();
metadata.set_cardinality(kInfiniteCardinality);
TF_ASSERT_OK_AND_ASSIGN(const std::string dataset_id,
RegisterDataset(InfiniteDataset(), metadata));
ProcessingModeDef processing_mode;
processing_mode.set_sharding_policy(ProcessingModeDef::OFF);
std::string job_name = "job";
int64_t job_id;
  TF_ASSERT_OK(dispatcher_client_->GetOrCreateJob(
      dataset_id, processing_mode, job_name, /*num_consumers=*/std::nullopt,
      /*use_cross_trainer_cache=*/true, TARGET_WORKERS_AUTO, job_id));
int64_t iteration_client_id;
  TF_ASSERT_OK(dispatcher_client_->GetOrCreateIteration(
      job_id, /*repetition=*/0, iteration_client_id));
WorkerHeartbeatRequest worker_heartbeat_request;
worker_heartbeat_request.set_worker_address(test_cluster_->WorkerAddress(0));
TF_ASSERT_OK_AND_ASSIGN(
WorkerHeartbeatResponse worker_heartbeat_response,
dispatcher_client_->WorkerHeartbeat(worker_heartbeat_request));
ASSERT_EQ(worker_heartbeat_response.new_tasks_size(), 1);
EXPECT_TRUE(worker_heartbeat_response.new_tasks(0).use_cross_trainer_cache());
}
TEST_F(DispatcherClientTest, CreateNamedJob) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceMetadata metadata = GetDefaultMetadata();
metadata.set_cardinality(10);
TF_ASSERT_OK_AND_ASSIGN(const std::string dataset_id,
RegisterDataset(RangeDataset(10), metadata));
ProcessingModeDef processing_mode;
processing_mode.set_sharding_policy(ProcessingModeDef::OFF);
std::string job_name = "job";
int64_t job_id_1 = -1;
  TF_ASSERT_OK(dispatcher_client_->GetOrCreateJob(
      dataset_id, processing_mode, job_name, /*num_consumers=*/std::nullopt,
      /*use_cross_trainer_cache=*/true, TARGET_WORKERS_AUTO, job_id_1));
int64_t job_id_2 = -2;
  TF_ASSERT_OK(dispatcher_client_->GetOrCreateJob(
      dataset_id, processing_mode, job_name, /*num_consumers=*/std::nullopt,
      /*use_cross_trainer_cache=*/true, TARGET_WORKERS_AUTO, job_id_2));
ASSERT_EQ(job_id_1, job_id_2);
}
TEST_F(DispatcherClientTest, NamedJobsDoNotMatch) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceMetadata metadata = GetDefaultMetadata();
metadata.set_cardinality(10);
TF_ASSERT_OK_AND_ASSIGN(const std::string dataset_id,
RegisterDataset(RangeDataset(10), metadata));
int64_t job_id = 0;
ProcessingModeDef processing_mode;
processing_mode.set_sharding_policy(ProcessingModeDef::OFF);
std::string job_name = "job";
  TF_ASSERT_OK(dispatcher_client_->GetOrCreateJob(
      dataset_id, processing_mode, job_name, /*num_consumers=*/std::nullopt,
      /*use_cross_trainer_cache=*/false, TARGET_WORKERS_AUTO, job_id));
processing_mode.set_sharding_policy(ProcessingModeDef::DYNAMIC);
  EXPECT_THAT(
      dispatcher_client_->GetOrCreateJob(dataset_id, processing_mode, job_name,
                                         /*num_consumers=*/std::nullopt,
                                         /*use_cross_trainer_cache=*/true,
                                         TARGET_WORKERS_AUTO, job_id),
      StatusIs(error::INVALID_ARGUMENT,
               AllOf(HasSubstr("but found an existing job with different "
                               "parameters: "),
                     ContainsRegex("Existing processing mode: <\\w*>"),
                     ContainsRegex("Requested processing mode: <\\w*>"))));
}
class DispatcherClientTest_DatasetId
    : public DispatcherClientTest,
      public ::testing::WithParamInterface<std::optional<std::string>> {};
TEST_P(DispatcherClientTest_DatasetId, SyncDatasetStoreWithDispatcherState) {
  TestCluster::Config config;
  config.num_workers = 1;
  config.work_dir = tsl::io::JoinPath(tsl::testing::TmpDir(), "work_dir");
  test_cluster_ = std::make_unique<TestCluster>(config);
  TF_ASSERT_OK(test_cluster_->Initialize());
  dispatcher_client_ = std::make_unique<DataServiceDispatcherClient>(
      test_cluster_->DispatcherAddress(), kProtocol);
  DatasetDef dataset_def = RangeDataset(10);
  std::optional<std::string> requested_dataset_id = GetParam();
  std::string dataset_id;
  TF_ASSERT_OK(dispatcher_client_->RegisterDataset(
      dataset_def, GetDefaultMetadata(),
      /*requested_dataset_id=*/std::nullopt, dataset_id));
EXPECT_EQ(dataset_id, "1000");
std::string datasets_dir = tsl::io::JoinPath(config.work_dir, "datasets");
FileSystemDatasetStore dataset_store(datasets_dir);
TF_ASSERT_OK(dataset_store.Put("1001", dataset_def));
if (requested_dataset_id.has_value()) {
TF_ASSERT_OK(dataset_store.Put(*requested_dataset_id, dataset_def));
}
TF_ASSERT_OK(dispatcher_client_->RegisterDataset(
dataset_def, GetDefaultMetadata(),
requested_dataset_id, dataset_id));
if (requested_dataset_id.has_value()) {
EXPECT_EQ(dataset_id, *requested_dataset_id);
} else {
EXPECT_EQ(dataset_id, "1001");
}
}
INSTANTIATE_TEST_SUITE_P(DatasetId, DispatcherClientTest_DatasetId,
::testing::Values(std::nullopt, "dataset_id"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/dispatcher_client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/dispatcher_client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
43917cfe-e049-4c6c-9278-844e1b636421 | cpp | tensorflow/tensorflow | grpc_worker_impl | tensorflow/core/data/service/grpc_worker_impl.cc | tensorflow/core/data/service/grpc_worker_impl_test.cc | #include "tensorflow/core/data/service/grpc_worker_impl.h"
#include <memory>
#include <string>
#include <vector>
#include "grpcpp/server_builder.h"
#include "grpcpp/server_context.h"
#include "tensorflow/core/data/service/export.pb.h"
#include "tensorflow/core/data/service/worker_impl.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
using ::grpc::ServerBuilder;
using ::grpc::ServerContext;
GrpcWorkerImpl::GrpcWorkerImpl(const experimental::WorkerConfig& config,
ServerBuilder& server_builder)
: impl_(std::make_shared<DataServiceWorkerImpl>(config)) {
server_builder.RegisterService(this);
VLOG(1) << "Registered data service worker";
}
Status GrpcWorkerImpl::Start(
const std::string& worker_address,
const std::vector<DataTransferServerInfo>& transfer_servers) {
worker_address_ = worker_address;
TF_RETURN_IF_ERROR(impl_->Start(worker_address, transfer_servers));
LocalWorkers::Add(worker_address, impl_);
return absl::OkStatus();
}
void GrpcWorkerImpl::Stop() {
LocalWorkers::Remove(worker_address_);
impl_->Stop();
}
WorkerStateExport GrpcWorkerImpl::ExportState() const {
return impl_->ExportState();
}
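// Defines each gRPC handler by forwarding the request to the shared
// DataServiceWorkerImpl and converting the resulting Status to grpc::Status.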
#define HANDLER(method) \
::grpc::Status GrpcWorkerImpl::method(ServerContext* context, \
const method##Request* request, \
method##Response* response) { \
return ToGrpcStatus(impl_->method(request, response)); \
}
HANDLER(ProcessTask);
HANDLER(GetElement);
HANDLER(GetWorkerTasks);
HANDLER(GetSnapshotTaskProgresses);
#undef HANDLER
}
} | #include "tensorflow/core/data/service/grpc_worker_impl.h"
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include "grpcpp/channel.h"
#include "grpcpp/client_context.h"
#include "grpcpp/create_channel.h"
#include "grpcpp/security/credentials.h"
#include "grpcpp/support/channel_arguments.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/credentials_factory.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/server_lib.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::grpc::Channel;
using ::grpc::ChannelArguments;
using ::grpc::ChannelCredentials;
using ::grpc::ClientContext;
constexpr const char kHostAddress[] = "localhost";
constexpr const char kProtocol[] = "grpc";
class GrpcWorkerImplTest : public ::testing::Test {
protected:
void SetUp() override {
TF_ASSERT_OK(SetUpDispatcherServer());
TF_ASSERT_OK(SetUpWorkerServer());
TF_ASSERT_OK(SetUpWorkerClientStub());
}
Status SetUpDispatcherServer() {
experimental::DispatcherConfig config;
config.set_protocol(kProtocol);
TF_RETURN_IF_ERROR(NewDispatchServer(config, dispatcher_server_));
return dispatcher_server_->Start();
}
Status SetUpWorkerServer() {
experimental::WorkerConfig config;
config.set_protocol(kProtocol);
config.set_dispatcher_address(GetDispatcherAddress());
config.set_worker_address(absl::StrCat(kHostAddress, ":%port%"));
TF_RETURN_IF_ERROR(NewWorkerServer(config, worker_server_));
return worker_server_->Start();
}
Status SetUpWorkerClientStub() {
std::shared_ptr<ChannelCredentials> credentials;
TF_RETURN_IF_ERROR(
CredentialsFactory::CreateClientCredentials(kProtocol, &credentials));
ChannelArguments args;
args.SetMaxReceiveMessageSize(std::numeric_limits<int32>::max());
args.SetInt(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, true);
std::shared_ptr<Channel> channel =
::grpc::CreateCustomChannel(GetWorkerAddress(), credentials, args);
worker_client_stub_ = WorkerService::NewStub(channel);
return absl::OkStatus();
}
std::string GetDispatcherAddress() const {
return absl::StrCat(kHostAddress, ":", dispatcher_server_->BoundPort());
}
std::string GetWorkerAddress() const {
return absl::StrCat(kHostAddress, ":", worker_server_->BoundPort());
}
std::unique_ptr<DispatchGrpcDataServer> dispatcher_server_;
std::unique_ptr<WorkerGrpcDataServer> worker_server_;
std::unique_ptr<WorkerService::Stub> worker_client_stub_;
};
TEST_F(GrpcWorkerImplTest, GetWorkerTasks) {
ClientContext ctx;
GetWorkerTasksRequest req;
GetWorkerTasksResponse resp;
TF_ASSERT_OK(
FromGrpcStatus(worker_client_stub_->GetWorkerTasks(&ctx, req, &resp)));
EXPECT_EQ(resp.tasks_size(), 0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/grpc_worker_impl.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/grpc_worker_impl_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
65d0efa3-bde3-4dea-bcb8-1f5223d73655 | cpp | tensorflow/tensorflow | byte_size | tensorflow/core/data/service/byte_size.cc | tensorflow/core/data/service/byte_size_test.cc | #include "tensorflow/core/data/service/byte_size.h"
#include <cstddef>
#include <string>
#include "absl/strings/str_cat.h"
namespace tensorflow {
namespace data {
size_t ByteSize::ToUnsignedBytes() const { return bytes_; }
double ByteSize::ToDoubleBytes() const { return static_cast<double>(bytes_); }
double ByteSize::ToDoubleKB() const { return *this / ByteSize::KB(1); }
double ByteSize::ToDoubleMB() const { return *this / ByteSize::MB(1); }
double ByteSize::ToDoubleGB() const { return *this / ByteSize::GB(1); }
double ByteSize::ToDoubleTB() const { return *this / ByteSize::TB(1); }
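// Renders sizes below 1KB in bytes, below 1MB in KB, and so on up to TB;
// e.g. ByteSize::KB(1.5).DebugString() == "1.5KB" and
// ByteSize::MB(1024).DebugString() == "1GB".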
std::string ByteSize::DebugString() const {
if (*this < ByteSize::KB(1)) {
return absl::StrCat(ToUnsignedBytes(), "B");
}
if (*this < ByteSize::MB(1)) {
return absl::StrCat(ToDoubleKB(), "KB");
}
if (*this < ByteSize::GB(1)) {
return absl::StrCat(ToDoubleMB(), "MB");
}
if (*this < ByteSize::TB(1)) {
return absl::StrCat(ToDoubleGB(), "GB");
}
return absl::StrCat(ToDoubleTB(), "TB");
}
}
} | #include "tensorflow/core/data/service/byte_size.h"
#include <cstddef>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::Eq;
using ::testing::Not;
TEST(ByteSizeTest, Constructors) {
EXPECT_EQ(ByteSize::Bytes(0), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::Bytes(1), ByteSize::Bytes(1));
EXPECT_EQ(ByteSize::Bytes(1024), ByteSize::Bytes(1024));
EXPECT_EQ(ByteSize::Bytes(1024), ByteSize::KB(1));
EXPECT_EQ(ByteSize::Bytes(size_t{1} << 63), ByteSize::TB(size_t{1} << 23));
EXPECT_EQ(ByteSize::KB(0), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::KB(1), ByteSize::Bytes(size_t{1} << 10));
EXPECT_EQ(ByteSize::KB(0.9), ByteSize::Bytes(1024 * 0.9));
EXPECT_EQ(ByteSize::KB(1.5), ByteSize::Bytes(1024 * 1.5));
EXPECT_EQ(ByteSize::KB(1.5), ByteSize::KB(1.5));
EXPECT_EQ(ByteSize::KB(1024), ByteSize::MB(1));
EXPECT_EQ(ByteSize::MB(0), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::MB(1), ByteSize::Bytes(size_t{1} << 20));
EXPECT_EQ(ByteSize::MB(0.9), ByteSize::Bytes(size_t{1} << 20) * 0.9);
EXPECT_EQ(ByteSize::MB(1.5), ByteSize::Bytes(size_t{1} << 20) * 1.5);
EXPECT_EQ(ByteSize::MB(1.5), ByteSize::MB(1.5));
EXPECT_EQ(ByteSize::MB(1024), ByteSize::GB(1));
EXPECT_EQ(ByteSize::GB(0), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::GB(1), ByteSize::Bytes(size_t{1} << 30));
EXPECT_EQ(ByteSize::GB(0.9), ByteSize::Bytes(size_t{1} << 30) * 0.9);
EXPECT_EQ(ByteSize::GB(1.5), ByteSize::Bytes(size_t{1} << 30) * 1.5);
EXPECT_EQ(ByteSize::GB(1.5), ByteSize::GB(1.5));
EXPECT_EQ(ByteSize::GB(1024), ByteSize::TB(1));
EXPECT_EQ(ByteSize::TB(0), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::TB(1), ByteSize::Bytes(size_t{1} << 40));
EXPECT_EQ(ByteSize::TB(0.9), ByteSize::Bytes(size_t{1} << 40) * 0.9);
EXPECT_EQ(ByteSize::TB(1.5), ByteSize::Bytes(size_t{1} << 40) * 1.5);
EXPECT_EQ(ByteSize::TB(1.5), ByteSize::TB(1.5));
EXPECT_EQ(ByteSize::TB(1024), ByteSize::TB(1024));
EXPECT_EQ(ByteSize::TB(size_t{1} << 23), ByteSize::TB(size_t{1} << 23));
EXPECT_THAT(ByteSize::Bytes(0), Not(Eq(ByteSize::Bytes(1))));
EXPECT_THAT(ByteSize::Bytes(1025), Not(Eq(ByteSize::KB(1))));
EXPECT_THAT(ByteSize::KB(1), Not(Eq(ByteSize::MB(1))));
EXPECT_THAT(ByteSize::MB(1), Not(Eq(ByteSize::GB(1))));
EXPECT_THAT(ByteSize::GB(1), Not(Eq(ByteSize::TB(1))));
EXPECT_THAT(ByteSize::TB(1), Not(Eq(ByteSize::TB(2))));
}
TEST(ByteSizeTest, ConstexprConstruction) {
constexpr ByteSize default_byte_size;
EXPECT_EQ(default_byte_size, ByteSize::Bytes(0));
constexpr ByteSize bytes = ByteSize::Bytes(1);
EXPECT_EQ(bytes, ByteSize::Bytes(1));
constexpr ByteSize kb = ByteSize::KB(1);
EXPECT_EQ(kb, ByteSize::KB(1));
constexpr ByteSize mb = ByteSize::MB(1);
EXPECT_EQ(mb, ByteSize::MB(1));
constexpr ByteSize gb = ByteSize::GB(1);
EXPECT_EQ(gb, ByteSize::GB(1));
constexpr ByteSize tb = ByteSize::TB(1);
EXPECT_EQ(tb, ByteSize::TB(1));
constexpr ByteSize tb_copy(tb);
EXPECT_EQ(tb_copy, tb);
}
TEST(ByteSizeTest, ConvertToBytes) {
EXPECT_EQ(ByteSize::Bytes(0).ToUnsignedBytes(), 0);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(0).ToDoubleBytes(), 0);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(0).ToDoubleKB(), 0);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(0).ToDoubleMB(), 0);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(0).ToDoubleGB(), 0);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(0).ToDoubleTB(), 0);
EXPECT_EQ(ByteSize::Bytes(1).ToUnsignedBytes(), 1);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(1).ToDoubleBytes(), 1.0);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(1).ToDoubleKB(), 1.0 / 1024);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(1).ToDoubleMB(), 1.0 / 1024 / 1024);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(1).ToDoubleGB(), 1.0 / 1024 / 1024 / 1024);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(1).ToDoubleTB(),
1.0 / 1024 / 1024 / 1024 / 1024);
EXPECT_EQ(ByteSize::KB(0.25).ToUnsignedBytes(), 0.25 * (size_t{1} << 10));
EXPECT_DOUBLE_EQ(ByteSize::KB(0.25).ToDoubleBytes(), 0.25 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::KB(0.25).ToDoubleKB(), 0.25);
EXPECT_DOUBLE_EQ(ByteSize::KB(0.25).ToDoubleMB(), 0.25 / 1024);
EXPECT_DOUBLE_EQ(ByteSize::KB(0.25).ToDoubleGB(), 0.25 / 1024 / 1024);
EXPECT_DOUBLE_EQ(ByteSize::KB(0.25).ToDoubleTB(), 0.25 / 1024 / 1024 / 1024);
EXPECT_EQ(ByteSize::MB(0.5).ToUnsignedBytes(), 0.5 * (size_t{1} << 20));
EXPECT_DOUBLE_EQ(ByteSize::MB(0.5).ToDoubleBytes(), 0.5 * 1024 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::MB(0.5).ToDoubleKB(), 0.5 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::MB(0.5).ToDoubleMB(), 0.5);
EXPECT_DOUBLE_EQ(ByteSize::MB(0.5).ToDoubleGB(), 0.5 / 1024);
EXPECT_DOUBLE_EQ(ByteSize::MB(0.5).ToDoubleTB(), 0.5 / 1024 / 1024);
EXPECT_EQ(ByteSize::GB(10).ToUnsignedBytes(), 10.0 * (size_t{1} << 30));
EXPECT_DOUBLE_EQ(ByteSize::GB(10).ToDoubleBytes(), 10.0 * 1024 * 1024 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::GB(10).ToDoubleKB(), 10.0 * 1024 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::GB(10).ToDoubleMB(), 10.0 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::GB(10).ToDoubleGB(), 10.0);
EXPECT_DOUBLE_EQ(ByteSize::GB(10).ToDoubleTB(), 10.0 / 1024);
EXPECT_EQ(ByteSize::TB(1024).ToUnsignedBytes(), 1024 * (size_t{1} << 40));
EXPECT_DOUBLE_EQ(ByteSize::TB(1024).ToDoubleBytes(),
1024.0 * 1024 * 1024 * 1024 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::TB(1024).ToDoubleKB(),
1024.0 * 1024 * 1024 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::TB(1024).ToDoubleMB(), 1024.0 * 1024 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::TB(1024).ToDoubleGB(), 1024.0 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::TB(1024).ToDoubleTB(), 1024.0);
}
TEST(ByteSizeTest, Arithmetics) {
EXPECT_EQ(ByteSize::Bytes(0) + ByteSize::Bytes(0), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::Bytes(0) + ByteSize::Bytes(1), ByteSize::Bytes(1));
EXPECT_EQ(ByteSize::Bytes(512) + ByteSize::Bytes(512), ByteSize::KB(1));
EXPECT_EQ(ByteSize::Bytes(512) + ByteSize::KB(1), ByteSize::KB(1.5));
EXPECT_EQ(ByteSize::KB(0.5) + ByteSize::KB(1), ByteSize::KB(1.5));
EXPECT_EQ(ByteSize::MB(1) + ByteSize::KB(512), ByteSize::MB(1.5));
EXPECT_EQ(ByteSize::MB(1) + ByteSize::Bytes(512), ByteSize::Bytes(1049088));
EXPECT_EQ(ByteSize::GB(0.5) + ByteSize::MB(256) + ByteSize::MB(256),
ByteSize::GB(1));
std::vector<ByteSize> GBs(1024, ByteSize::GB(1));
EXPECT_EQ(absl::c_accumulate(GBs, ByteSize::Bytes(0)), ByteSize::TB(1));
EXPECT_EQ(ByteSize::TB(1) + ByteSize::TB(0.5) + ByteSize::GB(512),
ByteSize::TB(2));
EXPECT_EQ(ByteSize::Bytes(0) - ByteSize::Bytes(0), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::KB(1) - ByteSize::Bytes(512), ByteSize::KB(0.5));
EXPECT_EQ(ByteSize::MB(1) - ByteSize::KB(512) - ByteSize::KB(512),
ByteSize::MB(0));
EXPECT_EQ(ByteSize::GB(1) - ByteSize::MB(512), ByteSize::GB(0.5));
EXPECT_EQ(ByteSize::GB(0.5) - ByteSize::MB(512), ByteSize::GB(0));
EXPECT_EQ(ByteSize::GB(1) - ByteSize::MB(512) - ByteSize::MB(512),
ByteSize::GB(0));
EXPECT_EQ(ByteSize::TB(1) - ByteSize::GB(512) - ByteSize::GB(512),
ByteSize::GB(0));
EXPECT_EQ(ByteSize::Bytes(0) - ByteSize::Bytes(1), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::Bytes(0) - ByteSize::GB(1), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::MB(1) - ByteSize::GB(1), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::Bytes(0) * 0, ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::KB(1) * 0, ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::MB(1) * 0, ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::GB(1) * 0, ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::TB(1) * 0, ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::Bytes(1) * 1024, ByteSize::KB(1));
EXPECT_EQ(ByteSize::KB(1) * 1024, ByteSize::MB(1));
EXPECT_EQ(ByteSize::MB(1) * 1024, ByteSize::GB(1));
EXPECT_EQ(ByteSize::GB(1) * 1024, ByteSize::TB(1));
EXPECT_EQ(ByteSize::Bytes(1) * 1.1, ByteSize::Bytes(1));
EXPECT_EQ(ByteSize::KB(1) * 1.2, ByteSize::KB(1.2));
EXPECT_EQ(ByteSize::MB(1) * 1.3, ByteSize::MB(1.3));
EXPECT_EQ(ByteSize::GB(1) * 1.4, ByteSize::GB(1.4));
EXPECT_EQ(ByteSize::TB(1) * 1.5, ByteSize::TB(1.5));
EXPECT_EQ(ByteSize::KB(1) * 0.5, ByteSize::Bytes(512));
EXPECT_EQ(ByteSize::MB(1) * 0.5, ByteSize::KB(512));
EXPECT_EQ(ByteSize::GB(1) * 0.5, ByteSize::MB(512));
EXPECT_EQ(ByteSize::TB(1) * 0.25, ByteSize::GB(256));
EXPECT_EQ(1024 * ByteSize::Bytes(1), ByteSize::KB(1));
EXPECT_EQ(1024 * ByteSize::KB(1), ByteSize::MB(1));
EXPECT_EQ(1024 * ByteSize::MB(1), ByteSize::GB(1));
EXPECT_EQ(1024 * ByteSize::GB(1), ByteSize::TB(1));
EXPECT_EQ(0.9 * ByteSize::TB(1), ByteSize::GB(921.6));
EXPECT_EQ(0 * ByteSize::TB(1), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::Bytes(0) / 1, ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::KB(1) / 2, ByteSize::KB(0.5));
EXPECT_EQ(ByteSize::MB(1) / 2, ByteSize::KB(512));
EXPECT_EQ(ByteSize::GB(1) / 2, ByteSize::MB(512));
EXPECT_EQ(ByteSize::TB(1.5) / 2, ByteSize::GB(768));
EXPECT_EQ(ByteSize::KB(1) / 0.5, ByteSize::KB(2));
EXPECT_EQ(ByteSize::MB(1) / 0.5, ByteSize::MB(2));
EXPECT_EQ(ByteSize::GB(1) / 0.5, ByteSize::GB(2));
EXPECT_EQ(ByteSize::TB(1) / 0.25, ByteSize::TB(4));
EXPECT_DOUBLE_EQ(ByteSize::Bytes(0) / ByteSize::KB(1), 0.0);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(1) / ByteSize::TB(1),
1.0 / 1024 / 1024 / 1024 / 1024);
EXPECT_DOUBLE_EQ(ByteSize::KB(1) / ByteSize::KB(2), 0.5);
EXPECT_DOUBLE_EQ(ByteSize::KB(512) / ByteSize::MB(1), 0.5);
EXPECT_DOUBLE_EQ(ByteSize::KB(1) / ByteSize::MB(1), 1.0 / 1024.0);
EXPECT_DOUBLE_EQ(ByteSize::MB(1) / ByteSize::GB(1), 1.0 / 1024.0);
EXPECT_DOUBLE_EQ(ByteSize::GB(1) / ByteSize::TB(1), 1.0 / 1024.0);
}
TEST(ByteSizeTest, Assignments) {
ByteSize byte_size;
EXPECT_EQ(byte_size, ByteSize::Bytes(0));
byte_size = ByteSize::Bytes(1);
EXPECT_EQ(byte_size, ByteSize::Bytes(1));
for (size_t i = 0; i < 1023; ++i) {
byte_size += ByteSize::Bytes(1);
}
EXPECT_EQ(byte_size, ByteSize::KB(1));
for (size_t i = 0; i < 10; ++i) {
byte_size *= 2;
}
EXPECT_EQ(byte_size, ByteSize::MB(1));
byte_size *= 1024 * 1024;
EXPECT_EQ(byte_size, ByteSize::TB(1));
for (size_t i = 0; i < 10; ++i) {
byte_size /= 2;
}
EXPECT_EQ(byte_size, ByteSize::GB(1));
for (size_t i = 0; i < 4; ++i) {
byte_size -= ByteSize::MB(256);
}
EXPECT_EQ(byte_size, ByteSize::Bytes(0));
byte_size -= ByteSize::Bytes(1);
EXPECT_EQ(byte_size, ByteSize::Bytes(0));
}
TEST(ByteSizeTest, Comparisons) {
EXPECT_LE(ByteSize::Bytes(0), ByteSize::Bytes(0));
EXPECT_LT(ByteSize::Bytes(0), ByteSize::Bytes(1));
EXPECT_LE(ByteSize::Bytes(0), ByteSize::Bytes(1));
EXPECT_LT(ByteSize::Bytes(1), ByteSize::Bytes(1024));
EXPECT_LE(ByteSize::Bytes(1), ByteSize::Bytes(1024));
EXPECT_LT(ByteSize::Bytes(1024), ByteSize::Bytes(1024 * 1024));
EXPECT_LE(ByteSize::Bytes(1024), ByteSize::Bytes(1024 * 1024));
EXPECT_LT(ByteSize::Bytes(1024), ByteSize::KB(1.1));
EXPECT_LE(ByteSize::Bytes(1024), ByteSize::KB(1.1));
EXPECT_LE(ByteSize::KB(0), ByteSize::Bytes(0));
EXPECT_LE(ByteSize::KB(1), ByteSize::Bytes(1024));
EXPECT_LT(ByteSize::KB(0), ByteSize::Bytes(1));
EXPECT_LE(ByteSize::KB(0), ByteSize::Bytes(1));
EXPECT_LT(ByteSize::KB(0.9), ByteSize::Bytes(1024));
EXPECT_LE(ByteSize::KB(0.9), ByteSize::Bytes(1024));
EXPECT_LT(ByteSize::KB(1), ByteSize::KB(1024));
EXPECT_LE(ByteSize::KB(1), ByteSize::KB(1024));
EXPECT_LT(ByteSize::KB(1), ByteSize::MB(1));
EXPECT_LE(ByteSize::KB(1), ByteSize::MB(1));
EXPECT_LT(ByteSize::KB(1024), ByteSize::MB(1.1));
EXPECT_LE(ByteSize::KB(1024), ByteSize::MB(1.1));
EXPECT_LE(ByteSize::MB(0), ByteSize::Bytes(0));
EXPECT_LT(ByteSize::MB(0), ByteSize::Bytes(1));
EXPECT_LE(ByteSize::MB(0), ByteSize::Bytes(1));
EXPECT_LT(ByteSize::MB(0.9), ByteSize::KB(1024));
EXPECT_LE(ByteSize::MB(0.9), ByteSize::KB(1024));
EXPECT_LT(ByteSize::MB(1), ByteSize::MB(1024));
EXPECT_LE(ByteSize::MB(1), ByteSize::MB(1024));
EXPECT_LT(ByteSize::MB(1), ByteSize::GB(1));
EXPECT_LE(ByteSize::MB(1), ByteSize::GB(1));
EXPECT_LT(ByteSize::MB(1024), ByteSize::GB(1.1));
EXPECT_LE(ByteSize::MB(1024), ByteSize::GB(1.1));
EXPECT_LE(ByteSize::GB(0), ByteSize::Bytes(0));
EXPECT_LT(ByteSize::GB(0), ByteSize::Bytes(1));
EXPECT_LE(ByteSize::GB(0), ByteSize::Bytes(1));
EXPECT_LT(ByteSize::GB(0.9), ByteSize::MB(1024));
EXPECT_LE(ByteSize::GB(0.9), ByteSize::MB(1024));
EXPECT_LT(ByteSize::GB(1), ByteSize::GB(1024));
EXPECT_LE(ByteSize::GB(1), ByteSize::GB(1024));
EXPECT_LT(ByteSize::GB(1), ByteSize::TB(1));
EXPECT_LE(ByteSize::GB(1), ByteSize::TB(1));
EXPECT_LT(ByteSize::GB(1024), ByteSize::TB(1.1));
EXPECT_LE(ByteSize::GB(1024), ByteSize::TB(1.1));
EXPECT_LE(ByteSize::TB(0), ByteSize::Bytes(0));
EXPECT_LT(ByteSize::TB(0), ByteSize::Bytes(1));
EXPECT_LE(ByteSize::TB(0), ByteSize::Bytes(1));
EXPECT_LT(ByteSize::TB(0.9), ByteSize::GB(1024));
EXPECT_LE(ByteSize::TB(0.9), ByteSize::GB(1024));
EXPECT_LT(ByteSize::TB(1), ByteSize::TB(1024));
EXPECT_LE(ByteSize::TB(1), ByteSize::TB(1024));
EXPECT_LT(ByteSize::TB(1024), ByteSize::TB(1025));
EXPECT_LE(ByteSize::TB(1024), ByteSize::TB(1025));
EXPECT_GT(ByteSize::TB(1), ByteSize::GB(1));
EXPECT_GT(ByteSize::GB(1), ByteSize::MB(1));
EXPECT_GT(ByteSize::MB(1), ByteSize::KB(1));
EXPECT_GT(ByteSize::KB(1), ByteSize::Bytes(1));
EXPECT_GT(ByteSize::Bytes(1), ByteSize::Bytes(0));
EXPECT_GT(ByteSize::TB(1), ByteSize::GB(1));
EXPECT_GT(ByteSize::TB(1), ByteSize::GB(1) + ByteSize::MB(1) +
ByteSize::KB(1) + ByteSize::Bytes(1));
EXPECT_GT(ByteSize::GB(1), 0.0000001 * ByteSize::TB(1));
EXPECT_GT(ByteSize::MB(1), ByteSize::KB(1) * 1023);
EXPECT_GT(ByteSize::KB(1), ByteSize::KB(3) / 4);
EXPECT_GT(ByteSize::Bytes(1), ByteSize::TB(0));
EXPECT_GE(ByteSize::TB(0.5), ByteSize::GB(0.5));
EXPECT_GE(ByteSize::GB(0.5), ByteSize::MB(0.5));
EXPECT_GE(ByteSize::MB(0.5), ByteSize::KB(0.5));
EXPECT_GE(ByteSize::KB(0.5), ByteSize::Bytes(1));
EXPECT_GE(ByteSize::Bytes(1), ByteSize::Bytes(0));
EXPECT_GE(ByteSize::TB(0), ByteSize::Bytes(0));
EXPECT_GE(ByteSize::GB(0), ByteSize::Bytes(0));
EXPECT_GE(ByteSize::MB(0), ByteSize::Bytes(0));
EXPECT_GE(ByteSize::KB(0), ByteSize::Bytes(0));
EXPECT_GE(ByteSize::Bytes(0), ByteSize::Bytes(0));
}
TEST(ByteSizeTest, DebugString) {
EXPECT_EQ(ByteSize::Bytes(0).DebugString(), "0B");
EXPECT_EQ(ByteSize::Bytes(1).DebugString(), "1B");
EXPECT_EQ(ByteSize::Bytes(size_t{1} << 10).DebugString(), "1KB");
EXPECT_EQ(ByteSize::Bytes(size_t{1} << 20).DebugString(), "1MB");
EXPECT_EQ(ByteSize::Bytes(size_t{1} << 30).DebugString(), "1GB");
EXPECT_EQ(ByteSize::Bytes(size_t{1} << 40).DebugString(), "1TB");
EXPECT_EQ(ByteSize::KB(0.5).DebugString(), "512B");
EXPECT_EQ(ByteSize::KB(1).DebugString(), "1KB");
EXPECT_EQ(ByteSize::KB(1.5).DebugString(), "1.5KB");
EXPECT_EQ(ByteSize::KB(1024).DebugString(), "1MB");
EXPECT_EQ(ByteSize::KB(1024 * 1024).DebugString(), "1GB");
EXPECT_EQ(ByteSize::KB(1024 * 1024 * 1024).DebugString(), "1TB");
EXPECT_EQ(ByteSize::MB(0.5).DebugString(), "512KB");
EXPECT_EQ(ByteSize::MB(1).DebugString(), "1MB");
EXPECT_EQ(ByteSize::MB(1.5).DebugString(), "1.5MB");
EXPECT_EQ(ByteSize::MB(1024).DebugString(), "1GB");
EXPECT_EQ(ByteSize::MB(1024 * 1024).DebugString(), "1TB");
EXPECT_EQ(ByteSize::GB(0.5).DebugString(), "512MB");
EXPECT_EQ(ByteSize::GB(1).DebugString(), "1GB");
EXPECT_EQ(ByteSize::GB(1.5).DebugString(), "1.5GB");
EXPECT_EQ(ByteSize::GB(1024).DebugString(), "1TB");
EXPECT_EQ(ByteSize::TB(0.5).DebugString(), "512GB");
EXPECT_EQ(ByteSize::TB(1).DebugString(), "1TB");
EXPECT_EQ(ByteSize::TB(1.5).DebugString(), "1.5TB");
EXPECT_EQ(ByteSize::TB(1024).DebugString(), "1024TB");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/byte_size.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/byte_size_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
288c8bde-c5d9-4a6c-a2a7-e4e91ae9a23a | cpp | tensorflow/tensorflow | auto_scaler | tensorflow/core/data/service/auto_scaler.cc | tensorflow/core/data/service/auto_scaler_test.cc | #include "tensorflow/core/data/service/auto_scaler.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "tensorflow/core/framework/metrics.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr double kAutoScalerOutlierSigmas = 1.0;
template <typename T>
double GetMedian(const absl::flat_hash_map<T, double>& rates) {
std::vector<double> sorted_rates;
for (const auto& [id, rate] : rates) {
sorted_rates.push_back(rate);
}
std::sort(sorted_rates.begin(), sorted_rates.end());
return sorted_rates[sorted_rates.size() / 2];
}
template <typename T>
double GetMean(const absl::flat_hash_map<T, double>& rates) {
double rates_sum = 0.0;
for (const auto& [id, rate] : rates) {
rates_sum += rate;
}
if (rates_sum == 0.0) return 0.0;
return rates_sum / static_cast<double>(rates.size());
}
template <typename T>
double GetStandardDeviation(const absl::flat_hash_map<T, double>& rates,
double mean) {
double squared_distances_sum = 0.0;
for (const auto& [id, rate] : rates) {
squared_distances_sum += (rate - mean) * (rate - mean);
}
if (squared_distances_sum == 0.0 || rates.size() <= 1) return 0.0;
return std::sqrt(squared_distances_sum /
static_cast<double>(rates.size() - 1));
}
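// Copies `rates` into `rates_without_outliers`, replacing any rate farther
// than `outlier_sigmas` standard deviations from the mean with the median.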
template <typename T>
void ReplaceOutliers(const absl::flat_hash_map<T, double>& rates,
std::vector<double>& rates_without_outliers,
double outlier_sigmas) {
if (rates.empty()) return;
double mean = GetMean(rates);
double median = GetMedian(rates);
double standard_deviation = GetStandardDeviation(rates, mean);
double lower_threshold = mean - standard_deviation * outlier_sigmas;
double upper_threshold = mean + standard_deviation * outlier_sigmas;
for (const auto& [id, rate] : rates) {
if (rate >= lower_threshold && rate <= upper_threshold) {
rates_without_outliers.push_back(rate);
} else {
rates_without_outliers.push_back(median);
}
}
}
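// Estimates workers as ceil(sum of consumption rates / mean worker
// throughput), computed over outlier-replaced rates. For example, consumers
// demanding 20 elements/s in total against workers averaging 4 elements/s
// yields ceil(20 / 4) = 5 workers.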
std::optional<int64_t> AutoScaler::GetOptimalNumberOfWorkers() const
TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
if (worker_throughputs_.empty() || consumption_rates_.empty())
return std::nullopt;
std::vector<double> consumption_rates_without_outliers;
ReplaceOutliers(consumption_rates_, consumption_rates_without_outliers,
kAutoScalerOutlierSigmas);
double consumption_rates_sum_ =
std::accumulate(consumption_rates_without_outliers.begin(),
consumption_rates_without_outliers.end(), 0.0);
std::vector<double> worker_throughputs_without_outliers;
ReplaceOutliers(worker_throughputs_, worker_throughputs_without_outliers,
kAutoScalerOutlierSigmas);
double worker_throughputs_sum_ =
std::accumulate(worker_throughputs_without_outliers.begin(),
worker_throughputs_without_outliers.end(), 0.0);
double average_worker_throughput =
worker_throughputs_sum_ / static_cast<double>(worker_throughputs_.size());
int64_t optimal_number_of_workers =
ceil(consumption_rates_sum_ / average_worker_throughput);
return std::max(int64_t{1}, optimal_number_of_workers);
}
absl::Status AutoScaler::ReportProcessingTime(const std::string& worker_address,
absl::Duration processing_time)
TF_LOCKS_EXCLUDED(mu_) {
if (processing_time <= absl::ZeroDuration()) {
return absl::InvalidArgumentError(absl::StrCat(
"Cannot update processing_time with a ZeroDuration or negative value: ",
absl::FormatDuration(processing_time)));
}
double worker_throughput = 1.0 / absl::ToDoubleSeconds(processing_time);
tsl::mutex_lock l(mu_);
worker_throughputs_[worker_address] = worker_throughput;
return absl::OkStatus();
}
absl::Status AutoScaler::ReportTargetProcessingTime(
int64_t consumer_id, absl::Duration target_processing_time)
TF_LOCKS_EXCLUDED(mu_) {
if (target_processing_time <= absl::ZeroDuration()) {
return absl::InvalidArgumentError(
absl::StrCat("Cannot update target_processing_time with a ZeroDuration "
"or negative value: ",
absl::FormatDuration(target_processing_time)));
}
double consumption_rate = 1.0 / absl::ToDoubleSeconds(target_processing_time);
tsl::mutex_lock l(mu_);
consumption_rates_[consumer_id] = consumption_rate;
return absl::OkStatus();
}
absl::Status AutoScaler::RemoveWorker(const std::string& worker_address)
TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
if (!worker_throughputs_.contains(worker_address))
return absl::NotFoundError(
absl::StrCat("Worker with address ", worker_address, " not found"));
worker_throughputs_.erase(worker_address);
return absl::OkStatus();
}
absl::Status AutoScaler::RemoveConsumer(int64_t consumer_id)
TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
if (!consumption_rates_.contains(consumer_id))
return absl::NotFoundError(
absl::StrCat("Consumer with ID ", consumer_id, " not found"));
consumption_rates_.erase(consumer_id);
return absl::OkStatus();
}
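// Lazily creates the per-iteration AutoScaler. Callers must hold `mu_`.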
void MultipleIterationsAutoScaler::EnsureIterationIsRegistered(
int64_t iteration_id) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!auto_scalers_.contains(iteration_id)) {
auto_scalers_[iteration_id] = std::make_unique<AutoScaler>();
}
}
absl::Status MultipleIterationsAutoScaler::UnregisterIteration(
int64_t iteration_id) TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
if (!auto_scalers_.contains(iteration_id))
return absl::NotFoundError(absl::StrCat("AutoScaler for iteration_id ",
iteration_id, " does not exist"));
auto_scalers_.erase(iteration_id);
return absl::OkStatus();
}
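// Records the estimated optimal worker count as a metric, clamped to at most
// 4x the current workers, at most current + 500, and never above 100000.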
absl::Status MultipleIterationsAutoScaler::UpdateOptimalNumberOfWorkersMetric(
int64_t current_number_of_workers) TF_LOCKS_EXCLUDED(mu_) {
if (current_number_of_workers <= 0)
return absl::InvalidArgumentError(
"The current number of workers must be positive");
std::optional<int64_t> optimal_number_of_workers =
GetOptimalNumberOfWorkers();
if (!optimal_number_of_workers)
return absl::UnavailableError(
"Cannot update the optimal number of workers metric because there are "
"no reported processing and target processing times for at least one "
"iteration");
VLOG(3) << "Estimated optimal number of workers: "
<< optimal_number_of_workers.value();
int64_t bound_optimal_number_of_workers = optimal_number_of_workers.value();
if (bound_optimal_number_of_workers > current_number_of_workers * 4 ||
bound_optimal_number_of_workers > current_number_of_workers + 500) {
bound_optimal_number_of_workers = std::min(current_number_of_workers * 4,
current_number_of_workers + 500);
}
bound_optimal_number_of_workers =
std::min(bound_optimal_number_of_workers, int64_t{100000});
VLOG(3) << "Bound optimal number of workers: "
<< bound_optimal_number_of_workers;
metrics::RecordTFDataServiceOptimalNumberOfWorkers(
bound_optimal_number_of_workers);
return absl::OkStatus();
}
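// Returns the maximum of the per-iteration estimates, sizing the fleet for
// the most demanding registered iteration. Returns std::nullopt if no
// iteration has produced an estimate yet.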
std::optional<int64_t> MultipleIterationsAutoScaler::GetOptimalNumberOfWorkers()
const TF_LOCKS_EXCLUDED(mu_) {
int64_t optimal_number_of_workers = 0;
{
tsl::tf_shared_lock l(mu_);
for (const auto& [iteration_id, auto_scaler] : auto_scalers_) {
std::optional<int64_t> current_optimal_number_of_workers =
auto_scaler->GetOptimalNumberOfWorkers();
if (!current_optimal_number_of_workers.has_value()) continue;
optimal_number_of_workers = std::max(
optimal_number_of_workers, current_optimal_number_of_workers.value());
}
}
if (optimal_number_of_workers == 0)
return std::nullopt;
else
return optimal_number_of_workers;
}
absl::Status MultipleIterationsAutoScaler::ReportProcessingTime(
int64_t iteration_id, const std::string& worker_address,
absl::Duration processing_time) TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
EnsureIterationIsRegistered(iteration_id);
absl::Status status = auto_scalers_[iteration_id]->ReportProcessingTime(
worker_address, processing_time);
return status;
}
absl::Status MultipleIterationsAutoScaler::ReportTargetProcessingTime(
int64_t iteration_id, int64_t consumer_id,
absl::Duration target_processing_time) TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
EnsureIterationIsRegistered(iteration_id);
absl::Status status = auto_scalers_[iteration_id]->ReportTargetProcessingTime(
consumer_id, target_processing_time);
return status;
}
absl::Status MultipleIterationsAutoScaler::RemoveWorker(
int64_t iteration_id, const std::string& worker_address)
TF_LOCKS_EXCLUDED(mu_) {
tsl::tf_shared_lock l(mu_);
if (!auto_scalers_.contains(iteration_id))
return absl::NotFoundError(absl::StrCat(
"There are no reported times for iteration_id ", iteration_id));
absl::Status status =
auto_scalers_[iteration_id]->RemoveWorker(worker_address);
return status;
}
absl::Status MultipleIterationsAutoScaler::RemoveConsumer(int64_t iteration_id,
int64_t consumer_id)
TF_LOCKS_EXCLUDED(mu_) {
tsl::tf_shared_lock l(mu_);
if (!auto_scalers_.contains(iteration_id))
return absl::NotFoundError(absl::StrCat(
"There are no reported times for iteration_id ", iteration_id));
absl::Status status =
auto_scalers_[iteration_id]->RemoveConsumer(consumer_id);
return status;
}
}
} | #include "tensorflow/core/data/service/auto_scaler.h"
#include <optional>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace data {
namespace {
using ::tsl::testing::StatusIs;
TEST(AutoScalerTest, GetOptimalNumberOfWorkersInitialState) {
AutoScaler auto_scaler;
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), std::nullopt);
}
TEST(AutoScalerTest, GetOptimalNumberOfWorkersNoRegisteredWorkers) {
AutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(10)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), std::nullopt);
}
TEST(AutoScalerTest, GetOptimalNumberOfWorkersNoRegisteredConsumers) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(10)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), std::nullopt);
}
TEST(AutoScalerTest, GetOptimalNumberOfWorkersExpectedEstimate1) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(auto_scaler.ReportTargetProcessingTime(0, absl::Seconds(0.025)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), 8);
}
TEST(AutoScalerTest, GetOptimalNumberOfWorkersExpectedEstimate2) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/1:20000",
absl::Seconds(0.15)));
TF_ASSERT_OK(auto_scaler.ReportTargetProcessingTime(0, absl::Seconds(0.025)));
TF_ASSERT_OK(auto_scaler.ReportTargetProcessingTime(1, absl::Seconds(0.05)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), 11);
}
TEST(AutoScalerTest, GetOptimalNumberOfWorkersExpectedEstimate3) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Seconds(0.1)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/1:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(auto_scaler.ReportTargetProcessingTime(0, absl::Seconds(0.01)));
TF_ASSERT_OK(auto_scaler.ReportTargetProcessingTime(1, absl::Seconds(0.02)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), 20);
}
TEST(AutoScalerTest, GetOptimalNumberOfWorkersRemoveOutliersTPT) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Nanoseconds(80000000)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Nanoseconds(500)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, absl::Nanoseconds(3000000)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(2, absl::Nanoseconds(2000000)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), 107);
}
TEST(AutoScalerTest, GetOptimalNumberOfWorkersRemoveOutliersPT) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Nanoseconds(80000000)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/1:20000",
absl::Nanoseconds(70000000)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/2:20000",
absl::Nanoseconds(1000)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Nanoseconds(300000)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), 244);
}
TEST(AutoScalerTest, ReportProcessingTimeNewWorker) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(10)));
}
TEST(AutoScalerTest, ReportProcessingTimeExistingWorker) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(20)));
}
TEST(AutoScalerTest, ReportProcessingTimeNewAndExisting) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/1:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/2:20000",
absl::Microseconds(30)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(30)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/1:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/2:20000",
absl::Microseconds(10)));
}
TEST(AutoScalerTest, ReportProcessingTimeZeroDuration) {
AutoScaler auto_scaler;
absl::Status result = auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::ZeroDuration());
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AutoScalerTest, ReportProcessingTimeNegativeDuration) {
AutoScaler auto_scaler;
absl::Status result = auto_scaler.ReportProcessingTime(
"/worker/task/0:20000", absl::Microseconds(-10));
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AutoScalerTest, ReportTargetProcessingTimeNewConsumer) {
AutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(10)));
}
TEST(AutoScalerTest, ReportTargetProcessingTimeExistingConsumer) {
AutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(20)));
}
TEST(AutoScalerTest, ReportTargetProcessingTimeNewAndExisting) {
AutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, absl::Microseconds(20)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(2, absl::Microseconds(30)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(30)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, absl::Microseconds(20)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(2, absl::Microseconds(10)));
}
TEST(AutoScalerTest, ReportTargetProcessingTimeZeroDuration) {
AutoScaler auto_scaler;
absl::Status result =
auto_scaler.ReportTargetProcessingTime(0, absl::ZeroDuration());
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AutoScalerTest, ReportTargetProcessingTimeNegativeDuration) {
AutoScaler auto_scaler;
absl::Status result =
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(-10));
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AutoScalerTest, RemoveWorkerSuccessful) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/1:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.RemoveWorker("/worker/task/0:20000"));
TF_ASSERT_OK(auto_scaler.RemoveWorker("/worker/task/1:20000"));
}
TEST(AutoScalerTest, RemoveNonexistentWorker) {
AutoScaler auto_scaler;
EXPECT_THAT(auto_scaler.RemoveWorker("/worker/task/0:20000"),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(AutoScalerTest, RemoveWorkerAfterNewPTReported) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.RemoveWorker("/worker/task/0:20000"));
}
TEST(AutoScalerTest, RemoveConsumerSuccessful) {
AutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(30)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, absl::Microseconds(30)));
TF_ASSERT_OK(auto_scaler.RemoveConsumer(0));
TF_ASSERT_OK(auto_scaler.RemoveConsumer(1));
}
TEST(AutoScalerTest, RemoveNonexistentConsumer) {
AutoScaler auto_scaler;
EXPECT_THAT(auto_scaler.RemoveConsumer(0),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(AutoScalerTest, RemoveConsumerAfterNewTPTReported) {
AutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(30)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.RemoveConsumer(0));
}
TEST(MultipleIterationsAutoScalerTest, UnregisterExistingIteration) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(5)));
TF_ASSERT_OK(auto_scaler.UnregisterIteration(0));
}
TEST(MultipleIterationsAutoScalerTest, UnregisterNonexistentIteration) {
MultipleIterationsAutoScaler auto_scaler;
EXPECT_THAT(auto_scaler.UnregisterIteration(0),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetricInvalidCurrentWorkers) {
MultipleIterationsAutoScaler auto_scaler;
absl::Status status = auto_scaler.UpdateOptimalNumberOfWorkersMetric(0);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kInvalidArgument));
status = auto_scaler.UpdateOptimalNumberOfWorkersMetric(-1);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetricNoReportedTimes) {
MultipleIterationsAutoScaler auto_scaler;
absl::Status status = auto_scaler.UpdateOptimalNumberOfWorkersMetric(1);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kUnavailable));
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetricNoReportedPTs) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(5)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(5)));
absl::Status status = auto_scaler.UpdateOptimalNumberOfWorkersMetric(1);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kUnavailable));
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetricNoReportedTPTs) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(10)));
absl::Status status = auto_scaler.UpdateOptimalNumberOfWorkersMetric(1);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kUnavailable));
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetricWithReportedTimes) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(5)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(5)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.UpdateOptimalNumberOfWorkersMetric(1));
monitoring::testing::CellReader<int64_t> cell_reader(
"/tensorflow/data/service/optimal_number_of_workers");
EXPECT_GT(cell_reader.Read(), 0);
metrics::RecordTFDataServiceOptimalNumberOfWorkers(0);
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetricIncreaseWithinLimit) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(500)));
TF_ASSERT_OK(auto_scaler.UpdateOptimalNumberOfWorkersMetric(15));
monitoring::testing::CellReader<int64_t> cell_reader(
"/tensorflow/data/service/optimal_number_of_workers");
EXPECT_EQ(cell_reader.Read(), 50);
metrics::RecordTFDataServiceOptimalNumberOfWorkers(0);
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetric4xIncreaseLimit) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(1)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.UpdateOptimalNumberOfWorkersMetric(2));
monitoring::testing::CellReader<int64_t> cell_reader(
"/tensorflow/data/service/optimal_number_of_workers");
EXPECT_EQ(cell_reader.Read(), 8);
metrics::RecordTFDataServiceOptimalNumberOfWorkers(0);
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetric500IncreaseLimit) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(1)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10000)));
TF_ASSERT_OK(auto_scaler.UpdateOptimalNumberOfWorkersMetric(1000));
monitoring::testing::CellReader<int64_t> cell_reader(
"/tensorflow/data/service/optimal_number_of_workers");
EXPECT_EQ(cell_reader.Read(), 1500);
metrics::RecordTFDataServiceOptimalNumberOfWorkers(0);
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetricMaxLimit) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(1)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(200000)));
TF_ASSERT_OK(auto_scaler.UpdateOptimalNumberOfWorkersMetric(99700));
monitoring::testing::CellReader<int64_t> cell_reader(
"/tensorflow/data/service/optimal_number_of_workers");
EXPECT_EQ(cell_reader.Read(), 100000);
metrics::RecordTFDataServiceOptimalNumberOfWorkers(0);
}
TEST(MultipleIterationsAutoScalerTest, GetOptimalNumberOfWorkersInitialState) {
MultipleIterationsAutoScaler auto_scaler;
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), std::nullopt);
}
TEST(MultipleIterationsAutoScalerTest,
GetOptimalNumberOfWorkersNoRegisteredWorkers) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(5)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(5)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), std::nullopt);
}
TEST(MultipleIterationsAutoScalerTest,
GetOptimalNumberOfWorkersNoRegisteredConsumers) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(10)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), std::nullopt);
}
TEST(MultipleIterationsAutoScalerTest,
GetOptimalNumberOfWorkersExpectedEstimate1) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Seconds(0.025)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/1:20000",
absl::Seconds(0.15)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Seconds(0.025)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 1, absl::Seconds(0.05)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), 11);
}
TEST(MultipleIterationsAutoScalerTest,
GetOptimalNumberOfWorkersExpectedEstimate2) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Seconds(0.025)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/1:20000",
absl::Seconds(0.15)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Seconds(0.025)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 1, absl::Seconds(0.05)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(2, "/worker/task/0:20000",
absl::Seconds(0.1)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(2, "/worker/task/1:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(2, 0, absl::Seconds(0.01)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(2, 1, absl::Seconds(0.02)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), 20);
}
TEST(MultipleIterationsAutoScalerTest, ReportProcessingTimeNewIteration) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
}
TEST(MultipleIterationsAutoScalerTest, ReportProcessingTimeNewWorker) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/1:20000",
absl::Microseconds(10)));
}
TEST(MultipleIterationsAutoScalerTest, ReportProcessingTimeExistingWorker) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(10)));
}
TEST(MultipleIterationsAutoScalerTest, ReportProcessingTimeNewAndExisting) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/1:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/1:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/1:20000",
absl::Microseconds(30)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/1:20000",
absl::Microseconds(30)));
}
TEST(MultipleIterationsAutoScalerTest, ReportProcessingTimeZeroDuration) {
MultipleIterationsAutoScaler auto_scaler;
absl::Status result = auto_scaler.ReportProcessingTime(
0, "/worker/task/0:20000", absl::ZeroDuration());
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(MultipleIterationsAutoScalerTest, ReportProcessingTimeNegativeDuration) {
MultipleIterationsAutoScaler auto_scaler;
absl::Status result = auto_scaler.ReportProcessingTime(
0, "/worker/task/0:20000", absl::Microseconds(-10));
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(MultipleIterationsAutoScalerTest, ReportTargetProcessingTimeNewIteration) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
}
TEST(MultipleIterationsAutoScalerTest, ReportTargetProcessingTimeNewConsumer) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 1, absl::Microseconds(10)));
}
TEST(MultipleIterationsAutoScalerTest,
ReportTargetProcessingTimeExistingWorker) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(10)));
}
TEST(MultipleIterationsAutoScalerTest,
ReportTargetProcessingTimeNewAndExisting) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 1, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 1, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(20)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 1, absl::Microseconds(30)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(20)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 1, absl::Microseconds(30)));
}
TEST(MultipleIterationsAutoScalerTest, ReportTargetProcessingTimeZeroDuration) {
MultipleIterationsAutoScaler auto_scaler;
absl::Status result =
auto_scaler.ReportTargetProcessingTime(0, 0, absl::ZeroDuration());
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(MultipleIterationsAutoScalerTest,
ReportTargetProcessingTimeNegativeDuration) {
MultipleIterationsAutoScaler auto_scaler;
absl::Status result =
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(-10));
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(MultipleIterationsAutoScalerTest, RemoveWorkerUnregisteredIteration) {
MultipleIterationsAutoScaler auto_scaler;
EXPECT_THAT(auto_scaler.RemoveWorker(0, "/worker/task/1:20000"),
StatusIs(absl::StatusCode::kNotFound));
EXPECT_THAT(auto_scaler.RemoveWorker(1, "/worker/task/1:20000"),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(MultipleIterationsAutoScalerTest, RemoveWorkerSuccessful) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.RemoveWorker(0, "/worker/task/0:20000"));
TF_ASSERT_OK(auto_scaler.RemoveWorker(1, "/worker/task/0:20000"));
}
TEST(MultipleIterationsAutoScalerTest, RemoveNonexistentWorker) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
EXPECT_THAT(auto_scaler.RemoveWorker(0, "/worker/task/1:20000"),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(MultipleIterationsAutoScalerTest, RemoveWorkerAfterNewPTReported) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.RemoveWorker(0, "/worker/task/0:20000"));
}
TEST(MultipleIterationsAutoScalerTest, RemoveConsumerUnregisteredIteration) {
MultipleIterationsAutoScaler auto_scaler;
EXPECT_THAT(auto_scaler.RemoveConsumer(0, 0),
StatusIs(absl::StatusCode::kNotFound));
EXPECT_THAT(auto_scaler.RemoveConsumer(1, 0),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(MultipleIterationsAutoScalerTest, RemoveConsumerSuccessful) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.RemoveConsumer(0, 0));
TF_ASSERT_OK(auto_scaler.RemoveConsumer(1, 0));
}
TEST(MultipleIterationsAutoScalerTest, RemoveNonexistentConsumer) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
EXPECT_THAT(auto_scaler.RemoveConsumer(0, 1),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(MultipleIterationsAutoScalerTest, RemoveConsumerAfterNewTPTReported) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.RemoveConsumer(0, 0));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/auto_scaler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/auto_scaler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
32b6f306-4d70-4f3a-8887-4af878ce1da0 | cpp | tensorflow/tensorflow | task_runner | tensorflow/core/data/service/task_runner.cc | tensorflow/core/data/service/task_runner_test.cc | #include "tensorflow/core/data/service/task_runner.h"
#include <algorithm>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "tensorflow/core/data/service/byte_size.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/cross_trainer_cache.h"
#include "tensorflow/core/data/service/data_transfer.h"
#include "tensorflow/core/data/service/thread_safe_buffer.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/data/standalone.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
namespace {
constexpr int64_t kWaitBeforeSkipUs = 100 * 1000;  // 100ms.
constexpr size_t kDefaultCrossTrainerCacheSizeBytes =
    10 * (size_t{1} << 30);  // 10GB.
}
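// StandaloneTaskIterator adapts a standalone::Dataset/Iterator pair to the
// TaskIterator interface consumed by the task runners below. The dataset is
// retained both to keep it alive for the iterator's lifetime and to answer
// Cardinality() queries.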
StandaloneTaskIterator::StandaloneTaskIterator(
std::unique_ptr<standalone::Dataset> dataset,
std::unique_ptr<standalone::Iterator> iterator)
: dataset_(std::move(dataset)), iterator_(std::move(iterator)) {}
Status StandaloneTaskIterator::GetNext(std::vector<Tensor>& element,
bool& end_of_sequence) {
return iterator_->GetNext(&element, &end_of_sequence);
}
int64_t StandaloneTaskIterator::Cardinality() const {
return dataset_->Get()->Cardinality();
}
absl::StatusOr<std::vector<Tensor>> StandaloneTaskIterator::Save() {
return iterator_->Save();
}
Status StandaloneTaskIterator::Restore(
const std::vector<Tensor>& saved_iterator) {
return iterator_->Restore(saved_iterator);
}
std::shared_ptr<model::Model> StandaloneTaskIterator::model() const {
return iterator_->model();
}
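// Chooses a task runner implementation based on the task definition:
// round-robin when the task specifies a fixed number of consumers,
// cross-trainer caching when use_cross_trainer_cache() is set, and
// first-come-first-served otherwise. Round-robin reads additionally require
// an input whose cardinality is infinite (or unknown), since every consumer
// must receive an element in every round.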
Status TaskRunner::Create(const experimental::WorkerConfig& worker_config,
const TaskDef& task_def,
std::unique_ptr<TaskIterator> iterator,
std::unique_ptr<TaskRunner>& out) {
if (task_def.optional_num_consumers_case() == TaskDef::kNumConsumers) {
int64_t cardinality = iterator->Cardinality();
if (cardinality != kInfiniteCardinality &&
cardinality != kUnknownCardinality) {
return errors::FailedPrecondition(
"Round robin reads require that the input dataset has infinite "
"cardinality, but the dataset has cardinality ",
cardinality,
". Consider adding a `.repeat()` transformation to the dataset.");
}
out = std::make_unique<RoundRobinTaskRunner>(std::move(iterator),
task_def.num_consumers(),
task_def.worker_address());
} else if (task_def.use_cross_trainer_cache()) {
const size_t max_cache_size_bytes =
worker_config.cross_trainer_cache_size_bytes() > 0
? worker_config.cross_trainer_cache_size_bytes()
: kDefaultCrossTrainerCacheSizeBytes;
out = std::make_unique<CachingTaskRunner>(std::move(iterator),
max_cache_size_bytes);
} else {
out = std::make_unique<FirstComeFirstServedTaskRunner>(std::move(iterator));
}
return absl::OkStatus();
}
FirstComeFirstServedTaskRunner::FirstComeFirstServedTaskRunner(
    std::unique_ptr<TaskIterator> iterator)
    // Capture the iterator's model up front so model() reflects the
    // underlying iterator.
    : iterator_(std::move(iterator)),
      model_(iterator_->model()),
      buffer_(1) {
  RunPrefetchThread();
}
FirstComeFirstServedTaskRunner::~FirstComeFirstServedTaskRunner() { Cancel(); }
Status FirstComeFirstServedTaskRunner::GetNext(const GetElementRequest& req,
GetElementResult& result) {
if (req.allow_skip() && buffer_.Empty()) {
result.skip = true;
return absl::OkStatus();
}
return GetNext(result);
}
Status FirstComeFirstServedTaskRunner::GetNext(GetElementResult& result) {
TF_ASSIGN_OR_RETURN(result, buffer_.Pop());
return absl::OkStatus();
}
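// Body of the prefetch thread: repeatedly pulls an element from the input
// iterator and pushes it into the buffer until the buffer is cancelled or
// the iterator fails. Push() on the bounded buffer blocks while it is full,
// so with buffer_(1) at most one element is prefetched ahead of the reader.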
Status FirstComeFirstServedTaskRunner::PrefetchFn() {
while (true) {
TF_RETURN_IF_ERROR(buffer_.Push(GetNextFromInputIterator()));
}
return absl::OkStatus();
}
void FirstComeFirstServedTaskRunner::RunPrefetchThread() {
auto prefetch_fn = [this] {
Status status = PrefetchFn();
if (!status.ok()) {
buffer_.Cancel(status);
}
};
prefetch_thread_ = absl::WrapUnique(Env::Default()->StartThread(
{}, "tf_data_service_fcfs_prefetch_thread",
prefetch_fn));
}
absl::StatusOr<GetElementResult>
FirstComeFirstServedTaskRunner::GetNextFromInputIterator()
TF_LOCKS_EXCLUDED(mu_) {
GetElementResult result;
std::vector<Tensor> element;
bool end_of_task = false;
result.skip = false;
{
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(iterator_->GetNext(element, end_of_task));
result.end_of_sequence = end_of_task;
result.element_index = element_index_++;
}
if (!end_of_task) {
result.components = std::move(element);
}
return result;
}
void FirstComeFirstServedTaskRunner::Cancel() {
VLOG(2) << "Cancelling tf.data service FCFS task.";
buffer_.Cancel(errors::Cancelled("tf.data service FCFS task is cancelled."));
}
std::shared_ptr<model::Model> FirstComeFirstServedTaskRunner::model() const {
return model_;
}
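// CachingTaskRunner layers a cross-trainer cache over a
// first-come-first-served runner, so concurrent trainers reading the same
// task share a single pass over the data: each element is produced at most
// once and then served to trainers from the in-memory cache.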
CachingTaskRunner::CachingTaskRunner(std::unique_ptr<TaskIterator> iterator,
size_t max_cache_size_bytes)
: fcfs_task_runner_(std::move(iterator)),
cache_(max_cache_size_bytes,
std::make_unique<GetElementResultSequence>(fcfs_task_runner_)) {
LOG(INFO) << "Initialized tf.data service cross-trainer cache with "
<< ByteSize::Bytes(max_cache_size_bytes) << " of memory.";
}
CachingTaskRunner::~CachingTaskRunner() { Cancel(); }
Status CachingTaskRunner::GetNext(const GetElementRequest& req,
GetElementResult& result) {
TF_ASSIGN_OR_RETURN(std::shared_ptr<const GetElementResult> element,
cache_.Get(req.trainer_id()));
result = element->Copy();
return absl::OkStatus();
}
CachingTaskRunner::GetElementResultSequence::GetElementResultSequence(
FirstComeFirstServedTaskRunner& fcfs_task_runner)
: fcfs_task_runner_(fcfs_task_runner) {}
absl::StatusOr<GetElementResult>
CachingTaskRunner::GetElementResultSequence::GetNext() {
GetElementResult result;
TF_RETURN_IF_ERROR(fcfs_task_runner_.GetNext(result));
if (result.end_of_sequence) {
return errors::InvalidArgument(
"Cross-trainer caching requires the input dataset to be infinite. "
"However, it reached the end of sequence.");
}
return result;
}
size_t CachingTaskRunner::GetElementResultSequence::GetElementSizeBytes(
const GetElementResult& element) const {
return element.EstimatedMemoryUsageBytes();
}
void CachingTaskRunner::Cancel() {
VLOG(2) << "Cancelling tf.data service cross-trainer cache task.";
if (!cache_.IsCancelled()) {
cache_.Cancel(errors::Cancelled(
"tf.data service cross-trainer cache task is cancelled."));
}
fcfs_task_runner_.Cancel();
}
std::shared_ptr<model::Model> CachingTaskRunner::model() const {
return fcfs_task_runner_.model();
}
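// RoundRobinTaskRunner keeps `num_consumers_` consumers in lockstep: a round
// begins only once every consumer has requested it, and each round hands out
// exactly one element per consumer. A partial first round supports consumers
// that resume from different round indices.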
RoundRobinTaskRunner::RoundRobinTaskRunner(
std::unique_ptr<TaskIterator> iterator, int64_t num_consumers,
string worker_address)
: num_consumers_(num_consumers),
worker_address_(worker_address),
buffer_(num_consumers_),
prefetch_thread_(std::move(iterator), num_consumers_) {
VLOG(1) << "Creating task runner for distributing data round-robin to "
<< num_consumers << " consumers";
}
Status RoundRobinTaskRunner::ValidateRequest(const GetElementRequest& req) {
if (req.consumer_index() < 0 || req.round_index() < 0) {
return errors::FailedPrecondition(
"RoundRobinTaskRunner needs to know the consumer index and element "
"index of each request.");
}
if (req.consumer_index() >= num_consumers_) {
return errors::FailedPrecondition(
"Requesting data for consumer index ", req.consumer_index(),
", but the task is configured for only ", num_consumers_, " consumers");
}
return absl::OkStatus();
}
Status RoundRobinTaskRunner::PrepareFullRound(int64_t wait_us)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
VLOG(1) << worker_address_ << ": Preparing full round for round "
<< current_round_;
TF_RETURN_IF_ERROR(prefetch_thread_.FillBuffer(wait_us, buffer_));
round_skipped_ = buffer_.empty();
new_round_cv_.notify_all();
return absl::OkStatus();
}
Status RoundRobinTaskRunner::PreparePartialRound()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
VLOG(1) << worker_address_ << ": Starting partial round " << first_round_
<< " for " << requests_[first_round_].size() << " consumers";
current_round_ = first_round_;
new_round_cv_.notify_all();
auto next_round_request = *(requests_[first_round_ + 1].begin()->second);
if (next_round_request.skipped_previous_round()) {
VLOG(1) << "Skipping partial round";
round_skipped_ = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(prefetch_thread_.FillBuffer(-1, buffer_));
round_skipped_ = false;
return absl::OkStatus();
}
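// Registers `req` for its round and blocks until that round is ready. The
// final consumer to arrive fills the round's buffer (waiting at most
// kWaitBeforeSkipUs when `allow_skip` is set, otherwise indefinitely), while
// earlier arrivals wait on new_round_cv_. An empty buffer marks the round as
// skipped.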
Status RoundRobinTaskRunner::PrepareRound(const GetElementRequest& req) {
mutex_lock l(mu_);
first_round_ = std::min(first_round_, req.round_index());
absl::flat_hash_map<int64_t, const GetElementRequest*>& round =
requests_[req.round_index()];
round[req.consumer_index()] = &req;
auto cleanup = gtl::MakeCleanup([&]() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
requests_[req.round_index()].erase(req.consumer_index());
});
if (current_round_ < req.round_index() && round.size() == num_consumers_) {
current_round_ = req.round_index();
int64_t wait_us = kWaitBeforeSkipUs;
if (!req.allow_skip()) {
wait_us = -1;
}
TF_RETURN_IF_ERROR(PrepareFullRound(wait_us));
}
if (current_round_ < 0 &&
requests_[first_round_].size() + requests_[first_round_ + 1].size() ==
num_consumers_) {
TF_RETURN_IF_ERROR(PreparePartialRound());
}
while (!cancelled_ && current_round_ < req.round_index()) {
TF_RETURN_IF_ERROR(prefetch_thread_.GetStatus());
new_round_cv_.wait(l);
}
if (current_round_ < req.round_index() && cancelled_) {
return errors::Cancelled("Worker is shutting down.");
}
if (current_round_ != req.round_index()) {
return errors::FailedPrecondition(
"Consumer ", req.consumer_index(), " requested data for round ",
req.round_index(), ", but the current round has already reached ",
current_round_,
". This may indicate that the consumer was restarted with the same "
"iteration "
"name.`");
}
return prefetch_thread_.GetStatus();
}
Status RoundRobinTaskRunner::GetNext(const GetElementRequest& req,
GetElementResult& result) {
TF_RETURN_IF_ERROR(ValidateRequest(req));
result.end_of_sequence = false;
VLOG(2) << worker_address_ << ": Received request from consumer index "
<< req.consumer_index() << " for round " << req.round_index();
TF_RETURN_IF_ERROR(PrepareRound(req));
tf_shared_lock l(mu_);
result.skip = round_skipped_;
if (round_skipped_) {
VLOG(1) << worker_address_ << ": Buffer not ready, skipping round "
<< current_round_ << " for consumer " << req.consumer_index();
return absl::OkStatus();
}
auto& buffer_result = buffer_[req.consumer_index()];
result.element_index = buffer_result->index;
std::vector<Tensor> element;
for (auto& component : buffer_result->components) {
element.push_back(tensor::DeepCopy(component));
}
if (VLOG_IS_ON(2)) {
int64_t size = 0;
for (auto& component : element) {
size += component.TotalBytes();
}
VLOG(2) << worker_address_ << ": Returning element " << result.element_index
<< " to consumer " << req.consumer_index() << " for round "
<< req.round_index() << ". element size " << size;
}
result.components = std::move(element);
return absl::OkStatus();
}
void RoundRobinTaskRunner::Cancel() {
mutex_lock l(mu_);
cancelled_ = true;
new_round_cv_.notify_all();
}
std::shared_ptr<model::Model> RoundRobinTaskRunner::model() const {
return prefetch_thread_.model();
}
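// PrefetchThread buffers up to `round_size` elements on a background thread
// so that a complete round can be handed out at once. An end-of-sequence or
// iterator error is surfaced to consumers through status_.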
PrefetchThread::PrefetchThread(std::unique_ptr<TaskIterator> iterator,
int64_t round_size)
: iterator_(std::move(iterator)), round_size_(round_size) {
thread_ = absl::WrapUnique(
Env::Default()->StartThread({}, "round-robin-prefetch", [&] { Run(); }));
}
PrefetchThread::~PrefetchThread() {
mutex_lock l(mu_);
cancelled_ = true;
cv_.notify_all();
}
void PrefetchThread::Run() {
while (true) {
{
mutex_lock l(mu_);
while (!cancelled_ && buffer_.size() >= round_size_) {
cv_.wait(l);
}
if (cancelled_) {
return;
}
}
std::vector<Tensor> element;
bool end_of_sequence;
Status s = iterator_->GetNext(element, end_of_sequence);
if (!s.ok()) {
mutex_lock l(mu_);
status_ = s;
cv_.notify_all();
return;
}
if (end_of_sequence) {
mutex_lock l(mu_);
status_ = errors::FailedPrecondition(
"Encountered end of sequence on a round-robin read iterator. "
"Please ensure that the dataset used for round-robin reading has "
"infinite cardinality, e.g. by adding a .repeat() transformation "
"at the end.");
cv_.notify_all();
return;
}
mutex_lock l(mu_);
buffer_.push_back(std::make_unique<Element>(std::move(element), index_++));
cv_.notify_all();
}
}
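// Moves a full round of `round_size_` elements into `out`, waiting up to
// `wait_us` microseconds for the buffer to fill; a negative `wait_us` means
// wait without a deadline. If the deadline expires first, `out` is left
// empty and the caller treats the round as skipped.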
Status PrefetchThread::FillBuffer(int64_t wait_us,
std::vector<std::unique_ptr<Element>>& out) {
int64_t start_us = Env::Default()->NowMicros();
out.clear();
mutex_lock l(mu_);
while (buffer_.size() < round_size_ && !cancelled_ && status_.ok()) {
int64_t remaining_us = start_us + wait_us - Env::Default()->NowMicros();
if (wait_us >= 0 && remaining_us <= 0) {
break;
}
cv_.wait_for(l, std::chrono::microseconds(remaining_us));
}
TF_RETURN_IF_ERROR(status_);
if (cancelled_) {
return errors::Cancelled("Prefetch thread cancelled");
}
if (buffer_.size() < round_size_) {
DCHECK_GE(wait_us, 0);
return absl::OkStatus();
}
for (auto& elem : buffer_) {
out.push_back(std::move(elem));
}
buffer_.clear();
cv_.notify_all();
return absl::OkStatus();
}
Status PrefetchThread::GetStatus() {
mutex_lock l(mu_);
return status_;
}
std::shared_ptr<model::Model> PrefetchThread::model() const {
return iterator_->model();
}
}
} | #include "tensorflow/core/data/service/task_runner.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/core/data/service/data_transfer.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::testing::IsOkAndHolds;
using ::tensorflow::testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::Gt;
using ::testing::HasSubstr;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAreArray;
constexpr size_t kSmallCache = 100;
constexpr size_t kLargeCache = 10 * (size_t{1} << 30);
class RangeIterator : public TaskIterator {
public:
explicit RangeIterator(const int64_t range, const bool repeat)
: range_(range), repeat_(repeat) {}
Status GetNext(std::vector<Tensor>& element, bool& end_of_sequence) override {
end_of_sequence = (next_ >= range_);
if (end_of_sequence) {
return absl::OkStatus();
}
element = {Tensor{next_++}};
if (repeat_) {
next_ = next_ % range_;
}
return absl::OkStatus();
}
int64_t Cardinality() const override {
return repeat_ ? kInfiniteCardinality : range_;
}
private:
const int64_t range_;
const bool repeat_;
int64_t next_ = 0;
};
class InfiniteRangeIterator : public TaskIterator {
public:
InfiniteRangeIterator() = default;
Status GetNext(std::vector<Tensor>& element, bool& end_of_sequence) override {
element = {Tensor{next_++}};
return absl::OkStatus();
}
int64_t Cardinality() const override { return kInfiniteCardinality; }
private:
int64_t next_ = 0;
};
template <class T>
class ElementOrErrorIterator : public TaskIterator {
public:
explicit ElementOrErrorIterator(const std::vector<StatusOr<T>>& elements)
: elements_(elements) {}
Status GetNext(std::vector<Tensor>& element, bool& end_of_sequence) override {
end_of_sequence = (next_ >= elements_.size());
if (end_of_sequence) {
return absl::OkStatus();
}
const StatusOr<T>& next_element = elements_[next_++];
TF_RETURN_IF_ERROR(next_element.status());
element = {Tensor{*next_element}};
return absl::OkStatus();
}
int64_t Cardinality() const override { return elements_.size(); }
private:
const std::vector<StatusOr<T>> elements_;
int64_t next_ = 0;
};
template <class T>
StatusOr<std::vector<T>> GetTaskRunnerOutput(TaskRunner& runner,
const GetElementRequest& request) {
std::vector<T> output;
for (bool end_of_sequence = false; !end_of_sequence;) {
GetElementResult result;
TF_RETURN_IF_ERROR(runner.GetNext(request, result));
end_of_sequence = result.end_of_sequence;
if (end_of_sequence) {
break;
}
if (result.components.size() != 1) {
return errors::Internal("GetElementResult Tensor size should be 1.");
}
output.push_back(result.components[0].unaligned_flat<T>().data()[0]);
}
return output;
}
template <class T>
StatusOr<T> GetNextFromTaskRunner(TaskRunner& runner,
const GetElementRequest& request) {
GetElementResult result;
TF_RETURN_IF_ERROR(runner.GetNext(request, result));
if (result.end_of_sequence) {
return errors::OutOfRange("TaskRunner has reached the end of sequence.");
}
if (result.components.size() != 1) {
return errors::Internal("GetElementResult Tensor size should be 1.");
}
return result.components[0].unaligned_flat<T>().data()[0];
}
template <class T>
StatusOr<std::vector<T>> GetElementsFromTaskRunner(
TaskRunner& runner, const GetElementRequest& request,
const size_t num_elements) {
std::vector<T> output;
for (size_t i = 0; i < num_elements; ++i) {
TF_ASSIGN_OR_RETURN(T next, GetNextFromTaskRunner<T>(runner, request));
output.push_back(next);
}
return output;
}
std::vector<int64_t> GetRange(const size_t range) {
std::vector<int64_t> result;
for (int64_t i = 0; i < range; ++i) {
result.push_back(i);
}
return result;
}
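// Drives one round-robin consumer: requests rounds [start_index, end_index),
// retrying a round for as long as the runner reports it was skipped, and
// appends every received element to `output`.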
Status RunConsumer(int64_t consumer_index, int64_t start_index,
int64_t end_index, TaskRunner& task_runner,
std::vector<int64_t>& output) {
for (int64_t next_index = start_index; next_index < end_index; ++next_index) {
GetElementRequest request;
request.set_round_index(next_index);
request.set_consumer_index(consumer_index);
request.set_skipped_previous_round(false);
request.set_allow_skip(false);
GetElementResult result;
do {
TF_RETURN_IF_ERROR(task_runner.GetNext(request, result));
if (!result.end_of_sequence) {
output.push_back(result.components[0].flat<int64_t>()(0));
}
} while (result.skip);
}
return absl::OkStatus();
}
}
TEST(FirstComeFirstServedTaskRunnerTest, GetNext) {
size_t range = 10;
FirstComeFirstServedTaskRunner runner(
std::make_unique<RangeIterator>(range, false));
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> output,
GetTaskRunnerOutput<int64_t>(runner, GetElementRequest()));
EXPECT_THAT(output, ElementsAreArray(GetRange(range)));
GetElementResult result;
TF_ASSERT_OK(runner.GetNext(GetElementRequest(), result));
EXPECT_TRUE(result.end_of_sequence);
}
TEST(FirstComeFirstServedTaskRunnerTest, EmptyDataset) {
FirstComeFirstServedTaskRunner runner(
std::make_unique<RangeIterator>(0, false));
for (int i = 0; i < 5; ++i) {
GetElementResult result;
TF_ASSERT_OK(runner.GetNext(GetElementRequest(), result));
EXPECT_TRUE(result.end_of_sequence);
}
}
TEST(FirstComeFirstServedTaskRunnerTest, Cancel) {
size_t range = 10;
FirstComeFirstServedTaskRunner runner(
std::make_unique<RangeIterator>(range, false));
runner.Cancel();
for (int i = 0; i < range; ++i) {
GetElementResult result;
EXPECT_THAT(runner.GetNext(GetElementRequest(), result),
testing::StatusIs(error::CANCELLED));
}
}
TEST(FirstComeFirstServedTaskRunnerTest, ConcurrentReaders) {
size_t range = 1000;
size_t num_readers = 10;
FirstComeFirstServedTaskRunner runner(
std::make_unique<RangeIterator>(range, false));
mutex mu;
std::vector<int64_t> results;
std::vector<std::unique_ptr<Thread>> reader_threads;
for (int i = 0; i < num_readers; ++i) {
reader_threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("Trainer_", i),
[&runner, &results, &mu]() {
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> output,
GetTaskRunnerOutput<int64_t>(runner, GetElementRequest()));
GetElementResult result;
TF_ASSERT_OK(runner.GetNext(GetElementRequest(), result));
EXPECT_TRUE(result.end_of_sequence);
mutex_lock l(mu);
std::move(output.begin(), output.end(), std::back_inserter(results));
})));
}
for (auto& thread : reader_threads) {
thread.reset();
}
EXPECT_THAT(results, UnorderedElementsAreArray(GetRange(range)));
}
TEST(FirstComeFirstServedTaskRunnerTest, GetNextAndCancel) {
size_t range = 10;
FirstComeFirstServedTaskRunner runner(
std::make_unique<RangeIterator>(range, false));
int64_t i;
for (i = 0; i < range / 2; ++i) {
EXPECT_THAT(GetNextFromTaskRunner<int64_t>(runner, GetElementRequest()),
IsOkAndHolds(i));
}
runner.Cancel();
for (; i < range; ++i) {
GetElementResult result;
EXPECT_THAT(runner.GetNext(GetElementRequest(), result),
testing::StatusIs(error::CANCELLED));
}
}
TEST(FirstComeFirstServedTaskRunnerTest, Error) {
FirstComeFirstServedTaskRunner runner(
std::make_unique<ElementOrErrorIterator<tstring>>(
std::vector<absl::StatusOr<tstring>>{
tstring("First element"),
errors::InvalidArgument("Invalid argument"),
tstring("Second element"), errors::Aborted("Aborted")}));
EXPECT_THAT(GetNextFromTaskRunner<tstring>(runner, GetElementRequest()),
IsOkAndHolds("First element"));
EXPECT_THAT(GetNextFromTaskRunner<tstring>(runner, GetElementRequest()),
testing::StatusIs(error::INVALID_ARGUMENT));
EXPECT_THAT(GetNextFromTaskRunner<tstring>(runner, GetElementRequest()),
IsOkAndHolds("Second element"));
EXPECT_THAT(GetNextFromTaskRunner<tstring>(runner, GetElementRequest()),
testing::StatusIs(error::ABORTED));
}
TEST(CachingTaskRunnerTest, GetNext) {
size_t range = 10;
CachingTaskRunner runner(std::make_unique<InfiniteRangeIterator>(),
kLargeCache);
size_t num_trainers = 10;
for (size_t i = 0; i < num_trainers; ++i) {
GetElementRequest request;
request.set_trainer_id(absl::StrCat("Trainer ", i));
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> output,
GetElementsFromTaskRunner<int64_t>(runner, request, range));
EXPECT_THAT(output, ElementsAreArray(GetRange(range)));
GetElementResult result;
TF_ASSERT_OK(runner.GetNext(request, result));
EXPECT_FALSE(result.end_of_sequence);
}
}
TEST(CachingTaskRunnerTest, EmptyDataset) {
CachingTaskRunner runner(
std::make_unique<RangeIterator>(0, false),
kLargeCache);
GetElementRequest request;
request.set_trainer_id("Trainer ID");
GetElementResult result;
EXPECT_THAT(runner.GetNext(request, result),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Cross-trainer caching requires the input "
"dataset to be infinite.")));
}
TEST(CachingTaskRunnerTest, SlowClientSkipsData) {
size_t range = 1000;
CachingTaskRunner runner(std::make_unique<InfiniteRangeIterator>(),
kSmallCache);
GetElementRequest request;
request.set_trainer_id("Fast trainer");
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> fast_trainer_output,
GetElementsFromTaskRunner<int64_t>(runner, request, range));
EXPECT_THAT(fast_trainer_output, ElementsAreArray(GetRange(range)));
request.set_trainer_id("Slow trainer");
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> slow_trainer_output,
GetElementsFromTaskRunner<int64_t>(runner, request, range));
EXPECT_THAT(slow_trainer_output, SizeIs(range));
EXPECT_THAT(slow_trainer_output[0], Gt(0));
}
TEST(CachingTaskRunnerTest, ConcurrentTrainers) {
size_t range = 100;
size_t num_readers = 10;
CachingTaskRunner runner(std::make_unique<InfiniteRangeIterator>(),
kLargeCache);
std::vector<std::unique_ptr<Thread>> reader_threads;
for (int i = 0; i < num_readers; ++i) {
reader_threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("Trainer_", i),
[&runner, range, i]() {
GetElementRequest request;
request.set_trainer_id(absl::StrCat("Trainer_", i));
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> output,
GetElementsFromTaskRunner<int64_t>(runner, request, range));
EXPECT_THAT(output, ElementsAreArray(GetRange(range)));
GetElementResult result;
TF_ASSERT_OK(runner.GetNext(request, result));
EXPECT_FALSE(result.end_of_sequence);
})));
}
}
TEST(CachingTaskRunnerTest, Cancel) {
CachingTaskRunner runner(std::make_unique<InfiniteRangeIterator>(),
kLargeCache);
GetElementRequest request;
request.set_trainer_id("Trainer ID");
int i;
for (i = 0; i < 10; ++i) {
EXPECT_THAT(GetNextFromTaskRunner<int64_t>(runner, request),
IsOkAndHolds(i));
}
runner.Cancel();
for (; i < 10; ++i) {
GetElementResult result;
EXPECT_THAT(runner.GetNext(request, result),
testing::StatusIs(error::CANCELLED));
}
}
TEST(CachingTaskRunnerTest, CancelConcurrentReaders) {
size_t num_readers = 10;
CachingTaskRunner runner(std::make_unique<InfiniteRangeIterator>(),
kSmallCache);
std::vector<std::unique_ptr<Thread>> reader_threads;
for (size_t i = 0; i < num_readers; ++i) {
reader_threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("Trainer_", i),
[&runner]() {
for (size_t j = 0; true; ++j) {
GetElementRequest request;
request.set_trainer_id(absl::StrCat("Trainer_", (j % 100)));
GetElementResult result;
Status status = runner.GetNext(request, result);
if (!status.ok()) {
return;
}
ASSERT_FALSE(result.end_of_sequence);
ASSERT_EQ(result.components.size(), 1);
}
})));
}
Env::Default()->SleepForMicroseconds(1000000);
runner.Cancel();
for (auto& thread : reader_threads) {
thread.reset();
}
GetElementRequest request;
GetElementResult result;
request.set_trainer_id(absl::StrCat("Trainer_", 0));
EXPECT_THAT(runner.GetNext(request, result),
testing::StatusIs(error::CANCELLED));
}
TEST(CachingTaskRunnerTest, Errors) {
size_t num_readers = 10;
CachingTaskRunner runner(
std::make_unique<ElementOrErrorIterator<tstring>>(
std::vector<absl::StatusOr<tstring>>{
tstring("First element"),
errors::Cancelled("Cancelled"),
tstring("Second element"),
errors::FailedPrecondition("FailedPrecondition"),
tstring("Third element"),
errors::Unavailable("Unavailable"),
}),
kLargeCache);
std::vector<std::unique_ptr<Thread>> reader_threads;
std::vector<std::vector<tstring>> results;
results.reserve(num_readers);
for (size_t i = 0; i < num_readers; ++i) {
results.emplace_back();
std::vector<tstring>& result = results.back();
reader_threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("Trainer_", i),
[&runner, &result, i]() {
GetElementRequest request;
request.set_trainer_id(absl::StrCat("Trainer_", i));
while (true) {
absl::StatusOr<tstring> element =
GetNextFromTaskRunner<tstring>(runner, request);
if (element.ok()) {
result.push_back(*element);
}
if (errors::IsInvalidArgument(element.status())) {
EXPECT_THAT(
element.status(),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Cross-trainer caching requires the input "
"dataset to be infinite.")));
return;
}
}
})));
}
for (auto& thread : reader_threads) {
thread.reset();
}
EXPECT_EQ(results.size(), num_readers);
for (const std::vector<tstring>& result : results) {
EXPECT_THAT(result,
ElementsAre(tstring("First element"), tstring("Second element"),
tstring("Third element")));
}
}
class ConsumeParallelTest
: public ::testing::Test,
public ::testing::WithParamInterface<std::tuple<int64_t, int64_t>> {};
TEST_P(ConsumeParallelTest, ConsumeParallel) {
int64_t num_elements = std::get<0>(GetParam());
int64_t num_consumers = std::get<1>(GetParam());
RoundRobinTaskRunner runner(
std::make_unique<RangeIterator>(num_elements, true),
num_consumers,
"test_worker_address");
std::vector<std::vector<int64_t>> per_consumer_results;
std::vector<std::unique_ptr<Thread>> consumers;
mutex mu;
Status error;
for (int consumer = 0; consumer < num_consumers; ++consumer) {
mutex_lock l(mu);
per_consumer_results.emplace_back();
consumers.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("consumer_", consumer), [&, consumer] {
std::vector<int64_t> results;
Status s = RunConsumer(consumer, 0,
num_elements, runner, results);
mutex_lock l(mu);
if (!s.ok()) {
error = s;
return;
}
per_consumer_results[consumer] = std::move(results);
})));
}
consumers.clear();
mutex_lock l(mu);
TF_ASSERT_OK(error);
for (int i = 0; i < num_elements; ++i) {
int consumer = i % num_consumers;
int round = i / num_consumers;
EXPECT_EQ(per_consumer_results[consumer][round], i);
}
}
INSTANTIATE_TEST_SUITE_P(ConsumeParallelTests, ConsumeParallelTest,
::testing::Values(std::make_tuple(1000, 5),
std::make_tuple(1003, 5),
std::make_tuple(1000, 20),
std::make_tuple(4, 20),
std::make_tuple(0, 20)));
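// Consumers join at different starting rounds, so the runner must first
// serve a partial round containing only the earliest joiners before regular
// full rounds begin; `expected_consumer_results` encodes that interleaving.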
TEST(RoundRobinTaskRunner, ConsumeParallelPartialRound) {
int64_t num_consumers = 5;
std::vector<int64_t> starting_rounds = {12, 11, 11, 12, 12};
int64_t end_index = 15;
std::vector<std::vector<int64_t>> expected_consumer_results = {
{5, 10, 15}, {1, 6, 11, 16}, {2, 7, 12, 17}, {8, 13, 18}, {9, 14, 19}};
RoundRobinTaskRunner runner(
std::make_unique<RangeIterator>(30, true), num_consumers,
"test_worker_address");
std::vector<std::vector<int64_t>> per_consumer_results;
std::vector<std::unique_ptr<Thread>> consumers;
mutex mu;
Status error;
for (int consumer = 0; consumer < num_consumers; ++consumer) {
mutex_lock l(mu);
per_consumer_results.emplace_back();
consumers.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("consumer_", consumer), [&, consumer] {
std::vector<int64_t> results;
Status s = RunConsumer(consumer, starting_rounds[consumer], end_index,
runner, results);
mutex_lock l(mu);
if (!s.ok()) {
error = s;
return;
}
per_consumer_results[consumer] = std::move(results);
})));
}
consumers.clear();
mutex_lock l(mu);
TF_ASSERT_OK(error);
for (int consumer = 0; consumer < num_consumers; ++consumer) {
EXPECT_EQ(per_consumer_results[consumer],
expected_consumer_results[consumer]);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/task_runner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/task_runner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ff6e614c-d7f4-4756-bc9e-cd60d9819967 | cpp | tensorflow/tensorflow | data_transfer | tensorflow/core/data/service/data_transfer.cc | tensorflow/core/data/service/data_transfer_test.cc | #include "tensorflow/core/data/service/data_transfer.h"
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/strings/str_join.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
namespace data {
namespace {
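// The registries below are guarded by a process-wide mutex and intentionally
// leaked so they stay usable regardless of static destruction order.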
mutex* get_lock() {
static mutex lock(LINKER_INITIALIZED);
return &lock;
}
using DataTransferServerFactories =
std::unordered_map<std::string, DataTransferServer::ServerFactoryT>;
DataTransferServerFactories& transfer_server_factories() {
static auto& factories = *new DataTransferServerFactories();
return factories;
}
using DataTransferClientFactories =
std::unordered_map<std::string, DataTransferClient::ClientFactoryT>;
DataTransferClientFactories& transfer_client_factories() {
static auto& factories = *new DataTransferClientFactories();
return factories;
}
}
GetElementResult GetElementResult::Copy() const {
GetElementResult copy;
copy.components = components;
copy.element_index = element_index;
copy.end_of_sequence = end_of_sequence;
copy.skip = skip;
return copy;
}
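// Rough estimate of the payload size. For DT_VARIANT tensors holding a
// CompressedElement, the proto's heap usage is added on top of TotalBytes().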
size_t GetElementResult::EstimatedMemoryUsageBytes() const {
size_t size_bytes = components.size() * sizeof(Tensor) +
sizeof(element_index) + sizeof(end_of_sequence) +
sizeof(skip);
for (const Tensor& tensor : components) {
size_bytes += tensor.TotalBytes();
if (tensor.dtype() != DT_VARIANT) {
continue;
}
const Variant& variant = tensor.scalar<Variant>()();
const CompressedElement* compressed = variant.get<CompressedElement>();
if (compressed) {
size_bytes += compressed->SpaceUsedLong();
}
}
return size_bytes;
}
void DataTransferServer::Register(std::string name, ServerFactoryT factory) {
mutex_lock l(*get_lock());
if (!transfer_server_factories().insert({name, factory}).second) {
LOG(ERROR)
<< "Two data transfer server factories are being registered with name "
<< name << ". Which one gets used is undefined.";
}
}
Status DataTransferServer::Build(std::string name, GetElementT get_element,
std::shared_ptr<DataTransferServer>* out) {
mutex_lock l(*get_lock());
auto it = transfer_server_factories().find(name);
if (it != transfer_server_factories().end()) {
return it->second(get_element, out);
}
std::vector<std::string> available_names;
for (const auto& factory : transfer_server_factories()) {
available_names.push_back(factory.first);
}
return errors::NotFound(
"No data transfer server factory has been registered for name ", name,
". The available names are: [ ", absl::StrJoin(available_names, ", "),
" ]");
}
void DataTransferClient::Register(std::string name, ClientFactoryT factory) {
mutex_lock l(*get_lock());
if (!transfer_client_factories().insert({name, factory}).second) {
LOG(ERROR)
<< "Two data transfer client factories are being registered with name "
<< name << ". Which one gets used is undefined.";
}
}
Status DataTransferClient::Build(std::string name, Config config,
std::unique_ptr<DataTransferClient>* out) {
mutex_lock l(*get_lock());
auto it = transfer_client_factories().find(name);
if (it != transfer_client_factories().end()) {
return it->second(config, out);
}
std::vector<string> available_names;
for (const auto& factory : transfer_client_factories()) {
available_names.push_back(factory.first);
}
return errors::NotFound(
"No data transfer client factory has been registered for name ", name,
". The available names are: [ ", absl::StrJoin(available_names, ", "),
" ]");
}
}
} | #include "tensorflow/core/data/service/data_transfer.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
class TestDataTransferServer : public DataTransferServer {
public:
explicit TestDataTransferServer(bool* called) : called_(called) {}
Status Start(const experimental::WorkerConfig& unused_config) override {
*called_ = true;
return absl::OkStatus();
}
int Port() const override { return 0; }
private:
bool* called_;
};
template <class T>
GetElementResult MakeElementResult(T value) {
GetElementResult result;
result.components.push_back(Tensor(std::move(value)));
result.element_index = 0;
result.end_of_sequence = false;
return result;
}
TEST(DataTransferTest, RegisterDataTransferServerBuilder) {
bool called = false;
DataTransferServer::Register("test", [&called](auto ignore, auto* server) {
*server = std::make_shared<TestDataTransferServer>(&called);
return absl::OkStatus();
});
std::shared_ptr<DataTransferServer> server;
TF_ASSERT_OK(DataTransferServer::Build("test", {}, &server));
EXPECT_FALSE(called);
TF_ASSERT_OK(server->Start({}));
EXPECT_TRUE(called);
}
TEST(DataTransferTest, EstimateMemoryUsageBytes) {
GetElementResult empty;
EXPECT_GT(empty.EstimatedMemoryUsageBytes(), 0);
Tensor tensor(DT_INT64, TensorShape({10, 100}));
GetElementResult int64_result = MakeElementResult(tensor);
EXPECT_GT(int64_result.EstimatedMemoryUsageBytes(), 1000 * sizeof(int64_t));
EXPECT_GT(int64_result.EstimatedMemoryUsageBytes(),
int64_result.components[0].AllocatedBytes());
EXPECT_GE(int64_result.EstimatedMemoryUsageBytes(), sizeof(int64_result));
}
TEST(DataTransferTest, EstimateVariantMemoryUsageBytes) {
const size_t data_size = 1000;
std::unique_ptr<CompressedElement> compressed{
protobuf::Arena::Create<CompressedElement>(nullptr)};
compressed->set_data(std::string(data_size, 'a'));
Tensor tensor(DT_VARIANT, TensorShape({}));
tensor.scalar<Variant>()() = *compressed;
GetElementResult variant_result = MakeElementResult(tensor);
EXPECT_GT(variant_result.EstimatedMemoryUsageBytes(), data_size);
EXPECT_GT(variant_result.EstimatedMemoryUsageBytes(),
compressed->ByteSizeLong());
EXPECT_GT(variant_result.EstimatedMemoryUsageBytes(),
compressed->SpaceUsedLong());
}
TEST(DataTransferTest, CopyGetElementResult) {
std::string hello_world = "hello, world!";
GetElementResult result = MakeElementResult(hello_world);
ASSERT_EQ(result.components.size(), 1);
EXPECT_GT(result.EstimatedMemoryUsageBytes(), hello_world.size());
GetElementResult copy = result.Copy();
ASSERT_EQ(copy.components.size(), 1);
test::ExpectEqual(result.components[0], copy.components[0]);
EXPECT_EQ(copy.EstimatedMemoryUsageBytes(),
result.EstimatedMemoryUsageBytes());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/data_transfer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/data_transfer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
be4b5433-ca85-4f8f-92b6-37a1dd883c50 | cpp | tensorflow/tensorflow | graph_rewriters | tensorflow/core/data/service/graph_rewriters.cc | tensorflow/core/data/service/graph_rewriters_test.cc | #include "tensorflow/core/data/service/graph_rewriters.h"
#include <cstdlib>
#include <iterator>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/strings/match.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "absl/types/optional.h"
#include "tensorflow/core/data/rewrite_utils.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/url.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/grappler_item_builder.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
#include "tensorflow/core/grappler/optimizers/data/auto_shard.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
#include "tensorflow/core/grappler/optimizers/data/remove_compression_map.h"
#include "tensorflow/core/kernels/data/experimental/auto_shard_dataset_op.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/device_properties.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::data::experimental::AutoShardDatasetOp;
constexpr bool kApplyGeneralGrapplerOptimizations = false;
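// A "dynamic" port is a placeholder of the form %port...% that is replaced by
// a concrete port once the worker binds one.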
bool HasDynamicPort(absl::string_view address) {
URL url(address);
return url.has_port() && absl::StartsWith(url.port(), "%port") &&
absl::EndsWith(url.port(), "%");
}
bool ShouldReplaceDynamicPort(absl::string_view config_address,
absl::string_view worker_address) {
URL config_url(config_address), worker_url(worker_address);
return (!config_url.has_port() || HasDynamicPort(config_address)) &&
worker_url.has_port() && config_url.host() == worker_url.host();
}
}
absl::StatusOr<GraphDef>
RemoveCompressionMapRewriter::ApplyRemoveCompressionMapRewrite(
const GraphDef& graph_def) {
grappler::RemoveCompressionMap remove_compression_map;
tensorflow::RewriterConfig::CustomGraphOptimizer config = GetRewriteConfig();
TF_RETURN_IF_ERROR(remove_compression_map.Init(&config));
GraphDef input_graph = graph_def;
TF_ASSIGN_OR_RETURN(std::string dataset_node, GetDatasetNode(input_graph));
  std::unique_ptr<tensorflow::grappler::GrapplerItem> grappler_item =
      GetGrapplerItem(&input_graph, &dataset_node, /*add_fake_sinks=*/false,
                      kApplyGeneralGrapplerOptimizations);
GraphDef rewritten_graph;
std::unordered_map<std::string, tensorflow::DeviceProperties> device_map;
tensorflow::grappler::VirtualCluster cluster(device_map);
grappler::AutoShard::OptimizationStats stats;
TF_RETURN_IF_ERROR(remove_compression_map.OptimizeAndCollectStats(
&cluster, *grappler_item, &rewritten_graph, &stats));
return rewritten_graph;
}
tensorflow::RewriterConfig::CustomGraphOptimizer
RemoveCompressionMapRewriter::GetRewriteConfig() const {
tensorflow::RewriterConfig::CustomGraphOptimizer config;
config.set_name("tf-data-service-remove-compression-map");
return config;
}
absl::StatusOr<AutoShardRewriter> AutoShardRewriter::Create(
const TaskDef& task_def) {
TF_ASSIGN_OR_RETURN(
AutoShardPolicy auto_shard_policy,
ToAutoShardPolicy(task_def.processing_mode_def().sharding_policy()));
return AutoShardRewriter(auto_shard_policy, task_def.num_workers(),
task_def.worker_index());
}
absl::StatusOr<GraphDef> AutoShardRewriter::ApplyAutoShardRewrite(
const GraphDef& graph_def) {
if (auto_shard_policy_ == AutoShardPolicy::OFF) {
return graph_def;
}
VLOG(2) << "Applying auto-shard policy "
<< AutoShardPolicy_Name(auto_shard_policy_)
<< ". Number of workers: " << num_workers_
<< "; worker index: " << worker_index_ << ".";
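  // Run the grappler AutoShard pass over the dataset graph. A virtual cluster
  // with no devices is sufficient for a dataset-only rewrite.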
grappler::AutoShard autoshard;
tensorflow::RewriterConfig::CustomGraphOptimizer config = GetRewriteConfig();
TF_RETURN_IF_ERROR(autoshard.Init(&config));
GraphDef input_graph = graph_def;
TF_ASSIGN_OR_RETURN(std::string dataset_node, GetDatasetNode(input_graph));
  std::unique_ptr<tensorflow::grappler::GrapplerItem> grappler_item =
      GetGrapplerItem(&input_graph, &dataset_node, /*add_fake_sinks=*/false,
                      kApplyGeneralGrapplerOptimizations);
GraphDef rewritten_graph;
std::unordered_map<std::string, tensorflow::DeviceProperties> device_map;
tensorflow::grappler::VirtualCluster cluster(device_map);
grappler::AutoShard::OptimizationStats stats;
TF_RETURN_IF_ERROR(autoshard.OptimizeAndCollectStats(
&cluster, *grappler_item, &rewritten_graph, &stats));
return rewritten_graph;
}
AutoShardRewriter::AutoShardRewriter(AutoShardPolicy auto_shard_policy,
int64_t num_workers, int64_t worker_index)
: auto_shard_policy_(auto_shard_policy),
num_workers_(num_workers),
worker_index_(worker_index) {}
tensorflow::RewriterConfig::CustomGraphOptimizer
AutoShardRewriter::GetRewriteConfig() const {
tensorflow::RewriterConfig::CustomGraphOptimizer config;
config.set_name("tf-data-service-auto-shard");
(*config.mutable_parameter_map())[AutoShardDatasetOp::kNumWorkers].set_i(
num_workers_);
(*config.mutable_parameter_map())[AutoShardDatasetOp::kIndex].set_i(
worker_index_);
(*config.mutable_parameter_map())[AutoShardDatasetOp::kAutoShardPolicy].set_i(
auto_shard_policy_);
(*config.mutable_parameter_map())[AutoShardDatasetOp::kNumReplicas].set_i(1);
return config;
}
Status WorkerIndexResolver::ValidateWorker(
absl::string_view worker_address) const {
if (worker_addresses_.empty()) {
return absl::OkStatus();
}
for (absl::string_view config_address : worker_addresses_) {
if (config_address == worker_address ||
ShouldReplaceDynamicPort(config_address, worker_address)) {
return absl::OkStatus();
}
}
return errors::FailedPrecondition(absl::Substitute(
"Failed to assign an index for worker $0. Configured workers list: [$1]. "
"The worker's address is not configured, or other workers are already "
"running at the configured host. If your worker has restarted, make sure "
"it runs at the same address and port.",
worker_address, absl::StrJoin(worker_addresses_, ", ")));
}
void WorkerIndexResolver::AddWorker(absl::string_view worker_address) {
for (std::string& config_address : worker_addresses_) {
if (config_address == worker_address) {
return;
}
if (ShouldReplaceDynamicPort(config_address, worker_address)) {
config_address = std::string(worker_address);
return;
}
}
}
absl::StatusOr<int64_t> WorkerIndexResolver::GetWorkerIndex(
absl::string_view worker_address) const {
const auto it = absl::c_find(worker_addresses_, worker_address);
if (it == worker_addresses_.cend()) {
return errors::NotFound(absl::Substitute(
"Failed to shard dataset in tf.data service: Worker $0 is not in the "
"workers list. Got workers list $1.",
worker_address, absl::StrJoin(worker_addresses_, ",")));
}
return std::distance(worker_addresses_.cbegin(), it);
}
}
} | #include "tensorflow/core/data/service/graph_rewriters.h"
#include <string>
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::data::testing::EqualsProto;
using ::tensorflow::data::testing::RangeDatasetWithShardHint;
using ::tensorflow::data::testing::RangeSquareDataset;
using ::tensorflow::testing::IsOkAndHolds;
using ::tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
using ::testing::SizeIs;
absl::StatusOr<NodeDef> GetNode(const GraphDef& graph_def,
absl::string_view name) {
for (const NodeDef& node : graph_def.node()) {
if (node.name() == name) {
return node;
}
}
return errors::NotFound(absl::Substitute("Node $0 not found in graph $1.",
name, graph_def.ShortDebugString()));
}
absl::StatusOr<int64_t> GetValue(const GraphDef& graph_def,
absl::string_view name) {
for (const NodeDef& node : graph_def.node()) {
if (node.name() == name) {
return node.attr().at("value").tensor().int64_val()[0];
}
}
return errors::NotFound(absl::Substitute("Node $0 not found in graph $1.",
name, graph_def.ShortDebugString()));
}
TaskDef GetTaskDef(const ProcessingModeDef::ShardingPolicy sharding_policy,
const int64_t num_workers, const int64_t worker_index) {
TaskDef task_def;
task_def.mutable_processing_mode_def()->set_sharding_policy(sharding_policy);
task_def.set_num_workers(num_workers);
task_def.set_worker_index(worker_index);
return task_def;
}
TEST(AutoShardRewriterTest, AutoShard) {
  TaskDef task_def = GetTaskDef(ProcessingModeDef::FILE_OR_DATA,
                                /*num_workers=*/3, /*worker_index=*/1);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(10);
TF_ASSERT_OK_AND_ASSIGN(GraphDef rewritten_graph,
rewriter.ApplyAutoShardRewrite(dataset.graph()));
TF_ASSERT_OK_AND_ASSIGN(NodeDef shard_node,
GetNode(rewritten_graph, "ShardDataset"));
ASSERT_THAT(shard_node.input(), SizeIs(3));
EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(1)), IsOkAndHolds(3));
EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(2)), IsOkAndHolds(1));
}
TEST(AutoShardRewriterTest, ShardByData) {
  TaskDef task_def = GetTaskDef(ProcessingModeDef::DATA, /*num_workers=*/3,
                                /*worker_index=*/1);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(10);
TF_ASSERT_OK_AND_ASSIGN(GraphDef rewritten_graph,
rewriter.ApplyAutoShardRewrite(dataset.graph()));
TF_ASSERT_OK_AND_ASSIGN(NodeDef shard_node,
GetNode(rewritten_graph, "ShardDataset"));
ASSERT_THAT(shard_node.input(), SizeIs(3));
EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(1)), IsOkAndHolds(3));
EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(2)), IsOkAndHolds(1));
}
TEST(AutoShardRewriterTest, ShardByFile) {
  TaskDef task_def = GetTaskDef(ProcessingModeDef::FILE, /*num_workers=*/3,
                                /*worker_index=*/1);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(10);
EXPECT_THAT(rewriter.ApplyAutoShardRewrite(dataset.graph()),
StatusIs(error::NOT_FOUND,
HasSubstr("Found an unshardable source dataset")));
}
TEST(AutoShardRewriterTest, ShardByHint) {
  TaskDef task_def = GetTaskDef(ProcessingModeDef::HINT, /*num_workers=*/3,
                                /*worker_index=*/1);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeDatasetWithShardHint(10);
TF_ASSERT_OK_AND_ASSIGN(GraphDef rewritten_graph,
rewriter.ApplyAutoShardRewrite(dataset.graph()));
TF_ASSERT_OK_AND_ASSIGN(NodeDef shard_node,
GetNode(rewritten_graph, "ShardDataset"));
ASSERT_THAT(shard_node.input(), SizeIs(3));
EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(1)), IsOkAndHolds(3));
EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(2)), IsOkAndHolds(1));
}
TEST(AutoShardRewriterTest, NoShard) {
  TaskDef task_def = GetTaskDef(ProcessingModeDef::OFF, /*num_workers=*/3,
                                /*worker_index=*/1);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(10);
EXPECT_THAT(rewriter.ApplyAutoShardRewrite(dataset.graph()),
IsOkAndHolds(EqualsProto(dataset.graph())));
}
TEST(AutoShardRewriterTest, EmptyDataset) {
  TaskDef task_def = GetTaskDef(ProcessingModeDef::FILE_OR_DATA,
                                /*num_workers=*/3, /*worker_index=*/1);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(0);
TF_ASSERT_OK_AND_ASSIGN(GraphDef rewritten_graph,
rewriter.ApplyAutoShardRewrite(dataset.graph()));
TF_ASSERT_OK_AND_ASSIGN(NodeDef shard_node,
GetNode(rewritten_graph, "ShardDataset"));
ASSERT_THAT(shard_node.input(), SizeIs(3));
EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(1)), IsOkAndHolds(3));
EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(2)), IsOkAndHolds(1));
}
TEST(AutoShardRewriterTest, NoWorkers) {
  TaskDef task_def = GetTaskDef(ProcessingModeDef::FILE_OR_DATA,
                                /*num_workers=*/0, /*worker_index=*/0);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(10);
EXPECT_THAT(rewriter.ApplyAutoShardRewrite(dataset.graph()),
StatusIs(error::INVALID_ARGUMENT,
"num_workers should be >= 1, currently 0"));
}
TEST(AutoShardRewriterTest, NoWorkersWhenShardIsOff) {
  TaskDef task_def = GetTaskDef(ProcessingModeDef::OFF, /*num_workers=*/0,
                                /*worker_index=*/0);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(10);
EXPECT_THAT(rewriter.ApplyAutoShardRewrite(dataset.graph()),
IsOkAndHolds(EqualsProto(dataset.graph())));
}
TEST(AutoShardRewriterTest, WorkerIndexOutOfRange) {
  TaskDef task_def = GetTaskDef(ProcessingModeDef::FILE_OR_DATA,
                                /*num_workers=*/2, /*worker_index=*/5);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(10);
EXPECT_THAT(rewriter.ApplyAutoShardRewrite(dataset.graph()),
StatusIs(error::INVALID_ARGUMENT,
"index should be >= 0 and < 2, currently 5"));
}
TEST(WorkerIndexResolverTest, AddOneWorker) {
WorkerIndexResolver resolver(std::vector<std::string>{"localhost"});
EXPECT_THAT(resolver.GetWorkerIndex("localhost:12345"),
StatusIs(error::NOT_FOUND));
TF_EXPECT_OK(resolver.ValidateWorker("localhost:12345"));
resolver.AddWorker("localhost:12345");
EXPECT_THAT(resolver.GetWorkerIndex("localhost:12345"), IsOkAndHolds(0));
}
TEST(WorkerIndexResolverTest, AddMultipleWorkers) {
WorkerIndexResolver resolver(std::vector<std::string>{
"/worker/task/0", "/worker/task/1", "/worker/task/2"});
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/2:12345"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/1:23456"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/0:34567"));
resolver.AddWorker("/worker/task/2:12345");
resolver.AddWorker("/worker/task/1:23456");
resolver.AddWorker("/worker/task/0:34567");
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:34567"), IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:23456"), IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:12345"), IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, NamedPorts) {
WorkerIndexResolver resolver(
std::vector<std::string>{"/worker/task/0:worker", "/worker/task/1:worker",
"/worker/task/2:worker"});
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/2:worker"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/1:worker"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/0:worker"));
resolver.AddWorker("/worker/task/2:worker");
resolver.AddWorker("/worker/task/1:worker");
resolver.AddWorker("/worker/task/0:worker");
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:worker"),
IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:worker"),
IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:worker"),
IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, DynamicPorts) {
WorkerIndexResolver resolver(std::vector<std::string>{
"/worker/task/0:%port_worker%", "/worker/task/1:%port_worker%",
"/worker/task/2:%port_worker%"});
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/2:worker"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/1:worker"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/0:worker"));
resolver.AddWorker("/worker/task/2:worker");
resolver.AddWorker("/worker/task/1:worker");
resolver.AddWorker("/worker/task/0:worker");
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:worker"),
IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:worker"),
IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:worker"),
IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, AnonymousPorts) {
WorkerIndexResolver resolver(
std::vector<std::string>{"/worker/task/0:%port%", "/worker/task/1:%port%",
"/worker/task/2:%port%"});
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/2:10000"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/1:10001"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/0:10002"));
resolver.AddWorker("/worker/task/2:10000");
resolver.AddWorker("/worker/task/1:10001");
resolver.AddWorker("/worker/task/0:10002");
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:10002"), IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:10001"), IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:10000"), IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, NumericPorts) {
WorkerIndexResolver resolver(std::vector<std::string>{
"/worker/task/0:12345", "/worker/task/1:23456", "/worker/task/2:34567"});
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:12345"), IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:23456"), IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:34567"), IsOkAndHolds(2));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/2:34567"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/1:23456"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/0:12345"));
resolver.AddWorker("/worker/task/2:34567");
resolver.AddWorker("/worker/task/1:23456");
resolver.AddWorker("/worker/task/0:12345");
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:12345"), IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:23456"), IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:34567"), IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, IPv6Addresses) {
WorkerIndexResolver resolver(std::vector<std::string>{
"[1080:0:0:0:8:800:200C:417A]", "[1080:0:0:0:8:800:200C:417B]",
"[1080:0:0:0:8:800:200C:417C]"});
TF_EXPECT_OK(resolver.ValidateWorker("[1080:0:0:0:8:800:200C:417A]:12345"));
TF_EXPECT_OK(resolver.ValidateWorker("[1080:0:0:0:8:800:200C:417B]:23456"));
TF_EXPECT_OK(resolver.ValidateWorker("[1080:0:0:0:8:800:200C:417C]:34567"));
resolver.AddWorker("[1080:0:0:0:8:800:200C:417A]:12345");
resolver.AddWorker("[1080:0:0:0:8:800:200C:417B]:23456");
resolver.AddWorker("[1080:0:0:0:8:800:200C:417C]:34567");
EXPECT_THAT(resolver.GetWorkerIndex("[1080:0:0:0:8:800:200C:417A]:12345"),
IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("[1080:0:0:0:8:800:200C:417B]:23456"),
IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("[1080:0:0:0:8:800:200C:417C]:34567"),
IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, IPv6AddressesWithDynamicPort) {
WorkerIndexResolver resolver(
std::vector<std::string>{"[1080:0:0:0:8:800:200C:417A]:%port%",
"[1080:0:0:0:8:800:200C:417B]:%port%",
"[1080:0:0:0:8:800:200C:417C]:%port%"});
TF_EXPECT_OK(resolver.ValidateWorker("[1080:0:0:0:8:800:200C:417A]:12345"));
TF_EXPECT_OK(resolver.ValidateWorker("[1080:0:0:0:8:800:200C:417B]:23456"));
TF_EXPECT_OK(resolver.ValidateWorker("[1080:0:0:0:8:800:200C:417C]:34567"));
resolver.AddWorker("[1080:0:0:0:8:800:200C:417A]:12345");
resolver.AddWorker("[1080:0:0:0:8:800:200C:417B]:23456");
resolver.AddWorker("[1080:0:0:0:8:800:200C:417C]:34567");
EXPECT_THAT(resolver.GetWorkerIndex("[1080:0:0:0:8:800:200C:417A]:12345"),
IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("[1080:0:0:0:8:800:200C:417B]:23456"),
IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("[1080:0:0:0:8:800:200C:417C]:34567"),
IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, AddressesWithProtocols) {
WorkerIndexResolver resolver(std::vector<std::string>{
"http:
TF_EXPECT_OK(resolver.ValidateWorker("http:
TF_EXPECT_OK(resolver.ValidateWorker("http:
TF_EXPECT_OK(resolver.ValidateWorker("http:
resolver.AddWorker("http:
resolver.AddWorker("http:
resolver.AddWorker("http:
EXPECT_THAT(resolver.GetWorkerIndex("http:
IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("http:
IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("http:
IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, AddressesWithProtocolsAndDynamicPorts) {
WorkerIndexResolver resolver(std::vector<std::string>{
"http:
"http:
TF_EXPECT_OK(resolver.ValidateWorker("http:
TF_EXPECT_OK(resolver.ValidateWorker("http:
TF_EXPECT_OK(resolver.ValidateWorker("http:
resolver.AddWorker("http:
resolver.AddWorker("http:
resolver.AddWorker("http:
EXPECT_THAT(resolver.GetWorkerIndex("http:
IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("http:
IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("http:
IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, HostNameHasColons) {
WorkerIndexResolver resolver(
std::vector<std::string>{":worker:task:0:%port%", ":worker:task:1:%port%",
":worker:task:2:34567"});
TF_EXPECT_OK(resolver.ValidateWorker(":worker:task:0:12345"));
TF_EXPECT_OK(resolver.ValidateWorker(":worker:task:1:23456"));
TF_EXPECT_OK(resolver.ValidateWorker(":worker:task:2:34567"));
resolver.AddWorker(":worker:task:0:12345");
resolver.AddWorker(":worker:task:1:23456");
resolver.AddWorker(":worker:task:2:34567");
EXPECT_THAT(resolver.GetWorkerIndex(":worker:task:0:12345"), IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex(":worker:task:1:23456"), IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex(":worker:task:2:34567"), IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, ChangeWorkerPort) {
WorkerIndexResolver resolver(std::vector<std::string>{
"/worker/task/0", "/worker/task/1", "/worker/task/2"});
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/2:12345"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/1:23456"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/0:34567"));
resolver.AddWorker("/worker/task/2:12345");
resolver.AddWorker("/worker/task/1:23456");
resolver.AddWorker("/worker/task/0:34567");
EXPECT_THAT(resolver.ValidateWorker("/worker/task/0:99999"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("already running at the configured host")));
EXPECT_THAT(resolver.ValidateWorker("/worker/task/1:99999"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("already running at the configured host")));
EXPECT_THAT(resolver.ValidateWorker("/worker/task/2:99999"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("already running at the configured host")));
}
TEST(WorkerIndexResolverTest, WorkerNotFound) {
WorkerIndexResolver resolver(std::vector<std::string>{
"/worker/task/0", "/worker/task/1", "/worker/task/2"});
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:34567"),
StatusIs(error::NOT_FOUND));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:23456"),
StatusIs(error::NOT_FOUND));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:12345"),
StatusIs(error::NOT_FOUND));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/3:45678"),
StatusIs(error::NOT_FOUND));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/2:12345"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/1:23456"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/0:34567"));
EXPECT_THAT(resolver.ValidateWorker("/worker/task/3:45678"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("The worker's address is not configured")));
resolver.AddWorker("/worker/task/3:45678");
resolver.AddWorker("/worker/task/2:12345");
resolver.AddWorker("/worker/task/1:23456");
resolver.AddWorker("/worker/task/0:34567");
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:34567"), IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:23456"), IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:12345"), IsOkAndHolds(2));
EXPECT_THAT(
resolver.GetWorkerIndex("/worker/task/3:45678"),
StatusIs(error::NOT_FOUND,
HasSubstr(
"Worker /worker/task/3:45678 is not in the workers list.")));
}
TEST(WorkerIndexResolverTest, MultipleWorkersInOneHost) {
WorkerIndexResolver resolver(
std::vector<std::string>{"localhost", "localhost", "localhost"});
TF_EXPECT_OK(resolver.ValidateWorker("localhost:12345"));
resolver.AddWorker("localhost:12345");
TF_EXPECT_OK(resolver.ValidateWorker("localhost:23456"));
resolver.AddWorker("localhost:23456");
TF_EXPECT_OK(resolver.ValidateWorker("localhost:34567"));
resolver.AddWorker("localhost:34567");
EXPECT_THAT(resolver.GetWorkerIndex("localhost:12345"), IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("localhost:23456"), IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("localhost:34567"), IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, MoreWorkersThanConfigured) {
WorkerIndexResolver resolver(std::vector<std::string>{
"localhost:%port%", "localhost:%port%", "localhost:%port%"});
TF_EXPECT_OK(resolver.ValidateWorker("localhost:12345"));
resolver.AddWorker("localhost:12345");
TF_EXPECT_OK(resolver.ValidateWorker("localhost:23456"));
resolver.AddWorker("localhost:23456");
TF_EXPECT_OK(resolver.ValidateWorker("localhost:34567"));
resolver.AddWorker("localhost:34567");
TF_EXPECT_OK(resolver.ValidateWorker("localhost:12345"));
resolver.AddWorker("localhost:12345");
TF_EXPECT_OK(resolver.ValidateWorker("localhost:23456"));
resolver.AddWorker("localhost:23456");
TF_EXPECT_OK(resolver.ValidateWorker("localhost:34567"));
resolver.AddWorker("localhost:34567");
EXPECT_THAT(resolver.ValidateWorker("localhost:45678"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("already running at the configured host")));
EXPECT_THAT(resolver.ValidateWorker("localhost:56789"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("already running at the configured host")));
}
TEST(WorkerIndexResolverTest, WorkerNotConfigured) {
WorkerIndexResolver resolver(std::vector<std::string>{""});
EXPECT_THAT(resolver.GetWorkerIndex("localhost:12345"),
StatusIs(error::NOT_FOUND));
EXPECT_THAT(resolver.ValidateWorker("localhost:12345"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("The worker's address is not configured")));
resolver.AddWorker("localhost:12345");
EXPECT_THAT(resolver.GetWorkerIndex("localhost:12345"),
StatusIs(error::NOT_FOUND));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/graph_rewriters.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/graph_rewriters_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0b2254c0-2234-4559-85fd-63d91a12a555 | cpp | tensorflow/tensorflow | credentials_factory | tensorflow/core/data/service/credentials_factory.cc | tensorflow/core/data/service/credentials_factory_test.cc | #include "tensorflow/core/data/service/credentials_factory.h"
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/strings/str_join.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/mutex.h"
namespace tensorflow {
namespace data {
namespace {
mutex* get_lock() {
static mutex lock(LINKER_INITIALIZED);
return &lock;
}
using CredentialsFactories =
std::unordered_map<std::string, CredentialsFactory*>;
CredentialsFactories& credentials_factories() {
static auto& factories = *new CredentialsFactories();
return factories;
}
}
void CredentialsFactory::Register(CredentialsFactory* factory) {
mutex_lock l(*get_lock());
if (!credentials_factories().insert({factory->Protocol(), factory}).second) {
LOG(ERROR)
<< "Two credentials factories are being registered with protocol "
<< factory->Protocol() << ". Which one gets used is undefined.";
}
}
Status CredentialsFactory::Get(absl::string_view protocol,
CredentialsFactory** out) {
mutex_lock l(*get_lock());
auto it = credentials_factories().find(std::string(protocol));
if (it != credentials_factories().end()) {
*out = it->second;
return absl::OkStatus();
}
std::vector<string> available_types;
for (const auto& factory : credentials_factories()) {
available_types.push_back(factory.first);
}
return errors::NotFound("No credentials factory has been registered for ",
"protocol ", protocol,
". The available types are: [ ",
absl::StrJoin(available_types, ", "), " ]");
}
Status CredentialsFactory::CreateServerCredentials(
absl::string_view protocol,
std::shared_ptr<::grpc::ServerCredentials>* out) {
CredentialsFactory* factory;
TF_RETURN_IF_ERROR(CredentialsFactory::Get(protocol, &factory));
TF_RETURN_IF_ERROR(factory->CreateServerCredentials(out));
return absl::OkStatus();
}
Status CredentialsFactory::CreateClientCredentials(
absl::string_view protocol,
std::shared_ptr<::grpc::ChannelCredentials>* out) {
CredentialsFactory* factory;
TF_RETURN_IF_ERROR(CredentialsFactory::Get(protocol, &factory));
TF_RETURN_IF_ERROR(factory->CreateClientCredentials(out));
return absl::OkStatus();
}
bool CredentialsFactory::Exists(absl::string_view protocol) {
mutex_lock l(*get_lock());
return credentials_factories().find(std::string(protocol)) !=
credentials_factories().end();
}
class InsecureCredentialsFactory : public CredentialsFactory {
public:
std::string Protocol() override { return "grpc"; }
Status CreateServerCredentials(
std::shared_ptr<::grpc::ServerCredentials>* out) override {
*out = ::grpc::InsecureServerCredentials();
return absl::OkStatus();
}
Status CreateClientCredentials(
std::shared_ptr<::grpc::ChannelCredentials>* out) override {
*out = ::grpc::InsecureChannelCredentials();
return absl::OkStatus();
}
};
class InsecureCredentialsRegistrar {
public:
InsecureCredentialsRegistrar() {
auto factory = new InsecureCredentialsFactory();
CredentialsFactory::Register(factory);
}
};
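// Instantiated at load time so the insecure "grpc" protocol is always
// available. A custom protocol (e.g. a hypothetical TLS factory) would follow
// the same pattern: subclass CredentialsFactory and register an instance from
// a static registrar.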
static InsecureCredentialsRegistrar registrar;
}
} | #include "tensorflow/core/data/service/credentials_factory.h"
#include <memory>
#include <string>
#include "absl/strings/match.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kFailedToCreateServerCredentials[] =
"Failed to create server credentials.";
constexpr char kFailedToCreateClientCredentials[] =
"Failed to create client credentials.";
class TestCredentialsFactory : public CredentialsFactory {
public:
std::string Protocol() override { return "test"; }
Status CreateServerCredentials(
std::shared_ptr<grpc::ServerCredentials>* out) override {
return errors::Internal(kFailedToCreateServerCredentials);
}
Status CreateClientCredentials(
std::shared_ptr<grpc::ChannelCredentials>* out) override {
return errors::Internal(kFailedToCreateClientCredentials);
}
};
}
TEST(CredentialsFactory, Register) {
TestCredentialsFactory test_factory;
CredentialsFactory::Register(&test_factory);
std::shared_ptr<grpc::ServerCredentials> server_credentials;
ASSERT_EQ(errors::Internal(kFailedToCreateServerCredentials),
CredentialsFactory::CreateServerCredentials(test_factory.Protocol(),
&server_credentials));
std::shared_ptr<grpc::ChannelCredentials> client_credentials;
ASSERT_EQ(errors::Internal(kFailedToCreateClientCredentials),
CredentialsFactory::CreateClientCredentials(test_factory.Protocol(),
&client_credentials));
}
TEST(CredentialsFactory, DefaultGrpcProtocol) {
std::shared_ptr<grpc::ServerCredentials> server_credentials;
TF_ASSERT_OK(
CredentialsFactory::CreateServerCredentials("grpc", &server_credentials));
std::shared_ptr<grpc::ChannelCredentials> client_credentials;
TF_ASSERT_OK(
CredentialsFactory::CreateClientCredentials("grpc", &client_credentials));
}
TEST(CredentialsFactory, MissingServerProtocol) {
std::shared_ptr<grpc::ServerCredentials> server_credentials;
Status s = CredentialsFactory::CreateServerCredentials("unknown_protocol",
&server_credentials);
ASSERT_EQ(error::Code::NOT_FOUND, s.code());
ASSERT_TRUE(
absl::StrContains(s.ToString(),
"No credentials factory has been registered for "
"protocol unknown_protocol"));
}
TEST(CredentialsFactory, MissingClientProtocol) {
std::shared_ptr<grpc::ChannelCredentials> client_credentials;
Status s = CredentialsFactory::CreateClientCredentials("unknown_protocol",
&client_credentials);
ASSERT_EQ(error::Code::NOT_FOUND, s.code());
ASSERT_TRUE(
absl::StrContains(s.ToString(),
"No credentials factory has been registered for "
"protocol unknown_protocol"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/credentials_factory.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/credentials_factory_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ce35ed39-e4f0-49e6-98a4-f765d18996b3 | cpp | tensorflow/tensorflow | worker_impl | tensorflow/core/data/service/worker_impl.cc | tensorflow/core/data/service/worker_impl_test.cc | #include "tensorflow/core/data/service/worker_impl.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "grpcpp/create_channel.h"
#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "absl/time/time.h"
#include "xla/tsl/protobuf/status.pb.h"
#include "tensorflow/core/data/service/byte_size.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/data_transfer.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/dispatcher_client.h"
#include "tensorflow/core/data/service/export.pb.h"
#include "tensorflow/core/data/service/graph_rewriters.h"
#include "tensorflow/core/data/service/grpc_util.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/service/snapshot/snapshot_split_provider.h"
#include "tensorflow/core/data/service/snapshot/snapshot_stream_writer.h"
#include "tensorflow/core/data/service/split_provider.h"
#include "tensorflow/core/data/service/task_runner.h"
#include "tensorflow/core/data/service/utils.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/data/standalone.h"
#include "tensorflow/core/framework/dataset.pb.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/env_time.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/host_info.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/dump_graph.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
namespace {
constexpr absl::Duration kRetryInterval = absl::Seconds(5);
constexpr absl::Duration kDefaultHeartBeatInterval = absl::Seconds(30);
constexpr absl::Duration kDefaultDispatcherTimeout = absl::Hours(1);
using WorkerConfig = experimental::WorkerConfig;
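// A single scalar DT_VARIANT component must hold a CompressedElement, which is
// forwarded as-is; any other element is sent with uncompressed components.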
Status MoveElementToResponse(std::vector<Tensor>&& element,
GetElementResponse& resp) {
if (element.size() != 1 || element[0].dtype() != DT_VARIANT ||
!TensorShapeUtils::IsScalar(element[0].shape())) {
for (const auto& component : element) {
UncompressedElement* uncompressed = resp.mutable_uncompressed();
component.AsProtoTensorContent(uncompressed->add_components());
}
return absl::OkStatus();
}
Variant& variant = element[0].scalar<Variant>()();
CompressedElement* compressed = variant.get<CompressedElement>();
if (compressed == nullptr) {
return errors::FailedPrecondition(
"Expected dataset to produce a CompressedElement variant tensor, but "
"it produced ",
variant.TypeName());
}
*resp.mutable_compressed() = *compressed;
return absl::OkStatus();
}
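// Fills in defaults for any zero-valued heartbeat interval, dispatcher
// timeout, or snapshot chunk size.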
WorkerConfig ApplyWorkerDefaults(const WorkerConfig& config) {
WorkerConfig new_config(config);
if (new_config.heartbeat_interval_ms() == 0) {
new_config.set_heartbeat_interval_ms(
absl::ToInt64Milliseconds(kDefaultHeartBeatInterval));
}
if (new_config.dispatcher_timeout_ms() == 0) {
new_config.set_dispatcher_timeout_ms(
absl::ToInt64Milliseconds(kDefaultDispatcherTimeout));
}
if (new_config.snapshot_max_chunk_size_bytes() == 0) {
new_config.set_snapshot_max_chunk_size_bytes(
kDefaultMaxChunkSize.ToUnsignedBytes());
}
return new_config;
}
TaskDef Export(const TaskDef& task) {
TaskDef result;
switch (task.dataset_case()) {
case TaskDef::kDatasetDef:
result.set_path(
"In-memory dataset graphs are omitted for brevity. To view datasets "
"stored on the dispatcher, configure a `work_dir`.");
break;
case TaskDef::kPath:
result.set_path(task.path());
break;
default:
break;
}
result.set_dataset_id(task.dataset_id());
result.set_task_id(task.task_id());
result.set_iteration_id(task.iteration_id());
result.set_num_split_providers(task.num_split_providers());
result.set_worker_address(task.worker_address());
*result.mutable_processing_mode_def() = task.processing_mode_def();
switch (task.optional_num_consumers_case()) {
case TaskDef::kNumConsumers:
result.set_num_consumers(task.num_consumers());
break;
default:
break;
}
result.set_num_workers(task.num_workers());
result.set_worker_index(task.worker_index());
return result;
}
}
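// Process-wide registry of workers running in this binary, keyed by address.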
mutex LocalWorkers::mu_(LINKER_INITIALIZED);
LocalWorkers::AddressToWorkerMap* LocalWorkers::local_workers_ =
new AddressToWorkerMap();
DataServiceWorkerImpl::DataServiceWorkerImpl(const WorkerConfig& config)
: config_(ApplyWorkerDefaults(config)), worker_uid_(port::JobUid()) {
metrics::RecordTFDataServiceWorkerCreated();
}
DataServiceWorkerImpl::~DataServiceWorkerImpl() {
mutex_lock l(mu_);
cancelled_ = true;
task_completion_cv_.notify_one();
heartbeat_cv_.notify_one();
}
Status DataServiceWorkerImpl::Start(
const std::string& worker_address,
const std::vector<DataTransferServerInfo>& transfer_servers) {
VLOG(3) << "Starting tf.data service worker at address " << worker_address;
TF_RETURN_IF_ERROR(ValidateWorkerConfig());
worker_address_ = worker_address;
transfer_servers_ = transfer_servers;
TF_ASSIGN_OR_RETURN(dispatcher_, CreateDispatcherClient());
auto should_retry = [this]() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
return !cancelled_;
};
TF_RETURN_IF_ERROR(grpc_util::Retry([this]() { return Heartbeat(); },
should_retry, "Worker heartbeat.",
kint64max));
LOG(INFO) << "Worker registered with dispatcher running at "
<< config_.dispatcher_address()
<< ". Worker config: " << config_.DebugString();
task_completion_thread_ = absl::WrapUnique(
Env::Default()->StartThread({}, "data-service-worker-task-completion",
[this]() { TaskCompletionThread(); }));
heartbeat_thread_ = absl::WrapUnique(Env::Default()->StartThread(
{}, "data-service-worker-heartbeat", [this]() { HeartbeatThread(); }));
mutex_lock l(mu_);
registered_ = true;
return absl::OkStatus();
}
void DataServiceWorkerImpl::Stop() {
absl::flat_hash_map<int64_t, std::shared_ptr<Task>> tasks;
absl::flat_hash_map<SnapshotTask, std::unique_ptr<SnapshotStreamWriter>,
absl::Hash<SnapshotTask>>
snapshot_writers;
{
mutex_lock l(mu_);
cancelled_ = true;
tasks.swap(tasks_);
snapshot_writers.swap(snapshot_writers_);
}
for (const auto& [task_id, task] : tasks) {
StopTask(*task);
}
for (const auto& [unused, snapshot_writer] : snapshot_writers) {
snapshot_writer->Cancel();
}
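  // Quiet period so in-flight clients can observe the cancellation before the
  // worker is torn down.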
Env::Default()->SleepForMicroseconds(config_.shutdown_quiet_period_ms() *
1000);
}
Status DataServiceWorkerImpl::ValidateWorkerConfig() const {
const bool any_tag_is_empty = absl::c_any_of(
config_.worker_tags(),
[](const std::string& worker_tag) { return worker_tag.empty(); });
if (any_tag_is_empty) {
return errors::FailedPrecondition(
"Worker tags cannot be empty. Got tags {",
absl::StrJoin(config_.worker_tags().begin(),
config_.worker_tags().end(), ", "),
"}");
}
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<DataServiceDispatcherClient>>
DataServiceWorkerImpl::CreateDispatcherClient() const TF_LOCKS_EXCLUDED(mu_) {
auto dispatcher = std::make_unique<DataServiceDispatcherClient>(
config_.dispatcher_address(), config_.protocol());
auto should_retry = [this]() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
return !cancelled_;
};
TF_RETURN_IF_ERROR(
grpc_util::Retry([&dispatcher]() { return dispatcher->Initialize(); },
should_retry, "Initialize dispatcher client.",
kint64max));
return dispatcher;
}
Status DataServiceWorkerImpl::GetElementResult(
const GetElementRequest* request, struct GetElementResult* result) {
Task* task = nullptr;
{
mutex_lock l(mu_);
if (cancelled_) {
return errors::Cancelled("Worker is shutting down");
}
if (!registered_) {
return errors::Unavailable(
"Worker has not yet registered with dispatcher.");
}
auto it = tasks_.find(request->task_id());
if (it == tasks_.end()) {
if (deleted_tasks_.contains(request->task_id())) {
return errors::FailedPrecondition(
"Got request for local task ", request->task_id(), " of worker ",
worker_address_, ", which has been deleted. You may be creating ",
"a duplicate iteration which has already finished. To fix this, "
"make sure to create your dataset only once, as opposed to "
"re-creating it repeatedly inside a loop.");
}
if (finished_tasks_.contains(request->task_id())) {
VLOG(3) << "Task is already finished";
result->end_of_sequence = true;
result->skip = false;
return absl::OkStatus();
}
return errors::Unavailable("Task ", request->task_id(), " not found");
}
task = it->second.get();
task->outstanding_requests++;
}
auto cleanup = gtl::MakeCleanup([&] {
mutex_lock l(mu_);
task->outstanding_requests--;
cv_.notify_all();
});
TF_RETURN_IF_ERROR(EnsureTaskInitialized(*task));
TF_RETURN_IF_ERROR(task->task_runner->GetNext(*request, *result));
if (result->end_of_sequence) {
mutex_lock l(mu_);
VLOG(3) << "Reached end_of_sequence for task " << request->task_id();
pending_completed_tasks_.insert(request->task_id());
task_completion_cv_.notify_one();
}
return absl::OkStatus();
}
Status DataServiceWorkerImpl::ProcessTask(const ProcessTaskRequest* request,
ProcessTaskResponse* response) {
mutex_lock l(mu_);
const TaskDef& task = request->task();
VLOG(3) << "Received request to process task " << task.task_id();
return ProcessTaskInternal(task);
}
Status DataServiceWorkerImpl::ProcessTaskInternal(const TaskDef& task_def)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
std::shared_ptr<Task>& task = tasks_[task_def.task_id()];
if (task) {
VLOG(1) << "Received request to process already-processed task "
<< task->task_def.task_id();
return absl::OkStatus();
}
task = std::make_unique<Task>(task_def);
VLOG(3) << "Began processing for task " << task_def.task_id()
<< " with processing mode "
<< task_def.processing_mode_def().DebugString();
return absl::OkStatus();
}
Status DataServiceWorkerImpl::EnsureTaskInitialized(
DataServiceWorkerImpl::Task& task) {
if (task.task_def.worker_address() != worker_address_) {
return errors::Internal(absl::Substitute(
"Dispatcher's worker address $0 does not match worker's address $1.",
task.task_def.worker_address(), worker_address_));
}
mutex_lock l(task.mu);
if (task.initialized) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(DatasetDef dataset_def, GetDatasetDef(task.task_def));
TF_ASSIGN_OR_RETURN(std::unique_ptr<standalone::Dataset> dataset,
MakeDataset(dataset_def, task.task_def));
TF_ASSIGN_OR_RETURN(std::unique_ptr<standalone::Iterator> iterator,
MakeDatasetIterator(*dataset, task.task_def));
auto task_iterator = std::make_unique<StandaloneTaskIterator>(
std::move(dataset), std::move(iterator));
TF_RETURN_IF_ERROR(TaskRunner::Create(
config_, task.task_def, std::move(task_iterator), task.task_runner));
task.initialized = true;
VLOG(3) << "Created iterator for task " << task.task_def.task_id();
return absl::OkStatus();
}
absl::StatusOr<DatasetDef> DataServiceWorkerImpl::GetDatasetDef(
const TaskDef& task_def) const {
switch (task_def.dataset_case()) {
case TaskDef::kDatasetDef:
return task_def.dataset_def();
case TaskDef::kPath: {
DatasetDef def;
Status s = ReadDatasetDef(task_def.path(), def);
if (!s.ok()) {
LOG(INFO) << "Failed to read dataset from " << task_def.path() << ": "
<< s << ". Falling back to reading from dispatcher.";
TF_RETURN_IF_ERROR(
dispatcher_->GetDatasetDef(task_def.dataset_id(), def));
}
return def;
}
case TaskDef::DATASET_NOT_SET:
return errors::Internal("Unrecognized dataset case: ",
task_def.dataset_case());
}
}
absl::StatusOr<bool> DataServiceWorkerImpl::DisableCompressionAtRuntime(
const std::string& dataset_id) const {
DisableCompressionAtRuntimeResponse response;
absl::Time deadline =
absl::FromUnixMicros(EnvTime::NowMicros()) + kDefaultDispatcherTimeout;
auto should_retry = [this]() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
return !cancelled_;
};
TF_RETURN_IF_ERROR(grpc_util::Retry(
[&]() {
          return dispatcher_->DisableCompressionAtRuntime(
              dataset_id, /*disable_compression_at_runtime=*/false, response);
},
should_retry, "Disable compression at runtime.",
absl::ToUnixMicros(deadline)));
if (response.no_compression_to_disable()) {
return false;
}
metrics::RecordTFDataServiceRuntimeCompressionDecision(
response.compression_disabled_at_runtime());
return response.compression_disabled_at_runtime();
}
absl::StatusOr<std::unique_ptr<standalone::Dataset>>
DataServiceWorkerImpl::MakeDataset(const DatasetDef& dataset_def,
const TaskDef& task_def) const {
TF_ASSIGN_OR_RETURN(bool compression_disabled_at_runtime,
DisableCompressionAtRuntime(task_def.dataset_id()));
GraphDef graph = dataset_def.graph();
if (VLOG_IS_ON(1)) {
std::string prefix = absl::StrCat(task_def.dataset_id(), "_", worker_uid_);
DumpGraphDefToFile(absl::StrCat(prefix, "-prerewrite_GraphDef"), graph);
DumpProtoToFile(absl::StrCat(prefix, "-prerewrite_TaskDef"), task_def);
}
if (compression_disabled_at_runtime) {
RemoveCompressionMapRewriter remove_compression_map_rewriter;
TF_ASSIGN_OR_RETURN(
graph, remove_compression_map_rewriter.ApplyRemoveCompressionMapRewrite(
graph));
}
TF_ASSIGN_OR_RETURN(AutoShardRewriter auto_shard_rewriter,
AutoShardRewriter::Create(task_def));
TF_ASSIGN_OR_RETURN(graph, auto_shard_rewriter.ApplyAutoShardRewrite(graph));
std::unique_ptr<standalone::Dataset> dataset;
TF_RETURN_IF_ERROR(standalone::Dataset::FromGraph(
standalone::Dataset::Params(), graph, &dataset));
return dataset;
}
absl::StatusOr<std::unique_ptr<standalone::Iterator>>
DataServiceWorkerImpl::MakeDatasetIterator(standalone::Dataset& dataset,
const TaskDef& task_def) const {
std::unique_ptr<standalone::Iterator> iterator;
if (IsNoShard(task_def.processing_mode_def()) ||
IsStaticShard(task_def.processing_mode_def())) {
TF_RETURN_IF_ERROR(dataset.MakeIterator(&iterator));
return iterator;
}
if (IsDynamicShard(task_def.processing_mode_def())) {
std::vector<std::unique_ptr<SplitProvider>> split_providers;
split_providers.reserve(task_def.num_split_providers());
for (int i = 0; i < task_def.num_split_providers(); ++i) {
split_providers.push_back(std::make_unique<DataServiceSplitProvider>(
config_.dispatcher_address(), config_.protocol(),
task_def.iteration_id(), i, config_.dispatcher_timeout_ms()));
}
TF_RETURN_IF_ERROR(
dataset.MakeIterator(std::move(split_providers), &iterator));
return iterator;
}
return errors::InvalidArgument("Unrecognized processing mode: ",
task_def.processing_mode_def().DebugString());
}
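// Marks the task initialized so a concurrent EnsureTaskInitialized becomes a
// no-op, cancels the runner, then waits for outstanding requests to drain.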
void DataServiceWorkerImpl::StopTask(Task& task) TF_LOCKS_EXCLUDED(mu_) {
{
mutex_lock l(task.mu);
task.initialized = true;
}
if (task.task_runner) {
task.task_runner->Cancel();
}
mutex_lock l(mu_);
while (task.outstanding_requests > 0) {
cv_.wait(l);
}
}
Status DataServiceWorkerImpl::GetElement(const GetElementRequest* request,
GetElementResponse* response) {
VLOG(3) << "Received GetElement request for task " << request->task_id();
struct GetElementResult result;
TF_RETURN_IF_ERROR(GetElementResult(request, &result));
response->set_end_of_sequence(result.end_of_sequence);
response->set_skip_task(result.skip);
if (!response->end_of_sequence() && !response->skip_task()) {
TF_RETURN_IF_ERROR(
MoveElementToResponse(std::move(result.components), *response));
VLOG(3) << "Producing an element for task " << request->task_id();
}
return absl::OkStatus();
}
Status DataServiceWorkerImpl::GetWorkerTasks(
const GetWorkerTasksRequest* request, GetWorkerTasksResponse* response) {
mutex_lock l(mu_);
for (const auto& it : tasks_) {
Task* task = it.second.get();
TaskInfo* task_info = response->add_tasks();
task_info->set_worker_address(worker_address_);
task_info->set_task_id(task->task_def.task_id());
task_info->set_iteration_id(task->task_def.iteration_id());
}
return absl::OkStatus();
}
Status DataServiceWorkerImpl::GetSnapshotTaskProgresses(
const GetSnapshotTaskProgressesRequest* request,
GetSnapshotTaskProgressesResponse* response) {
for (const auto& snapshot_task_progress : GetSnapshotTaskProgress()) {
*response->add_snapshot_task_progresses() = snapshot_task_progress;
}
return absl::OkStatus();
}
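// Background thread that reports completed tasks to the dispatcher. It
// sleeps until there is a pending completed task (or the worker is
// cancelled), then sends updates, backing off by `kRetryInterval` when a
// send fails.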
void DataServiceWorkerImpl::TaskCompletionThread() TF_LOCKS_EXCLUDED(mu_) {
while (true) {
{
mutex_lock l(mu_);
while (!cancelled_ && pending_completed_tasks_.empty()) {
task_completion_cv_.wait(l);
}
if (cancelled_ && pending_completed_tasks_.empty()) {
VLOG(3) << "Task completion thread shutting down";
return;
}
}
Status s = SendTaskUpdates();
if (!s.ok()) {
LOG(WARNING) << "Failed to send task updates to dispatcher: " << s;
mutex_lock l(mu_);
if (cancelled_) {
VLOG(3) << "Task completion thread shutting down";
return;
} else {
task_completion_cv_.wait_for(
l, absl::ToChronoMicroseconds(kRetryInterval));
}
}
}
}
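// Snapshots the pending completed task ids under the lock, reports them to
// the dispatcher via WorkerUpdate, and erases only the ids that were
// successfully sent.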
Status DataServiceWorkerImpl::SendTaskUpdates() TF_LOCKS_EXCLUDED(mu_) {
std::vector<TaskProgress> task_progress;
{
mutex_lock l(mu_);
VLOG(3) << "Sending " << pending_completed_tasks_.size()
<< " task updates to dispatcher";
task_progress.reserve(pending_completed_tasks_.size());
for (int task_id : pending_completed_tasks_) {
task_progress.emplace_back();
task_progress.back().set_task_id(task_id);
task_progress.back().set_completed(true);
}
}
TF_RETURN_IF_ERROR(dispatcher_->WorkerUpdate(worker_address_, task_progress));
mutex_lock l(mu_);
for (const auto& update : task_progress) {
pending_completed_tasks_.erase(update.task_id());
}
VLOG(3) << "Sent " << task_progress.size() << " task updates ";
return absl::OkStatus();
}
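// Background thread that heartbeats the dispatcher once per configured
// heartbeat interval. Heartbeats are skipped until the worker has
// registered; failures are logged and retried on the next interval.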
void DataServiceWorkerImpl::HeartbeatThread() TF_LOCKS_EXCLUDED(mu_) {
while (true) {
int64_t next_heartbeat_micros =
Env::Default()->NowMicros() + (config_.heartbeat_interval_ms() * 1000);
{
mutex_lock l(mu_);
while (!cancelled_ &&
Env::Default()->NowMicros() < next_heartbeat_micros) {
int64_t time_to_wait_micros =
next_heartbeat_micros - Env::Default()->NowMicros();
heartbeat_cv_.wait_for(l,
std::chrono::microseconds(time_to_wait_micros));
}
if (cancelled_) {
VLOG(3) << "Heartbeat thread shutting down";
return;
}
if (!registered_) {
VLOG(1) << "Not performing heartbeat; worker is not yet registered";
continue;
}
}
Status s = Heartbeat();
if (!s.ok()) {
LOG(WARNING) << "Failed to send heartbeat to dispatcher: " << s;
}
}
}
Status DataServiceWorkerImpl::Heartbeat() {
WorkerHeartbeatRequest request = BuildWorkerHeartbeatRequest();
TF_ASSIGN_OR_RETURN(WorkerHeartbeatResponse response,
dispatcher_->WorkerHeartbeat(request));
UpdateTasks(response);
return UpdateSnapshotWriters(response);
}
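// Builds one ActiveTask entry per live task. The task map is copied under
// `mu_` first so per-task model processing times can be computed without
// holding the worker-wide lock.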
std::vector<ActiveTask> DataServiceWorkerImpl::GetActiveTasks() const
TF_LOCKS_EXCLUDED(mu_) {
std::vector<ActiveTask> active_tasks;
absl::flat_hash_map<int64_t, std::shared_ptr<Task>> current_tasks;
{
mutex_lock l(mu_);
current_tasks = tasks_;
}
for (const auto& [task_id, task] : current_tasks) {
if (task == nullptr) {
continue;
}
ActiveTask active_task;
active_task.set_task_id(task_id);
active_task.set_processing_time_nsec(0.0);
bool task_initialized = false;
{
mutex_lock task_lock(task->mu);
task_initialized = task->initialized;
}
if (task_initialized && task->task_runner != nullptr &&
task->task_runner->model() != nullptr) {
std::shared_ptr<model::Model> model = task->task_runner->model();
double processing_time_nsec = model->ComputeSnapshotProcessingTimeNsec();
if (processing_time_nsec > 0) {
active_task.set_processing_time_nsec(processing_time_nsec);
}
}
active_tasks.push_back(std::move(active_task));
}
return active_tasks;
}
std::vector<int64_t> DataServiceWorkerImpl::GetTaskIds(
const std::vector<ActiveTask>& active_tasks) const {
std::vector<int64_t> task_ids;
task_ids.reserve(active_tasks.size());
for (const ActiveTask& active_task : active_tasks) {
task_ids.push_back(active_task.task_id());
}
return task_ids;
}
WorkerHeartbeatRequest DataServiceWorkerImpl::BuildWorkerHeartbeatRequest()
const TF_LOCKS_EXCLUDED(mu_) {
std::vector<ActiveTask> active_tasks = GetActiveTasks();
std::vector<int64_t> current_tasks = GetTaskIds(active_tasks);
WorkerHeartbeatRequest request;
request.set_worker_address(worker_address_);
*request.mutable_transfer_servers() = {transfer_servers_.begin(),
transfer_servers_.end()};
*request.mutable_worker_tags() = config_.worker_tags();
request.set_worker_uid(worker_uid_);
*request.mutable_current_tasks() = {current_tasks.begin(),
current_tasks.end()};
for (const auto& snapshot_task_progress : GetSnapshotTaskProgress()) {
request.mutable_snapshot_task_progress()->insert(
{snapshot_task_progress.snapshot_task().base_path(),
snapshot_task_progress});
}
*request.mutable_active_tasks() = {active_tasks.begin(), active_tasks.end()};
return request;
}
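// Reports per-stream snapshot progress: for each active snapshot writer,
// either its completion state or the error status that terminated it.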
std::vector<SnapshotTaskProgress>
DataServiceWorkerImpl::GetSnapshotTaskProgress() const {
mutex_lock l(mu_);
std::vector<SnapshotTaskProgress> snapshot_task_progress;
for (const auto& [snapshot_task, stream_writer] : snapshot_writers_) {
SnapshotTaskProgress progress;
progress.mutable_snapshot_task()->set_base_path(snapshot_task.base_path);
progress.mutable_snapshot_task()->set_stream_index(
snapshot_task.stream_index);
absl::StatusOr<bool> completed = stream_writer->Completed();
if (completed.ok()) {
progress.set_completed(*completed);
} else {
*progress.mutable_status() = tsl::StatusToProto(completed.status());
}
snapshot_task_progress.push_back(std::move(progress));
}
return snapshot_task_progress;
}
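// Applies the dispatcher's heartbeat response to the local task set: starts
// newly assigned tasks (skipping ones the client already deleted locally)
// and stops tasks the dispatcher asked to delete. StopTask runs outside the
// lock because it blocks on outstanding requests.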
void DataServiceWorkerImpl::UpdateTasks(const WorkerHeartbeatResponse& response)
TF_LOCKS_EXCLUDED(mu_) {
std::vector<std::shared_ptr<Task>> tasks_to_delete;
{
mutex_lock l(mu_);
for (const auto& task : response.new_tasks()) {
VLOG(1) << "Received new task from dispatcher with id " << task.task_id();
if (deleted_tasks_.contains(task.task_id())) {
continue;
}
Status s = ProcessTaskInternal(task);
if (!s.ok() && !errors::IsAlreadyExists(s)) {
LOG(WARNING) << "Failed to start processing task " << task.task_id()
<< ": " << s;
}
}
tasks_to_delete.reserve(response.tasks_to_delete_size());
for (int64_t task_id : response.tasks_to_delete()) {
VLOG(3) << "Deleting task " << task_id
<< " at the request of the dispatcher";
if (!tasks_.contains(task_id)) {
continue;
}
tasks_to_delete.push_back(std::move(tasks_[task_id]));
tasks_.erase(task_id);
finished_tasks_.insert(task_id);
}
}
for (const auto& task : tasks_to_delete) {
StopTask(*task);
}
}
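// Reconciles snapshot writers with the dispatcher's current assignments:
// creates a SnapshotStreamWriter for each newly assigned
// (base_path, stream_index) pair and cancels writers whose assignment has
// been revoked.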
Status DataServiceWorkerImpl::UpdateSnapshotWriters(
const WorkerHeartbeatResponse& response) TF_LOCKS_EXCLUDED(mu_) {
absl::flat_hash_set<SnapshotTask> assigned_snapshot_task_keys;
for (const SnapshotTaskDef& snapshot_task : response.snapshot_tasks()) {
SnapshotTask snapshot_task_key{snapshot_task.base_path(),
snapshot_task.stream_index()};
assigned_snapshot_task_keys.insert(snapshot_task_key);
{
mutex_lock l(mu_);
if (snapshot_writers_.contains(snapshot_task_key)) {
continue;
}
}
DatasetDef dataset_def;
TF_RETURN_IF_ERROR(ReadBinaryProto(
Env::Default(), DatasetDefFilePath(snapshot_task.base_path()),
&dataset_def));
TF_ASSIGN_OR_RETURN(std::unique_ptr<StandaloneTaskIterator> iterator,
MakeSnapshotTaskIterator(snapshot_task, dataset_def));
mutex_lock l(mu_);
snapshot_writers_.emplace(
snapshot_task_key,
std::make_unique<SnapshotStreamWriter>(
SnapshotWriterParams{
snapshot_task.base_path(), snapshot_task.stream_index(),
snapshot_task.metadata().compression(), Env::Default(),
ByteSize::Bytes(config_.snapshot_max_chunk_size_bytes())},
std::move(iterator)));
}
mutex_lock l(mu_);
for (auto it = snapshot_writers_.begin(); it != snapshot_writers_.end();) {
if (!assigned_snapshot_task_keys.contains(it->first)) {
it->second->Cancel();
snapshot_writers_.erase(it++);
} else {
++it;
}
}
return absl::OkStatus();
}
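// Builds the iterator consumed by a snapshot stream writer: the dataset is
// instantiated from the snapshot's saved graph, with one
// SnapshotSplitProvider per source feeding splits fetched from the
// dispatcher.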
absl::StatusOr<std::unique_ptr<StandaloneTaskIterator>>
DataServiceWorkerImpl::MakeSnapshotTaskIterator(
const SnapshotTaskDef& snapshot_task, const DatasetDef& dataset_def) const {
std::unique_ptr<standalone::Dataset> dataset;
TF_RETURN_IF_ERROR(standalone::Dataset::FromGraph(
standalone::Dataset::Params(), dataset_def.graph(), &dataset));
std::vector<std::unique_ptr<SplitProvider>> split_providers;
split_providers.reserve(snapshot_task.num_sources());
for (int i = 0; i < snapshot_task.num_sources(); ++i) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<DataServiceDispatcherClient> dispatcher,
CreateDispatcherClient());
split_providers.push_back(std::make_unique<SnapshotSplitProvider>(
        worker_address_, snapshot_task, /*source_index=*/i,
        /*timeout=*/absl::Milliseconds(config_.dispatcher_timeout_ms()),
std::move(dispatcher), Env::Default()));
}
std::unique_ptr<standalone::Iterator> iterator;
TF_RETURN_IF_ERROR(
dataset->MakeIterator(std::move(split_providers), &iterator));
return std::make_unique<StandaloneTaskIterator>(std::move(dataset),
std::move(iterator));
}
void DataServiceWorkerImpl::DeleteLocalTask(const TaskInfo& task_info)
TF_LOCKS_EXCLUDED(mu_) {
std::shared_ptr<Task> task;
{
mutex_lock l(mu_);
auto it = tasks_.find(task_info.task_id());
if (it == tasks_.end() || !it->second) {
return;
}
task = std::move(it->second);
tasks_.erase(task_info.task_id());
pending_completed_tasks_.insert(task_info.task_id());
deleted_tasks_.insert(task_info.task_id());
}
VLOG(2) << "Delete local task " << task_info.task_id() << " from worker "
<< worker_address_ << " at the request of the client.";
StopTask(*task);
}
WorkerStateExport DataServiceWorkerImpl::ExportState() const {
WorkerStateExport worker_state_export;
*worker_state_export.mutable_worker_config() = config_;
mutex_lock l(mu_);
if (!registered_) {
return worker_state_export;
}
for (const auto& task : tasks_) {
*worker_state_export.add_tasks() = Export(task.second->task_def);
}
for (int64_t finished_task : finished_tasks_) {
worker_state_export.add_finished_task_ids(finished_task);
}
for (int64_t deleted_task : deleted_tasks_) {
worker_state_export.add_deleted_task_ids(deleted_task);
}
return worker_state_export;
}
void LocalWorkers::Add(absl::string_view worker_address,
std::shared_ptr<DataServiceWorkerImpl> worker) {
DCHECK(worker != nullptr) << "Adding a nullptr local worker is disallowed.";
VLOG(1) << "Register local worker at address " << worker_address;
mutex_lock l(mu_);
(*local_workers_)[worker_address] = worker;
}
std::shared_ptr<DataServiceWorkerImpl> LocalWorkers::Get(
absl::string_view worker_address) {
tf_shared_lock l(mu_);
AddressToWorkerMap::const_iterator it = local_workers_->find(worker_address);
if (it == local_workers_->end()) {
return nullptr;
}
return it->second;
}
bool LocalWorkers::Empty() {
tf_shared_lock l(mu_);
return local_workers_->empty();
}
void LocalWorkers::Remove(absl::string_view worker_address) {
VLOG(1) << "Remove local worker at address " << worker_address;
mutex_lock l(mu_);
local_workers_->erase(worker_address);
}
}
} | #include "tensorflow/core/data/service/worker_impl.h"
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/data/service/test_cluster.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::IsNull;
using ::testing::NotNull;
class LocalWorkersTest : public ::testing::Test {
protected:
void SetUp() override {
    test_cluster_ = std::make_unique<TestCluster>(/*num_workers=*/0);
TF_ASSERT_OK(test_cluster_->Initialize());
}
std::unique_ptr<TestCluster> test_cluster_;
};
TEST_F(LocalWorkersTest, AddRemoveLocalWorkers) {
EXPECT_TRUE(LocalWorkers::Empty());
TF_ASSERT_OK(test_cluster_->AddWorker());
TF_ASSERT_OK(test_cluster_->AddWorker());
TF_ASSERT_OK(test_cluster_->AddWorker());
std::vector<std::string> worker_addresses = {test_cluster_->WorkerAddress(0),
test_cluster_->WorkerAddress(1),
test_cluster_->WorkerAddress(2)};
EXPECT_FALSE(LocalWorkers::Empty());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[0]), NotNull());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[1]), NotNull());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[2]), NotNull());
test_cluster_->StopWorker(0);
EXPECT_FALSE(LocalWorkers::Empty());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[0]), IsNull());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[1]), NotNull());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[2]), NotNull());
test_cluster_->StopWorkers();
EXPECT_TRUE(LocalWorkers::Empty());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[0]), IsNull());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[1]), IsNull());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[2]), IsNull());
}
TEST_F(LocalWorkersTest, NoLocalWorker) {
EXPECT_TRUE(LocalWorkers::Empty());
EXPECT_THAT(LocalWorkers::Get(""), IsNull());
EXPECT_THAT(LocalWorkers::Get("Invalid address"),
IsNull());
EXPECT_TRUE(LocalWorkers::Empty());
LocalWorkers::Remove("");
LocalWorkers::Remove("Invalid address");
EXPECT_TRUE(LocalWorkers::Empty());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/worker_impl.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/worker_impl_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
85e435bf-b69e-489b-a72a-a03f354f1baa | cpp | tensorflow/tensorflow | server_lib | tensorflow/core/distributed_runtime/server_lib.cc | tensorflow/core/distributed_runtime/server_lib_test.cc | #include "tensorflow/core/distributed_runtime/server_lib.h"
#include <unordered_map>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/mutex.h"
namespace tensorflow {
namespace {
mutex* get_server_factory_lock() {
static mutex server_factory_lock(LINKER_INITIALIZED);
return &server_factory_lock;
}
typedef std::unordered_map<string, ServerFactory*> ServerFactories;
ServerFactories* server_factories() {
static ServerFactories* factories = new ServerFactories;
return factories;
}
}
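// Registers `factory` under `server_type`. Each server type is expected to
// be registered once; a duplicate registration is logged as an error and
// the first factory is kept.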
void ServerFactory::Register(const string& server_type,
ServerFactory* factory) {
mutex_lock l(*get_server_factory_lock());
if (!server_factories()->insert({server_type, factory}).second) {
LOG(ERROR) << "Two server factories are being registered under "
<< server_type;
}
}
Status ServerFactory::GetFactory(const ServerDef& server_def,
ServerFactory** out_factory) {
mutex_lock l(*get_server_factory_lock());
for (const auto& server_factory : *server_factories()) {
if (server_factory.second->AcceptsOptions(server_def)) {
*out_factory = server_factory.second;
return absl::OkStatus();
}
}
std::vector<string> server_names;
for (const auto& server_factory : *server_factories()) {
server_names.push_back(server_factory.first);
}
return errors::NotFound(
"No server factory registered for the given ServerDef: ",
server_def.DebugString(), "\nThe available server factories are: [ ",
absl::StrJoin(server_names, ", "), " ]");
}
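// Creates a server from `server_def` by dispatching to the first registered
// factory whose AcceptsOptions() accepts it. A minimal usage sketch (the
// "grpc" protocol value is illustrative and assumes a matching factory is
// linked into the binary):
//
//   ServerDef def;
//   def.set_protocol("grpc");
//   std::unique_ptr<ServerInterface> server;
//   Status s = NewServer(def, &server);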
Status NewServer(const ServerDef& server_def,
std::unique_ptr<ServerInterface>* out_server) {
ServerFactory* factory;
TF_RETURN_IF_ERROR(ServerFactory::GetFactory(server_def, &factory));
return factory->NewServer(server_def, ServerFactory::Options(), out_server);
}
Status NewServerWithOptions(const ServerDef& server_def,
const ServerFactory::Options& options,
std::unique_ptr<ServerInterface>* out_server) {
ServerFactory* factory;
TF_RETURN_IF_ERROR(ServerFactory::GetFactory(server_def, &factory));
return factory->NewServer(server_def, options, out_server);
}
} | #include "tensorflow/core/distributed_runtime/server_lib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class TestServerFactory : public ServerFactory {
public:
bool AcceptsOptions(const ServerDef& server_def) override {
return server_def.protocol() == "test_protocol";
}
Status NewServer(const ServerDef& server_def, const Options& options,
std::unique_ptr<ServerInterface>* out_server) override {
return absl::OkStatus();
}
};
TEST(ServerLibTest, NewServerFactoryAccepts) {
ServerFactory::Register("TEST_SERVER", new TestServerFactory());
ServerDef server_def;
server_def.set_protocol("test_protocol");
std::unique_ptr<ServerInterface> server;
TF_EXPECT_OK(NewServer(server_def, &server));
}
TEST(ServerLibTest, NewServerNoFactoriesAccept) {
ServerDef server_def;
server_def.set_protocol("fake_protocol");
std::unique_ptr<ServerInterface> server;
Status s = NewServer(server_def, &server);
ASSERT_NE(s, absl::OkStatus());
EXPECT_TRUE(absl::StrContains(
s.message(), "No server factory registered for the given ServerDef"));
EXPECT_TRUE(
absl::StrContains(s.message(), "The available server factories are: ["));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/server_lib.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/server_lib_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b0a5d668-8561-4789-820f-0d628bfa1730 | cpp | tensorflow/tensorflow | journal | tensorflow/core/data/service/journal.cc | tensorflow/core/data/service/journal_test.cc | #include "tensorflow/core/data/service/journal.h"
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "tensorflow/core/data/service/journal.pb.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/lib/io/record_writer.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/regexp.h"
namespace tensorflow {
namespace data {
namespace {
constexpr StringPiece kJournal = "journal";
Status ParseSequenceNumber(const std::string& journal_file,
int64_t* sequence_number) {
if (!RE2::FullMatch(journal_file, ".*_(\\d+)", sequence_number)) {
return errors::InvalidArgument("Failed to parse journal file name: ",
journal_file);
}
return absl::OkStatus();
}
}
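// Journal files live directly under the journal directory and are named
// journal_<n>, where <n> is a non-negative sequence number. A writer always
// appends to a fresh file whose sequence number is one past the largest
// existing one.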
std::string DataServiceJournalFile(const std::string& journal_dir,
int64_t sequence_number) {
return io::JoinPath(journal_dir,
absl::StrCat(kJournal, "_", sequence_number));
}
FileJournalWriter::FileJournalWriter(Env* env, const std::string& journal_dir)
: env_(env), journal_dir_(journal_dir) {}
Status FileJournalWriter::EnsureInitialized() {
if (writer_) {
return absl::OkStatus();
}
std::vector<std::string> journal_files;
TF_RETURN_IF_ERROR(env_->RecursivelyCreateDir(journal_dir_));
TF_RETURN_IF_ERROR(env_->GetChildren(journal_dir_, &journal_files));
int64_t latest_sequence_number = -1;
for (const auto& file : journal_files) {
int64_t sequence_number;
TF_RETURN_IF_ERROR(ParseSequenceNumber(file, &sequence_number));
latest_sequence_number = std::max(latest_sequence_number, sequence_number);
}
std::string journal_file =
DataServiceJournalFile(journal_dir_, latest_sequence_number + 1);
TF_RETURN_IF_ERROR(env_->NewAppendableFile(journal_file, &file_));
writer_ = std::make_unique<io::RecordWriter>(file_.get());
VLOG(1) << "Created journal writer to write to " << journal_file;
return absl::OkStatus();
}
Status FileJournalWriter::Write(const Update& update) {
TF_RETURN_IF_ERROR(EnsureInitialized());
std::string s = update.SerializeAsString();
if (s.empty()) {
return errors::Internal("Failed to serialize update ", update.DebugString(),
" to string");
}
TF_RETURN_IF_ERROR(writer_->WriteRecord(s));
TF_RETURN_IF_ERROR(writer_->Flush());
TF_RETURN_IF_ERROR(file_->Sync());
if (VLOG_IS_ON(4)) {
VLOG(4) << "Wrote journal entry: " << update.DebugString();
}
return absl::OkStatus();
}
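// Reads updates back in sequence-number order, starting from journal_0.
// When a file is exhausted the reader advances to journal_<n+1>;
// end-of-journal is reported only once the next file in the sequence does
// not exist.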
FileJournalReader::FileJournalReader(Env* env, StringPiece journal_dir)
: env_(env), journal_dir_(journal_dir) {}
Status FileJournalReader::EnsureInitialized() {
if (reader_) {
return absl::OkStatus();
}
return UpdateFile(DataServiceJournalFile(journal_dir_, 0));
}
Status FileJournalReader::Read(Update& update, bool& end_of_journal) {
TF_RETURN_IF_ERROR(EnsureInitialized());
while (true) {
tstring record;
Status s = reader_->ReadRecord(&record);
if (absl::IsOutOfRange(s)) {
sequence_number_++;
std::string next_journal_file =
DataServiceJournalFile(journal_dir_, sequence_number_);
if (absl::IsNotFound(env_->FileExists(next_journal_file))) {
VLOG(3) << "Next journal file " << next_journal_file
<< " does not exist. End of journal reached.";
end_of_journal = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(UpdateFile(next_journal_file));
continue;
}
TF_RETURN_IF_ERROR(s);
if (!update.ParseFromString(record)) {
return errors::DataLoss("Failed to parse journal record.");
}
if (VLOG_IS_ON(4)) {
VLOG(4) << "Read journal entry: " << update.DebugString();
}
end_of_journal = false;
return absl::OkStatus();
}
}
Status FileJournalReader::UpdateFile(const std::string& filename) {
VLOG(1) << "Reading from journal file " << filename;
TF_RETURN_IF_ERROR(env_->NewRandomAccessFile(filename, &file_));
io::RecordReaderOptions opts;
  opts.buffer_size = 2 << 20;  // 2 MB read buffer.
reader_ = std::make_unique<io::SequentialRecordReader>(file_.get(), opts);
return absl::OkStatus();
}
}
} | #include "tensorflow/core/data/service/journal.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/journal.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::HasSubstr;
bool NewJournalDir(std::string& journal_dir) {
std::string filename = testing::TmpDir();
if (!Env::Default()->CreateUniqueFileName(&filename, "journal_dir")) {
return false;
}
journal_dir = filename;
return true;
}
Update MakeCreateIterationUpdate() {
Update update;
CreateIterationUpdate* create_iteration = update.mutable_create_iteration();
create_iteration->set_job_id(3);
create_iteration->set_iteration_id(8);
create_iteration->set_repetition(5);
return update;
}
Update MakeFinishTaskUpdate() {
Update update;
FinishTaskUpdate* finish_task = update.mutable_finish_task();
finish_task->set_task_id(8);
return update;
}
Update MakeRegisterDatasetUpdate() {
Update update;
RegisterDatasetUpdate* register_dataset = update.mutable_register_dataset();
register_dataset->set_dataset_id("dataset_id");
register_dataset->set_fingerprint(3);
return update;
}
Status CheckJournalContent(StringPiece journal_dir,
const std::vector<Update>& expected) {
FileJournalReader reader(Env::Default(), journal_dir);
for (const auto& update : expected) {
Update result;
bool end_of_journal = true;
TF_RETURN_IF_ERROR(reader.Read(result, end_of_journal));
EXPECT_FALSE(end_of_journal);
EXPECT_EQ(result.SerializeAsString(), update.SerializeAsString());
}
Update result;
bool end_of_journal = false;
TF_RETURN_IF_ERROR(reader.Read(result, end_of_journal));
EXPECT_TRUE(end_of_journal);
return absl::OkStatus();
}
}
TEST(Journal, RoundTripMultiple) {
std::string journal_dir;
EXPECT_TRUE(NewJournalDir(journal_dir));
std::vector<Update> updates = {MakeCreateIterationUpdate(),
MakeRegisterDatasetUpdate(),
MakeFinishTaskUpdate()};
FileJournalWriter writer(Env::Default(), journal_dir);
for (const auto& update : updates) {
TF_EXPECT_OK(writer.Write(update));
}
TF_EXPECT_OK(CheckJournalContent(journal_dir, updates));
}
TEST(Journal, AppendExistingJournal) {
std::string journal_dir;
EXPECT_TRUE(NewJournalDir(journal_dir));
std::vector<Update> updates = {MakeCreateIterationUpdate(),
MakeRegisterDatasetUpdate(),
MakeFinishTaskUpdate()};
for (const auto& update : updates) {
FileJournalWriter writer(Env::Default(), journal_dir);
TF_EXPECT_OK(writer.Write(update));
}
TF_EXPECT_OK(CheckJournalContent(journal_dir, updates));
}
TEST(Journal, MissingFile) {
std::string journal_dir;
EXPECT_TRUE(NewJournalDir(journal_dir));
FileJournalReader reader(Env::Default(), journal_dir);
Update result;
bool end_of_journal = true;
Status s = reader.Read(result, end_of_journal);
EXPECT_TRUE(absl::IsNotFound(s));
}
TEST(Journal, NonRecordData) {
std::string journal_dir;
EXPECT_TRUE(NewJournalDir(journal_dir));
TF_ASSERT_OK(Env::Default()->RecursivelyCreateDir(journal_dir));
{
std::unique_ptr<WritableFile> file;
TF_ASSERT_OK(Env::Default()->NewAppendableFile(
DataServiceJournalFile(journal_dir, 0), &file));
TF_ASSERT_OK(file->Append("not record data"));
}
FileJournalReader reader(Env::Default(), journal_dir);
Update result;
bool end_of_journal = true;
Status s = reader.Read(result, end_of_journal);
EXPECT_THAT(s.message(), HasSubstr("corrupted record"));
EXPECT_EQ(s.code(), error::DATA_LOSS);
}
TEST(Journal, InvalidRecordData) {
std::string journal_dir;
EXPECT_TRUE(NewJournalDir(journal_dir));
TF_ASSERT_OK(Env::Default()->RecursivelyCreateDir(journal_dir));
{
std::unique_ptr<WritableFile> file;
TF_ASSERT_OK(Env::Default()->NewAppendableFile(
DataServiceJournalFile(journal_dir, 0), &file));
auto writer = std::make_unique<io::RecordWriter>(file.get());
TF_ASSERT_OK(writer->WriteRecord("not serialized proto"));
}
FileJournalReader reader(Env::Default(), journal_dir);
Update result;
bool end_of_journal = true;
Status s = reader.Read(result, end_of_journal);
EXPECT_THAT(s.message(), HasSubstr("Failed to parse journal record"));
EXPECT_EQ(s.code(), error::DATA_LOSS);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/journal.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/journal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
637f3e50-ff60-4f5e-af87-9df00924ac52 | cpp | tensorflow/tensorflow | snapshot_split_provider | tensorflow/core/data/service/snapshot/snapshot_split_provider.cc | tensorflow/core/data/service/snapshot/snapshot_split_provider_test.cc | #include "tensorflow/core/data/service/snapshot/snapshot_split_provider.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/btree_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/dispatcher_client.h"
#include "tensorflow/core/data/service/grpc_util.h"
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/path.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNextSplitIndex[] = "next_split_index";
constexpr char kRepetitionIndex[] = "repetition_index";
absl::StatusOr<int64_t> GetRepetitionIndex(const std::string& split_file) {
absl::string_view repetition_dir_path = tsl::io::Dirname(split_file);
absl::string_view repetition_dir_name =
tsl::io::Basename(repetition_dir_path);
return ParseRepetitionDirectoryName(repetition_dir_name);
}
}
SnapshotSplitProvider::SnapshotSplitProvider(
const std::string& worker_address, const SnapshotTaskDef& snapshot_task,
int64_t source_index, absl::Duration timeout,
std::unique_ptr<DataServiceDispatcherClient> dispatcher, Env* env)
: worker_address_(worker_address),
snapshot_task_(snapshot_task),
source_index_(source_index),
timeout_(timeout),
env_(env) {
mutex_lock l(mu_);
dispatcher_ = std::move(dispatcher);
}
absl::Status SnapshotSplitProvider::GetNext(Tensor* split, bool* end_of_splits)
TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(GetAndValidateSplit(split, end_of_splits));
if (!*end_of_splits) {
++next_split_index_;
}
return absl::OkStatus();
}
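// Returns the split at `next_split_index_`, preferring a locally written
// split file. If the dispatcher reports a different split index than
// expected, the on-disk split files are re-listed and validated so the
// expected split can be served from the file written for that index.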
absl::Status SnapshotSplitProvider::GetAndValidateSplit(Tensor* split,
bool* end_of_splits)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (split_to_file_map_.contains(next_split_index_)) {
return GetSplitFromFile(split_to_file_map_[next_split_index_], split,
end_of_splits);
}
TF_ASSIGN_OR_RETURN(int64_t dispatcher_split_index,
GetSplitFromDispatcher(split, end_of_splits));
if (dispatcher_split_index == next_split_index_) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(split_to_file_map_, GetSplitsFiles(next_split_index_));
TF_RETURN_IF_ERROR(ValidateSplitFiles(split_to_file_map_, next_split_index_,
dispatcher_split_index,
*end_of_splits));
return GetSplitFromFile(split_to_file_map_[next_split_index_], split,
end_of_splits);
}
absl::Status SnapshotSplitProvider::GetSplitFromFile(
const std::string& split_file, Tensor* split, bool* end_of_splits)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
VLOG(3) << "Getting the next split from file: " << split_file;
TF_ASSIGN_OR_RETURN(int64_t repetition_index, GetRepetitionIndex(split_file));
if (repetition_index_ < repetition_index) {
*end_of_splits = true;
return absl::OkStatus();
}
snapshot_util::TFRecordReaderImpl reader(split_file,
tsl::io::compression::kNone);
TF_RETURN_IF_ERROR(reader.Initialize(env_));
TF_ASSIGN_OR_RETURN(std::vector<Tensor> tensors, reader.GetTensors());
if (tensors.size() != 1) {
return absl::InternalError(absl::StrCat(
"A snapshot split file is expected to contain 1 tensor. Got ",
tensors.size(), " tensors from ", split_file, "."));
}
*split = std::move(tensors[0]);
*end_of_splits = false;
return absl::OkStatus();
}
absl::StatusOr<int64_t> SnapshotSplitProvider::GetSplitFromDispatcher(
Tensor* split, bool* end_of_splits) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
int64_t local_split_index = 0;
TF_RETURN_IF_ERROR(grpc_util::Retry(
[this, split, &local_split_index, end_of_splits]()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return dispatcher_->GetSnapshotSplit(
worker_address_, snapshot_task_.base_path(),
snapshot_task_.stream_index(), source_index_, repetition_index_,
*split, local_split_index, *end_of_splits);
},
"Get next split for snapshot",
      // grpc_util::Retry takes an absolute deadline in microseconds.
      env_->NowMicros() + absl::ToInt64Microseconds(timeout_)));
return local_split_index;
}
absl::StatusOr<absl::btree_map<int64_t, std::string>>
SnapshotSplitProvider::GetSplitsFiles(int64_t start_index) const
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
absl::btree_map<int64_t, std::string> split_to_file_map;
std::string splits_directory = SourceDirectory(
snapshot_task_.base_path(), snapshot_task_.stream_index(), source_index_);
TF_ASSIGN_OR_RETURN(std::vector<std::string> repetition_directories,
GetChildren(splits_directory, env_));
for (const std::string& repetition : repetition_directories) {
std::string repetition_dir = io::JoinPath(splits_directory, repetition);
TF_ASSIGN_OR_RETURN(std::vector<std::string> split_files,
GetChildren(repetition_dir, env_));
for (const std::string& split_file : split_files) {
TF_ASSIGN_OR_RETURN(auto split_index, ParseSplitFilename(split_file));
auto [local_split_index, global_split_index] = split_index;
if (local_split_index >= start_index) {
split_to_file_map[local_split_index] =
tsl::io::JoinPath(repetition_dir, split_file);
}
}
}
TF_RETURN_IF_ERROR(ValidateSplitFiles(split_to_file_map, start_index));
return split_to_file_map;
}
absl::Status SnapshotSplitProvider::ValidateSplitFiles(
const absl::btree_map<int64_t, std::string>& split_files,
int64_t start_index) const {
if (split_files.empty()) {
return absl::OkStatus();
}
if (split_files.cbegin()->first != start_index) {
return absl::InternalError(absl::StrCat("Failed to get split ", start_index,
" for snapshot ",
snapshot_task_.DebugString()));
}
int64_t end_index = split_files.rbegin()->first;
if (end_index - start_index + 1 != split_files.size()) {
return absl::InternalError(absl::StrCat(
"Failed to get split ", start_index, ". Some splits between [",
start_index, ", ", end_index, "] are missing for snapshot ",
snapshot_task_.DebugString()));
}
return absl::OkStatus();
}
absl::Status SnapshotSplitProvider::ValidateSplitFiles(
const absl::btree_map<int64_t, std::string>& split_files,
int64_t start_index, int64_t end_index, bool end_of_splits) const {
TF_RETURN_IF_ERROR(ValidateSplitFiles(split_files, start_index));
if (end_index < start_index) {
return absl::InternalError(absl::StrCat(
"The tf.data service worker is expected to read split ", start_index,
", but the dispatcher returns split ", end_index, " for snapshot ",
snapshot_task_.DebugString()));
}
if (end_of_splits) {
end_index = end_index - 1;
}
if (split_files.empty() || split_files.cbegin()->first != start_index ||
split_files.rbegin()->first < end_index) {
return absl::InternalError(absl::StrCat(
"The tf.data service dispatcher has written split ", end_index,
". However, not all splits between [", start_index, ", ", end_index,
"] are found for snapshot ", snapshot_task_.DebugString()));
}
return absl::OkStatus();
}
absl::Status SnapshotSplitProvider::Reset() {
mutex_lock l(mu_);
++repetition_index_;
LOG(INFO) << "Reset tf.data snapshot split provider for snapshot "
<< snapshot_task_.ShortDebugString() << ", repetition "
<< repetition_index_ << ".";
return absl::OkStatus();
}
absl::Status SnapshotSplitProvider::Save(
std::function<std::string(std::string)> full_name,
IteratorStateWriter* writer) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(
writer->WriteScalar(full_name(kNextSplitIndex), next_split_index_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(full_name(kRepetitionIndex), repetition_index_));
return absl::OkStatus();
}
absl::Status SnapshotSplitProvider::Restore(
std::function<std::string(std::string)> full_name,
IteratorStateReader* reader) TF_LOCKS_EXCLUDED(mu_) {
int64_t next_split_index = 0;
int64_t repetition_index = 0;
TF_RETURN_IF_ERROR(
reader->ReadScalar(full_name(kNextSplitIndex), &next_split_index));
TF_RETURN_IF_ERROR(
reader->ReadScalar(full_name(kRepetitionIndex), &repetition_index));
mutex_lock l(mu_);
next_split_index_ = next_split_index;
repetition_index_ = repetition_index;
TF_ASSIGN_OR_RETURN(split_to_file_map_, GetSplitsFiles(next_split_index_));
LOG(INFO) << "Restored snapshot split provider for snapshot "
<< snapshot_task_.ShortDebugString() << ", next split "
<< next_split_index_ << ", repetition " << repetition_index_ << ".";
return absl::OkStatus();
}
}
} | #include "tensorflow/core/data/service/snapshot/snapshot_split_provider.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/lib/io/compression.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/dispatcher_client.h"
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
#include "tensorflow/core/protobuf/snapshot.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::_;
using testing::CreateDummyDistributedSnapshotMetadata;
using ::testing::DoAll;
using ::testing::HasSubstr;
using testing::LocalTempFilename;
using ::testing::Return;
using ::testing::SetArgReferee;
using tsl::testing::StatusIs;
class MockDispatcherClient : public DataServiceDispatcherClient {
public:
explicit MockDispatcherClient()
: DataServiceDispatcherClient("localhost",
"grpc") {}
MOCK_METHOD(absl::Status, GetSnapshotSplit,
(const std::string& worker_address, const std::string& base_path,
int64_t stream_index, int64_t source_index,
int64_t repetition_index, Tensor& split,
int64_t& local_split_index, bool& end_of_splits),
(override));
};
SnapshotTaskDef TestSnapshotTask() {
SnapshotTaskDef snapshot_task;
snapshot_task.set_base_path(LocalTempFilename());
snapshot_task.set_stream_index(0);
snapshot_task.set_num_sources(1);
*snapshot_task.mutable_metadata() = CreateDummyDistributedSnapshotMetadata();
return snapshot_task;
}
absl::Status WriteSplits(const SnapshotTaskDef& snapshot_task,
int64_t num_splits) {
std::string source_dir =
      RepetitionDirectory(snapshot_task.base_path(),
                          snapshot_task.stream_index(), /*source_index=*/0,
                          /*repetition_index=*/0);
TF_RETURN_IF_ERROR(Env::Default()->RecursivelyCreateDir(source_dir));
for (int64_t i = 0; i < num_splits; ++i) {
std::string split_filename = absl::StrCat("split_", i, "_", i);
std::string split_path = tsl::io::JoinPath(source_dir, split_filename);
Tensor split(int64_t{i});
TF_RETURN_IF_ERROR(AtomicallyWriteTFRecords(
split_path, {split}, tsl::io::compression::kNone, Env::Default()));
}
return absl::OkStatus();
}
TEST(SnapshotSplitProviderTest, GetSplitFromDispatcher) {
const SnapshotTaskDef snapshot_task = TestSnapshotTask();
Tensor split(int64_t{0});
auto mock_dispatcher_ptr = std::make_unique<MockDispatcherClient>();
MockDispatcherClient* mock_dispatcher = mock_dispatcher_ptr.get();
EXPECT_CALL(*mock_dispatcher, GetSnapshotSplit(_, _, _, _, _, _, _, _))
      .WillOnce(DoAll(SetArgReferee<5>(split),  // split
                      SetArgReferee<6>(0),      // local_split_index
                      SetArgReferee<7>(false),  // end_of_splits
                      Return(absl::OkStatus())));
Tensor result;
bool end_of_splits = false;
SnapshotSplitProvider split_provider(
"worker_address", snapshot_task, 0,
absl::Seconds(10), std::move(mock_dispatcher_ptr),
Env::Default());
TF_EXPECT_OK(split_provider.GetNext(&result, &end_of_splits));
test::ExpectTensorEqual<int64_t>(result, split);
EXPECT_FALSE(end_of_splits);
}
TEST(SnapshotSplitProviderTest, GetSplitFromFile) {
const SnapshotTaskDef snapshot_task = TestSnapshotTask();
Tensor split(int64_t{9});
auto mock_dispatcher_ptr = std::make_unique<MockDispatcherClient>();
MockDispatcherClient* mock_dispatcher = mock_dispatcher_ptr.get();
EXPECT_CALL(*mock_dispatcher, GetSnapshotSplit(_, _, _, _, _, _, _, _))
      .WillOnce(DoAll(SetArgReferee<5>(split),  // split
                      SetArgReferee<6>(9),      // local_split_index
                      SetArgReferee<7>(false),  // end_of_splits
                      Return(absl::OkStatus())));
TF_ASSERT_OK(WriteSplits(snapshot_task, 10));
SnapshotSplitProvider split_provider(
"worker_address", snapshot_task, 0,
absl::Seconds(10), std::move(mock_dispatcher_ptr),
Env::Default());
for (int64_t i = 0; i < 10; ++i) {
Tensor result;
bool end_of_splits = false;
TF_EXPECT_OK(split_provider.GetNext(&result, &end_of_splits));
test::ExpectTensorEqual<int64_t>(result, Tensor(int64_t{i}));
EXPECT_FALSE(end_of_splits);
}
}
TEST(SnapshotSplitProviderTest, EndOfSplits) {
const SnapshotTaskDef snapshot_task = TestSnapshotTask();
auto mock_dispatcher_ptr = std::make_unique<MockDispatcherClient>();
MockDispatcherClient* mock_dispatcher = mock_dispatcher_ptr.get();
EXPECT_CALL(*mock_dispatcher, GetSnapshotSplit(_, _, _, _, _, _, _, _))
      .WillOnce(DoAll(SetArgReferee<6>(0),     // local_split_index
                      SetArgReferee<7>(true),  // end_of_splits
                      Return(absl::OkStatus())));
SnapshotSplitProvider split_provider(
"worker_address", snapshot_task, 0,
absl::Seconds(10), std::move(mock_dispatcher_ptr),
Env::Default());
Tensor result;
bool end_of_splits = false;
TF_EXPECT_OK(split_provider.GetNext(&result, &end_of_splits));
EXPECT_TRUE(end_of_splits);
}
TEST(SnapshotSplitProviderTest, SplitNotFound) {
const SnapshotTaskDef snapshot_task = TestSnapshotTask();
Tensor split(int64_t{10});
auto mock_dispatcher_ptr = std::make_unique<MockDispatcherClient>();
MockDispatcherClient* mock_dispatcher = mock_dispatcher_ptr.get();
EXPECT_CALL(*mock_dispatcher, GetSnapshotSplit(_, _, _, _, _, _, _, _))
      .WillOnce(DoAll(SetArgReferee<5>(split),  // split
                      SetArgReferee<6>(10),     // local_split_index
                      SetArgReferee<7>(false),  // end_of_splits
                      Return(absl::OkStatus())));
TF_ASSERT_OK(WriteSplits(snapshot_task, 0));
SnapshotSplitProvider split_provider(
"worker_address", snapshot_task, 0,
absl::Seconds(10), std::move(mock_dispatcher_ptr),
Env::Default());
Tensor result;
bool end_of_splits = false;
EXPECT_THAT(split_provider.GetNext(&result, &end_of_splits),
StatusIs(absl::StatusCode::kInternal,
HasSubstr("not all splits between [0, 10] are found")));
}
std::string full_name(const std::string& name) {
return FullName("test", name);
}
TEST(SnapshotSplitProviderTest, SaveRestore) {
const SnapshotTaskDef snapshot_task = TestSnapshotTask();
Tensor split(int64_t{9});
auto mock_dispatcher_ptr = std::make_unique<MockDispatcherClient>();
MockDispatcherClient* mock_dispatcher = mock_dispatcher_ptr.get();
EXPECT_CALL(*mock_dispatcher, GetSnapshotSplit(_, _, _, _, _, _, _, _))
      .WillOnce(DoAll(SetArgReferee<5>(split),  // split
                      SetArgReferee<6>(9),      // local_split_index
                      SetArgReferee<7>(false),  // end_of_splits
                      Return(absl::OkStatus())));
TF_ASSERT_OK(WriteSplits(snapshot_task, 10));
SnapshotSplitProvider split_provider(
"worker_address", snapshot_task, 0,
absl::Seconds(10), std::move(mock_dispatcher_ptr),
Env::Default());
for (int64_t i = 0; i < 5; ++i) {
Tensor result;
bool end_of_splits = false;
TF_EXPECT_OK(split_provider.GetNext(&result, &end_of_splits));
test::ExpectTensorEqual<int64_t>(result, Tensor(int64_t{i}));
EXPECT_FALSE(end_of_splits);
}
VariantTensorDataWriter writer;
TF_ASSERT_OK(split_provider.Save(full_name, &writer));
std::vector<const VariantTensorData*> variants;
writer.GetData(&variants);
VariantTensorDataReader reader(variants);
SnapshotSplitProvider restored_split_provider(
"worker_address", snapshot_task, 0,
absl::Seconds(10), std::make_unique<MockDispatcherClient>(),
Env::Default());
TF_ASSERT_OK(restored_split_provider.Restore(full_name, &reader));
for (int64_t i = 5; i <= 9; ++i) {
Tensor result;
bool end_of_splits = false;
TF_EXPECT_OK(split_provider.GetNext(&result, &end_of_splits));
test::ExpectTensorEqual<int64_t>(result, Tensor(int64_t{i}));
EXPECT_FALSE(end_of_splits);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/snapshot_split_provider.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/snapshot_split_provider_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ad69ddc5-0993-4550-bcd1-c25e14d6bb5e | cpp | tensorflow/tensorflow | path_utils | tensorflow/core/data/service/snapshot/path_utils.cc | tensorflow/core/data/service/snapshot/path_utils_test.cc | #include "tensorflow/core/data/service/snapshot/path_utils.h"
#include <cstdint>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "tsl/platform/path.h"
namespace tensorflow {
namespace data {
namespace {
constexpr const char kDoneFileName[] = "DONE";
constexpr const char kErrorFileName[] = "ERROR";
constexpr const char kWorkerFileName[] = "owner_worker";
constexpr const char kSnapshotMetadataFileName[] = "snapshot.metadata";
constexpr const char kDatasetDefFileName[] = "dataset_def.proto";
constexpr const char kDatasetSpecFileName[] = "dataset_spec.pb";
constexpr const char kStreamsDirectoryName[] = "streams";
constexpr const char kSplitsDirectoryName[] = "splits";
constexpr const char kCheckpointsDirectoryName[] = "checkpoints";
constexpr const char kCommittedChunksDirectoryName[] = "chunks";
constexpr const char kUncommittedChunksDirectoryName[] = "uncommitted_chunks";
constexpr int64_t kUnknownNumElements = -1;
}
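// On-disk layout produced by these helpers, for a snapshot rooted at <path>:
//
//   <path>/streams/stream_<i>/splits/source_<j>/repetition_<k>/split_<l>_<g>
//   <path>/streams/stream_<i>/checkpoints
//   <path>/streams/stream_<i>/uncommitted_chunks
//   <path>/chunks
//
// where <l> is the split index local to the source and <g> is the
// corresponding global split index.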
std::string StreamsDirectory(absl::string_view snapshot_path) {
return tsl::io::JoinPath(snapshot_path, kStreamsDirectoryName);
}
std::string StreamDirectory(absl::string_view snapshot_path,
int64_t stream_index) {
return tsl::io::JoinPath(StreamsDirectory(snapshot_path),
absl::StrCat("stream_", stream_index));
}
std::string SplitsDirectory(absl::string_view snapshot_path,
int64_t stream_index) {
return tsl::io::JoinPath(StreamDirectory(snapshot_path, stream_index),
kSplitsDirectoryName);
}
std::string SourceDirectory(absl::string_view snapshot_path,
int64_t stream_index, int64_t source_index) {
return tsl::io::JoinPath(SplitsDirectory(snapshot_path, stream_index),
absl::StrCat("source_", source_index));
}
std::string RepetitionDirectory(absl::string_view snapshot_path,
int64_t stream_index, int64_t source_index,
int64_t repetition_index) {
return tsl::io::JoinPath(
SourceDirectory(snapshot_path, stream_index, source_index),
absl::StrCat("repetition_", repetition_index));
}
std::string SplitPath(absl::string_view snapshot_path, int64_t stream_index,
int64_t source_index, int64_t repetition_index,
int64_t local_index, int64_t global_index) {
return tsl::io::JoinPath(
RepetitionDirectory(snapshot_path, stream_index, source_index,
repetition_index),
absl::StrCat("split_", local_index, "_", global_index));
}
absl::StatusOr<int64_t> ParseStreamDirectoryName(
absl::string_view stream_directory_name) {
std::vector<std::string> tokens = absl::StrSplit(stream_directory_name, '_');
int64_t stream_index = 0;
if (tokens.size() != 2 || tokens[0] != "stream" ||
!absl::SimpleAtoi(tokens[1], &stream_index) || stream_index < 0) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid stream directory name: ", stream_directory_name,
". Expected stream_<stream_index>."));
}
return stream_index;
}
absl::StatusOr<int64_t> ParseSourceDirectoryName(
absl::string_view source_directory_name) {
std::vector<std::string> tokens = absl::StrSplit(source_directory_name, '_');
int64_t source_index = 0;
if (tokens.size() != 2 || tokens[0] != "source" ||
!absl::SimpleAtoi(tokens[1], &source_index) || source_index < 0) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid source directory name: ", source_directory_name,
". Expected source_<source_index>."));
}
return source_index;
}
absl::StatusOr<int64_t> ParseRepetitionDirectoryName(
absl::string_view repetition_directory_name) {
std::vector<std::string> tokens =
absl::StrSplit(repetition_directory_name, '_');
int64_t repetition_index = 0;
if (tokens.size() != 2 || tokens[0] != "repetition" ||
!absl::SimpleAtoi(tokens[1], &repetition_index) || repetition_index < 0) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid repetition directory name: ", repetition_directory_name,
". Expected repetition_<repetition_index>."));
}
return repetition_index;
}
absl::StatusOr<std::pair<int64_t, int64_t>> ParseSplitFilename(
absl::string_view split_filename) {
std::vector<std::string> tokens =
absl::StrSplit(tsl::io::Basename(split_filename), '_');
int64_t local_split_index = 0, global_split_index = 0;
if (tokens.size() != 3 || tokens[0] != "split" ||
!absl::SimpleAtoi(tokens[1], &local_split_index) ||
local_split_index < 0 ||
!absl::SimpleAtoi(tokens[2], &global_split_index) ||
global_split_index < 0) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid split file name: ", split_filename,
". Expected split_<local_split_index>_<global_split_index>."));
}
if (local_split_index > global_split_index) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid split file name: ", split_filename, ". The local split index ",
local_split_index, " exceeds the global split index ",
global_split_index, "."));
}
return std::make_pair(local_split_index, global_split_index);
}
absl::StatusOr<std::pair<int64_t, int64_t>> ParseCheckpointFilename(
absl::string_view checkpoint_filename) {
std::vector<std::string> tokens = absl::StrSplit(checkpoint_filename, '_');
int64_t checkpoint_index = 0, checkpoint_num_elements = 0;
if (tokens.size() != 3 || tokens[0] != "checkpoint" ||
!absl::SimpleAtoi(tokens[1], &checkpoint_index) || checkpoint_index < 0 ||
!absl::SimpleAtoi(tokens[2], &checkpoint_num_elements) ||
(checkpoint_num_elements < 0 &&
checkpoint_num_elements != kUnknownNumElements)) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid checkpoint file name: ", checkpoint_filename,
". Expected checkpoint_<checkpoint_index>_<checkpoint_num_elements>."));
}
return std::make_pair(checkpoint_index, checkpoint_num_elements);
}
absl::StatusOr<std::tuple<int64_t, int64_t, int64_t>> ParseChunkFilename(
absl::string_view chunk_filename) {
std::vector<std::string> tokens = absl::StrSplit(chunk_filename, '_');
int64_t stream_index = 0, stream_chunk_index = 0, chunk_num_elements = 0;
if (tokens.size() != 4 || tokens[0] != "chunk" ||
!absl::SimpleAtoi(tokens[1], &stream_index) || stream_index < 0 ||
!absl::SimpleAtoi(tokens[2], &stream_chunk_index) ||
stream_chunk_index < 0 ||
!absl::SimpleAtoi(tokens[3], &chunk_num_elements) ||
(chunk_num_elements < 0 && chunk_num_elements != kUnknownNumElements)) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid chunk file name: ", chunk_filename,
". Expected "
"chunk_<stream_index>_<stream_chunk_index>_<chunk_num_elements>."));
}
return std::make_tuple(stream_index, stream_chunk_index, chunk_num_elements);
}
std::string SnapshotMetadataFilePath(absl::string_view snapshot_path_) {
return tsl::io::JoinPath(snapshot_path_, kSnapshotMetadataFileName);
}
std::string DatasetDefFilePath(absl::string_view snapshot_path_) {
return tsl::io::JoinPath(snapshot_path_, kDatasetDefFileName);
}
std::string DatasetSpecFilePath(absl::string_view snapshot_path_) {
return tsl::io::JoinPath(snapshot_path_, kDatasetSpecFileName);
}
std::string StreamDoneFilePath(absl::string_view snapshot_path,
int64_t stream_index) {
return tsl::io::JoinPath(StreamDirectory(snapshot_path, stream_index),
kDoneFileName);
}
std::string StreamWorkerFilePath(absl::string_view snapshot_path,
int64_t stream_index) {
return StreamWorkerFilePath(StreamDirectory(snapshot_path, stream_index));
}
std::string StreamWorkerFilePath(absl::string_view stream_path) {
return tsl::io::JoinPath(stream_path, kWorkerFileName);
}
std::string SnapshotDoneFilePath(absl::string_view snapshot_path) {
return tsl::io::JoinPath(snapshot_path, kDoneFileName);
}
std::string SnapshotErrorFilePath(absl::string_view snapshot_path) {
return tsl::io::JoinPath(snapshot_path, kErrorFileName);
}
std::string CheckpointsDirectory(absl::string_view snapshot_path,
int64_t stream_index) {
return tsl::io::JoinPath(StreamDirectory(snapshot_path, stream_index),
kCheckpointsDirectoryName);
}
std::string CommittedChunksDirectory(absl::string_view snapshot_path) {
return tsl::io::JoinPath(snapshot_path, kCommittedChunksDirectoryName);
}
std::string UncommittedChunksDirectory(absl::string_view snapshot_path,
int64_t stream_index) {
return tsl::io::JoinPath(StreamDirectory(snapshot_path, stream_index),
kUncommittedChunksDirectoryName);
}
}
} | #include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::FieldsAre;
using ::testing::HasSubstr;
using ::testing::MatchesRegex;
using ::testing::Pair;
using tsl::testing::IsOkAndHolds;
using tsl::testing::StatusIs;
TEST(PathUtilsTest, StreamsDirectory) {
EXPECT_THAT(StreamsDirectory("/path/to/snapshot"),
MatchesRegex("/path/to/snapshot.streams"));
}
TEST(PathUtilsTest, StreamDirectory) {
EXPECT_THAT(StreamDirectory("/path/to/snapshot", 0),
MatchesRegex("/path/to/snapshot.streams.stream_0"));
}
TEST(PathUtilsTest, SplitsDirectory) {
EXPECT_THAT(SplitsDirectory("/path/to/snapshot", 0),
MatchesRegex("/path/to/snapshot.streams.stream_0.splits"));
}
TEST(PathUtilsTest, SourceDirectory) {
EXPECT_THAT(
SourceDirectory("/path/to/snapshot", 0,
1),
MatchesRegex("/path/to/snapshot.streams.stream_0.splits.source_1"));
}
TEST(PathUtilsTest, RepetitionDirectory) {
EXPECT_THAT(
RepetitionDirectory("/path/to/snapshot", 0,
1, 2),
MatchesRegex(
"/path/to/snapshot.streams.stream_0.splits.source_1.repetition_2"));
}
TEST(PathUtilsTest, SplitPath) {
EXPECT_THAT(
SplitPath("/path/to/snapshot", 0, 1,
2, 3, 4),
MatchesRegex(
"/path/to/"
"snapshot.streams.stream_0.splits.source_1.repetition_2.split_3_4"));
}
TEST(PathUtilsTest, ParseStreamDirectoryName) {
EXPECT_THAT(ParseStreamDirectoryName("stream_1"), IsOkAndHolds(1));
}
TEST(PathUtilsTest, ParseSourceDirectoryName) {
EXPECT_THAT(ParseSourceDirectoryName("source_1"), IsOkAndHolds(1));
EXPECT_THAT(ParseSourceDirectoryName(""),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected source_<source_index>")));
EXPECT_THAT(ParseSourceDirectoryName("source_-1"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected source_<source_index>")));
EXPECT_THAT(ParseSourceDirectoryName("chunk_1"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected source_<source_index>")));
}
TEST(PathUtilsTest, ParseRepetitionDirectoryName) {
EXPECT_THAT(ParseRepetitionDirectoryName("repetition_1"), IsOkAndHolds(1));
EXPECT_THAT(ParseRepetitionDirectoryName(""),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected repetition_<repetition_index>")));
EXPECT_THAT(ParseRepetitionDirectoryName("repetition_-1"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected repetition_<repetition_index>")));
EXPECT_THAT(ParseRepetitionDirectoryName("chunk_1"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected repetition_<repetition_index>")));
}
TEST(PathUtilsTest, InvalidStreamDirectoryName) {
EXPECT_THAT(ParseStreamDirectoryName(""),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected stream_<stream_index>")));
EXPECT_THAT(ParseStreamDirectoryName("stream_-1"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected stream_<stream_index>")));
EXPECT_THAT(ParseStreamDirectoryName("chunk_1"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected stream_<stream_index>")));
}
TEST(PathUtilsTest, ParseSplitFilename) {
EXPECT_THAT(ParseSplitFilename("split_0_1"), IsOkAndHolds(Pair(0, 1)));
}
TEST(PathUtilsTest, InvalidSplitFilename) {
EXPECT_THAT(
ParseSplitFilename(""),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected split_<local_split_index>_<global_split_index>")));
EXPECT_THAT(
ParseSplitFilename("split_123"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected split_<local_split_index>_<global_split_index>")));
EXPECT_THAT(
ParseSplitFilename("split_-1_(-1)"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected split_<local_split_index>_<global_split_index>")));
EXPECT_THAT(
ParseSplitFilename("chunk_1_2"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected split_<local_split_index>_<global_split_index>")));
EXPECT_THAT(
ParseSplitFilename("split_5_0"),
StatusIs(
error::INVALID_ARGUMENT,
HasSubstr(
"The local split index 5 exceeds the global split index 0")));
}
TEST(PathUtilsTest, ParseCheckpointFilename) {
EXPECT_THAT(ParseCheckpointFilename("checkpoint_0_1"),
IsOkAndHolds(Pair(0, 1)));
EXPECT_THAT(ParseCheckpointFilename("checkpoint_0_-1"),
IsOkAndHolds(Pair(0, -1)));
}
TEST(PathUtilsTest, InvalidCheckpointFilename) {
EXPECT_THAT(
ParseCheckpointFilename(""),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected "
"checkpoint_<checkpoint_index>_<checkpoint_num_elements>")));
EXPECT_THAT(
ParseCheckpointFilename("checkpoint_123"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected "
"checkpoint_<checkpoint_index>_<checkpoint_num_elements>")));
EXPECT_THAT(
ParseCheckpointFilename("checkpoint_-1_(-1)"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected "
"checkpoint_<checkpoint_index>_<checkpoint_num_elements>")));
EXPECT_THAT(
ParseCheckpointFilename("chunk_1_2"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected "
"checkpoint_<checkpoint_index>_<checkpoint_num_elements>")));
}
TEST(PathUtilsTest, ParseChunkFilename) {
EXPECT_THAT(ParseChunkFilename("chunk_0_1_2"),
IsOkAndHolds(FieldsAre(0, 1, 2)));
EXPECT_THAT(ParseChunkFilename("chunk_0_1_-1"),
IsOkAndHolds(FieldsAre(0, 1, -1)));
}
TEST(PathUtilsTest, InvalidChunkFilename) {
EXPECT_THAT(ParseChunkFilename(""),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected "
"chunk_<stream_index>_<stream_chunk_index>_<"
"chunk_num_elements>")));
EXPECT_THAT(ParseChunkFilename("chunk_123_0"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected "
"chunk_<stream_index>_<stream_chunk_index>_<"
"chunk_num_elements>")));
EXPECT_THAT(ParseChunkFilename("chunk_-1_(-1)_0"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected "
"chunk_<stream_index>_<stream_chunk_index>_<"
"chunk_num_elements>")));
EXPECT_THAT(ParseChunkFilename("split_1_2_3"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected "
"chunk_<stream_index>_<stream_chunk_index>_<"
"chunk_num_elements>")));
}
TEST(PathUtilsTest, StreamDoneFilePath) {
EXPECT_THAT(StreamDoneFilePath("/path/to/snapshot", 0),
MatchesRegex("/path/to/snapshot.streams.stream_0.DONE"));
}
TEST(PathUtilsTest, StreamWorkerFilePath) {
EXPECT_THAT(StreamWorkerFilePath("/path/to/snapshot", 0),
MatchesRegex("/path/to/snapshot.streams.stream_0.owner_worker"));
EXPECT_THAT(StreamWorkerFilePath("/path/to/snapshot/streams/stream_0"),
MatchesRegex("/path/to/snapshot.streams.stream_0.owner_worker"));
}
TEST(PathUtilsTest, SnapshotDoneFilePath) {
EXPECT_THAT(SnapshotDoneFilePath("/path/to/snapshot"),
MatchesRegex("/path/to/snapshot.DONE"));
}
TEST(PathUtilsTest, SnapshotErrorFilePath) {
EXPECT_THAT(SnapshotErrorFilePath("/path/to/snapshot"),
MatchesRegex("/path/to/snapshot.ERROR"));
}
TEST(PathUtilsTest, SnapshotMetadataFilePath) {
EXPECT_THAT(SnapshotMetadataFilePath("/path/to/snapshot"),
MatchesRegex("/path/to/snapshot.snapshot.metadata"));
}
TEST(PathUtilsTest, DatasetDefFilePath) {
EXPECT_THAT(DatasetDefFilePath("/path/to/snapshot"),
MatchesRegex("/path/to/snapshot.dataset_def.proto"));
}
TEST(PathUtilsTest, DatasetSpecFilePath) {
EXPECT_THAT(DatasetSpecFilePath("/path/to/snapshot"),
MatchesRegex("/path/to/snapshot.dataset_spec.pb"));
}
TEST(PathUtilsTest, CheckpointsDirectory) {
EXPECT_THAT(CheckpointsDirectory("/path/to/snapshot", 0),
MatchesRegex("/path/to/snapshot.streams.stream_0.checkpoints"));
}
TEST(PathUtilsTest, CommittedChunksDirectory) {
EXPECT_THAT(CommittedChunksDirectory("/path/to/snapshot"),
MatchesRegex("/path/to/snapshot.chunks"));
}
TEST(PathUtilsTest, UncommittedChunksDirectory) {
EXPECT_THAT(
UncommittedChunksDirectory("/path/to/snapshot", 0),
MatchesRegex("/path/to/snapshot.streams.stream_0.uncommitted_chunks"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/path_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/path_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f87db812-983d-4651-9b5e-a77f29660ffd | cpp | tensorflow/tensorflow | prefetched_split_provider | tensorflow/core/data/service/snapshot/prefetched_split_provider.cc | tensorflow/core/data/service/snapshot/prefetched_split_provider_test.cc | #include "tensorflow/core/data/service/snapshot/prefetched_split_provider.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "xla/tsl/lib/io/compression.h"
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
namespace tensorflow {
namespace data {
PrefetchedSplitProvider::PrefetchedSplitProvider(
std::unique_ptr<SplitProvider> split_provider, const std::string& directory,
tsl::Env* env, size_t num_write_threads, size_t buffer_size_per_thread)
: env_(env),
directory_(directory),
num_write_threads_(num_write_threads),
buffer_size_(num_write_threads_ * buffer_size_per_thread),
split_provider_(std::move(split_provider)) {
absl::Status status = InitDirs();
if (!status.ok()) {
UpdateStatus(std::move(status));
return;
}
absl::MutexLock l(&mu_);
thread_pool_ = RunPrefetchThreads();
}
PrefetchedSplitProvider::~PrefetchedSplitProvider() { Cancel(); }
absl::StatusOr<std::optional<Tensor>> PrefetchedSplitProvider::GetNext(
const std::string& split_path) ABSL_LOCKS_EXCLUDED(mu_) {
absl::MutexLock l(&mu_);
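  // Block until the split with the next expected index is at the front of
  // the buffer, an error has been recorded, or every prefetch thread has
  // finished while no reset is in progress.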
while (status_.ok() &&
(buffer_.empty() || buffer_.begin()->index != split_index_to_read_) &&
(finished_threads_ < num_write_threads_ || reset_)) {
ready_to_pop_.Wait(&mu_);
}
TF_RETURN_IF_ERROR(status_);
if (buffer_.empty()) {
return std::nullopt;
}
if (buffer_.begin()->index != split_index_to_read_) {
return absl::InternalError(absl::StrCat(
"Failed to get tf.data snapshot split. Expected split ",
split_index_to_read_, ", got split ", buffer_.begin()->index,
". This is likely a tf.data bug."));
}
auto it = buffer_.begin();
SplitAndIndex split = std::move(*it);
buffer_.erase(it);
TF_RETURN_IF_ERROR(env_->RenameFile(split.SplitPath(directory_), split_path));
++split_index_to_read_;
ready_to_push_.Signal();
return std::move(split.split);
}
std::unique_ptr<tsl::thread::ThreadPool>
PrefetchedSplitProvider::RunPrefetchThreads() {
auto thread_pool = std::make_unique<tsl::thread::ThreadPool>(
env_, tsl::ThreadOptions{}, "tf_data_prefetch_splits_thread",
num_write_threads_);
for (size_t i = 0; i < num_write_threads_; ++i) {
thread_pool->Schedule([this]() { PrefetchLoop(); });
}
return thread_pool;
}
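// Body of each prefetch thread: repeatedly pulls a split from the underlying
// provider, writes it to a file under the prefetch directory, and buffers it
// for `GetNext`. The last thread to finish wakes any blocked readers.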
void PrefetchedSplitProvider::PrefetchLoop() ABSL_LOCKS_EXCLUDED(mu_) {
while (ShouldPrefetchSplit()) {
absl::StatusOr<bool> has_next = PrefetchSplit();
if (!has_next.status().ok()) {
UpdateStatus(has_next.status());
break;
}
if (!*has_next) {
break;
}
}
absl::MutexLock l(&mu_);
if (++finished_threads_ >= num_write_threads_) {
ready_to_pop_.SignalAll();
}
}
bool PrefetchedSplitProvider::ShouldPrefetchSplit() const
ABSL_LOCKS_EXCLUDED(mu_) {
absl::MutexLock l(&mu_);
return status_.ok() && !reset_;
}
absl::StatusOr<bool> PrefetchedSplitProvider::PrefetchSplit()
ABSL_LOCKS_EXCLUDED(mu_) {
TF_ASSIGN_OR_RETURN(std::optional<SplitAndIndex> split,
GetSplitFromProvider());
if (!split.has_value()) {
return false;
}
TF_RETURN_IF_ERROR(
AtomicallyWriteTFRecords(split->SplitPath(directory_), {split->split},
tsl::io::compression::kNone, env_));
absl::MutexLock l(&mu_);
buffer_.insert(std::move(*split));
ready_to_pop_.Signal();
return true;
}
absl::StatusOr<std::optional<PrefetchedSplitProvider::SplitAndIndex>>
PrefetchedSplitProvider::GetSplitFromProvider() ABSL_LOCKS_EXCLUDED(mu_) {
absl::MutexLock l(&mu_);
while (status_.ok() && buffer_.size() >= buffer_size_ && !reset_) {
ready_to_push_.Wait(&mu_);
}
TF_RETURN_IF_ERROR(status_);
if (reset_) {
return std::nullopt;
}
Tensor split;
bool end_of_splits = false;
TF_RETURN_IF_ERROR(split_provider_->GetNext(&split, &end_of_splits));
if (end_of_splits) {
return std::nullopt;
}
return SplitAndIndex{split, split_index_to_write_++};
}
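// Stops the prefetch threads, resets the underlying split provider, clears
// all buffered splits and indices, and then restarts prefetching from the
// beginning.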
absl::Status PrefetchedSplitProvider::Reset() ABSL_LOCKS_EXCLUDED(mu_) {
std::unique_ptr<tsl::thread::ThreadPool> thread_pool;
{
absl::MutexLock l(&mu_);
reset_ = true;
ready_to_push_.SignalAll();
ready_to_pop_.SignalAll();
thread_pool = std::move(thread_pool_);
}
thread_pool.reset();
TF_RETURN_IF_ERROR(split_provider_->Reset());
absl::MutexLock l(&mu_);
TF_RETURN_IF_ERROR(status_);
reset_ = false;
split_index_to_read_ = 0;
split_index_to_write_ = 0;
finished_threads_ = 0;
buffer_.clear();
TF_RETURN_IF_ERROR(InitDirs());
thread_pool_ = RunPrefetchThreads();
return absl::OkStatus();
}
void PrefetchedSplitProvider::Cancel() {
UpdateStatus(
absl::CancelledError("tf.data prefetched split provider is shut down."));
std::unique_ptr<tsl::thread::ThreadPool> thread_pool;
{
absl::MutexLock l(&mu_);
thread_pool = std::move(thread_pool_);
}
}
absl::Status PrefetchedSplitProvider::InitDirs() {
if (env_->FileExists(directory_).ok()) {
int64_t undeleted_files, undeleted_dirs;
TF_RETURN_IF_ERROR(
env_->DeleteRecursively(directory_, &undeleted_files, &undeleted_dirs));
}
return env_->RecursivelyCreateDir(directory_);
}
void PrefetchedSplitProvider::UpdateStatus(absl::Status status)
ABSL_LOCKS_EXCLUDED(mu_) {
if (status.ok()) {
return;
}
absl::MutexLock l(&mu_);
status_.Update(std::move(status));
ready_to_push_.SignalAll();
ready_to_pop_.SignalAll();
}
}
} | #include "tensorflow/core/data/service/snapshot/prefetched_split_provider.h"
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/lib/io/compression.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/split_provider.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::ElementsAreArray;
using ::testing::IsSupersetOf;
using ::testing::UnorderedElementsAreArray;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
absl::StatusOr<std::vector<std::string>> TestDirs(size_t num_dirs) {
std::vector<std::string> test_dirs;
std::string base_dir;
if (!tsl::Env::Default()->LocalTempFilename(&base_dir)) {
return absl::FailedPreconditionError("Failed to create local temp file.");
}
TF_RETURN_IF_ERROR(tsl::Env::Default()->RecursivelyCreateDir(base_dir));
for (size_t i = 0; i < num_dirs; ++i) {
std::string test_dir =
tsl::io::JoinPath(base_dir, absl::StrCat("test_dir_", i));
TF_RETURN_IF_ERROR(tsl::Env::Default()->RecursivelyCreateDir(test_dir));
test_dirs.push_back(std::move(test_dir));
}
return test_dirs;
}
absl::StatusOr<std::unique_ptr<SplitProvider>> RangeSplitProvider(
int64_t range) {
DatasetDef range_dataset = testing::RangeDataset(range);
std::vector<std::unique_ptr<SplitProvider>> split_providers;
TF_RETURN_IF_ERROR(CreateSplitProviders(range_dataset, split_providers));
if (split_providers.size() != 1) {
return absl::InternalError(
absl::StrCat("Range dataset should have one split provider, got ",
split_providers.size(), "."));
}
return std::move(split_providers[0]);
}
template <class T>
T GetValue(const Tensor& tensor) {
return tensor.unaligned_flat<T>().data()[0];
}
template <class T>
absl::StatusOr<T> GetValueFromFile(const std::string& filename) {
snapshot_util::TFRecordReaderImpl reader(filename,
tsl::io::compression::kNone);
TF_RETURN_IF_ERROR(reader.Initialize(tsl::Env::Default()));
TF_ASSIGN_OR_RETURN(std::vector<Tensor> tensors, reader.GetTensors());
if (tensors.size() != 1) {
return absl::InternalError(absl::StrCat(
"A snapshot split file is expected to contain 1 tensor. Got ",
tensors.size(), " tensors from ", filename, "."));
}
return GetValue<T>(tensors[0]);
}
template <class T>
absl::StatusOr<std::vector<T>> GetSplits(
PrefetchedSplitProvider& prefetched_split_provider,
const std::string& test_dir) {
std::vector<T> splits;
for (size_t i = 0;; ++i) {
std::string target_split_path =
tsl::io::JoinPath(test_dir, absl::StrCat("split_", i));
TF_ASSIGN_OR_RETURN(std::optional<Tensor> split,
prefetched_split_provider.GetNext(target_split_path));
if (!split.has_value()) {
return splits;
}
T split_value = GetValue<T>(*split);
TF_ASSIGN_OR_RETURN(T split_from_file,
GetValueFromFile<T>(target_split_path));
if (split_value != split_from_file) {
return absl::InternalError(
absl::StrCat("Inconsistent splits. From buffer: ", split_value,
", from file: ", split_from_file, "."));
}
splits.push_back(split_value);
}
return splits;
}
std::vector<int64_t> Range(int64_t range) {
std::vector<int64_t> result(range);
std::iota(result.begin(), result.end(), 0);
return result;
}
class PrefetchedSplitProviderParamTest
: public ::testing::TestWithParam<
std::tuple<int64_t, size_t, size_t, size_t>> {
protected:
int64_t NumElements() const { return std::get<0>(GetParam()); }
size_t NumClients() const { return std::get<1>(GetParam()); }
size_t NumWriteThreads() const { return std::get<2>(GetParam()); }
size_t BufferSizePerThread() const { return std::get<3>(GetParam()); }
};
TEST_P(PrefetchedSplitProviderParamTest, GetSplits) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<SplitProvider> split_provider,
RangeSplitProvider(NumElements()));
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::string> test_dirs,
TestDirs(2));
PrefetchedSplitProvider prefetched_split_provider(
std::move(split_provider), test_dirs[0], tsl::Env::Default(),
NumWriteThreads(), BufferSizePerThread());
EXPECT_THAT(GetSplits<int64_t>(prefetched_split_provider, test_dirs[1]),
IsOkAndHolds(ElementsAreArray(Range(NumElements()))));
}
TEST_P(PrefetchedSplitProviderParamTest, ConcurrentGetSplits) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<SplitProvider> split_provider,
RangeSplitProvider(NumElements()));
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::string> test_dirs,
TestDirs(1 + NumClients()));
PrefetchedSplitProvider prefetched_split_provider(
std::move(split_provider), test_dirs[0], tsl::Env::Default(),
NumWriteThreads(), BufferSizePerThread());
absl::Mutex mu;
std::vector<int64_t> splits;
std::vector<std::unique_ptr<tsl::Thread>> client_threads;
for (int i = 0; i < NumClients(); ++i) {
client_threads.push_back(absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, absl::StrCat("Client_", i),
[i, &prefetched_split_provider, &splits, &test_dirs, &mu]() {
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> splits_per_thread,
GetSplits<int64_t>(prefetched_split_provider, test_dirs[1 + i]));
EXPECT_TRUE(absl::c_is_sorted(splits_per_thread));
absl::MutexLock l(&mu);
absl::c_move(splits_per_thread, std::back_inserter(splits));
})));
}
client_threads.clear();
EXPECT_THAT(splits, UnorderedElementsAreArray(Range(NumElements())));
}
TEST_P(PrefetchedSplitProviderParamTest, Reset) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<SplitProvider> split_provider,
RangeSplitProvider(NumElements()));
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::string> test_dirs,
TestDirs(2));
PrefetchedSplitProvider prefetched_split_provider(
std::move(split_provider), test_dirs[0], tsl::Env::Default(),
NumWriteThreads(), BufferSizePerThread());
for (int i = 0; i < 3; ++i) {
EXPECT_THAT(GetSplits<int64_t>(prefetched_split_provider, test_dirs[1]),
IsOkAndHolds(ElementsAreArray(Range(NumElements()))));
TF_EXPECT_OK(prefetched_split_provider.Reset());
}
}
TEST_P(PrefetchedSplitProviderParamTest, ConcurrentGetSplitsAndReset) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<SplitProvider> split_provider,
RangeSplitProvider(NumElements()));
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::string> test_dirs,
TestDirs(1 + NumClients()));
PrefetchedSplitProvider prefetched_split_provider(
std::move(split_provider), test_dirs[0], tsl::Env::Default(),
NumWriteThreads(), BufferSizePerThread());
absl::Mutex mu;
std::vector<int64_t> splits;
std::vector<std::unique_ptr<tsl::Thread>> client_threads;
for (int i = 0; i < NumClients(); ++i) {
client_threads.push_back(absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, absl::StrCat("Client_", i),
[i, &prefetched_split_provider, &splits, &test_dirs, &mu]() {
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> splits_per_thread,
GetSplits<int64_t>(prefetched_split_provider, test_dirs[1 + i]));
absl::MutexLock l(&mu);
absl::c_move(splits_per_thread, std::back_inserter(splits));
})));
}
TF_EXPECT_OK(prefetched_split_provider.Reset());
client_threads.clear();
EXPECT_THAT(splits, IsSupersetOf(Range(NumElements())));
}
INSTANTIATE_TEST_SUITE_P(
PrefetchedSplitProviderParams, PrefetchedSplitProviderParamTest,
::testing::Combine(
        /*NumElements*/ ::testing::Values(0, 10, 1000),
        /*NumClients*/ ::testing::Values(1, 5),
        /*NumWriteThreads*/ ::testing::Values(1, 10),
        /*BufferSizePerThread*/ ::testing::Values(1, 10000)));
TEST(PrefetchedSplitProviderTest, Cancellation) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<SplitProvider> split_provider,
RangeSplitProvider(999999));
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::string> test_dirs,
TestDirs(2));
PrefetchedSplitProvider prefetched_split_provider(
std::move(split_provider), test_dirs[0], tsl::Env::Default(),
      /*num_write_threads=*/2, /*buffer_size_per_thread=*/1);
std::unique_ptr<tsl::Thread> client_thread =
absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, "client_thread",
[&prefetched_split_provider, &test_dirs]() {
EXPECT_THAT(
GetSplits<int64_t>(prefetched_split_provider, test_dirs[1]),
StatusIs(absl::StatusCode::kCancelled));
}));
prefetched_split_provider.Cancel();
client_thread.reset();
}
TEST(PrefetchedSplitProviderTest, ShutdownWithUnreadSplits) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<SplitProvider> split_provider,
RangeSplitProvider(100));
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::string> test_dirs,
TestDirs(2));
PrefetchedSplitProvider prefetched_split_provider(
std::move(split_provider), test_dirs[0], tsl::Env::Default());
TF_EXPECT_OK(prefetched_split_provider.Reset());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/prefetched_split_provider.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/prefetched_split_provider_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
17e0abbc-0854-4ec0-a080-901466566b64 | cpp | tensorflow/tensorflow | snapshot_manager | tensorflow/core/data/service/snapshot/snapshot_manager.cc | tensorflow/core/data/service/snapshot/snapshot_manager_test.cc | #include "tensorflow/core/data/service/snapshot/snapshot_manager.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/btree_map.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "xla/tsl/lib/io/compression.h"
#include "xla/tsl/protobuf/status.pb.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/service/snapshot/prefetched_split_provider.h"
#include "tensorflow/core/data/service/split_provider.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/thread_annotations.h"
#include "tsl/platform/threadpool.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
const absl::Duration kProgressLoggingInterval = absl::Minutes(1);
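// Returns the split provider's cardinality if it is known up front;
// otherwise counts the splits by exhausting the provider, then resets it so
// it can be used again.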
absl::StatusOr<int64_t> CountSplits(SplitProvider& split_provider) {
if (split_provider.Cardinality() != kUnknownCardinality) {
return split_provider.Cardinality();
}
int64_t num_splits = 0;
Tensor tensor;
for (bool end_of_splits = false; !end_of_splits; ++num_splits) {
TF_RETURN_IF_ERROR(split_provider.GetNext(&tensor, &end_of_splits));
}
--num_splits;
TF_RETURN_IF_ERROR(split_provider.Reset());
return num_splits;
}
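// Discards the next split from `split_provider`, resetting the provider and
// bumping `repetition_index` whenever a repetition ends. Used during
// recovery to fast-forward past splits that were already assigned.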
absl::Status SkipSplit(SplitProvider& split_provider,
int64_t& repetition_index) {
Tensor tensor;
bool end_of_splits = false;
TF_RETURN_IF_ERROR(split_provider.GetNext(&tensor, &end_of_splits));
while (end_of_splits) {
++repetition_index;
TF_RETURN_IF_ERROR(split_provider.Reset());
TF_RETURN_IF_ERROR(split_provider.GetNext(&tensor, &end_of_splits));
}
return absl::OkStatus();
}
std::string PrefetchedSplitDir(const std::string& snapshot_path,
int64_t source_index) {
return tsl::io::JoinPath(snapshot_path, "prefetched_splits",
absl::StrCat("source_", source_index));
}
}
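// Assigns `stream_index` of the snapshot at `snapshot_path` to
// `worker_address`, unless the worker is already writing its maximum number
// of concurrent snapshot streams. Returns whether the assignment was added.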
absl::StatusOr<bool> SnapshotAssignmentManager::TryAddAssignment(
absl::string_view snapshot_path, absl::string_view worker_address,
int64_t stream_index) TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
if (assignments_[worker_address].size() >=
worker_max_concurrent_snapshots()) {
return false;
}
Assignment assignment{std::string(snapshot_path), stream_index};
auto [unused, success] = assignments_[worker_address].insert(assignment);
if (!success) {
return absl::InternalError(absl::StrCat("Worker ", worker_address,
" already had an assignment for ",
assignment.DebugString()));
}
++snapshot_assignment_counts_[snapshot_path];
return true;
}
void SnapshotAssignmentManager::RemoveAssignment(
absl::string_view snapshot_path, absl::string_view worker_address,
int64_t stream_index) TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
auto num_erased = assignments_[worker_address].erase(
{std::string(snapshot_path), stream_index});
if ((snapshot_assignment_counts_[snapshot_path] -= num_erased) <= 0) {
snapshot_assignment_counts_.erase(snapshot_path);
}
}
void SnapshotAssignmentManager::AddSnapshot(absl::string_view snapshot_path)
TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
if (!snapshot_assignment_counts_.contains(snapshot_path)) {
snapshot_assignment_counts_[snapshot_path] = 0;
}
}
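// Returns the snapshots this worker should write: its current assignments
// first, then, if it has spare capacity, the snapshot with the fewest
// assignments across all workers that it is not already writing.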
std::vector<std::string> SnapshotAssignmentManager::LoadBalanceSnapshots(
absl::string_view worker_address) TF_LOCKS_EXCLUDED(mu_) {
std::vector<std::string> result;
tsl::mutex_lock l(mu_);
result.reserve(snapshot_assignment_counts_.size());
const auto it = assignments_.find(worker_address);
if (it != assignments_.end()) {
for (const Assignment& assignment : it->second) {
result.push_back(assignment.snapshot_path);
}
}
if (result.size() >= worker_max_concurrent_snapshots()) {
return result;
}
absl::btree_multimap<size_t, std::string> snapshots_by_count;
for (const auto& [snapshot, count] : snapshot_assignment_counts_) {
snapshots_by_count.emplace(count, snapshot);
}
for (const auto& [_, snapshot] : snapshots_by_count) {
if (absl::c_find(result, snapshot) == result.end()) {
result.push_back(snapshot);
return result;
}
}
return result;
}
absl::StatusOr<std::unique_ptr<SnapshotManager>> SnapshotManager::Start(
const SnapshotRequest& request,
SnapshotAssignmentManager& assignment_manager, Env* env) {
std::unique_ptr<SnapshotManager> snapshot_manager{
new SnapshotManager{request.path(), assignment_manager, env}};
TF_RETURN_IF_ERROR(snapshot_manager->Start(request));
return snapshot_manager;
}
absl::Status SnapshotManager::Start(const SnapshotRequest& request)
TF_LOCKS_EXCLUDED(mu_) {
LOG(INFO) << "Starting to write tf.data snapshot at " << request.path();
if (env_->FileExists(request.path()).ok()) {
return errors::AlreadyExists("tf.data snapshot at ", request.path(),
" already exists.");
}
tsl::mutex_lock l(mu_);
TF_RETURN_IF_ERROR(WriteOnDiskSkeleton());
TF_RETURN_IF_ERROR(WriteOnDiskMetadata(request));
TF_ASSIGN_OR_RETURN(sources_, CreateSources(request.dataset()));
TF_ASSIGN_OR_RETURN(num_total_splits_, GetSplitsCardinality());
metadata_ = request.metadata();
LOG(INFO) << "Started writing tf.data distributed snapshot at " << path_;
return absl::OkStatus();
}
absl::StatusOr<std::vector<SnapshotManager::Source>>
SnapshotManager::CreateSources(const DatasetDef& dataset_def) const
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
std::vector<std::unique_ptr<SplitProvider>> split_providers;
TF_RETURN_IF_ERROR(CreateSplitProviders(dataset_def, split_providers));
std::vector<SnapshotManager::Source> sources;
sources.reserve(split_providers.size());
for (size_t i = 0; i < split_providers.size(); ++i) {
TF_ASSIGN_OR_RETURN(size_t cardinality, CountSplits(*split_providers[i]));
sources.emplace_back(
std::make_unique<PrefetchedSplitProvider>(
std::move(split_providers[i]), PrefetchedSplitDir(path_, i), env_),
0, cardinality);
}
return sources;
}
absl::StatusOr<int64_t> SnapshotManager::GetSplitsCardinality()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return absl::c_accumulate(sources_, 0,
[](size_t cardinality, const Source& source) {
return cardinality + source.cardinality;
});
}
absl::Status SnapshotManager::WriteOnDiskSkeleton()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
TF_RETURN_IF_ERROR(
env_->RecursivelyCreateDir(CommittedChunksDirectory(path_)));
TF_RETURN_IF_ERROR(env_->RecursivelyCreateDir(StreamsDirectory(path_)));
return absl::OkStatus();
}
absl::Status SnapshotManager::WriteOnDiskMetadata(
const SnapshotRequest& request) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
TF_RETURN_IF_ERROR(AtomicallyWriteTextProto(SnapshotMetadataFilePath(path_),
request.metadata(), env_));
TF_RETURN_IF_ERROR(AtomicallyWriteStringToFile(
DatasetSpecFilePath(path_), request.metadata().element_spec(), env_));
TF_RETURN_IF_ERROR(AtomicallyWriteBinaryProto(DatasetDefFilePath(path_),
request.dataset(), env_));
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<SnapshotManager>> SnapshotManager::Resume(
absl::string_view path, SnapshotAssignmentManager& assignment_manager,
Env* env) {
SnapshotManager* snapshot_manager =
new SnapshotManager(path, assignment_manager, env);
TF_RETURN_IF_ERROR(snapshot_manager->Resume());
return absl::WrapUnique(snapshot_manager);
}
absl::Status SnapshotManager::Resume() TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
if (!env_->FileExists(path_).ok()) {
return absl::InternalError(
absl::StrCat("Failed to recover tf.data snapshot at ", path_,
": the snapshot path doesn't exist."));
}
if (env_->FileExists(SnapshotDoneFilePath(path_)).ok()) {
mode_ = Mode::kDone;
LOG(INFO) << "Recovered finished tf.data snapshot at " << path_;
return absl::OkStatus();
}
if (env_->FileExists(SnapshotErrorFilePath(path_)).ok()) {
mode_ = Mode::kError;
StatusProto status_proto;
TF_RETURN_IF_ERROR(
ReadTextProto(env_, SnapshotErrorFilePath(path_), &status_proto));
status_ = tsl::StatusFromProto(status_proto);
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(ReadOnDiskMetadata());
TF_RETURN_IF_ERROR(ReadOnDiskStreams());
LOG(INFO) << "Resumed writing tf.data distributed snapshot at " << path_;
return absl::OkStatus();
}
absl::Status SnapshotManager::ReadOnDiskMetadata()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!env_->FileExists(SnapshotMetadataFilePath(path_)).ok()) {
return absl::InternalError(
absl::StrCat("Failed to recover snapshot at ", path_,
": snapshot has no snapshot.metadata"));
}
TF_RETURN_IF_ERROR(
ReadTextProto(env_, SnapshotMetadataFilePath(path_), &metadata_));
if (!env_->FileExists(DatasetDefFilePath(path_)).ok()) {
return absl::InternalError(
absl::StrCat("Failed to recovery snapshot at ", path_,
": snapshot has no dataset_def.proto"));
}
return absl::OkStatus();
}
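// Recovers the stream state from disk: each stream_<index> directory is
// restored on a thread-pool worker, the split providers are fast-forwarded
// past splits that were already assigned, and the recovered global split
// indices are checked to be contiguous.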
absl::Status SnapshotManager::ReadOnDiskStreams()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
std::string streams_path = StreamsDirectory(path_);
TF_ASSIGN_OR_RETURN(const std::vector<std::string> stream_directories,
GetChildren(streams_path, env_));
DatasetDef dataset_def;
TF_RETURN_IF_ERROR(
tsl::ReadBinaryProto(env_, DatasetDefFilePath(path_), &dataset_def));
std::vector<std::unique_ptr<SplitProvider>> split_providers;
TF_RETURN_IF_ERROR(CreateSplitProviders(dataset_def, split_providers));
std::vector<int64_t> repetition_indices(split_providers.size(), 0);
std::vector<int64_t> cardinalities;
for (size_t i = 0; i < split_providers.size(); ++i) {
TF_ASSIGN_OR_RETURN(int64_t cardinality, CountSplits(*split_providers[i]));
cardinalities.push_back(cardinality);
}
tsl::mutex mu;
absl::Status resume_status;
absl::flat_hash_set<int64_t> global_split_indices;
auto thread_pool = std::make_unique<tsl::thread::ThreadPool>(
env_, tsl::ThreadOptions{}, "restore_snapshot_stream_thread",
std::max(size_t{1}, stream_directories.size()));
for (const auto& stream_directory : stream_directories) {
std::string stream_path = tsl::io::JoinPath(streams_path, stream_directory);
std::vector<std::string> tokens = absl::StrSplit(stream_directory, '_');
int64_t stream_index;
if (tokens.size() != 2 || !absl::SimpleAtoi(tokens[1], &stream_index) ||
stream_index < 0) {
return absl::InternalError(absl::StrCat(
"Can't parse tf.data snapshot stream directory ", stream_path,
": filename must have the format stream_<stream_index>."));
}
thread_pool->Schedule([this, &stream_directories, stream_index,
&split_providers, &repetition_indices,
&global_split_indices, &resume_status,
&mu]() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
StreamRestorer stream_restorer(env_, path_, stream_index,
split_providers.size(),
assignment_manager_);
absl::Status s = stream_restorer.ReadOnDiskStream();
tsl::mutex_lock l(mu);
resume_status.Update(s);
resume_status.Update(RestoreFrom(stream_restorer, stream_directories,
split_providers, repetition_indices,
global_split_indices));
});
}
thread_pool.reset();
TF_RETURN_IF_ERROR(resume_status);
for (int64_t i = 0; i < split_providers.size(); ++i) {
sources_.emplace_back(
std::make_unique<PrefetchedSplitProvider>(
std::move(split_providers[i]), PrefetchedSplitDir(path_, i), env_),
repetition_indices[i], cardinalities[i]);
}
TF_ASSIGN_OR_RETURN(num_total_splits_, GetSplitsCardinality());
for (int64_t i = 0; i < global_split_indices.size(); ++i) {
if (!global_split_indices.contains(i)) {
return absl::InternalError(
absl::StrCat("Failed to restore tf.data snapshot at ", path_,
": Found missing global split index ", i, "."));
}
}
num_assigned_splits_ = global_split_indices.size();
if (!streams_.empty() && absl::c_all_of(streams_, [](const auto& stream) {
return stream.second.state == Stream::State::kDone;
})) {
mode_ = Mode::kDone;
TF_RETURN_IF_ERROR(AtomicallyWriteStringToFile(SnapshotDoneFilePath(path_),
std::string(), env_));
LOG(INFO) << "Finished writing tf.data distributed snapshot at " << path_;
}
return absl::OkStatus();
}
absl::StatusOr<std::string>
SnapshotManager::StreamRestorer::OwnerWorkerAddress() const {
std::string worker_address;
TF_RETURN_IF_ERROR(
env_->FileExists(StreamWorkerFilePath(path_, stream_index_)));
TF_RETURN_IF_ERROR(tsl::ReadFileToString(
env_, StreamWorkerFilePath(path_, stream_index_), &worker_address));
return worker_address;
}
absl::Status SnapshotManager::StreamRestorer::ReadOnDiskStream() {
absl::StatusOr<std::string> worker_address = OwnerWorkerAddress();
if (!worker_address.ok()) {
return absl::OkStatus();
}
worker_address_ = *worker_address;
restored_stream_.emplace(num_sources_);
std::string splits_path = SplitsDirectory(path_, stream_index_);
TF_ASSIGN_OR_RETURN(std::vector<std::string> source_directories,
GetChildren(splits_path, env_));
for (const auto& source_directory : source_directories) {
std::string source_path = tsl::io::JoinPath(splits_path, source_directory);
std::vector<std::string> tokens = absl::StrSplit(source_directory, '_');
int64_t source_index = 0;
if (tokens.size() != 2 || !absl::SimpleAtoi(tokens[1], &source_index) ||
source_index < 0) {
return absl::InternalError(absl::StrCat(
"Can't parse tf.data snapshot source directory ", source_path,
": filename must have the format source_<source_index>."));
}
if (source_index >= num_sources_) {
return absl::InternalError(
absl::StrCat("Found conflict between the number of sources, ",
num_sources_, ", and the filename of ", source_path));
}
TF_RETURN_IF_ERROR(ReadOnDiskSource(source_index));
}
if (env_->FileExists(StreamDoneFilePath(path_, stream_index_)).ok()) {
restored_stream_->state = Stream::State::kDone;
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(bool assignment_added,
assignment_manager_.TryAddAssignment(
path_, *worker_address, stream_index_));
if (!assignment_added) {
return absl::InternalError(absl::StrCat(
"Failed to recover tf.data snapshot dispatcher: Worker ",
*worker_address, " was assigned too many streams. At most ",
assignment_manager_.worker_max_concurrent_snapshots(),
" streams are allowed."));
}
return absl::OkStatus();
}
absl::Status SnapshotManager::StreamRestorer::ReadOnDiskSource(
int64_t source_index) {
std::string source_directory =
SourceDirectory(path_, stream_index_, source_index);
TF_ASSIGN_OR_RETURN(std::vector<std::string> repetition_directories,
GetChildren(source_directory, env_));
for (const std::string& repetition : repetition_directories) {
std::string repetition_dir =
tsl::io::JoinPath(source_directory, repetition);
TF_ASSIGN_OR_RETURN(std::vector<std::string> split_files,
GetChildren(repetition_dir, env_));
for (const std::string& split_file : split_files) {
std::string split_path = tsl::io::JoinPath(repetition_dir, split_file);
TF_RETURN_IF_ERROR(
ReadOnDiskSplit(source_index, split_files, split_path));
}
restored_stream_->num_assigned_splits_per_source[source_index] +=
split_files.size();
}
return absl::OkStatus();
}
absl::Status SnapshotManager::StreamRestorer::ReadOnDiskSplit(
int64_t source_index, const std::vector<std::string>& split_files,
const std::string& split_file) {
TF_ASSIGN_OR_RETURN(auto split_indices, ParseSplitFilename(split_file));
auto [local_split_index, global_split_index] = split_indices;
if (global_split_indices_.contains(global_split_index)) {
return absl::InternalError(absl::StrCat(
"Failed to restore tf.data snapshot at ", path_,
": Found duplicate global split index in split ", split_file, "."));
}
global_split_indices_.insert(global_split_index);
return absl::OkStatus();
}
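// Merges one restored stream into this manager: records its worker
// assignment, skips the splits that stream already consumed from each split
// provider, and collects its global split indices for validation.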
absl::Status SnapshotManager::RestoreFrom(
const StreamRestorer& stream_restorer,
const std::vector<std::string>& stream_directories,
std::vector<std::unique_ptr<SplitProvider>>& split_providers,
std::vector<std::int64_t>& repetition_indices,
absl::flat_hash_set<int64_t>& global_split_indices)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!stream_restorer.GetStream().has_value()) {
return absl::OkStatus();
}
streams_.insert(
{stream_restorer.StreamIndex(), *stream_restorer.GetStream()});
auto [it, success] = assignments_.insert(
{stream_restorer.WorkerAddress(), stream_restorer.StreamIndex()});
if (!success) {
return absl::InternalError(absl::StrCat(
"tf.data dispatcher failed to assign stream ",
stream_restorer.StreamIndex(), " to snapshot worker ",
stream_restorer.WorkerAddress(),
": The worker is already assigned stream ", it->second, "."));
}
for (int64_t source_index = 0; source_index < repetition_indices.size();
++source_index) {
int64_t skip_splits = GetStream(stream_restorer.StreamIndex())
.num_assigned_splits_per_source[source_index];
for (int64_t i = 0; i < skip_splits; ++i) {
TF_RETURN_IF_ERROR(SkipSplit(*split_providers[source_index],
repetition_indices[source_index]));
}
}
for (int64_t global_split_index : stream_restorer.GlobalSplitIndices()) {
if (global_split_indices.contains(global_split_index)) {
return absl::InternalError(
absl::StrCat("Failed to restore tf.data snapshot at ", path_,
": Found ", "duplicate global split index in stream ",
stream_restorer.StreamIndex(), "."));
}
global_split_indices.insert(global_split_index);
}
return absl::OkStatus();
}
SnapshotManager::Stream& SnapshotManager::GetStream(int64_t stream_index)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
auto [it, _] = streams_.try_emplace(stream_index, num_sources());
return it->second;
}
absl::Status SnapshotManager::HandleStreamCompletion(
int64_t stream_index, absl::string_view worker_address)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
GetStream(stream_index).state = Stream::State::kDone;
assignment_manager_.RemoveAssignment(path_, worker_address, stream_index);
++num_completed_streams_;
if (absl::c_all_of(streams_, [](const auto& stream) {
return stream.second.state == Stream::State::kDone;
})) {
mode_ = Mode::kDone;
TF_RETURN_IF_ERROR(AtomicallyWriteStringToFile(SnapshotDoneFilePath(path_),
std::string(), env_));
LOG(INFO) << "Finished writing tf.data distributed snapshot at " << path_;
}
return absl::OkStatus();
}
absl::Status SnapshotManager::HandleStreamError(
absl::string_view worker_address, const StatusProto& status_proto)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!status_.ok()) {
return absl::OkStatus();
}
mode_ = Mode::kError;
status_ = tsl::StatusFromProto(status_proto);
TF_RETURN_IF_ERROR(AtomicallyWriteTextProto(SnapshotErrorFilePath(path_),
status_proto, env_));
LOG(ERROR) << "Failed to write tf.data distributed snapshot at " << path_
<< ". Worker " << worker_address << " reported error: " << status_;
return absl::OkStatus();
}
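// Creates a stream with the next available index and assigns it to
// `worker_address`, provided the assignment manager allows the worker
// another concurrent stream; returns std::nullopt otherwise.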
absl::StatusOr<std::optional<int64_t>>
SnapshotManager::MaybeCreateAndAssignNewStream(absl::string_view worker_address)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
int64_t new_stream_index =
streams_.empty() ? 0 : streams_.rbegin()->first + 1;
TF_ASSIGN_OR_RETURN(bool assignment_added,
assignment_manager_.TryAddAssignment(
path_, worker_address, new_stream_index));
if (!assignment_added) {
return std::optional<int64_t>();
}
streams_.insert({new_stream_index, Stream(num_sources())});
assignments_[worker_address] = new_stream_index;
return new_stream_index;
}
absl::StatusOr<std::optional<std::pair<int64_t, bool>>>
SnapshotManager::MaybeGetOrCreateStreamAssignment(
absl::string_view worker_address,
const SnapshotTaskProgress* snapshot_progress)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
std::optional<int64_t> assigned_stream_index;
if (auto it = assignments_.find(worker_address); it != assignments_.end()) {
assigned_stream_index = it->second;
}
if (snapshot_progress) {
if (assigned_stream_index.has_value() &&
*assigned_stream_index !=
snapshot_progress->snapshot_task().stream_index()) {
return absl::InternalError(absl::StrCat(
"tf.data snapshot worker ", worker_address, " was assigned stream ",
snapshot_progress->snapshot_task().stream_index(),
", but is now assigned a different stream ", *assigned_stream_index));
}
if (assigned_stream_index.has_value() && snapshot_progress->completed()) {
TF_RETURN_IF_ERROR(HandleStreamCompletion(
snapshot_progress->snapshot_task().stream_index(), worker_address));
return std::nullopt;
}
if (snapshot_progress->status().code() != error::OK) {
TF_RETURN_IF_ERROR(
HandleStreamError(worker_address, snapshot_progress->status()));
return std::nullopt;
}
}
if (!assigned_stream_index) {
if (mode_ != Mode::kActive) {
return std::nullopt;
}
TF_ASSIGN_OR_RETURN(assigned_stream_index,
MaybeCreateAndAssignNewStream(worker_address));
if (!assigned_stream_index.has_value()) {
return std::nullopt;
}
return std::make_pair(*assigned_stream_index, true);
}
if (!assigned_stream_index.has_value() ||
GetStream(*assigned_stream_index).state == Stream::State::kDone) {
return std::nullopt;
}
return std::make_pair(*assigned_stream_index, false);
}
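// Handles a worker heartbeat: periodically logs progress, processes any
// reported completion or error for this snapshot, and returns the worker's
// stream assignment, creating the stream directory for a new assignment.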
absl::Status SnapshotManager::WorkerHeartbeat(
const WorkerHeartbeatRequest& request, WorkerHeartbeatResponse& response)
TF_LOCKS_EXCLUDED(mu_) {
std::optional<std::pair<int64_t, bool>> assigned_stream_index;
std::vector<int64_t> repetitions_per_source;
{
tsl::mutex_lock l(mu_);
dead_workers_.erase(request.worker_address());
if (mode_ == Mode::kDone || mode_ == Mode::kError) {
return absl::OkStatus();
}
if (absl::Time now = absl::FromUnixMicros(env_->NowMicros());
now - last_progress_log_time_ > kProgressLoggingInterval) {
LOG(INFO) << "tf.data snapshot progress [" << path_
<< "]: " << num_completed_streams_ << "/" << streams_.size()
<< " streams completed; " << num_assigned_splits_ << "/"
<< num_total_splits_ << " splits assigned or completed.";
last_progress_log_time_ = now;
}
const SnapshotTaskProgress* snapshot_progress = nullptr;
if (auto it = request.snapshot_task_progress().find(path_);
it != request.snapshot_task_progress().end()) {
snapshot_progress = &it->second;
}
if (snapshot_progress && snapshot_progress->completed() &&
mode_ == Mode::kActive) {
mode_ = Mode::kWindingDown;
}
TF_ASSIGN_OR_RETURN(assigned_stream_index,
MaybeGetOrCreateStreamAssignment(
request.worker_address(), snapshot_progress));
if (!assigned_stream_index.has_value()) {
return absl::OkStatus();
}
SnapshotTaskDef* snapshot_task = response.add_snapshot_tasks();
snapshot_task->set_base_path(path_);
snapshot_task->set_num_sources(num_sources());
*snapshot_task->mutable_metadata() = metadata_;
snapshot_task->set_stream_index(assigned_stream_index->first);
for (int64_t source_index = 0; source_index < num_sources();
++source_index) {
repetitions_per_source.push_back(sources_[source_index].repetition_index);
}
}
const auto [stream_index, is_new_stream] = *assigned_stream_index;
if (is_new_stream) {
TF_RETURN_IF_ERROR(InitStreamDirectory(
stream_index, request.worker_address(), repetitions_per_source));
LOG(INFO) << "For snapshot at " << path_ << ", created stream_"
<< stream_index << " and assigned to "
<< request.worker_address();
}
return absl::OkStatus();
}
absl::Status SnapshotManager::InitStreamDirectory(
int64_t stream_index, const std::string& worker_address,
const std::vector<int64_t>& repetitions_per_source) {
for (int64_t source_index = 0; source_index < repetitions_per_source.size();
++source_index) {
for (int64_t repetition_index = 0;
repetition_index <= repetitions_per_source[source_index];
++repetition_index) {
TF_RETURN_IF_ERROR(env_->RecursivelyCreateDir(RepetitionDirectory(
path_, stream_index, source_index, repetition_index)));
}
}
return AtomicallyWriteStringToFile(StreamWorkerFilePath(path_, stream_index),
worker_address, env_);
}
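// Validates the worker's stream assignment and hands out the next
// prefetched split. `get_split_mu_` serializes split handout so that global
// split indices are assigned in order.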
absl::Status SnapshotManager::GetSnapshotSplit(
const GetSnapshotSplitRequest& request, GetSnapshotSplitResponse& response)
TF_LOCKS_EXCLUDED(get_split_mu_, mu_) {
int64_t local_split_index = 0;
int64_t global_split_index = 0;
PrefetchedSplitProvider* split_provider = nullptr;
tsl::mutex_lock get_split_lock(get_split_mu_);
{
tsl::mutex_lock l(mu_);
if (auto it = assignments_.find(request.worker_address());
it == assignments_.end()) {
return absl::InternalError(
absl::StrCat("tf.data snapshot worker ", request.worker_address(),
" was assigned stream ", request.stream_index(),
", but the assignment is no longer available."));
} else if (it->second != request.stream_index()) {
return absl::InternalError(
absl::StrCat("tf.data snapshot worker ", request.worker_address(),
" was assigned stream ", request.stream_index(),
" but is now assigned a different stream ", it->second));
}
Stream& stream = GetStream(request.stream_index());
local_split_index =
stream.num_assigned_splits_per_source[request.source_index()];
global_split_index = num_assigned_splits_;
response.set_local_split_index(local_split_index);
Source& source = sources_[request.source_index()];
if (request.repetition_index() < source.repetition_index) {
response.set_end_of_splits(true);
return absl::OkStatus();
}
while (request.repetition_index() > source.repetition_index) {
TF_RETURN_IF_ERROR(ResetSource(source, request.source_index()));
}
split_provider = source.split_provider.get();
}
std::string split_path = SplitPath(
path_, request.stream_index(), request.source_index(),
request.repetition_index(), local_split_index, global_split_index);
TF_ASSIGN_OR_RETURN(std::optional<Tensor> split,
split_provider->GetNext(split_path));
if (!split.has_value()) {
response.set_end_of_splits(true);
return absl::OkStatus();
}
split->AsProtoTensorContent(response.mutable_split());
tsl::mutex_lock l(mu_);
++GetStream(request.stream_index())
.num_assigned_splits_per_source[request.source_index()];
++num_assigned_splits_;
return absl::OkStatus();
}
absl::Status SnapshotManager::ResetSource(Source& source, int64_t source_index)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
TF_RETURN_IF_ERROR(source.split_provider->Reset());
++source.repetition_index;
LOG(INFO) << "Starting repetition_" << source.repetition_index << " "
<< "for snapshot " << path_ << ", source " << source_index;
for (const auto& [stream_index, _] : streams_) {
TF_RETURN_IF_ERROR(env_->RecursivelyCreateDir(RepetitionDirectory(
path_, stream_index, source_index, source.repetition_index)));
}
return absl::OkStatus();
}
absl::Status SnapshotManager::GetSnapshotStreams(
GetSnapshotStreamsResponse& response) TF_LOCKS_EXCLUDED(mu_) {
tsl::tf_shared_lock l(mu_);
for (const auto& [stream_index, stream] : streams_) {
SnapshotStreamInfo* stream_info = response.add_streams();
stream_info->set_index(stream_index);
stream_info->set_state(stream.state == Stream::State::kDone
? SnapshotStreamInfo::DONE
: SnapshotStreamInfo::ASSIGNED);
}
return absl::OkStatus();
}
void SnapshotManager::Cancel() {
std::vector<PrefetchedSplitProvider*> split_providers_to_cancel;
{
tsl::mutex_lock l(mu_);
for (Source& source : sources_) {
split_providers_to_cancel.push_back(source.split_provider.get());
}
}
for (PrefetchedSplitProvider* split_provider : split_providers_to_cancel) {
split_provider->Cancel();
}
}
}
} | #include "tensorflow/core/data/service/snapshot/snapshot_manager.h"
#include <memory>
#include <string>
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/protobuf/status.pb.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::_;
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using ::testing::Not;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
template <class T>
T GetValue(const Tensor& tensor) {
return tensor.unaligned_flat<T>().data()[0];
}
TEST(SnapshotManagerTest, CreateStreamAssignment) {
std::string snapshot_path = testing::LocalTempFilename();
SnapshotRequest request;
*request.mutable_dataset() = testing::RangeDataset(10);
request.set_path(snapshot_path);
*request.mutable_metadata() =
testing::CreateDummyDistributedSnapshotMetadata();
SnapshotAssignmentManager snapshot_assignment_manager(
      /*worker_max_concurrent_snapshots=*/2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> snapshot_manager,
SnapshotManager::Start(request, snapshot_assignment_manager,
Env::Default()));
WorkerHeartbeatRequest heartbeat_request;
WorkerHeartbeatResponse heartbeat_response;
heartbeat_request.set_worker_address("localhost");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
ASSERT_EQ(heartbeat_response.snapshot_tasks().size(), 1);
EXPECT_EQ(heartbeat_response.snapshot_tasks(0).base_path(), snapshot_path);
EXPECT_EQ(heartbeat_response.snapshot_tasks(0).stream_index(), 0);
EXPECT_EQ(heartbeat_response.snapshot_tasks(0).num_sources(), 1);
}
TEST(SnapshotManagerTest, GetSnapshotSplit) {
std::string snapshot_path = testing::LocalTempFilename();
SnapshotRequest request;
*request.mutable_dataset() = testing::RangeDataset(10);
request.set_path(snapshot_path);
*request.mutable_metadata() =
testing::CreateDummyDistributedSnapshotMetadata();
SnapshotAssignmentManager snapshot_assignment_manager(
      /*worker_max_concurrent_snapshots=*/2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> snapshot_manager,
SnapshotManager::Start(request, snapshot_assignment_manager,
Env::Default()));
WorkerHeartbeatRequest heartbeat_request;
WorkerHeartbeatResponse heartbeat_response;
heartbeat_request.set_worker_address("localhost");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
const SnapshotTaskDef& task = heartbeat_response.snapshot_tasks(0);
GetSnapshotSplitRequest get_split_request;
GetSnapshotSplitResponse get_split_response;
get_split_request.set_worker_address("localhost");
get_split_request.set_base_path(task.base_path());
get_split_request.set_stream_index(task.stream_index());
get_split_request.set_source_index(0);
for (int64_t i = 0; i < 10; ++i) {
TF_ASSERT_OK(snapshot_manager->GetSnapshotSplit(get_split_request,
get_split_response));
Tensor tensor;
ASSERT_TRUE(tensor.FromProto(get_split_response.split()));
EXPECT_EQ(GetValue<int64_t>(tensor), i);
}
}
TEST(SnapshotManagerTest, HandleStreamCompletion) {
std::string snapshot_path = testing::LocalTempFilename();
SnapshotRequest request;
*request.mutable_dataset() = testing::RangeDataset(10);
request.set_path(snapshot_path);
*request.mutable_metadata() =
testing::CreateDummyDistributedSnapshotMetadata();
SnapshotAssignmentManager snapshot_assignment_manager(
      /*worker_max_concurrent_snapshots=*/2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> snapshot_manager,
SnapshotManager::Start(request, snapshot_assignment_manager,
Env::Default()));
WorkerHeartbeatRequest heartbeat_request;
WorkerHeartbeatResponse heartbeat_response;
heartbeat_request.set_worker_address("localhost:1");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
heartbeat_request.Clear();
heartbeat_response.Clear();
heartbeat_request.set_worker_address("localhost:2");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
ASSERT_EQ(heartbeat_response.snapshot_tasks().size(), 1);
const SnapshotTaskDef& snapshot_task = heartbeat_response.snapshot_tasks(0);
EXPECT_EQ(snapshot_task.base_path(), snapshot_path);
EXPECT_EQ(snapshot_task.stream_index(), 1);
EXPECT_EQ(snapshot_task.num_sources(), 1);
heartbeat_request.Clear();
heartbeat_response.Clear();
heartbeat_request.set_worker_address("localhost:1");
SnapshotTaskProgress progress;
*progress.mutable_snapshot_task() = snapshot_task;
progress.set_completed(true);
(*heartbeat_request.mutable_snapshot_task_progress())[snapshot_path] =
progress;
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
EXPECT_TRUE(heartbeat_response.snapshot_tasks().empty());
heartbeat_request.Clear();
heartbeat_response.Clear();
heartbeat_request.set_worker_address("localhost:1");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
EXPECT_TRUE(heartbeat_response.snapshot_tasks().empty());
}
TEST(SnapshotManagerTest, Resume) {
std::string snapshot_path = testing::LocalTempFilename();
SnapshotRequest request;
*request.mutable_dataset() = testing::RangeDataset(10);
request.set_path(snapshot_path);
*request.mutable_metadata() =
testing::CreateDummyDistributedSnapshotMetadata();
SnapshotAssignmentManager snapshot_assignment_manager_1(
      /*worker_max_concurrent_snapshots=*/2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> snapshot_manager,
SnapshotManager::Start(request, snapshot_assignment_manager_1,
Env::Default()));
WorkerHeartbeatRequest heartbeat_request;
WorkerHeartbeatResponse heartbeat_response;
heartbeat_request.set_worker_address("localhost");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
EXPECT_THAT(heartbeat_response.snapshot_tasks(), SizeIs(1));
heartbeat_response.Clear();
SnapshotAssignmentManager snapshot_assignment_manager_2(
      /*worker_max_concurrent_snapshots=*/2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> resumed_manager,
SnapshotManager::Resume(snapshot_path, snapshot_assignment_manager_2,
Env::Default()));
TF_EXPECT_OK(
resumed_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
EXPECT_THAT(heartbeat_response.snapshot_tasks(), SizeIs(1));
}
TEST(SnapshotManagerTest, SnapshotStreamError) {
std::string snapshot_path = testing::LocalTempFilename();
SnapshotRequest snapshot_request;
*snapshot_request.mutable_dataset() = testing::RangeDataset(10);
snapshot_request.set_path(snapshot_path);
*snapshot_request.mutable_metadata() =
testing::CreateDummyDistributedSnapshotMetadata();
SnapshotAssignmentManager snapshot_assignment_manager(
      /*worker_max_concurrent_snapshots=*/2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> snapshot_manager,
SnapshotManager::Start(snapshot_request, snapshot_assignment_manager,
Env::Default()));
WorkerHeartbeatRequest heartbeat_request;
WorkerHeartbeatResponse heartbeat_response;
heartbeat_request.set_worker_address("localhost");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
const SnapshotTaskDef& task = heartbeat_response.snapshot_tasks(0);
heartbeat_response.Clear();
SnapshotTaskProgress snapshot_task_progress;
*snapshot_task_progress.mutable_snapshot_task() = task;
*snapshot_task_progress.mutable_status() =
tsl::StatusToProto(errors::NotFound("Not found"));
(*heartbeat_request.mutable_snapshot_task_progress())[snapshot_path] =
snapshot_task_progress;
TF_EXPECT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
EXPECT_THAT(heartbeat_response.snapshot_tasks(), IsEmpty());
TF_ASSERT_OK(
Env::Default()->FileExists(SnapshotErrorFilePath(snapshot_path)));
StatusProto status_proto;
TF_ASSERT_OK(ReadTextProto(
Env::Default(), SnapshotErrorFilePath(snapshot_path), &status_proto));
EXPECT_THAT(tsl::StatusFromProto(status_proto),
StatusIs(error::NOT_FOUND, "Not found"));
}
TEST(SnapshotManagerTest, ResumeFromError) {
std::string snapshot_path = testing::LocalTempFilename();
SnapshotRequest request;
*request.mutable_dataset() = testing::RangeDataset(10);
request.set_path(snapshot_path);
*request.mutable_metadata() =
testing::CreateDummyDistributedSnapshotMetadata();
SnapshotAssignmentManager snapshot_assignment_manager_1(
      /*worker_max_concurrent_snapshots=*/2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> snapshot_manager,
SnapshotManager::Start(request, snapshot_assignment_manager_1,
Env::Default()));
WorkerHeartbeatRequest heartbeat_request;
WorkerHeartbeatResponse heartbeat_response;
heartbeat_request.set_worker_address("localhost");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
ASSERT_THAT(heartbeat_response.snapshot_tasks(), SizeIs(1));
const SnapshotTaskDef& task = heartbeat_response.snapshot_tasks(0);
heartbeat_response.Clear();
SnapshotTaskProgress snapshot_task_progress;
*snapshot_task_progress.mutable_snapshot_task() = task;
*snapshot_task_progress.mutable_status() =
tsl::StatusToProto(errors::NotFound("Not found"));
(*heartbeat_request.mutable_snapshot_task_progress())[snapshot_path] =
snapshot_task_progress;
TF_EXPECT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
EXPECT_THAT(heartbeat_response.snapshot_tasks(), IsEmpty());
heartbeat_response.Clear();
SnapshotAssignmentManager snapshot_assignment_manager_2(
      /*worker_max_concurrent_snapshots=*/2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> resumed_manager,
SnapshotManager::Resume(snapshot_path, snapshot_assignment_manager_2,
Env::Default()));
TF_EXPECT_OK(
resumed_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
EXPECT_THAT(heartbeat_response.snapshot_tasks(), IsEmpty());
}
TEST(SnapshotAssignmentManagerTest, LoadBalanceSnapshots) {
SnapshotAssignmentManager snapshot_assignment_manager(
      /*worker_max_concurrent_snapshots=*/2);
snapshot_assignment_manager.AddSnapshot("snapshot_1");
snapshot_assignment_manager.AddSnapshot("snapshot_2");
snapshot_assignment_manager.AddSnapshot("snapshot_3");
EXPECT_THAT(snapshot_assignment_manager.TryAddAssignment(
"snapshot_3", "worker_1", 0),
IsOkAndHolds(true));
EXPECT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_1"),
ElementsAre("snapshot_3", _));
ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_2"),
ElementsAre(Not("snapshot_3")));
EXPECT_THAT(snapshot_assignment_manager.TryAddAssignment(
"snapshot_2", "worker_1", 0),
IsOkAndHolds(true));
ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_1"),
UnorderedElementsAre("snapshot_2", "snapshot_3"));
EXPECT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_2"),
ElementsAre("snapshot_1"));
EXPECT_THAT(snapshot_assignment_manager.TryAddAssignment(
"snapshot_1", "worker_1", 0),
IsOkAndHolds(false));
EXPECT_THAT(snapshot_assignment_manager.TryAddAssignment(
"snapshot_2", "worker_2", 0),
IsOkAndHolds(true));
ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_1"),
UnorderedElementsAre("snapshot_2", "snapshot_3"));
EXPECT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_2"),
ElementsAre("snapshot_2", "snapshot_1"));
snapshot_assignment_manager.RemoveAssignment("snapshot_2", "worker_1",
                                               /*stream_index=*/0);
EXPECT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_1"),
ElementsAre("snapshot_3", "snapshot_1"));
ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_2"),
ElementsAre("snapshot_2", "snapshot_1"));
snapshot_assignment_manager.RemoveAssignment("snapshot_3", "worker_1",
                                               /*stream_index=*/0);
ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_1"),
ElementsAre("snapshot_1"));
ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_2"),
ElementsAre("snapshot_2", "snapshot_1"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/snapshot_manager.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/snapshot_manager_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fbd27fd6-da5b-4cbf-9c30-4a3f1eebfb20 | cpp | tensorflow/tensorflow | snapshot_stream_writer | tensorflow/core/data/service/snapshot/snapshot_stream_writer.cc | tensorflow/core/data/service/snapshot/snapshot_stream_writer_test.cc | #include "tensorflow/core/data/service/snapshot/snapshot_stream_writer.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/time/time.h"
#include "tensorflow/core/data/service/byte_size.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include "tensorflow/core/data/service/snapshot/parallel_tfrecord_writer.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/service/snapshot/utils.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/data/utils.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
namespace data {
namespace {
constexpr ByteSize kTFRecordReaderOutputBufferSize = ByteSize::GB(1);
constexpr int64_t kUnknownNumElements = -1;
constexpr const char kFileShardDelimiter[] = "_CHUNK_SHARDS_";
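// Parses the chunk index out of an uncommitted chunk filename. Uncommitted
// chunks are sharded files named "chunk_<chunk_index>_CHUNK_SHARDS_<shard>",
// so the index is recovered by splitting on the shard delimiter and then
// splitting the prefix on '_'.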
absl::StatusOr<int64_t> GetUncommittedChunkIndex(const std::string& filename) {
std::vector<std::string> tokens =
absl::StrSplit(filename, kFileShardDelimiter);
if (tokens.size() != 2) {
return absl::InternalError(
absl::StrCat("Invalid tf.data snapshot chunk file: ", filename,
". Expected sharded chunk files."));
}
tokens = absl::StrSplit(tokens[0], '_');
int64_t chunk_index = 0;
if (tokens.size() != 2 || tokens[0] != "chunk" ||
!absl::SimpleAtoi(tokens[1], &chunk_index) || chunk_index < 0) {
return absl::InternalError(
absl::StrCat("Invalid tf.data snapshot chunk file: ", filename,
". Expected chunk_<chunk_index>."));
}
return chunk_index;
}
size_t TotalNumElements(
const ParallelTFRecordWriter::FileToStatsMap& file_stats) {
size_t num_elements = 0;
for (const auto& [file, stats] : file_stats) {
num_elements += stats.num_records;
}
return num_elements;
}
ByteSize TotalBytes(const ParallelTFRecordWriter::FileToStatsMap& file_stats) {
ByteSize bytes;
for (const auto& [file, stats] : file_stats) {
bytes += stats.estimated_size;
}
return bytes;
}
}  // namespace
SnapshotStreamWriter::SnapshotStreamWriter(
const SnapshotWriterParams& params, std::unique_ptr<TaskIterator> iterator)
: params_(params), iterator_(std::move(iterator)) {
DCHECK_NE(iterator_.get(), nullptr);
last_commit_time_ = absl::FromUnixMicros(params_.env->NowMicros());
snapshot_thread_ = absl::WrapUnique(params_.env->StartThread(
{}, "tf_data_service_snapshot_thread",
[this]() { WriteSnapshotAndLog(); }));
}
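// Entry point for the snapshot thread: skips streams that already have a DONE
// file, otherwise writes the stream, treats preemption as a clean cancel, and
// records the terminal status under the lock.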
void SnapshotStreamWriter::WriteSnapshotAndLog() TF_LOCKS_EXCLUDED(mu_) {
if (StreamAlreadyCompleted()) {
LOG(INFO) << "Distributed tf.data snapshot stream has already been "
<< "completed for " << params_.DebugString();
mutex_lock l(mu_);
completed_ = true;
return;
}
LOG(INFO) << "Writing distributed tf.data snapshot stream: "
<< params_.DebugString();
absl::Status status = WriteSnapshot();
if (IsPreemptedError(status)) {
LOG(INFO) << "tf.data service snapshot writer is cancelled: " << status;
return;
}
status = FinalizeStream(status);
mutex_lock l(mu_);
if (!status.ok()) {
LOG(ERROR) << "Failed to write distributed tf.data snapshot stream: "
<< params_.DebugString() << ". Status: " << status;
completed_ = std::move(status);
return;
}
LOG(INFO) << "Finished writing distributed tf.data snapshot stream: "
<< params_.DebugString();
completed_ = true;
iterator_ = nullptr;
}
absl::Status SnapshotStreamWriter::WriteSnapshot() TF_LOCKS_EXCLUDED(mu_) {
TF_RETURN_IF_ERROR(InitializeDirectories());
TF_RETURN_IF_ERROR(Restore());
while (ShouldWriteChunks()) {
TF_RETURN_IF_ERROR(WriteChunks());
}
mutex_lock l(mu_);
return completed_.status();
}
bool SnapshotStreamWriter::StreamAlreadyCompleted() const {
std::string done_file_path =
StreamDoneFilePath(params_.snapshot_path, params_.stream_index);
return params_.env->FileExists(done_file_path).ok();
}
absl::Status SnapshotStreamWriter::InitializeDirectories() {
TF_RETURN_IF_ERROR(
params_.env->RecursivelyCreateDir(params_.UncommittedChunksDirectory()));
TF_RETURN_IF_ERROR(
params_.env->RecursivelyCreateDir(params_.CheckpointsDirectory()));
return absl::OkStatus();
}
bool SnapshotStreamWriter::ShouldWriteChunks() const TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
return !end_of_sequence_ && completed_.ok();
}
absl::Status SnapshotStreamWriter::WriteChunks() {
LOG(INFO) << "Writing distributed tf.data snapshot " << params_.snapshot_path
<< ", stream " << params_.stream_index << ", chunk " << chunk_index_
<< ".";
std::string chunks_prefix = tsl::io::JoinPath(
params_.UncommittedChunksDirectory(),
absl::StrCat("chunk_", chunk_index_, kFileShardDelimiter));
ParallelTFRecordWriter writer(TranslateFileName(chunks_prefix),
params_.compression, params_.env,
params_.max_chunk_size);
do {
TF_RETURN_IF_ERROR(WriteRecord(writer));
} while (ShouldWriteRecord());
TF_ASSIGN_OR_RETURN(const ParallelTFRecordWriter::FileToStatsMap file_stats,
writer.Finalize());
TF_RETURN_IF_ERROR(Completed().status());
TF_RETURN_IF_ERROR(Commit(file_stats));
metrics::RecordTFDataServiceSnapshotBytesCommitted(
TotalBytes(file_stats).ToUnsignedBytes());
return absl::OkStatus();
}
bool SnapshotStreamWriter::ShouldWriteRecord() const {
mutex_lock l(mu_);
if (!completed_.ok() || end_of_sequence_) {
return false;
}
const absl::Time now = absl::FromUnixMicros(params_.env->NowMicros());
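  // Adjusts the checkpoint interval to speed up the initial commits: the
  // effective interval grows gradually from 5 minutes toward the configured
  // checkpoint interval as more chunks are committed.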
const absl::Duration adjusted_checkpoint_interval = std::min(
params_.checkpoint_interval, absl::Minutes(0.5 * chunk_index_ + 5));
return now < last_commit_time_ + adjusted_checkpoint_interval;
}
absl::Status SnapshotStreamWriter::WriteRecord(ParallelTFRecordWriter& writer) {
std::vector<Tensor> element;
TF_RETURN_IF_ERROR(iterator_->GetNext(element, end_of_sequence_));
if (end_of_sequence_) {
return absl::OkStatus();
}
return writer.Write(std::move(element));
}
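// Commits the finalized chunk files: checkpoints the iterator first so a
// restarted writer can reproduce this position, then renames each file into
// the committed-chunks directory as
// "chunk_<stream_index>_<chunk_index>_<num_records>".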
absl::Status SnapshotStreamWriter::Commit(
const ParallelTFRecordWriter::FileToStatsMap& file_stats) {
TF_RETURN_IF_ERROR(Save(file_stats));
for (const auto& [file, stats] : file_stats) {
std::string committed_chunk_path =
tsl::io::JoinPath(params_.CommittedChunksDirectory(),
absl::StrCat("chunk_", params_.stream_index, "_",
chunk_index_++, "_", stats.num_records));
TF_RETURN_IF_ERROR(params_.env->RenameFile(file, committed_chunk_path));
}
last_commit_time_ = absl::FromUnixMicros(params_.env->NowMicros());
return absl::OkStatus();
}
absl::Status SnapshotStreamWriter::FinalizeStream(absl::Status status) {
if (status.ok()) {
status = WriteDoneFile();
}
if (!status.ok()) {
WriteErrorFile(status).IgnoreError();
}
absl::Status s = DeleteCheckpoints();
if (!s.ok()) {
LOG(ERROR) << "Failed to clean up checkpoints at "
<< params_.CheckpointsDirectory() << ": " << s;
}
return status;
}
absl::Status SnapshotStreamWriter::WriteDoneFile() {
std::string done_file_path =
StreamDoneFilePath(params_.snapshot_path, params_.stream_index);
return AtomicallyWriteStringToFile(done_file_path, "", params_.env);
}
absl::Status SnapshotStreamWriter::WriteErrorFile(const absl::Status& status) {
std::string error_file_path =
tsl::io::JoinPath(params_.StreamDirectory(), "ERROR");
return AtomicallyWriteStringToFile(error_file_path, status.ToString(),
params_.env);
}
absl::StatusOr<bool> SnapshotStreamWriter::Completed() const
TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
return completed_;
}
absl::StatusOr<bool> SnapshotStreamWriter::Wait() TF_LOCKS_EXCLUDED(mu_) {
snapshot_thread_.reset();
mutex_lock l(mu_);
return completed_;
}
void SnapshotStreamWriter::Cancel() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
completed_ = absl::CancelledError(
"The tf.data service snapshot writer has been cancelled.");
}
absl::Status SnapshotStreamWriter::Save(
const ParallelTFRecordWriter::FileToStatsMap& file_stats) {
const size_t num_elements = TotalNumElements(file_stats);
const ByteSize byte_size = TotalBytes(file_stats);
LOG(INFO) << "Checkpointing distributed tf.data snapshot writer for snapshot "
<< params_.DebugString() << ". Stream " << params_.stream_index
<< ", chunk " << chunk_index_
<< ", number of elements in chunk: " << num_elements
<< ", chunk size: " << byte_size << ".";
tsl::profiler::TraceMe activity("SnapshotCheckpoint",
tsl::profiler::TraceMeLevel::kInfo);
absl::Time start_time = absl::FromUnixMicros(params_.env->NowMicros());
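  // The checkpoint index identifies the first chunk index after this
  // checkpoint, i.e. committed chunks [0, checkpoint_index) are covered by it.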
int64_t checkpoint_index = chunk_index_ + file_stats.size();
std::string checkpoint_path = CheckpointPath(checkpoint_index, num_elements);
TF_ASSIGN_OR_RETURN(std::vector<Tensor> serialized_iterator,
iterator_->Save());
TF_RETURN_IF_ERROR(AtomicallyWriteTFRecords(
checkpoint_path, serialized_iterator, params_.compression, params_.env));
absl::Time end_time = absl::FromUnixMicros(params_.env->NowMicros());
LOG(INFO) << "Wrote checkpoint file " << checkpoint_path << ". "
<< "Checkpointing distributed tf.data snapshot writer took "
<< (end_time - start_time);
return DeleteOutdatedCheckpoints(checkpoint_index);
}
absl::Status SnapshotStreamWriter::DeleteOutdatedCheckpoints(
int64_t checkpoint_index) {
if (params_.test_only_keep_temp_files) {
return absl::OkStatus();
}
std::vector<std::string> checkpoint_filenames;
TF_RETURN_IF_ERROR(params_.env->GetChildren(params_.CheckpointsDirectory(),
&checkpoint_filenames));
for (const std::string& checkpoint_filename : checkpoint_filenames) {
std::string checkpoint_filepath =
tsl::io::JoinPath(params_.CheckpointsDirectory(), checkpoint_filename);
if (IsTemporaryFile(checkpoint_filename)) {
TF_RETURN_IF_ERROR(params_.env->DeleteFile(checkpoint_filepath));
continue;
}
TF_ASSIGN_OR_RETURN(auto checkpoint_filename_tokens,
ParseCheckpointFilename(checkpoint_filename));
auto [checkpoint_file_index, _] = checkpoint_filename_tokens;
if (checkpoint_file_index < checkpoint_index) {
TF_RETURN_IF_ERROR(params_.env->DeleteFile(checkpoint_filepath));
}
}
return absl::OkStatus();
}
absl::Status SnapshotStreamWriter::DeleteCheckpoints() {
if (params_.test_only_keep_temp_files) {
return absl::OkStatus();
}
LOG(INFO) << "Deleting tf.data snapshot checkpoints directory: "
<< params_.CheckpointsDirectory();
if (params_.env->FileExists(params_.CheckpointsDirectory()).ok()) {
int64_t undeleted_files, undeleted_dirs;
return params_.env->DeleteRecursively(params_.CheckpointsDirectory(),
&undeleted_files, &undeleted_dirs);
}
return absl::OkStatus();
}
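// Restores writer state after a restart: if a checkpoint exists, the iterator
// is restored from it and chunk files left behind by the previous writer are
// reconciled via SyncCheckpointWithChunks; otherwise any uncommitted chunks
// are simply discarded.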
absl::Status SnapshotStreamWriter::Restore() {
absl::StatusOr<std::string> checkpoint_name = LastCheckpointName();
if (absl::IsNotFound(checkpoint_name.status())) {
    return SyncCheckpointWithChunks(std::nullopt, kUnknownNumElements);
}
TF_RETURN_IF_ERROR(checkpoint_name.status());
snapshot_util::TFRecordReaderImpl reader(
CheckpointPath(*checkpoint_name), params_.compression,
kTFRecordReaderOutputBufferSize.ToUnsignedBytes());
TF_RETURN_IF_ERROR(reader.Initialize(params_.env));
TF_ASSIGN_OR_RETURN(std::vector<Tensor> serialized_tensors,
reader.GetTensors());
TF_RETURN_IF_ERROR(iterator_->Restore(serialized_tensors));
TF_ASSIGN_OR_RETURN(auto checkpoint_name_tokens,
ParseCheckpointFilename(*checkpoint_name));
auto [checkpoint_index, checkpoint_num_elements] = checkpoint_name_tokens;
TF_RETURN_IF_ERROR(
SyncCheckpointWithChunks(checkpoint_index, checkpoint_num_elements));
chunk_index_ = checkpoint_index;
LOG(INFO) << "Restored distributed tf.data snapshot writer. Snapshot "
<< params_.snapshot_path << ", stream " << params_.stream_index
<< ", chunk " << checkpoint_index << ".";
return absl::OkStatus();
}
absl::StatusOr<std::string> SnapshotStreamWriter::LastCheckpointName() const {
TF_ASSIGN_OR_RETURN(std::vector<std::string> checkpoint_names,
GetChildren(params_.CheckpointsDirectory(), params_.env));
if (checkpoint_names.empty()) {
return absl::NotFoundError(
absl::StrCat("No checkpoint has been written in directory ",
params_.CheckpointsDirectory()));
}
int64_t last_index = -1;
std::string last_checkpoint_name = "";
for (const std::string& checkpoint_name : checkpoint_names) {
TF_ASSIGN_OR_RETURN(auto checkpoint_name_tokens,
ParseCheckpointFilename(checkpoint_name));
auto [checkpoint_index, unused] = checkpoint_name_tokens;
if (checkpoint_index > last_index) {
last_index = checkpoint_index;
last_checkpoint_name = checkpoint_name;
}
}
return last_checkpoint_name;
}
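// Reconciles chunk files with the checkpoint: uncommitted chunks written
// before `checkpoint_index` are committed (the previous writer died between
// writing and committing them), while chunks at or past the checkpoint are
// deleted because the restored iterator will regenerate them.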
absl::Status SnapshotStreamWriter::SyncCheckpointWithChunks(
std::optional<int64_t> checkpoint_index, int64_t checkpoint_num_elements) {
TF_ASSIGN_OR_RETURN(
std::vector<std::string> uncommitted_chunks,
GetChildren(params_.UncommittedChunksDirectory(), params_.env));
TF_ASSIGN_OR_RETURN(int64_t last_committed_chunk_index,
LastCommittedChunkIndex());
int64_t next_chunk_index = last_committed_chunk_index + 1;
for (const std::string& uncommitted_chunk : uncommitted_chunks) {
std::string uncommitted_chunk_filename = tsl::io::JoinPath(
params_.UncommittedChunksDirectory(), uncommitted_chunk);
TF_ASSIGN_OR_RETURN(int64_t uncommitted_chunk_index,
GetUncommittedChunkIndex(uncommitted_chunk));
if (checkpoint_index.has_value() &&
uncommitted_chunk_index < *checkpoint_index) {
int64_t chunk_num_elements = (next_chunk_index == *checkpoint_index - 1)
? checkpoint_num_elements
: kUnknownNumElements;
std::string committed_chunk_filename = tsl::io::JoinPath(
params_.CommittedChunksDirectory(),
absl::StrCat("chunk_", params_.stream_index, "_", next_chunk_index,
"_", chunk_num_elements));
TF_RETURN_IF_ERROR(params_.env->RenameFile(uncommitted_chunk_filename,
committed_chunk_filename));
++next_chunk_index;
} else {
TF_RETURN_IF_ERROR(params_.env->DeleteFile(uncommitted_chunk_filename));
}
}
if (checkpoint_index.has_value() && next_chunk_index != *checkpoint_index) {
return absl::InternalError(absl::StrCat(
"Failed to recover tf.data snapshot writer: Unable to find chunks [",
next_chunk_index, ", ", *checkpoint_index, ")."));
}
return absl::OkStatus();
}
absl::StatusOr<int64_t> SnapshotStreamWriter::LastCommittedChunkIndex() {
std::string committed_chunks_directory = params_.CommittedChunksDirectory();
TF_ASSIGN_OR_RETURN(
std::vector<std::string> committed_chunks,
GetChildren(params_.CommittedChunksDirectory(), params_.env));
int64_t last_committed_chunk_index = -1;
for (const std::string& committed_chunk : committed_chunks) {
TF_ASSIGN_OR_RETURN(auto chunk_filename_tokens,
ParseChunkFilename(committed_chunk));
const auto [stream_index, chunk_index, _] = chunk_filename_tokens;
if (stream_index != params_.stream_index) {
continue;
}
if (chunk_index > last_committed_chunk_index) {
last_committed_chunk_index = chunk_index;
}
}
return last_committed_chunk_index;
}
std::string SnapshotStreamWriter::CheckpointPath(
int64_t chunk_index, int64_t chunk_num_elements) const {
return tsl::io::JoinPath(
params_.CheckpointsDirectory(),
absl::StrCat("checkpoint_", chunk_index, "_", chunk_num_elements));
}
std::string SnapshotStreamWriter::CheckpointPath(
const std::string& checkpoint_name) const {
return tsl::io::JoinPath(params_.CheckpointsDirectory(), checkpoint_name);
}
}  // namespace data
} | #include "tensorflow/core/data/service/snapshot/snapshot_stream_writer.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/lib/io/compression.h"
#include "xla/tsl/lib/monitoring/cell_reader.h"
#include "tensorflow/core/data/service/byte_size.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/service/snapshot/test_utils.h"
#include "tensorflow/core/data/service/task_runner.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/data/standalone.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
using ::testing::ValuesIn;
using ::tsl::monitoring::testing::CellReader;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
absl::StatusOr<std::unique_ptr<StandaloneTaskIterator>> TestIterator(
const DatasetDef& dataset_def) {
std::unique_ptr<standalone::Dataset> dataset;
TF_RETURN_IF_ERROR(standalone::Dataset::FromGraph(
standalone::Dataset::Params(), dataset_def.graph(), &dataset));
std::unique_ptr<standalone::Iterator> iterator;
TF_RETURN_IF_ERROR(dataset->MakeIterator(&iterator));
return std::make_unique<StandaloneTaskIterator>(std::move(dataset),
std::move(iterator));
}
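// Task iterator yielding a fixed sequence of elements or errors, used to
// drive the writer's error-handling paths.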
template <class T>
class ElementOrErrorIterator : public TaskIterator {
public:
explicit ElementOrErrorIterator(
const std::vector<absl::StatusOr<T>>& elements)
: elements_(elements) {}
absl::Status GetNext(std::vector<Tensor>& element,
bool& end_of_sequence) override {
end_of_sequence = (next_ >= elements_.size());
if (end_of_sequence) {
return absl::OkStatus();
}
const absl::StatusOr<T>& next_element = elements_[next_++];
TF_RETURN_IF_ERROR(next_element.status());
element = {Tensor{*next_element}};
return absl::OkStatus();
}
absl::StatusOr<std::vector<Tensor>> Save() override {
return std::vector<Tensor>{};
}
absl::Status Restore(const std::vector<Tensor>& saved_iterator) override {
return absl::OkStatus();
}
int64_t Cardinality() const override { return elements_.size(); }
private:
const std::vector<absl::StatusOr<T>> elements_;
int64_t next_ = 0;
};
absl::StatusOr<std::string> CreateSnapshotDirectory() {
std::string snapshot_path;
if (!Env::Default()->LocalTempFilename(&snapshot_path)) {
return absl::FailedPreconditionError(
"Failed to create local temp file for snapshot.");
}
TF_RETURN_IF_ERROR(Env::Default()->RecursivelyCreateDir(
CommittedChunksDirectory(snapshot_path)));
return snapshot_path;
}
absl::StatusOr<std::unique_ptr<snapshot_util::Reader>> CreateSnapshotReader(
const std::string& snapshot_path, int64_t num_elements,
const std::string& compression, Env* env) {
static constexpr int kTFRecordReader = 2;
DataTypeVector dtypes(num_elements, DT_INT64);
std::unique_ptr<snapshot_util::Reader> reader;
TF_RETURN_IF_ERROR(snapshot_util::Reader::Create(
env, snapshot_path, compression, kTFRecordReader, dtypes, &reader));
return reader;
}
template <class T>
absl::StatusOr<std::vector<T>> ReadSnapshot(const std::string& snapshot_path,
const std::string& compression,
int64_t num_elements) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<snapshot_util::Reader> reader,
CreateSnapshotReader(snapshot_path, num_elements,
compression, Env::Default()));
std::vector<Tensor> tensors;
TF_RETURN_IF_ERROR(reader->ReadTensors(&tensors));
std::vector<T> result;
for (const Tensor& tensor : tensors) {
result.push_back(tensor.unaligned_flat<T>().data()[0]);
}
return result;
}
absl::StatusOr<std::string> ReadStringFromFile(const std::string& filename) {
std::string data;
TF_RETURN_IF_ERROR(ReadFileToString(Env::Default(), filename, &data));
return data;
}
class SnapshotStreamWriterParameterizedTest
: public ::testing::TestWithParam<std::string> {
public:
std::string Compression() const { return GetParam(); }
};
TEST_P(SnapshotStreamWriterParameterizedTest, WriteSnapshot) {
CellReader<int64_t> cell_reader(
"/tensorflow/data/service/snapshot_bytes_committed");
EXPECT_EQ(cell_reader.Delta(), 0);
int64_t range = 10;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<StandaloneTaskIterator> iterator,
TestIterator(testing::RangeDataset(range)));
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
SnapshotWriterParams writer_params{snapshot_path, 0,
Compression(), Env::Default()};
SnapshotStreamWriter snapshot_writer(writer_params, std::move(iterator));
EXPECT_THAT(snapshot_writer.Wait(), IsOkAndHolds(true));
EXPECT_THAT(testing::ReadSnapshot<int64_t>(snapshot_path, Compression()),
IsOkAndHolds(UnorderedElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)));
EXPECT_THAT(
GetChildren(writer_params.UncommittedChunksDirectory(), Env::Default()),
IsOkAndHolds(IsEmpty()));
EXPECT_GE(cell_reader.Delta(), 80);
}
TEST_P(SnapshotStreamWriterParameterizedTest, StreamAlreadyCompleted) {
int64_t range = 10;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<StandaloneTaskIterator> iterator,
TestIterator(testing::RangeDataset(range)));
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
SnapshotWriterParams writer_params{snapshot_path, 0,
Compression(), Env::Default()};
SnapshotStreamWriter snapshot_writer(writer_params, std::move(iterator));
EXPECT_THAT(snapshot_writer.Wait(), IsOkAndHolds(true));
EXPECT_THAT(testing::ReadSnapshot<int64_t>(snapshot_path, Compression()),
IsOkAndHolds(UnorderedElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)));
TF_ASSERT_OK_AND_ASSIGN(iterator, TestIterator(testing::RangeDataset(range)));
SnapshotStreamWriter duplicate_writer(writer_params, std::move(iterator));
  EXPECT_THAT(duplicate_writer.Wait(), IsOkAndHolds(true));
EXPECT_THAT(testing::ReadSnapshot<int64_t>(snapshot_path, Compression()),
IsOkAndHolds(UnorderedElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)));
}
TEST_P(SnapshotStreamWriterParameterizedTest, WriteSnapshotChunks) {
int64_t range = 10;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<StandaloneTaskIterator> iterator,
TestIterator(testing::RangeDataset(range)));
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
SnapshotWriterParams writer_params{snapshot_path, 0,
Compression(), Env::Default(),
ByteSize::Bytes(1)};
SnapshotStreamWriter snapshot_writer(writer_params, std::move(iterator));
EXPECT_THAT(snapshot_writer.Wait(), IsOkAndHolds(true));
EXPECT_THAT(
GetChildren(writer_params.CommittedChunksDirectory(), Env::Default()),
IsOkAndHolds(SizeIs(range)));
EXPECT_THAT(testing::ReadSnapshot<int64_t>(snapshot_path, Compression()),
IsOkAndHolds(UnorderedElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)));
}
TEST_P(SnapshotStreamWriterParameterizedTest, WriteDoneFile) {
int64_t range = 10;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<StandaloneTaskIterator> iterator,
TestIterator(testing::RangeDataset(range)));
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
std::string done_file_path = tsl::io::JoinPath(
StreamDirectory(snapshot_path, 0), "DONE");
std::string error_file_path = tsl::io::JoinPath(
StreamDirectory(snapshot_path, 0), "ERROR");
EXPECT_THAT(Env::Default()->FileExists(done_file_path),
StatusIs(absl::StatusCode::kNotFound));
EXPECT_THAT(Env::Default()->FileExists(error_file_path),
StatusIs(absl::StatusCode::kNotFound));
SnapshotWriterParams writer_params{snapshot_path, 0,
Compression(), Env::Default(),
ByteSize::Bytes(1)};
SnapshotStreamWriter snapshot_writer(writer_params, std::move(iterator));
EXPECT_THAT(snapshot_writer.Wait(), IsOkAndHolds(true));
TF_EXPECT_OK(Env::Default()->FileExists(done_file_path));
EXPECT_THAT(Env::Default()->FileExists(error_file_path),
StatusIs(absl::StatusCode::kNotFound));
EXPECT_THAT(snapshot_writer.Completed(), IsOkAndHolds(true));
}
TEST_P(SnapshotStreamWriterParameterizedTest, WriteErrorFile) {
auto error_iterator = std::make_unique<ElementOrErrorIterator<tstring>>(
std::vector<absl::StatusOr<tstring>>{
tstring("First element"),
absl::InvalidArgumentError("Invalid argument"),
tstring("Second element"), absl::AbortedError("Aborted")});
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
std::string done_file_path = tsl::io::JoinPath(
StreamDirectory(snapshot_path, 0), "DONE");
std::string error_file_path = tsl::io::JoinPath(
StreamDirectory(snapshot_path, 0), "ERROR");
EXPECT_THAT(Env::Default()->FileExists(done_file_path),
StatusIs(absl::StatusCode::kNotFound));
EXPECT_THAT(Env::Default()->FileExists(error_file_path),
StatusIs(absl::StatusCode::kNotFound));
SnapshotWriterParams writer_params{snapshot_path, 0,
Compression(), Env::Default(),
ByteSize::Bytes(1)};
SnapshotStreamWriter snapshot_writer(writer_params,
std::move(error_iterator));
EXPECT_THAT(snapshot_writer.Wait(),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Env::Default()->FileExists(done_file_path),
StatusIs(absl::StatusCode::kNotFound));
TF_EXPECT_OK(Env::Default()->FileExists(error_file_path));
EXPECT_THAT(ReadStringFromFile(error_file_path),
IsOkAndHolds(HasSubstr("Invalid argument")));
EXPECT_THAT(snapshot_writer.Completed(),
StatusIs(absl::StatusCode::kInvalidArgument));
}
INSTANTIATE_TEST_SUITE_P(Compression, SnapshotStreamWriterParameterizedTest,
ValuesIn<std::string>({tsl::io::compression::kNone,
tsl::io::compression::kGzip,
tsl::io::compression::kSnappy,
tsl::io::compression::kZlib}));
TEST(SnapshotStreamWriterTest, EmptyDataset) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<StandaloneTaskIterator> iterator,
TestIterator(testing::RangeDataset(0)));
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
SnapshotWriterParams writer_params{snapshot_path, 0,
tsl::io::compression::kSnappy,
Env::Default()};
SnapshotStreamWriter snapshot_writer(writer_params, std::move(iterator));
EXPECT_THAT(snapshot_writer.Wait(), IsOkAndHolds(true));
EXPECT_THAT(testing::ReadSnapshot<int64_t>(snapshot_path,
tsl::io::compression::kSnappy),
IsOkAndHolds(IsEmpty()));
}
TEST(SnapshotStreamWriterTest, Cancel) {
const int64_t range = 10000;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<StandaloneTaskIterator> iterator,
TestIterator(testing::RangeDataset(range)));
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
SnapshotWriterParams writer_params{snapshot_path, 0,
tsl::io::compression::kSnappy,
Env::Default()};
SnapshotStreamWriter snapshot_writer(writer_params, std::move(iterator));
snapshot_writer.Cancel();
EXPECT_THAT(snapshot_writer.Wait(), StatusIs(absl::StatusCode::kCancelled));
}
}  // namespace
}  // namespace data
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/snapshot_stream_writer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/snapshot_stream_writer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f20f0ac8-614d-4be5-a30f-2b86a1eedae3 | cpp | tensorflow/tensorflow | snapshot_chunk_provider | tensorflow/core/data/service/snapshot/snapshot_chunk_provider.cc | tensorflow/core/data/service/snapshot/snapshot_chunk_provider_test.cc | #include "tensorflow/core/data/service/snapshot/snapshot_chunk_provider.h"
#include <cstdint>
#include <functional>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/btree_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "xla/tsl/protobuf/status.pb.h"
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/retrying_utils.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/tstring.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kChunksRead[] = "chunks_read";
constexpr absl::string_view kSetElementDelimiter = ",";
Tensor ConvertToTensor(absl::string_view s) {
Tensor tensor(DT_STRING, TensorShape({}));
tensor.scalar<tsl::tstring>()() = tsl::tstring(s);
return tensor;
}
std::string AbsPath(absl::string_view snapshot_path, absl::string_view chunk) {
return tsl::io::JoinPath(CommittedChunksDirectory(snapshot_path), chunk);
}
void Backoff(int num_retries, tsl::Env* env) {
if (num_retries >= 1) {
absl::Duration retry_backoff = tsl::ComputeRetryBackoff(num_retries - 1);
env->SleepForMicroseconds(absl::ToInt64Microseconds(retry_backoff));
}
}
}  // namespace
SnapshotChunkProvider::SnapshotChunkProvider(absl::string_view snapshot_path,
tsl::Env* env)
: snapshot_path_(snapshot_path), env_(env) {}
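// Returns the next committed chunk file as a scalar string tensor containing
// its absolute path. When no unread chunk is available and the snapshot is
// still in progress, polls the committed-chunks directory with exponential
// backoff until a chunk appears, the snapshot completes, or an error or
// cancellation is recorded.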
absl::Status SnapshotChunkProvider::GetNext(Tensor* split, bool* end_of_splits)
ABSL_LOCKS_EXCLUDED(mu_) {
for (int num_retries = 0;; ++num_retries) {
Backoff(num_retries, env_);
absl::MutexLock l(&mu_);
TF_RETURN_IF_ERROR(snapshot_state_.status);
if (!chunks_unread_.empty()) {
std::string next_chunk = *chunks_unread_.begin();
chunks_read_.insert(next_chunk);
chunks_unread_.erase(next_chunk);
*split = ConvertToTensor(AbsPath(snapshot_path_, next_chunk));
*end_of_splits = false;
return absl::OkStatus();
}
if (snapshot_state_.snapshot_is_done) {
*end_of_splits = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(UpdateSnapshot());
}
}
absl::Status SnapshotChunkProvider::UpdateSnapshot()
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
TF_ASSIGN_OR_RETURN(snapshot_state_, GetSnapshotState());
TF_RETURN_IF_ERROR(snapshot_state_.status);
TF_ASSIGN_OR_RETURN(std::vector<std::string> chunks, GetAvailableChunks());
for (const std::string& chunk : chunks) {
if (!chunks_read_.contains(chunk)) {
chunks_unread_.insert(std::string(chunk));
}
}
return absl::OkStatus();
}
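// Determines the snapshot's terminal state from marker files: an ERROR file
// holding a serialized non-OK status takes precedence; otherwise the presence
// of the DONE file indicates completion.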
absl::StatusOr<SnapshotChunkProvider::SnapshotState>
SnapshotChunkProvider::GetSnapshotState() {
std::string error_file_path = SnapshotErrorFilePath(snapshot_path_);
if (env_->FileExists(error_file_path).ok()) {
StatusProto status_proto;
TF_RETURN_IF_ERROR(ReadTextProto(env_, error_file_path, &status_proto));
absl::Status status = tsl::StatusFromProto(status_proto);
if (status.ok()) {
return absl::InternalError(absl::StrCat(
"Unexpected snapshot ERROR file contains an OK status at ",
error_file_path, "."));
}
return SnapshotState(status);
}
return SnapshotState(
env_->FileExists(SnapshotDoneFilePath(snapshot_path_)).ok());
}
absl::StatusOr<std::vector<std::string>>
SnapshotChunkProvider::GetAvailableChunks() {
absl::StatusOr<std::vector<std::string>> status_or_chunks =
GetChildren(CommittedChunksDirectory(snapshot_path_), env_);
if (status_or_chunks.ok()) {
return *std::move(status_or_chunks);
} else if (absl::IsNotFound(status_or_chunks.status())) {
return std::vector<std::string>{};
}
return status_or_chunks.status();
}
absl::Status SnapshotChunkProvider::Reset() {
absl::MutexLock l(&mu_);
chunks_read_.clear();
chunks_unread_.clear();
return UpdateSnapshot();
}
absl::Status SnapshotChunkProvider::Save(
std::function<std::string(std::string)> full_name,
IteratorStateWriter* writer) {
absl::MutexLock l(&mu_);
TF_RETURN_IF_ERROR(
writer->WriteScalar(full_name(kChunksRead), SetToString(chunks_read_)));
return absl::OkStatus();
}
absl::Status SnapshotChunkProvider::Restore(
std::function<std::string(std::string)> full_name,
IteratorStateReader* reader) {
absl::MutexLock l(&mu_);
tsl::tstring chunks_read;
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kChunksRead), &chunks_read));
chunks_read_ = SetFromString(chunks_read);
return UpdateSnapshot();
}
int64_t SnapshotChunkProvider::Cardinality() const {
return SnapshotChunksCardinality(snapshot_path_, env_);
}
void SnapshotChunkProvider::Cancel() {
absl::MutexLock l(&mu_);
if (snapshot_state_.snapshot_is_done || !snapshot_state_.status.ok()) {
return;
}
snapshot_state_.status = absl::CancelledError(
absl::StrCat("Cancelled loading tf.data snapshot at ", snapshot_path_));
VLOG(2) << snapshot_state_.status;
}
std::string SnapshotChunkProvider::SetToString(
const SnapshotChunkProvider::OrderedChunkSet& s) {
return absl::StrJoin(s, kSetElementDelimiter);
}
SnapshotChunkProvider::OrderedChunkSet SnapshotChunkProvider::SetFromString(
absl::string_view s) {
if (s.empty()) {
return {};
}
std::vector<std::string> split = absl::StrSplit(s, kSetElementDelimiter);
return OrderedChunkSet(split.begin(), split.end());
}
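// Orders chunk filenames "chunk_<stream>_<chunk>_<records>" by chunk index
// first and stream index second, so readers consume chunks roughly
// round-robin across streams. Unparseable names fall back to lexicographic
// order.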
bool SnapshotChunkProvider::ChunkOrder::operator()(
const std::string& chunk1, const std::string& chunk2) const {
absl::StatusOr<std::tuple<int64_t, int64_t, int64_t>> tokens1 =
ParseChunkFilename(chunk1);
absl::StatusOr<std::tuple<int64_t, int64_t, int64_t>> tokens2 =
ParseChunkFilename(chunk2);
if (!tokens1.status().ok()) {
LOG_EVERY_N_SEC(ERROR, 60) << "Failed to parse tf.data snapshot chunk file "
<< chunk1 << ": " << tokens1.status();
return chunk1 < chunk2;
}
if (!tokens2.status().ok()) {
LOG_EVERY_N_SEC(ERROR, 60) << "Failed to parse tf.data snapshot chunk file "
<< chunk2 << ": " << tokens2.status();
return chunk1 < chunk2;
}
auto [stream_index1, chunk_index1, num_records1] = *tokens1;
auto [stream_index2, chunk_index2, num_records2] = *tokens2;
if (chunk_index1 != chunk_index2) {
return chunk_index1 < chunk_index2;
}
return stream_index1 < stream_index2;
}
}  // namespace data
} | #include "tensorflow/core/data/service/snapshot/snapshot_chunk_provider.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/protobuf/status.pb.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/tstring.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::UnorderedElementsAreArray;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
absl::StatusOr<std::string> CreateSnapshotDirectory() {
std::string snapshot_path;
if (!tsl::Env::Default()->LocalTempFilename(&snapshot_path)) {
return absl::FailedPreconditionError(
"Failed to create local temp file for snapshot.");
}
TF_RETURN_IF_ERROR(tsl::Env::Default()->RecursivelyCreateDir(
CommittedChunksDirectory(snapshot_path)));
return snapshot_path;
}
absl::Status WriteChunk(absl::string_view snapshot_path,
absl::string_view chunk_file) {
return AtomicallyWriteStringToFile(
tsl::io::JoinPath(CommittedChunksDirectory(snapshot_path), chunk_file),
"", tsl::Env::Default());
}
absl::Status SetDone(absl::string_view snapshot_path) {
return AtomicallyWriteStringToFile(SnapshotDoneFilePath(snapshot_path), "",
tsl::Env::Default());
}
absl::Status SetStatus(absl::string_view snapshot_path,
const absl::Status& status) {
return AtomicallyWriteTextProto(SnapshotErrorFilePath(snapshot_path),
tsl::StatusToProto(status),
tsl::Env::Default());
}
absl::StatusOr<std::string> GetChunk(
SnapshotChunkProvider& snapshot_chunk_provider) {
Tensor split;
bool end_of_splits = false;
TF_RETURN_IF_ERROR(snapshot_chunk_provider.GetNext(&split, &end_of_splits));
if (end_of_splits) {
return absl::OutOfRangeError("No more available chunks.");
}
return split.unaligned_flat<tsl::tstring>().data()[0];
}
absl::StatusOr<std::vector<std::string>> GetAllChunks(
SnapshotChunkProvider& snapshot_chunk_provider) {
std::vector<std::string> chunks;
while (true) {
Tensor split;
bool end_of_splits = false;
TF_RETURN_IF_ERROR(snapshot_chunk_provider.GetNext(&split, &end_of_splits));
if (end_of_splits) {
return chunks;
}
chunks.push_back(split.unaligned_flat<tsl::tstring>().data()[0]);
}
return chunks;
}
std::vector<std::string> JoinPaths(absl::string_view snapshot_path,
const std::vector<std::string> chunks) {
std::vector<std::string> joined_chunks;
for (absl::string_view chunk : chunks) {
joined_chunks.push_back(
tsl::io::JoinPath(CommittedChunksDirectory(snapshot_path), chunk));
}
return joined_chunks;
}
std::string full_name(const std::string& name) {
return FullName("test", name);
}
absl::Status SaveAndRestore(SplitProvider& split_provider) {
VariantTensorDataWriter writer;
TF_RETURN_IF_ERROR(split_provider.Save(full_name, &writer));
std::vector<const VariantTensorData*> variants;
writer.GetData(&variants);
VariantTensorDataReader reader(variants);
TF_RETURN_IF_ERROR(split_provider.Restore(full_name, &reader));
return absl::OkStatus();
}
TEST(SnapshotChunkProviderTest, EmptySnapshot) {
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
TF_ASSERT_OK(SetDone(snapshot_path));
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
EXPECT_THAT(GetAllChunks(snapshot_chunk_provider), IsOkAndHolds(IsEmpty()));
EXPECT_THAT(GetAllChunks(snapshot_chunk_provider), IsOkAndHolds(IsEmpty()));
}
TEST(SnapshotChunkProviderTest, SingleReader) {
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
std::vector<std::string> chunks = {"chunk_4_4_4", "chunk_3_3_3",
"chunk_2_2_2", "chunk_1_1_1",
"chunk_0_0_0"};
for (absl::string_view chunk : chunks) {
TF_ASSERT_OK(WriteChunk(snapshot_path, chunk));
}
TF_ASSERT_OK(SetDone(snapshot_path));
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
absl::c_reverse(chunks);
EXPECT_THAT(GetAllChunks(snapshot_chunk_provider),
IsOkAndHolds(ElementsAreArray(JoinPaths(snapshot_path, chunks))));
}
TEST(SnapshotChunkProviderTest, Cardinality) {
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_0_0_0"));
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
EXPECT_EQ(snapshot_chunk_provider.Cardinality(), kUnknownCardinality);
std::vector<std::string> chunks = {"chunk_1_1_1", "chunk_2_2_2",
"chunk_3_3_3", "chunk_4_4_4"};
for (absl::string_view chunk : chunks) {
TF_ASSERT_OK(WriteChunk(snapshot_path, chunk));
}
EXPECT_EQ(snapshot_chunk_provider.Cardinality(), kUnknownCardinality);
TF_ASSERT_OK(SetDone(snapshot_path));
EXPECT_EQ(snapshot_chunk_provider.Cardinality(), 5);
}
TEST(SnapshotChunkProviderTest, WaitForSnapshot) {
std::string snapshot_path;
ASSERT_TRUE(tsl::Env::Default()->LocalTempFilename(&snapshot_path));
absl::Mutex mu;
std::vector<std::string> result;
std::unique_ptr<tsl::Thread> reader_thread =
absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, "Reader",
[&snapshot_path, &mu, &result]() {
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::string> chunks,
GetAllChunks(snapshot_chunk_provider));
absl::MutexLock l(&mu);
result = std::move(chunks);
}));
{
absl::MutexLock l(&mu);
EXPECT_TRUE(result.empty());
}
TF_ASSERT_OK(tsl::Env::Default()->RecursivelyCreateDir(
CommittedChunksDirectory(snapshot_path)));
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_0_0_0"));
TF_ASSERT_OK(SetDone(snapshot_path));
reader_thread.reset();
absl::MutexLock l(&mu);
EXPECT_THAT(result,
ElementsAreArray(JoinPaths(snapshot_path, {"chunk_0_0_0"})));
}
TEST(SnapshotChunkProviderTest, ConcurrentReadWrite) {
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
const int num_readers = 10;
absl::Mutex mu;
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
std::vector<std::string> result;
std::vector<std::unique_ptr<tsl::Thread>> reader_threads;
for (int i = 0; i < num_readers; ++i) {
reader_threads.push_back(absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, absl::StrCat("Reader_", i),
[&snapshot_chunk_provider, &mu, &result]() {
while (true) {
tsl::Env::Default()->SleepForMicroseconds(25);
Tensor split;
bool end_of_splits = false;
TF_ASSERT_OK(
snapshot_chunk_provider.GetNext(&split, &end_of_splits));
if (end_of_splits) {
break;
}
absl::MutexLock l(&mu);
result.push_back(split.unaligned_flat<tsl::tstring>().data()[0]);
}
})));
}
int num_streams = 10, num_chunks_per_stream = 50;
std::vector<std::unique_ptr<tsl::Thread>> stream_threads;
for (int i = 0; i < num_streams; ++i) {
stream_threads.push_back(absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, absl::StrCat("Writer_", i),
[&snapshot_path, num_chunks_per_stream, i]() {
for (int j = 0; j < num_chunks_per_stream; ++j) {
std::string filename = absl::StrCat("chunk_", i, "_", j, "_1");
TF_ASSERT_OK(WriteChunk(snapshot_path, filename));
tsl::Env::Default()->SleepForMicroseconds(35);
}
})));
}
stream_threads.clear();
TF_ASSERT_OK(SetDone(snapshot_path));
reader_threads.clear();
std::vector<std::string> expected;
for (int i = 0; i < num_streams; ++i) {
for (int j = 0; j < num_chunks_per_stream; ++j) {
expected.push_back(absl::StrCat("chunk_", i, "_", j, "_1"));
}
}
EXPECT_THAT(result,
UnorderedElementsAreArray(JoinPaths(snapshot_path, expected)));
}
TEST(SnapshotChunkProviderTest, SaveRestore) {
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
std::vector<std::string> chunks = {"chunk_4_4_4", "chunk_3_3_3",
"chunk_2_2_2", "chunk_1_1_1",
"chunk_0_0_0"};
for (absl::string_view chunk : chunks) {
TF_ASSERT_OK(WriteChunk(snapshot_path, chunk));
}
TF_ASSERT_OK(SetDone(snapshot_path));
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
EXPECT_THAT(GetChunk(snapshot_chunk_provider),
IsOkAndHolds(tsl::io::JoinPath(
CommittedChunksDirectory(snapshot_path), "chunk_0_0_0")));
TF_ASSERT_OK(SaveAndRestore(snapshot_chunk_provider));
EXPECT_THAT(GetAllChunks(snapshot_chunk_provider),
IsOkAndHolds(ElementsAreArray(
JoinPaths(snapshot_path, {"chunk_1_1_1", "chunk_2_2_2",
"chunk_3_3_3", "chunk_4_4_4"}))));
}
TEST(SnapshotChunkProviderTest, SnapshotError) {
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
std::unique_ptr<tsl::Thread> reader_thread =
absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, "Reader", [&snapshot_path]() {
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
EXPECT_THAT(
GetAllChunks(snapshot_chunk_provider),
StatusIs(absl::StatusCode::kFailedPrecondition, "Test error."));
}));
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_0_0_0"));
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_1_0_0"));
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_2_0_0"));
TF_ASSERT_OK(
SetStatus(snapshot_path, absl::FailedPreconditionError("Test error.")));
reader_thread.reset();
}
TEST(SnapshotChunkProviderTest, Cancel) {
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
std::unique_ptr<tsl::Thread> reader_thread =
absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, "Reader",
[&snapshot_chunk_provider]() {
EXPECT_THAT(
GetAllChunks(snapshot_chunk_provider),
StatusIs(absl::StatusCode::kCancelled,
HasSubstr("Cancelled loading tf.data snapshot at")));
}));
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_0_0_0"));
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_1_0_0"));
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_2_0_0"));
snapshot_chunk_provider.Cancel();
reader_thread.reset();
}
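// Hedged sketch (not part of the original suite): committed chunks should be
// surfaced ordered by chunk index first and stream index second, matching the
// provider's ChunkOrder comparator. The test name and data here are
// illustrative assumptions, not taken from the source.
TEST(SnapshotChunkProviderTest, ChunkOrderSketch) {
  TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
  TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_1_0_1"));
  TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_0_0_1"));
  TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_0_1_1"));
  TF_ASSERT_OK(SetDone(snapshot_path));
  SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
                                                tsl::Env::Default());
  EXPECT_THAT(
      GetAllChunks(snapshot_chunk_provider),
      IsOkAndHolds(ElementsAreArray(JoinPaths(
          snapshot_path, {"chunk_0_0_1", "chunk_1_0_1", "chunk_0_1_1"}))));
}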
}  // namespace
}  // namespace data
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/snapshot_chunk_provider.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/snapshot_chunk_provider_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d16e7c62-0807-460f-8ba5-1385b18f29fc | cpp | tensorflow/tensorflow | parallel_tfrecord_writer | tensorflow/core/data/service/snapshot/parallel_tfrecord_writer.cc | tensorflow/core/data/service/snapshot/parallel_tfrecord_writer_test.cc | #include "tensorflow/core/data/service/snapshot/parallel_tfrecord_writer.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/core/data/service/byte_size.h"
#include "tensorflow/core/data/service/snapshot/utils.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/random.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
namespace data {
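// ParallelTFRecordWriter pulls records from a bounded in-memory buffer on
// `num_write_threads` background threads, each writing uniquely named
// TFRecord files capped at roughly `max_file_size` under `file_prefix`.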
ParallelTFRecordWriter::ParallelTFRecordWriter(const std::string& file_prefix,
const std::string& compression,
tsl::Env* env,
ByteSize max_file_size,
int64_t num_write_threads,
int64_t buffer_size)
: env_(env),
file_prefix_(file_prefix),
compression_(compression),
max_file_size_(max_file_size),
buffer_size_(buffer_size) {
thread_pool_ = std::make_unique<tsl::thread::ThreadPool>(
env_, tsl::ThreadOptions{}, "write_tfrecord_thread", num_write_threads);
for (int64_t i = 0; i < num_write_threads; ++i) {
thread_pool_->Schedule([this]() { WriteFiles(); });
}
}
ParallelTFRecordWriter::~ParallelTFRecordWriter() {
absl::Status status = Finalize().status();
if (!status.ok()) {
LOG(ERROR) << "Parallel TFRecord writer failed with error: " << status;
}
}
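// Enqueues `record` for a background thread to write, blocking while the
// buffer is full. Fails if the writer was already finalized or a background
// write has failed.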
absl::Status ParallelTFRecordWriter::Write(std::vector<Tensor> record)
ABSL_LOCKS_EXCLUDED(mu_) {
absl::MutexLock l(&mu_);
while (status_.ok() && !finalized_ && buffer_.size() >= buffer_size_) {
ready_to_push_.Wait(&mu_);
}
TF_RETURN_IF_ERROR(status_);
if (finalized_) {
return absl::FailedPreconditionError(absl::StrCat(
"Trying to write a closed TFRecord file at ", file_prefix_, "."));
}
buffer_.push_back(std::move(record));
ready_to_pop_.Signal();
return absl::OkStatus();
}
absl::StatusOr<ParallelTFRecordWriter::FileToStatsMap>
ParallelTFRecordWriter::Finalize() ABSL_LOCKS_EXCLUDED(mu_) {
{
absl::MutexLock l(&mu_);
finalized_ = true;
ready_to_push_.SignalAll();
ready_to_pop_.SignalAll();
}
thread_pool_.reset();
absl::MutexLock l(&mu_);
TF_RETURN_IF_ERROR(status_);
return file_stats_;
}
void ParallelTFRecordWriter::WriteFiles() {
while (HasNext()) {
UpdateStatus(WriteFile());
}
}
bool ParallelTFRecordWriter::HasNext() const ABSL_LOCKS_EXCLUDED(mu_) {
absl::MutexLock l(&mu_);
if (!status_.ok()) {
return false;
}
return !finalized_ || !buffer_.empty();
}
absl::Status ParallelTFRecordWriter::WriteFile() ABSL_LOCKS_EXCLUDED(mu_) {
TF_ASSIGN_OR_RETURN(const std::string filename, GetUniqueFile());
snapshot_util::TFRecordWriter writer(filename, compression_);
TF_RETURN_IF_ERROR(writer.Initialize(env_));
while (ShouldWriteFile(filename)) {
TF_RETURN_IF_ERROR(WriteRecord(filename, writer));
}
TF_RETURN_IF_ERROR(writer.Close());
return DeleteEmptyFile(filename);
}
bool ParallelTFRecordWriter::ShouldWriteFile(const std::string& filename) const
ABSL_LOCKS_EXCLUDED(mu_) {
if (!HasNext()) {
return false;
}
absl::MutexLock l(&mu_);
auto iterator = file_stats_.find(filename);
return iterator == file_stats_.end() ||
iterator->second.estimated_size < max_file_size_;
}
absl::Status ParallelTFRecordWriter::WriteRecord(
const std::string& filename, snapshot_util::TFRecordWriter& writer) {
TF_ASSIGN_OR_RETURN(std::optional<std::vector<Tensor>> record,
GetNextRecord(filename));
if (!record.has_value()) {
return absl::OkStatus();
}
tsl::profiler::TraceMe activity("WriteTFRecord",
tsl::profiler::TraceMeLevel::kInfo);
TF_RETURN_IF_ERROR(writer.WriteTensors(*std::move(record)));
return absl::OkStatus();
}
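// Pops the next buffered record on behalf of the thread writing `filename`,
// charging the record's count and estimated size to that file's stats.
// Returns std::nullopt once the writer is finalized and the buffer drains.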
absl::StatusOr<std::optional<std::vector<Tensor>>>
ParallelTFRecordWriter::GetNextRecord(const std::string& filename)
ABSL_LOCKS_EXCLUDED(mu_) {
absl::MutexLock l(&mu_);
while (status_.ok() && !finalized_ && buffer_.empty()) {
ready_to_pop_.Wait(&mu_);
}
TF_RETURN_IF_ERROR(status_);
if (buffer_.empty()) {
return std::nullopt;
}
std::vector<Tensor> record = std::move(buffer_.front());
ByteSize estimated_size = EstimatedSize(record);
LOG_EVERY_N_SEC(INFO, 1) << "Writing TFRecord of " << estimated_size
<< " to file " << filename << "*.";
++file_stats_[filename].num_records;
file_stats_[filename].estimated_size += estimated_size;
buffer_.pop_front();
ready_to_push_.SignalAll();
return record;
}
absl::Status ParallelTFRecordWriter::DeleteEmptyFile(
const std::string& filename) ABSL_LOCKS_EXCLUDED(mu_) {
absl::MutexLock l(&mu_);
auto iterator = file_stats_.find(filename);
if (iterator != file_stats_.end() && iterator->second.num_records > 0) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(env_->DeleteFile(filename));
if (iterator != file_stats_.end()) {
file_stats_.erase(iterator);
}
return absl::OkStatus();
}
absl::StatusOr<std::string> ParallelTFRecordWriter::GetUniqueFile() const {
std::string filename = absl::StrCat(file_prefix_, "__shard__",
absl::Hex(tsl::random::New64()), "_");
if (!env_->CreateUniqueFileName(&filename, ".tfrecord")) {
return absl::InternalError(
absl::StrCat("Failed to write file ", filename,
": Unable to open temporary files."));
}
return filename;
}
void ParallelTFRecordWriter::UpdateStatus(absl::Status status)
ABSL_LOCKS_EXCLUDED(mu_) {
if (status.ok()) {
return;
}
absl::MutexLock l(&mu_);
status_.Update(std::move(status));
ready_to_push_.SignalAll();
ready_to_pop_.SignalAll();
}
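// Hedged usage sketch (illustrative only; not part of the original file).
// Records may be written from any thread; Finalize() drains the buffer, joins
// the writer threads, and returns per-file statistics:
//
//   ParallelTFRecordWriter writer("/tmp/snapshot/chunk_0",
//                                 tsl::io::compression::kSnappy,
//                                 tsl::Env::Default());
//   TF_RETURN_IF_ERROR(writer.Write({Tensor(int64_t{42})}));
//   TF_ASSIGN_OR_RETURN(ParallelTFRecordWriter::FileToStatsMap stats,
//                       writer.Finalize());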
}  // namespace data
} | #include "tensorflow/core/data/service/snapshot/parallel_tfrecord_writer.h"
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <numeric>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/lib/io/compression.h"
#include "tensorflow/core/data/service/byte_size.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::Each;
using ::testing::Eq;
using ::testing::Field;
using ::testing::Gt;
using ::testing::IsEmpty;
using ::testing::Le;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAreArray;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
absl::StatusOr<std::string> TestDir() {
std::string test_dir;
if (!tsl::Env::Default()->LocalTempFilename(&test_dir)) {
return absl::FailedPreconditionError("Failed to create local temp file.");
}
TF_RETURN_IF_ERROR(tsl::Env::Default()->RecursivelyCreateDir(test_dir));
return test_dir;
}
class RangeIterator {
public:
explicit RangeIterator(const int64_t range) : range_(range) {}
absl::Status GetNext(std::vector<Tensor>& element, bool& end_of_sequence) {
end_of_sequence = (next_ >= range_);
if (end_of_sequence) {
return absl::OkStatus();
}
element = {Tensor{next_++}};
return absl::OkStatus();
}
int64_t Cardinality() const { return range_; }
private:
const int64_t range_;
int64_t next_ = 0;
};
absl::StatusOr<ParallelTFRecordWriter::FileToStatsMap> WriteRecords(
ParallelTFRecordWriter& writer, RangeIterator& iterator,
bool finalize_writer = true) {
std::vector<Tensor> record;
bool end_of_sequence = false;
TF_RETURN_IF_ERROR(iterator.GetNext(record, end_of_sequence));
while (!end_of_sequence) {
TF_RETURN_IF_ERROR(writer.Write(record));
TF_RETURN_IF_ERROR(iterator.GetNext(record, end_of_sequence));
}
if (finalize_writer) {
return writer.Finalize();
}
return ParallelTFRecordWriter::FileToStatsMap();
}
template <class T>
absl::StatusOr<std::vector<T>> ReadRecords(const std::string& filename,
const std::string& compression) {
snapshot_util::TFRecordReader reader(filename, compression,
DataTypeVector{DT_INT64});
TF_RETURN_IF_ERROR(reader.Initialize(tsl::Env::Default()));
std::vector<T> result;
while (true) {
std::vector<Tensor> record;
absl::Status status = reader.ReadTensors(&record);
if (absl::IsOutOfRange(status)) {
break;
}
TF_RETURN_IF_ERROR(status);
for (const Tensor& tensor : record) {
result.push_back(tensor.unaligned_flat<T>().data()[0]);
}
}
return result;
}
template <class T>
absl::StatusOr<std::vector<T>> ReadRecords(
const std::vector<std::string>& filenames, const std::string& compression) {
std::vector<T> result;
for (const std::string& filename : filenames) {
TF_ASSIGN_OR_RETURN(std::vector<T> records,
ReadRecords<T>(filename, compression));
absl::c_move(records, std::back_inserter(result));
}
return result;
}
std::vector<int64_t> Range(int64_t range) {
std::vector<int64_t> result(range);
std::iota(result.begin(), result.end(), 0);
return result;
}
std::vector<int64_t> Repeat(const std::vector<int64_t>& values,
int64_t repeat) {
std::vector<int64_t> result;
for (int64_t i = 0; i < repeat; ++i) {
absl::c_copy(values, std::back_inserter(result));
}
return result;
}
template <class K, class V>
std::pair<std::vector<K>, std::vector<V>> Unzip(
const absl::flat_hash_map<K, V>& m) {
std::vector<K> keys;
std::vector<V> values;
for (const auto& [k, v] : m) {
keys.push_back(k);
values.push_back(v);
}
return std::make_pair(keys, values);
}
class ParallelTFRecordWriterParamTest
: public ::testing::TestWithParam<std::tuple<
int64_t, int64_t, ByteSize, int64_t, int64_t, std::string>> {
protected:
int64_t NumElements() const { return std::get<0>(GetParam()); }
int64_t NumClients() const { return std::get<1>(GetParam()); }
ByteSize MaxFileSize() const { return std::get<2>(GetParam()); }
int64_t NumWriteThreads() const { return std::get<3>(GetParam()); }
int64_t BufferSize() const { return std::get<4>(GetParam()); }
std::string Compression() const { return std::get<5>(GetParam()); }
void VerifyFileStats(
const std::vector<ParallelTFRecordWriter::FileStats>& file_stats,
int64_t expected_num_elements) const {
auto add_num_elements = [](int64_t num_elements,
const ParallelTFRecordWriter::FileStats& stats) {
return num_elements + stats.num_records;
};
EXPECT_EQ(absl::c_accumulate(file_stats, 0, add_num_elements),
expected_num_elements);
EXPECT_THAT(
file_stats,
Each(Field(&ParallelTFRecordWriter::FileStats::num_records, Gt(0))));
EXPECT_THAT(file_stats,
Each(Field(&ParallelTFRecordWriter::FileStats::estimated_size,
Le(MaxFileSize() + ByteSize::Bytes(16)))));
if (MaxFileSize() <= ByteSize::Bytes(1)) {
EXPECT_THAT(
file_stats,
Each(Field(&ParallelTFRecordWriter::FileStats::num_records, Eq(1))));
EXPECT_THAT(file_stats, SizeIs(expected_num_elements));
}
if (MaxFileSize() >= ByteSize::GB(1)) {
EXPECT_THAT(file_stats, SizeIs(Le(NumWriteThreads())));
}
}
};
TEST_P(ParallelTFRecordWriterParamTest, WriteRecords) {
TF_ASSERT_OK_AND_ASSIGN(std::string test_dir, TestDir());
ParallelTFRecordWriter parallel_tfrecord_writer(
test_dir, Compression(), tsl::Env::Default(), MaxFileSize(),
NumWriteThreads(), BufferSize());
RangeIterator range_iterator(NumElements());
TF_ASSERT_OK_AND_ASSIGN(
ParallelTFRecordWriter::FileToStatsMap file_stats,
WriteRecords(parallel_tfrecord_writer, range_iterator));
const auto [files, stats] = Unzip(file_stats);
EXPECT_THAT(ReadRecords<int64_t>(files, Compression()),
IsOkAndHolds(UnorderedElementsAreArray(Range(NumElements()))));
VerifyFileStats(stats, NumElements());
}
TEST_P(ParallelTFRecordWriterParamTest, ConcurrentWrites) {
TF_ASSERT_OK_AND_ASSIGN(std::string test_dir, TestDir());
ParallelTFRecordWriter parallel_tfrecord_writer(
test_dir, Compression(), tsl::Env::Default(), MaxFileSize(),
NumWriteThreads(), BufferSize());
std::vector<std::unique_ptr<tsl::Thread>> client_threads;
for (int i = 0; i < NumClients(); ++i) {
client_threads.push_back(absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, absl::StrCat("Client_", i),
[this, ¶llel_tfrecord_writer]() {
RangeIterator range_iterator(NumElements());
TF_ASSERT_OK(WriteRecords(parallel_tfrecord_writer, range_iterator,
false)
.status());
})));
}
client_threads.clear();
TF_ASSERT_OK_AND_ASSIGN(ParallelTFRecordWriter::FileToStatsMap file_stats,
parallel_tfrecord_writer.Finalize());
const auto [files, stats] = Unzip(file_stats);
EXPECT_THAT(ReadRecords<int64_t>(files, Compression()),
IsOkAndHolds(UnorderedElementsAreArray(
Repeat(Range(NumElements()), NumClients()))));
VerifyFileStats(stats, NumElements() * NumClients());
}
INSTANTIATE_TEST_SUITE_P(ParallelTFRecordWriterParams,
ParallelTFRecordWriterParamTest,
                         ::testing::Combine(
                             /*NumElements=*/::testing::Values(0, 1, 100),
                             /*NumClients=*/::testing::Values(1, 5),
                             /*MaxFileSize=*/
                             ::testing::Values(ByteSize::Bytes(1),
                                               ByteSize::Bytes(100),
                                               ByteSize::GB(1)),
                             /*NumWriteThreads=*/::testing::Values(1, 5),
                             /*BufferSize=*/::testing::Values(1, 10000),
                             /*Compression=*/
                             ::testing::Values(tsl::io::compression::kNone,
                                               tsl::io::compression::kSnappy,
                                               tsl::io::compression::kZlib)));
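// Finalizing a writer that never received a record should succeed and
// produce no readable records.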
TEST(ParallelTFRecordWriterTest, WriteNoRecord) {
TF_ASSERT_OK_AND_ASSIGN(std::string test_dir, TestDir());
ParallelTFRecordWriter parallel_tfrecord_writer(
test_dir, tsl::io::compression::kNone, tsl::Env::Default());
TF_ASSERT_OK_AND_ASSIGN(ParallelTFRecordWriter::FileToStatsMap file_stats,
parallel_tfrecord_writer.Finalize());
const auto [files, stats] = Unzip(file_stats);
EXPECT_THAT(ReadRecords<int64_t>(files, tsl::io::compression::kNone),
IsOkAndHolds(IsEmpty()));
}
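// While a client keeps writing an effectively unbounded stream, finalizing
// the writer from the main thread should make further writes fail with
// kFailedPrecondition.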
TEST(ParallelTFRecordWriterTest, CannotWriteFinalizedWriter) {
TF_ASSERT_OK_AND_ASSIGN(std::string test_dir, TestDir());
std::string file_prefix = "file";
ParallelTFRecordWriter parallel_tfrecord_writer(
test_dir, tsl::io::compression::kNone, tsl::Env::Default());
std::unique_ptr<tsl::Thread> client_thread =
absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, "Client",
          [&parallel_tfrecord_writer]() {
RangeIterator range_iterator(std::numeric_limits<int64_t>::max());
EXPECT_THAT(WriteRecords(parallel_tfrecord_writer, range_iterator),
StatusIs(absl::StatusCode::kFailedPrecondition));
}));
parallel_tfrecord_writer.Finalize().status().IgnoreError();
client_thread.reset();
}
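// Writing to a nonexistent directory surfaces kNotFound when the writer is
// finalized.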
TEST(ParallelTFRecordWriterTest, DirectoryDoesNotExist) {
ParallelTFRecordWriter parallel_tfrecord_writer("/directory/does/not/exists",
tsl::io::compression::kNone,
tsl::Env::Default());
RangeIterator range_iterator(10);
std::vector<Tensor> element;
bool end_of_sequence = false;
TF_ASSERT_OK(range_iterator.GetNext(element, end_of_sequence));
parallel_tfrecord_writer.Write(element).IgnoreError();
EXPECT_THAT(parallel_tfrecord_writer.Finalize().status(),
StatusIs(absl::StatusCode::kNotFound));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/parallel_tfrecord_writer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/parallel_tfrecord_writer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a6b50088-4668-4bea-9a3a-97e531d48900 | cpp | tensorflow/tensorflow | data_service_client | tensorflow/core/data/service/client/data_service_client.cc | tensorflow/core/data/service/client/data_service_client_test.cc | #include "tensorflow/core/data/service/client/data_service_client.h"
#include <algorithm>
#include <functional>
#include <limits>
#include <memory>
#include <optional>
#include <random>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/strings/ascii.h"
#include "absl/strings/substitute.h"
#include "absl/time/time.h"
#include "tensorflow/core/data/service/client/common.h"
#include "tensorflow/core/data/service/client/validate_utils.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/dispatcher_client.h"
#include "tensorflow/core/data/service/grpc_util.h"
#include "tensorflow/core/data/service/worker_client.h"
#include "tensorflow/core/data/service/worker_impl.h"
#include "tensorflow/core/data/utils.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tsl/platform/retrying_utils.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
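// Returns true if `task` runs on a worker tagged with kColocatedWorkerTag
// (matched case-insensitively), i.e. a worker marked as colocated with the
// client's host.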
bool IsColocatedTask(const TaskInfo& task) {
return absl::c_any_of(task.worker_tags(), [](std::string_view worker_tag) {
return absl::AsciiStrToUpper(worker_tag) == kColocatedWorkerTag;
});
}
absl::StatusOr<DataTransferServerInfo> GetTransferServer(
const std::string& protocol, const TaskInfo& task_info) {
for (const auto& transfer_server : task_info.transfer_servers()) {
if (transfer_server.protocol() == protocol) {
return transfer_server;
}
}
return errors::NotFound("protocol ", protocol,
" is not available for worker ",
task_info.worker_address());
}
}
DataServiceClient::DataServiceClient(const DataServiceParams& params)
: params_(params),
max_outstanding_requests_(params.max_outstanding_requests) {}
DataServiceClient::~DataServiceClient() {
VLOG(2) << "Destroying data service client for iteration id "
<< iteration_client_id_;
task_thread_manager_.reset();
if (initialized_) {
Status s = dispatcher_->ReleaseIterationClient(iteration_client_id_);
if (!s.ok()) {
LOG(WARNING) << "Failed to release iteration client id: " << s;
}
}
for (auto& worker_thread : worker_threads_) {
worker_thread.reset();
}
DeleteLocalWorkerTasks();
VLOG(2) << "Destroyed data service dataset iterator for iteration id "
<< iteration_client_id_;
}
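// Validates the data service parameters, connects to the dispatcher, and
// registers the job and iteration, retrying each RPC with an effectively
// unbounded deadline (kint64max).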
Status DataServiceClient::Initialize(
const DeviceBase::AcceleratorDeviceInfo* accelerator_device_info,
Allocator* allocator) {
accelerator_device_info_ = accelerator_device_info;
allocator_ = allocator;
TF_RETURN_IF_ERROR(ValidateDataServiceParams(params_));
VLOG(3) << "Connecting to " << params_.address
<< " in tf.data service client.";
dispatcher_ = std::make_unique<DataServiceDispatcherClient>(params_.address,
params_.protocol);
int64_t deadline_micros = kint64max;
std::optional<std::string> job_name;
if (!params_.job_name.empty()) {
job_name = params_.job_name;
}
TF_RETURN_IF_ERROR(grpc_util::Retry(
[&]() {
return dispatcher_->GetOrCreateJob(
params_.dataset_id, params_.processing_mode, job_name,
params_.num_consumers,
params_.cross_trainer_cache_options.has_value(),
params_.target_workers, job_id_);
},
strings::StrCat("get or create job with dispatcher at ", params_.address),
deadline_micros));
TF_RETURN_IF_ERROR(grpc_util::Retry(
[&]() {
return dispatcher_->GetOrCreateIteration(job_id_, params_.repetition,
iteration_client_id_);
},
strings::StrCat("get or create iteration with dispatcher at ",
params_.address),
deadline_micros));
initialized_ = true;
return absl::OkStatus();
}
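// Blocks until a result is ready, the iteration finishes, the client is
// cancelled, or an error has been recorded. Results flagged `skip` are
// consumed internally and never surfaced to the caller.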
absl::StatusOr<GetNextResult> DataServiceClient::GetNext(
DataServiceContextFactory context_factory) TF_LOCKS_EXCLUDED(mu_) {
VLOG(3) << "Getting the next element from tf.data service client.";
mutex_lock l(mu_);
if (ctx_ == nullptr) {
ctx_ = context_factory();
}
EnsureThreadsStarted();
std::shared_ptr<Result> result;
do {
while (!ResultReady() && !Finished() && !cancelled_ && status_.ok()) {
VLOG(3) << "Blocking in GetNext: " << DebugString();
get_next_cv_.wait(l);
}
if (cancelled_) {
VLOG(3) << "Returning from GetNext due to cancellation";
return errors::Cancelled("Data service iterator was cancelled");
}
if (!status_.ok()) {
VLOG(3) << "Returning from GetNext with error " << status_;
return status_;
}
if (results_.empty()) {
VLOG(3) << "Returning from GetNext with end_of_sequence";
return GetNextResult::EndOfSequence();
}
if (!ResultReady()) {
VLOG(3) << "Returning from GetNext with internal error";
return errors::Internal("Expected a result to be ready, but none were.");
}
result = PopNextResult();
worker_thread_cv_.notify_one();
if (result->skip) {
VLOG(3) << "Skipping result from task " << result->task_id;
}
} while (result->skip);
GetNextResult next;
next.end_of_sequence = result->end_of_sequence;
if (next.end_of_sequence) {
VLOG(1) << "Returning end_of_sequence";
return next;
}
VLOG(1) << "Returning the next element from data service dataset's "
<< "Iterator: task " << result->task_id << ", element "
<< result->element_index;
if (IsCoordinatedRead()) {
VLOG(1) << "Consumer " << *params_.consumer_index << ": Result "
<< get_next_index_++;
}
next.tensors.swap(result->element);
return next;
}
void DataServiceClient::Cancel() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
for (const auto& task : tasks_) {
task->worker->TryCancel();
}
cancelled_ = true;
worker_thread_cv_.notify_all();
manager_thread_cv_.notify_all();
get_next_cv_.notify_all();
}
TraceMeMetadata DataServiceClient::GetTraceMeMetadata() const {
TraceMeMetadata result;
int64_t num_tasks = -1;
int64_t autotuned_max_outstanding_requests = model::kAutotune;
if (mu_.try_lock()) {
num_tasks = tasks_.size() - finished_tasks_;
autotuned_max_outstanding_requests = max_outstanding_requests_;
mu_.unlock();
}
result.push_back(std::make_pair(
"num_tasks",
num_tasks == -1
? kTraceInfoUnavailable
: strings::Printf("%lld", static_cast<long long>(num_tasks))));
result.push_back(std::make_pair("job_name", params_.job_name));
result.push_back(std::make_pair(
"max_outstanding_requests",
strings::Printf(
"%lld", static_cast<long long>(params_.max_outstanding_requests))));
if (params_.max_outstanding_requests == model::kAutotune) {
result.push_back(std::make_pair(
"autotuned_max_outstanding_requests",
strings::Printf("%lld", static_cast<long long>(
autotuned_max_outstanding_requests))));
}
return result;
}
void DataServiceClient::EnsureThreadsStarted()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!task_thread_manager_ && !cancelled_) {
task_thread_manager_ = ctx_->StartThread("task-thread-manager",
[this]() { TaskThreadManager(); });
}
}
bool DataServiceClient::Finished() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return num_running_worker_threads_ == 0 && !ShouldWaitForNext();
}
bool DataServiceClient::ShouldWaitForNext() const
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (should_finish_iteration_) {
return !iteration_finished_;
}
return tasks_.empty() || finished_tasks_ < tasks_.size();
}
void DataServiceClient::DeleteLocalWorkerTasks() TF_LOCKS_EXCLUDED(mu_) {
std::vector<std::shared_ptr<Task>> tasks;
{
mutex_lock l(mu_);
tasks = tasks_;
}
for (const std::shared_ptr<Task>& task : tasks) {
std::shared_ptr<DataServiceWorkerImpl> worker =
LocalWorkers::Get(task->info.worker_address());
if (worker && ShouldDeleteLocalTask(task->info)) {
worker->DeleteLocalTask(task->info);
}
}
}
bool DataServiceClient::ShouldDeleteLocalTask(const TaskInfo& task) const
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (IsCoordinatedRead()) {
return false;
}
if (params_.target_workers == TARGET_WORKERS_LOCAL) {
return true;
}
return params_.target_workers == TARGET_WORKERS_AUTO && IsColocatedTask(task);
}
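// Background loop: heartbeats to the dispatcher, updates the result-buffer
// size, and refreshes the worker threads once per
// `params_.task_refresh_interval`, until cancelled.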
void DataServiceClient::TaskThreadManager() TF_LOCKS_EXCLUDED(mu_) {
auto cleanup =
gtl::MakeCleanup([] { VLOG(1) << "Task thread manager exiting"; });
VLOG(1) << "Starting task thread manager";
uint64 next_check = Env::Default()->NowMicros();
while (true) {
{
mutex_lock l(mu_);
while (!cancelled_ && Env::Default()->NowMicros() < next_check) {
int64_t remaining_time = next_check - Env::Default()->NowMicros();
VLOG(4) << "Task thread manager waiting for " << remaining_time << "us";
manager_thread_cv_.wait_for(l,
std::chrono::microseconds(remaining_time));
}
if (cancelled_) {
VLOG(3) << "Task thread manager finished";
return;
}
}
Heartbeat();
UpdateBufferSize();
UpdateWorkerThreads();
next_check = Env::Default()->NowMicros() +
absl::ToInt64Microseconds(params_.task_refresh_interval);
}
}
void DataServiceClient::TryBlockRound(int64_t round)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (round_robin_round_limit_.has_value() &&
round_robin_round_limit_.value() == round) {
return;
}
if (current_round_ >= round) {
VLOG(1) << "Rejecting request to block round " << round
<< ", because processing has already begun for round "
<< current_round_;
return;
}
VLOG(1) << "Accepting request to block round " << round;
round_robin_round_limit_ = round;
}
void DataServiceClient::UpdateIterationFinished(bool iteration_finished)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!iteration_finished) {
return;
}
iteration_finished_ = true;
get_next_cv_.notify_all();
worker_thread_cv_.notify_all();
}
absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>>
DataServiceClient::CreateWorkerClient(const std::string& protocol,
const TaskInfo& task_info) {
TF_ASSIGN_OR_RETURN(DataTransferServerInfo transfer_server,
GetTransferServer(protocol, task_info));
return CreateDataServiceWorkerClient(params_.protocol, transfer_server,
accelerator_device_info_, allocator_);
}
absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>>
DataServiceClient::CreateGrpcWorkerClient(const TaskInfo& task_info) {
return CreateWorkerClient(kGrpcTransferProtocol, task_info);
}
absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>>
DataServiceClient::CreateAlternativeWorkerClientWithGrpcFallback(
const DataTransferServerInfo& transfer_server, const TaskInfo& task_info) {
absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>> worker =
CreateDataServiceWorkerClient(params_.protocol, transfer_server,
accelerator_device_info_, allocator_);
if (worker.ok()) {
LOG(INFO) << "Successfully started client for data transfer protocol '"
<< transfer_server.protocol() << "' for worker '"
<< task_info.worker_address() << "'.";
return worker;
}
LOG(INFO) << "Failed to start client for data transfer protocol '"
<< transfer_server.protocol() << "' for worker '"
<< task_info.worker_address() << "'; falling back to grpc. "
<< "Original error: " << worker.status();
metrics::RecordTFDataServiceDataTransferProtocolFallback(
transfer_server.protocol(),
static_cast<error::Code>(worker.status().raw_code()),
std::string(worker.status().message()));
return CreateGrpcWorkerClient(task_info);
}
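// Chooses a data transfer client for `task_info`: local transfer for
// in-process (or forced-local) workers, then the explicitly configured
// protocol, then the platform default, falling back to plain grpc whenever
// a preferred client cannot be created.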
absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>>
DataServiceClient::CreateWorkerClient(const TaskInfo& task_info) {
if (params_.data_transfer_protocol == kLocalTransferProtocol ||
ForceLocalProtocol(task_info.worker_address())) {
DataTransferServerInfo info;
info.set_protocol(kLocalTransferProtocol);
info.set_address(task_info.worker_address());
return CreateDataServiceWorkerClient(params_.protocol, info,
accelerator_device_info_, allocator_);
}
if (!params_.data_transfer_protocol.empty()) {
TF_ASSIGN_OR_RETURN(
DataTransferServerInfo transfer_server,
GetTransferServer(params_.data_transfer_protocol, task_info));
return CreateAlternativeWorkerClientWithGrpcFallback(transfer_server,
task_info);
}
if (std::string default_protocol = DefaultDataTransferProtocol();
default_protocol != kGrpcTransferProtocol) {
absl::StatusOr<DataTransferServerInfo> transfer_server =
GetTransferServer(default_protocol, task_info);
if (transfer_server.ok()) {
return CreateAlternativeWorkerClientWithGrpcFallback(*transfer_server,
task_info);
}
VLOG(1) << "Failed to find transfer server for default data transfer "
"protocol '"
<< default_protocol << "' for worker '"
<< task_info.worker_address()
<< "'; falling back to grpc. Original error: "
<< transfer_server.status();
metrics::RecordTFDataServiceDataTransferProtocolFallback(
default_protocol, error::Code::NOT_FOUND,
"Failed to find transfer server for default protocol");
}
return CreateGrpcWorkerClient(task_info);
}
Status DataServiceClient::AddTask(const TaskInfo& task_info)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<DataServiceWorkerClient> worker,
CreateWorkerClient(task_info));
metrics::RecordTFDataServiceDataTransferProtocolUsed(
worker->GetDataTransferProtocol(),
!params_.data_transfer_protocol.empty());
tasks_.push_back(std::make_shared<Task>(task_info, std::move(worker)));
worker_thread_cv_.notify_one();
if (IsCoordinatedRead()) {
VLOG(1) << "Consumer " << params_.consumer_index.value() << " adding task "
<< task_info.task_id() << " to read from worker "
<< task_info.worker_address()
<< ". Task starting round: " << task_info.starting_round();
DCHECK_LE(current_round_, task_info.starting_round());
if (current_round_ == task_info.starting_round()) {
DCHECK_EQ(next_task_index_, 0);
}
}
if (!IsCoordinatedRead()) {
std::mt19937 rng;
std::shuffle(tasks_.begin(), tasks_.end(), rng);
}
return absl::OkStatus();
}
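// Sends a ClientHeartbeat to the dispatcher and applies the response:
// iteration-finished state, round-robin blocking (coordinated reads only),
// and the refreshed task list. Preemption errors are logged and retried on
// the next cycle rather than failing the iteration.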
void DataServiceClient::Heartbeat() TF_LOCKS_EXCLUDED(mu_) {
ClientHeartbeatRequest req;
req.set_iteration_client_id(iteration_client_id_);
if (IsCoordinatedRead()) {
mutex_lock l(mu_);
req.set_current_round(current_round_);
if (round_robin_round_limit_.has_value()) {
req.set_blocked_round(round_robin_round_limit_.value());
}
}
{
mutex_lock l(mu_);
double target_processing_time_nsec = ctx_->GetTargetProcessingTimeNsec();
req.set_target_processing_time_nsec(target_processing_time_nsec);
}
ClientHeartbeatResponse resp;
Status s = dispatcher_->ClientHeartbeat(req, resp);
if (!s.ok()) {
if (IsPreemptedError(s)) {
LOG(WARNING)
<< "Failed to heartbeat to dispatcher from iteration client id "
<< iteration_client_id_ << ". Dispatcher address: " << params_.address
<< ". Error: " << s;
return;
}
mutex_lock l(mu_);
status_ = s;
get_next_cv_.notify_all();
}
mutex_lock l(mu_);
UpdateIterationFinished(resp.iteration_finished());
if (resp.optional_block_round_case() ==
ClientHeartbeatResponse::kBlockRound) {
TryBlockRound(resp.block_round());
} else {
round_robin_round_limit_ = std::nullopt;
worker_thread_cv_.notify_all();
}
UpdateTasks(resp);
RecordTFMetrics(resp);
}
void DataServiceClient::UpdateTasks(const ClientHeartbeatResponse& resp)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
absl::flat_hash_map<int64_t, TaskInfo> task_id_to_task;
for (auto& task : resp.task_info()) {
task_id_to_task[task.task_id()] = task;
}
if (iteration_finished_) {
return;
}
int index = 0;
while (index < tasks_.size()) {
std::shared_ptr<Task> task = tasks_[index];
if (task_id_to_task.contains(task->info.task_id())) {
task_id_to_task.erase(task->info.task_id());
++index;
} else {
if (task->end_of_sequence) {
finished_tasks_--;
}
tasks_.erase(tasks_.begin() + index);
if (index < next_task_index_) {
next_task_index_--;
}
if (!tasks_.empty() && next_task_index_ >= tasks_.size()) {
AdvanceTaskIndex();
}
}
}
for (auto& task : resp.task_info()) {
auto it = task_id_to_task.find(task.task_id());
if (it == task_id_to_task.end()) {
continue;
}
if (!ShouldReadFromTask(task)) {
VLOG(3) << "Skipping untargeted worker task " << task.task_id();
should_finish_iteration_ = false;
continue;
}
Status s = AddTask(it->second);
if (!s.ok()) {
status_ = s;
get_next_cv_.notify_all();
break;
}
}
}
bool DataServiceClient::ShouldReadFromTask(const TaskInfo& task) const
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (IsCoordinatedRead()) {
return true;
}
const bool is_local_task =
(LocalWorkers::Get(task.worker_address()) != nullptr);
if (params_.target_workers == TARGET_WORKERS_LOCAL && !is_local_task) {
return false;
}
const bool is_cross_tf_host_read = !is_local_task && IsColocatedTask(task);
if (params_.target_workers == TARGET_WORKERS_AUTO && is_cross_tf_host_read) {
return false;
}
return true;
}
void DataServiceClient::RecordTFMetrics(const ClientHeartbeatResponse& resp)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
for (const auto& task : resp.task_info()) {
if (worker_uids_.contains(task.worker_uid())) {
continue;
}
metrics::RecordTFDataServiceClientIterators(
task.worker_uid(), resp.deployment_mode(), params_.processing_mode,
IsCoordinatedRead());
worker_uids_.insert(task.worker_uid());
}
}
void DataServiceClient::UpdateBufferSize() TF_LOCKS_EXCLUDED(mu_) {
if (params_.max_outstanding_requests == model::kAutotune) {
mutex_lock l(mu_);
int64_t max_outstanding_requests = ctx_->UpdateMaxOutstandingRequests(
max_outstanding_requests_, tasks_.size());
if (max_outstanding_requests > max_outstanding_requests_) {
worker_thread_cv_.notify_all();
}
VLOG(3) << "Updated `max_outstanding_requests` from "
<< max_outstanding_requests_ << " to " << max_outstanding_requests
<< " with " << tasks_.size() << " tasks.";
max_outstanding_requests_ = max_outstanding_requests;
}
}
void DataServiceClient::UpdateWorkerThreads() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
const int64_t max_num_threads =
std::min<int64_t>(tasks_.size(), max_outstanding_requests_);
while (num_running_worker_threads_ < max_num_threads && !cancelled_ &&
status_.ok()) {
num_running_worker_threads_++;
auto done = [this]() {
mutex_lock l(mu_);
num_running_worker_threads_--;
get_next_cv_.notify_all();
};
worker_threads_.push_back(ctx_->StartThread(
"tf-data-service-task_thread", [this, done = std::move(done)]() {
RunWorkerThread(std::move(done));
}));
}
}
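// Worker-thread loop: claim an idle task, fetch one element (pre-enqueuing
// the result slot for coordinated reads so ordering is preserved), then
// release the task. After MAX_ROUND_FALLBACK_TO_BLOCKING * tasks_.size()
// consecutive skips, the thread disables `allow_skip` and falls back to
// blocking reads.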
void DataServiceClient::RunWorkerThread(std::function<void()> done)
TF_LOCKS_EXCLUDED(mu_) {
auto cleanup = gtl::MakeCleanup([done = std::move(done)]() {
done();
VLOG(1) << "Worker thread exiting";
});
VLOG(1) << "Starting worker thread";
std::shared_ptr<Task> task_to_process;
int64_t num_consecutive_skipped = 0;
constexpr int64_t MAX_ROUND_FALLBACK_TO_BLOCKING = 5;
bool allow_skip = true;
while (true) {
std::shared_ptr<Result> result;
{
mutex_lock l(mu_);
if (task_to_process) {
task_to_process->in_use = false;
--outstanding_requests_;
task_to_process = nullptr;
worker_thread_cv_.notify_one();
}
while (true) {
if (cancelled_ || !ShouldWaitForNext()) {
return;
}
task_to_process = GetTaskToProcess();
if (task_to_process) {
VLOG(3) << "Selected a task to process: "
<< task_to_process->info.ShortDebugString();
break;
}
worker_thread_cv_.wait(l);
}
DCHECK(task_to_process != nullptr);
task_to_process->in_use = true;
++outstanding_requests_;
if (IsCoordinatedRead()) {
results_.push(std::make_shared<Result>());
ctx_->RecordBufferEnqueue(results_.back()->element);
result = results_.back();
} else {
result = std::make_shared<Result>();
}
VLOG(3) << "Processing task " << task_to_process->info.task_id();
}
int64_t deadline_micros = kint64max;
Status s = GetElementTraced(task_to_process.get(), deadline_micros,
                                /*enqueue_result=*/!IsCoordinatedRead(),
allow_skip, result);
if (!s.ok()) {
mutex_lock l(mu_);
VLOG(1) << "Failed to get element from worker "
<< task_to_process->info.worker_address() << ": " << s;
task_to_process->in_use = false;
--outstanding_requests_;
status_ = errors::CreateWithUpdatedMessage(
s, absl::StrCat("Failed to get element from worker ",
task_to_process->info.worker_address(), ": ",
s.message()));
get_next_cv_.notify_all();
return;
}
if (!IsCoordinatedRead()) {
if (mutex_lock l(mu_); result->skip) {
num_consecutive_skipped++;
if (num_consecutive_skipped >=
MAX_ROUND_FALLBACK_TO_BLOCKING * tasks_.size()) {
allow_skip = false;
VLOG(1) << "`allow_skip` is turned off. Switching to blocking "
"get element calls to the workers.";
}
} else {
num_consecutive_skipped = 0;
allow_skip = true;
}
}
}
}
bool DataServiceClient::ShouldProcessTask() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (IsCoordinatedRead()) {
return results_.size() < max_outstanding_requests_;
}
return results_.size() + outstanding_requests_ < max_outstanding_requests_;
}
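// Round-robin scan over `tasks_` starting at `next_task_index_`; for
// coordinated reads it also honors the blocked-round limit. Returns nullptr
// when no task is currently eligible.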
std::shared_ptr<DataServiceClient::Task> DataServiceClient::GetTaskToProcess()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!ShouldProcessTask()) {
return nullptr;
}
for (int i = 0; i < tasks_.size(); ++i) {
std::shared_ptr<Task>& task = tasks_[next_task_index_];
if (IsCoordinatedRead() &&
(task->in_use ||
current_round_ >= round_robin_round_limit_.value_or(
std::numeric_limits<int64_t>::max()))) {
VLOG(4) << "No round robin task found. in_use: " << task->in_use
<< ". current_round: " << current_round_
<< ". round_robin_round_limit: "
<< round_robin_round_limit_.value_or(-1);
return nullptr;
}
if (current_round_ < task->info.starting_round() || task->in_use ||
task->end_of_sequence || task->removed) {
VLOG(3) << "Skipping task " << next_task_index_
<< ". starting round: " << task->info.starting_round()
<< ". current round: " << current_round_
<< ". task->in_use: " << task->in_use
<< ". end_of_sequence: " << task->end_of_sequence
<< ". task->removed: " << task->removed;
AdvanceTaskIndex();
continue;
}
task->round = current_round_;
AdvanceTaskIndex();
return task;
}
return nullptr;
}
void DataServiceClient::AdvanceTaskIndex() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
next_task_index_++;
if (next_task_index_ >= tasks_.size()) {
current_round_++;
next_task_index_ = 0;
}
}
Status DataServiceClient::TryGetElement(const Task& task, bool allow_skip,
GetElementResult& result) {
GetElementRequest req;
req.set_task_id(task.info.task_id());
req.set_skipped_previous_round(task.skipped_previous_round);
if (IsCoordinatedRead()) {
req.set_consumer_index(params_.consumer_index.value());
req.set_round_index(task.round);
req.set_allow_skip(true);
} else {
req.set_allow_skip(allow_skip);
}
if (params_.cross_trainer_cache_options) {
req.set_trainer_id(params_.cross_trainer_cache_options->trainer_id());
}
return task.worker->GetElement(req, result);
}
void DataServiceClient::ProcessGetElementResponse(
bool enqueue_result, GetElementResult& get_element_result,
std::shared_ptr<Result> result, Task& task) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
result->ready = true;
result->end_of_sequence = get_element_result.end_of_sequence;
result->skip = get_element_result.skip;
if (!get_element_result.end_of_sequence && !get_element_result.skip) {
task.skipped_previous_round = false;
result->element = std::move(get_element_result.components);
result->element_index = get_element_result.element_index;
result->task_id = task.info.task_id();
} else if (get_element_result.skip) {
task.skipped_previous_round = true;
} else {
task.end_of_sequence = true;
finished_tasks_++;
}
if (enqueue_result && !result->end_of_sequence && !result->skip) {
ctx_->RecordBufferEnqueue(result->element);
results_.push(std::move(result));
}
get_next_cv_.notify_all();
}
Status DataServiceClient::GetElementTraced(Task* task, int64_t deadline_micros,
bool enqueue_result, bool allow_skip,
std::shared_ptr<Result> result)
TF_LOCKS_EXCLUDED(mu_) {
VLOG(3) << "Getting an element for task id " << task->info.task_id();
tsl::profiler::TraceMe activity("GetDataServiceElement",
tsl::profiler::TraceMeLevel::kInfo);
activity.AppendMetadata([&]() {
return tsl::profiler::TraceMeEncode(
{{"address", task->info.worker_address()}});
});
if (IsCoordinatedRead()) {
VLOG(3) << "Requesting element from consumer index "
<< params_.consumer_index.value() << ", round " << task->round;
activity.AppendMetadata([&]() {
return tsl::profiler::TraceMeEncode(
{{"consumer_index", params_.consumer_index.value()},
{"round_index", task->round}});
});
}
Status s =
GetElement(task, deadline_micros, enqueue_result, allow_skip, result);
mutex_lock l(mu_);
VLOG(3) << "Got an element for task id " << task->info.task_id();
return s;
}
Status DataServiceClient::MaybeRemoveTask(Task& task, int64_t deadline_micros,
Result& result)
TF_LOCKS_EXCLUDED(mu_) {
bool removed;
VLOG(1) << "Requesting task removal for worker " << task.info.worker_address()
<< " in round " << task.round;
TF_RETURN_IF_ERROR(grpc_util::Retry(
[&] {
return dispatcher_->MaybeRemoveTask(task.info.task_id(),
params_.consumer_index.value(),
task.round, removed);
},
[&] {
mutex_lock l(mu_);
return !cancelled_;
},
"request task removal ", deadline_micros));
if (removed) {
mutex_lock l(mu_);
task.removed = true;
result.ready = true;
result.skip = true;
get_next_cv_.notify_all();
return absl::OkStatus();
}
VLOG(1) << "Failed to remove task for worker " << task.info.worker_address();
return absl::OkStatus();
}
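// Retries TryGetElement with exponential backoff until success, deadline
// expiry, or cancellation. A non-preemption failure on an alternative data
// transfer protocol rebuilds the task's worker client on grpc and retries.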
Status DataServiceClient::GetElement(Task* task, int64_t deadline_micros,
bool enqueue_result, bool allow_skip,
std::shared_ptr<Result> result)
TF_LOCKS_EXCLUDED(mu_) {
GetElementResult get_element_result;
while (true) {
Status s = TryGetElement(*task, allow_skip, get_element_result);
if (s.ok()) {
task->num_retries = 0;
break;
}
if (!IsPreemptedError(s)) {
if (task->worker->GetDataTransferProtocol() == kGrpcTransferProtocol ||
task->worker->GetDataTransferProtocol() == kLocalTransferProtocol) {
return s;
}
LOG(ERROR) << "Failed to use alternative data transfer protocol '"
<< task->worker->GetDataTransferProtocol() << "' for worker '"
<< task->info.worker_address()
<< "'; falling back to grpc. Original error: " << s;
metrics::RecordTFDataServiceDataTransferProtocolError(
task->worker->GetDataTransferProtocol(),
static_cast<error::Code>(s.raw_code()), std::string(s.message()));
mutex_lock l(mu_);
TF_ASSIGN_OR_RETURN(std::unique_ptr<DataServiceWorkerClient> worker,
CreateGrpcWorkerClient(task->info));
task->worker = std::move(worker);
continue;
}
{
mutex_lock l(mu_);
if (cancelled_) {
return errors::Cancelled("DataServiceDataset iterator cancelled");
}
}
int64_t now_micros = Env::Default()->NowMicros();
if (now_micros > deadline_micros) {
return s;
}
if (IsCoordinatedRead() && task->num_retries > 0) {
TF_RETURN_IF_ERROR(MaybeRemoveTask(*task, deadline_micros, *result));
mutex_lock l(mu_);
if (result->skip) {
return absl::OkStatus();
}
}
int64_t backoff_until = std::min(
deadline_micros,
now_micros + absl::ToInt64Microseconds(
tsl::ComputeRetryBackoff(task->num_retries++)));
VLOG(1) << "Failed to get an element from worker "
<< task->info.worker_address() << ": " << s << ". Will retry in "
<< (backoff_until - now_micros) << " microseconds";
Env::Default()->SleepForMicroseconds(backoff_until - now_micros);
if (!IsCoordinatedRead()) {
mutex_lock l(mu_);
result->ready = true;
result->skip = true;
return absl::OkStatus();
}
}
ProcessGetElementResponse(enqueue_result, get_element_result, result, *task);
return absl::OkStatus();
}
bool DataServiceClient::ResultReady() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return !results_.empty() && results_.front()->ready;
}
std::shared_ptr<DataServiceClient::Result> DataServiceClient::PopNextResult()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
std::shared_ptr<Result> result = results_.front();
results_.pop();
ctx_->RecordBufferDequeue(result->element);
return result;
}
bool DataServiceClient::IsCoordinatedRead() const {
return params_.num_consumers.has_value();
}
std::string DataServiceClient::DebugString() const
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return absl::Substitute(
"results_ { size: $0 front.ready: $1 } iteration_finished_: $2 "
"tasks { size: $3 } finished_tasks_: $4 "
"num_running_worker_threads_: $5",
results_.size(), !results_.empty() && results_.front()->ready,
iteration_finished_, tasks_.size(), finished_tasks_,
num_running_worker_threads_);
}
}
} | #include "tensorflow/core/data/service/client/data_service_client.h"
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/time/time.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/data/service/client/common.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/test_cluster.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::data::testing::RangeDataset;
using ::tensorflow::testing::IsOkAndHolds;
using ::tensorflow::testing::StatusIs;
using ::testing::_;
using ::testing::AtLeast;
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::testing::UnorderedElementsAreArray;
DataServiceParams GetDataServiceParams(
const std::string& dataset_id, const std::string& data_service_address,
const ProcessingModeDef::ShardingPolicy sharding_policy) {
DataServiceParams params;
params.dataset_id = dataset_id;
params.processing_mode.set_sharding_policy(sharding_policy);
params.address = data_service_address;
params.protocol = "grpc";
params.data_transfer_protocol = "grpc";
params.job_name = "test_job";
params.repetition = 0;
params.max_outstanding_requests = 100;
params.task_refresh_interval = absl::Milliseconds(100);
return params;
}
std::vector<int64_t> Range(const int64_t range) {
std::vector<int64_t> result;
for (int64_t i = 0; i < range; ++i) {
result.push_back(i);
}
return result;
}
class TestDataServiceContext : public DataServiceContext {
public:
TestDataServiceContext() = default;
~TestDataServiceContext() override = default;
std::unique_ptr<Thread> StartThread(const string& name,
std::function<void()> fn) override {
return absl::WrapUnique(
Env::Default()->StartThread({}, name, std::move(fn)));
}
MOCK_METHOD(void, RecordBufferEnqueue, (const std::vector<Tensor>& element),
(override));
MOCK_METHOD(void, RecordBufferDequeue, (const std::vector<Tensor>& element),
(override));
double GetTargetProcessingTimeNsec() const override { return 1.0e6; }
int64_t UpdateMaxOutstandingRequests(int64_t max_outstanding_requests,
int64_t new_size) override {
return new_size;
}
};
std::unique_ptr<TestDataServiceContext> GetTestDataServiceContext() {
return std::make_unique<TestDataServiceContext>();
}
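// Drains `client` until end_of_sequence, collecting the first scalar
// component of every returned element.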
template <class T>
StatusOr<std::vector<T>> GetResults(DataServiceClient& client) {
std::vector<T> results;
while (true) {
TF_ASSIGN_OR_RETURN(GetNextResult next,
client.GetNext(GetTestDataServiceContext));
if (next.end_of_sequence) {
return results;
}
results.push_back(next.tensors[0].unaligned_flat<T>().data()[0]);
}
return results;
}
template <class T>
StatusOr<T> GetNext(DataServiceClient& client) {
TF_ASSIGN_OR_RETURN(GetNextResult next,
client.GetNext(GetTestDataServiceContext));
if (next.end_of_sequence) {
return errors::OutOfRange(
"The tf.data service has reached the end of sequence");
}
return next.tensors[0].unaligned_flat<T>().data()[0];
}
TEST(DataServiceClientTest, NoSharding) {
TestCluster test_cluster(1);
TF_ASSERT_OK(test_cluster.Initialize());
DatasetClient<int64_t> test_dataset(test_cluster);
TF_ASSERT_OK_AND_ASSIGN(std::string dataset_id,
test_dataset.RegisterDataset(RangeDataset(10)));
DataServiceParams params = GetDataServiceParams(
dataset_id, test_cluster.DispatcherAddress(), ProcessingModeDef::OFF);
DataServiceClient client(params);
  TF_ASSERT_OK(client.Initialize(/*accelerator_device_info=*/nullptr,
                                 /*allocator=*/nullptr));
EXPECT_THAT(GetResults<int64_t>(client),
IsOkAndHolds(ElementsAreArray(Range(10))));
client.Cancel();
}
TEST(DataServiceClientTest, DynamicSharding) {
TestCluster test_cluster(3);
TF_ASSERT_OK(test_cluster.Initialize());
DatasetClient<int64_t> test_dataset(test_cluster);
TF_ASSERT_OK_AND_ASSIGN(std::string dataset_id,
test_dataset.RegisterDataset(RangeDataset(10)));
DataServiceParams params = GetDataServiceParams(
dataset_id, test_cluster.DispatcherAddress(), ProcessingModeDef::DYNAMIC);
DataServiceClient client(params);
  TF_ASSERT_OK(client.Initialize(/*accelerator_device_info=*/nullptr,
                                 /*allocator=*/nullptr));
EXPECT_THAT(GetResults<int64_t>(client),
IsOkAndHolds(UnorderedElementsAreArray(Range(10))));
client.Cancel();
}
TEST(DataServiceClientTest, StaticSharding) {
TestCluster test_cluster(3);
TF_ASSERT_OK(test_cluster.Initialize());
DatasetClient<int64_t> dataset_client(test_cluster);
TF_ASSERT_OK_AND_ASSIGN(std::string dataset_id,
dataset_client.RegisterDataset(RangeDataset(10)));
DataServiceParams params =
GetDataServiceParams(dataset_id, test_cluster.DispatcherAddress(),
ProcessingModeDef::FILE_OR_DATA);
DataServiceClient client(params);
  TF_ASSERT_OK(client.Initialize(/*accelerator_device_info=*/nullptr,
                                 /*allocator=*/nullptr));
EXPECT_THAT(GetResults<int64_t>(client),
IsOkAndHolds(UnorderedElementsAreArray(Range(10))));
client.Cancel();
}
TEST(DataServiceClientTest, RecordBufferEvents) {
TestCluster test_cluster(1);
TF_ASSERT_OK(test_cluster.Initialize());
DatasetClient<int64_t> test_dataset(test_cluster);
TF_ASSERT_OK_AND_ASSIGN(std::string dataset_id,
test_dataset.RegisterDataset(RangeDataset(10)));
DataServiceParams params = GetDataServiceParams(
dataset_id, test_cluster.DispatcherAddress(), ProcessingModeDef::OFF);
DataServiceClient client(params);
  TF_ASSERT_OK(client.Initialize(/*accelerator_device_info=*/nullptr,
                                 /*allocator=*/nullptr));
auto mock_context = std::make_unique<TestDataServiceContext>();
TestDataServiceContext* ctx = mock_context.get();
EXPECT_CALL(*ctx, RecordBufferEnqueue(_)).Times(AtLeast(1));
EXPECT_CALL(*ctx, RecordBufferDequeue(_)).Times(AtLeast(1));
TF_ASSERT_OK_AND_ASSIGN(GetNextResult next, client.GetNext([&mock_context]() {
return std::move(mock_context);
}));
client.Cancel();
}
TEST(DataServiceClientTest, Cancel) {
TestCluster test_cluster(1);
TF_ASSERT_OK(test_cluster.Initialize());
DatasetClient<int64_t> dataset_client(test_cluster);
TF_ASSERT_OK_AND_ASSIGN(std::string dataset_id,
dataset_client.RegisterDataset(RangeDataset(10)));
DataServiceParams params = GetDataServiceParams(
dataset_id, test_cluster.DispatcherAddress(), ProcessingModeDef::OFF);
DataServiceClient client(params);
  TF_ASSERT_OK(client.Initialize(/*accelerator_device_info=*/nullptr,
                                 /*allocator=*/nullptr));
client.Cancel();
EXPECT_THAT(client.GetNext(GetTestDataServiceContext),
StatusIs(error::CANCELLED));
}
TEST(DataServiceClientTest, ValidationError) {
DataServiceParams params = GetDataServiceParams(
"dataset_id", "tf_data_service_address", ProcessingModeDef::OFF);
params.target_workers = TARGET_WORKERS_LOCAL;
DataServiceClient client(params);
EXPECT_THAT(
      client.Initialize(/*accelerator_device_info=*/nullptr,
                        /*allocator=*/nullptr),
StatusIs(
error::INVALID_ARGUMENT,
HasSubstr(
"Local reads require local tf.data workers, but no local worker "
"is found.")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/client/data_service_client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/client/data_service_client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
422bf0f2-be5f-4d65-a40c-dbfaaaace216 | cpp | tensorflow/tensorflow | message_wrappers | tensorflow/core/distributed_runtime/message_wrappers.cc | tensorflow/core/distributed_runtime/message_wrappers_test.cc | #include "tensorflow/core/distributed_runtime/message_wrappers.h"
#include <memory>
#include "absl/status/status.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/named_tensor.pb.h"
namespace tensorflow {
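// Parses `tensor_proto` into `out_tensor`; returns false if the dtype is out
// of range or parsing fails. Used by the wrappers below to validate
// feed/send/recv values.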
bool ParseTensorProtoToTensor(const TensorProto& tensor_proto,
Tensor* out_tensor) {
if (tensor_proto.dtype() > 0 && tensor_proto.dtype() <= DataType_MAX) {
Tensor parsed(tensor_proto.dtype());
if (parsed.FromProto(cpu_allocator(), tensor_proto)) {
*out_tensor = parsed;
return true;
}
}
return false;
}
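// The three RunStepRequest wrappers below expose one interface:
// InMemoryRunStepRequest keeps feed tensors unserialized for in-process use,
// MutableProtoRunStepRequest owns a mutable RunStepRequest proto, and
// ProtoRunStepRequest is a read-only view over a borrowed proto.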
const string& InMemoryRunStepRequest::session_handle() const {
return session_handle_;
}
void InMemoryRunStepRequest::set_session_handle(const string& handle) {
session_handle_ = handle;
}
const string& InMemoryRunStepRequest::partial_run_handle() const {
return partial_run_handle_;
}
void InMemoryRunStepRequest::set_partial_run_handle(const string& handle) {
partial_run_handle_ = handle;
}
size_t InMemoryRunStepRequest::num_feeds() const { return feeds_.size(); }
const string& InMemoryRunStepRequest::feed_name(size_t i) const {
return feeds_[i].first;
}
Status InMemoryRunStepRequest::FeedValue(size_t i, Tensor* out_tensor) const {
*out_tensor = feeds_[i].second;
return absl::OkStatus();
}
Status InMemoryRunStepRequest::FeedValue(size_t i,
TensorProto* out_tensor) const {
feeds_[i].second.AsProtoTensorContent(out_tensor);
return absl::OkStatus();
}
void InMemoryRunStepRequest::add_feed(const string& name, const Tensor& value) {
feeds_.emplace_back(name, value);
}
size_t InMemoryRunStepRequest::num_fetches() const { return fetches_.size(); }
const string& InMemoryRunStepRequest::fetch_name(size_t i) const {
return fetches_[i];
}
void InMemoryRunStepRequest::add_fetch(const string& name) {
fetches_.push_back(name);
}
size_t InMemoryRunStepRequest::num_targets() const { return targets_.size(); }
const string& InMemoryRunStepRequest::target_name(size_t i) const {
return targets_[i];
}
void InMemoryRunStepRequest::add_target(const string& name) {
targets_.push_back(name);
}
const RunOptions& InMemoryRunStepRequest::options() const { return options_; }
RunOptions* InMemoryRunStepRequest::mutable_options() { return &options_; }
bool InMemoryRunStepRequest::store_errors_in_response_body() const {
return store_errors_in_response_body_;
}
int64_t InMemoryRunStepRequest::request_id() const {
  return 0;  // Request ids are not tracked for the in-memory (local) version.
}
void InMemoryRunStepRequest::set_store_errors_in_response_body(
bool store_errors) {
store_errors_in_response_body_ = store_errors;
}
string InMemoryRunStepRequest::DebugString() const {
return ToProto().DebugString();
}
const RunStepRequest& InMemoryRunStepRequest::ToProto() const {
if (!proto_version_) {
proto_version_ = std::make_unique<RunStepRequest>();
proto_version_->set_session_handle(session_handle());
proto_version_->set_partial_run_handle(partial_run_handle());
for (size_t i = 0; i < num_feeds(); ++i) {
auto feed = proto_version_->add_feed();
feed->set_name(feed_name(i));
feeds_[i].second.AsProtoTensorContent(feed->mutable_tensor());
}
for (size_t i = 0; i < num_fetches(); ++i) {
proto_version_->add_fetch(fetch_name(i));
}
for (size_t i = 0; i < num_targets(); ++i) {
proto_version_->add_target(target_name(i));
}
*proto_version_->mutable_options() = options();
}
return *proto_version_;
}
const string& MutableProtoRunStepRequest::session_handle() const {
return request_.session_handle();
}
void MutableProtoRunStepRequest::set_session_handle(const string& handle) {
request_.set_session_handle(handle);
}
const string& MutableProtoRunStepRequest::partial_run_handle() const {
return request_.partial_run_handle();
}
void MutableProtoRunStepRequest::set_partial_run_handle(const string& handle) {
request_.set_partial_run_handle(handle);
}
size_t MutableProtoRunStepRequest::num_feeds() const {
return request_.feed_size();
}
const string& MutableProtoRunStepRequest::feed_name(size_t i) const {
return request_.feed(i).name();
}
Status MutableProtoRunStepRequest::FeedValue(size_t i,
Tensor* out_tensor) const {
if (!ParseTensorProtoToTensor(request_.feed(i).tensor(), out_tensor)) {
return errors::InvalidArgument("Invalid TensorProto for feed value ", i);
} else {
return absl::OkStatus();
}
}
Status MutableProtoRunStepRequest::FeedValue(size_t i,
TensorProto* out_tensor) const {
*out_tensor = request_.feed(i).tensor();
return absl::OkStatus();
}
void MutableProtoRunStepRequest::add_feed(const string& name,
const Tensor& value) {
NamedTensorProto* feed = request_.add_feed();
feed->set_name(name);
TensorProto* value_proto = feed->mutable_tensor();
value.AsProtoTensorContent(value_proto);
}
size_t MutableProtoRunStepRequest::num_fetches() const {
return request_.fetch_size();
}
const string& MutableProtoRunStepRequest::fetch_name(size_t i) const {
return request_.fetch(i);
}
void MutableProtoRunStepRequest::add_fetch(const string& name) {
request_.add_fetch(name);
}
size_t MutableProtoRunStepRequest::num_targets() const {
return request_.target_size();
}
const string& MutableProtoRunStepRequest::target_name(size_t i) const {
return request_.target(i);
}
void MutableProtoRunStepRequest::add_target(const string& name) {
request_.add_target(name);
}
const RunOptions& MutableProtoRunStepRequest::options() const {
return request_.options();
}
RunOptions* MutableProtoRunStepRequest::mutable_options() {
return request_.mutable_options();
}
bool MutableProtoRunStepRequest::store_errors_in_response_body() const {
return request_.store_errors_in_response_body();
}
void MutableProtoRunStepRequest::set_store_errors_in_response_body(
bool store_errors) {
request_.set_store_errors_in_response_body(store_errors);
}
int64_t MutableProtoRunStepRequest::request_id() const {
return request_.request_id();
}
string MutableProtoRunStepRequest::DebugString() const {
return request_.DebugString();
}
const RunStepRequest& MutableProtoRunStepRequest::ToProto() const {
return request_;
}
ProtoRunStepRequest::ProtoRunStepRequest(const RunStepRequest* request)
: request_(request) {}
const string& ProtoRunStepRequest::session_handle() const {
return request_->session_handle();
}
const string& ProtoRunStepRequest::partial_run_handle() const {
return request_->partial_run_handle();
}
size_t ProtoRunStepRequest::num_feeds() const { return request_->feed_size(); }
const string& ProtoRunStepRequest::feed_name(size_t i) const {
return request_->feed(i).name();
}
Status ProtoRunStepRequest::FeedValue(size_t i, Tensor* out_tensor) const {
if (!ParseTensorProtoToTensor(request_->feed(i).tensor(), out_tensor)) {
return errors::InvalidArgument("Invalid TensorProto for feed value ", i);
} else {
return absl::OkStatus();
}
}
Status ProtoRunStepRequest::FeedValue(size_t i, TensorProto* out_tensor) const {
*out_tensor = request_->feed(i).tensor();
return absl::OkStatus();
}
size_t ProtoRunStepRequest::num_fetches() const {
return request_->fetch_size();
}
const string& ProtoRunStepRequest::fetch_name(size_t i) const {
return request_->fetch(i);
}
size_t ProtoRunStepRequest::num_targets() const {
return request_->target_size();
}
const string& ProtoRunStepRequest::target_name(size_t i) const {
return request_->target(i);
}
const RunOptions& ProtoRunStepRequest::options() const {
return request_->options();
}
bool ProtoRunStepRequest::store_errors_in_response_body() const {
return request_->store_errors_in_response_body();
}
int64_t ProtoRunStepRequest::request_id() const {
return request_->request_id();
}
string ProtoRunStepRequest::DebugString() const {
return request_->DebugString();
}
const RunStepRequest& ProtoRunStepRequest::ToProto() const { return *request_; }
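// The RunGraphRequest wrappers mirror the RunStepRequest wrappers above:
// in-memory, owned mutable proto, and borrowed read-only proto.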
const string& InMemoryRunGraphRequest::session_handle() const {
return session_handle_;
}
bool InMemoryRunGraphRequest::create_worker_session_called() const {
return create_worker_session_called_;
}
void InMemoryRunGraphRequest::set_session_handle(const string& handle) {
session_handle_ = handle;
}
void InMemoryRunGraphRequest::set_create_worker_session_called(bool called) {
create_worker_session_called_ = called;
}
const string& InMemoryRunGraphRequest::graph_handle() const {
return graph_handle_;
}
void InMemoryRunGraphRequest::set_graph_handle(const string& handle) {
graph_handle_ = handle;
}
int64_t InMemoryRunGraphRequest::step_id() const { return step_id_; }
void InMemoryRunGraphRequest::set_step_id(int64_t step_id) {
step_id_ = step_id;
}
const ExecutorOpts& InMemoryRunGraphRequest::exec_opts() const {
return exec_opts_;
}
ExecutorOpts* InMemoryRunGraphRequest::mutable_exec_opts() {
return &exec_opts_;
}
size_t InMemoryRunGraphRequest::num_sends() const { return sends_.size(); }
const string& InMemoryRunGraphRequest::send_key(size_t i) const {
return sends_[i].first;
}
Status InMemoryRunGraphRequest::SendValue(size_t i, Tensor* out_tensor) const {
*out_tensor = sends_[i].second;
return absl::OkStatus();
}
Status InMemoryRunGraphRequest::AddSendFromRunStepRequest(
const RunStepRequestWrapper& run_step_request, size_t i,
const string& send_key) {
Tensor tensor;
TF_RETURN_IF_ERROR(run_step_request.FeedValue(i, &tensor));
sends_.emplace_back(send_key, std::move(tensor));
return absl::OkStatus();
}
Status InMemoryRunGraphRequest::AddSendFromRunCallableRequest(
const RunCallableRequest& run_callable_request, size_t i,
const string& send_key) {
Tensor tensor;
if (!ParseTensorProtoToTensor(run_callable_request.feed(i), &tensor)) {
return errors::InvalidArgument("Invalid TensorProto for feed value ", i);
}
sends_.emplace_back(send_key, std::move(tensor));
return absl::OkStatus();
}
size_t InMemoryRunGraphRequest::num_recvs() const { return recvs_.size(); }
const string& InMemoryRunGraphRequest::recv_key(size_t i) const {
return recvs_[i];
}
void InMemoryRunGraphRequest::add_recv_key(const string& recv_key) {
recvs_.push_back(recv_key);
}
bool InMemoryRunGraphRequest::is_partial() const { return is_partial_; }
void InMemoryRunGraphRequest::set_is_partial(bool is_partial) {
is_partial_ = is_partial;
}
bool InMemoryRunGraphRequest::is_last_partial_run() const {
return is_last_partial_run_;
}
void InMemoryRunGraphRequest::set_is_last_partial_run(
bool is_last_partial_run) {
is_last_partial_run_ = is_last_partial_run;
}
bool InMemoryRunGraphRequest::store_errors_in_response_body() const {
return store_errors_in_response_body_;
}
void InMemoryRunGraphRequest::set_store_errors_in_response_body(
bool store_errors) {
store_errors_in_response_body_ = store_errors;
}
int64_t InMemoryRunGraphRequest::request_id() const { return request_id_; }
void InMemoryRunGraphRequest::set_request_id(int64_t request_id) {
request_id_ = request_id;
}
const RunGraphRequest& InMemoryRunGraphRequest::ToProto() const {
if (!proto_version_) {
proto_version_ = std::make_unique<RunGraphRequest>();
proto_version_->set_session_handle(session_handle());
proto_version_->set_create_worker_session_called(
create_worker_session_called());
proto_version_->set_graph_handle(graph_handle());
proto_version_->set_step_id(step_id());
*proto_version_->mutable_exec_opts() = exec_opts();
for (size_t i = 0; i < num_sends(); ++i) {
auto send = proto_version_->add_send();
send->set_name(send_key(i));
sends_[i].second.AsProtoTensorContent(send->mutable_tensor());
}
for (size_t i = 0; i < num_recvs(); ++i) {
proto_version_->add_recv_key(recv_key(i));
}
proto_version_->set_is_partial(is_partial());
proto_version_->set_is_last_partial_run(is_last_partial_run());
}
proto_version_->set_store_errors_in_response_body(
store_errors_in_response_body_);
proto_version_->set_request_id(request_id_);
return *proto_version_;
}
const string& MutableProtoRunGraphRequest::session_handle() const {
return request_.session_handle();
}
void MutableProtoRunGraphRequest::set_session_handle(const string& handle) {
request_.set_session_handle(handle);
}
bool MutableProtoRunGraphRequest::create_worker_session_called() const {
return request_.create_worker_session_called();
}
void MutableProtoRunGraphRequest::set_create_worker_session_called(
bool called) {
request_.set_create_worker_session_called(called);
}
const string& MutableProtoRunGraphRequest::graph_handle() const {
return request_.graph_handle();
}
void MutableProtoRunGraphRequest::set_graph_handle(const string& handle) {
request_.set_graph_handle(handle);
}
int64_t MutableProtoRunGraphRequest::step_id() const {
return request_.step_id();
}
void MutableProtoRunGraphRequest::set_step_id(int64_t step_id) {
request_.set_step_id(step_id);
}
const ExecutorOpts& MutableProtoRunGraphRequest::exec_opts() const {
return request_.exec_opts();
}
ExecutorOpts* MutableProtoRunGraphRequest::mutable_exec_opts() {
return request_.mutable_exec_opts();
}
size_t MutableProtoRunGraphRequest::num_sends() const {
return request_.send_size();
}
const string& MutableProtoRunGraphRequest::send_key(size_t i) const {
return request_.send(i).name();
}
Status MutableProtoRunGraphRequest::SendValue(size_t i,
Tensor* out_tensor) const {
if (!ParseTensorProtoToTensor(request_.send(i).tensor(), out_tensor)) {
return errors::InvalidArgument("Invalid TensorProto for feed value ", i);
} else {
return absl::OkStatus();
}
}
Status MutableProtoRunGraphRequest::AddSendFromRunStepRequest(
const RunStepRequestWrapper& run_step_request, size_t i,
const string& send_key) {
NamedTensorProto* send = request_.add_send();
send->set_name(send_key);
TF_RETURN_IF_ERROR(run_step_request.FeedValue(i, send->mutable_tensor()));
return absl::OkStatus();
}
Status MutableProtoRunGraphRequest::AddSendFromRunCallableRequest(
const RunCallableRequest& run_callable_request, size_t i,
const string& send_key) {
NamedTensorProto* send = request_.add_send();
send->set_name(send_key);
*send->mutable_tensor() = run_callable_request.feed(i);
return absl::OkStatus();
}
size_t MutableProtoRunGraphRequest::num_recvs() const {
return request_.recv_key_size();
}
const string& MutableProtoRunGraphRequest::recv_key(size_t i) const {
return request_.recv_key(i);
}
void MutableProtoRunGraphRequest::add_recv_key(const string& recv_key) {
request_.add_recv_key(recv_key);
}
bool MutableProtoRunGraphRequest::is_partial() const {
return request_.is_partial();
}
void MutableProtoRunGraphRequest::set_is_partial(bool is_partial) {
request_.set_is_partial(is_partial);
}
bool MutableProtoRunGraphRequest::is_last_partial_run() const {
return request_.is_last_partial_run();
}
void MutableProtoRunGraphRequest::set_is_last_partial_run(
bool is_last_partial_run) {
request_.set_is_last_partial_run(is_last_partial_run);
}
bool MutableProtoRunGraphRequest::store_errors_in_response_body() const {
return request_.store_errors_in_response_body();
}
void MutableProtoRunGraphRequest::set_store_errors_in_response_body(
bool store_errors) {
request_.set_store_errors_in_response_body(store_errors);
}
int64_t MutableProtoRunGraphRequest::request_id() const {
return request_.request_id();
}
void MutableProtoRunGraphRequest::set_request_id(int64_t request_id) {
request_.set_request_id(request_id);
}
const RunGraphRequest& MutableProtoRunGraphRequest::ToProto() const {
return request_;
}
ProtoRunGraphRequest::ProtoRunGraphRequest(const RunGraphRequest* request)
: request_(request) {}
const string& ProtoRunGraphRequest::session_handle() const {
return request_->session_handle();
}
bool ProtoRunGraphRequest::create_worker_session_called() const {
return request_->create_worker_session_called();
}
const string& ProtoRunGraphRequest::graph_handle() const {
return request_->graph_handle();
}
int64_t ProtoRunGraphRequest::step_id() const { return request_->step_id(); }
const ExecutorOpts& ProtoRunGraphRequest::exec_opts() const {
return request_->exec_opts();
}
size_t ProtoRunGraphRequest::num_sends() const { return request_->send_size(); }
const string& ProtoRunGraphRequest::send_key(size_t i) const {
return request_->send(i).name();
}
Status ProtoRunGraphRequest::SendValue(size_t i, Tensor* out_tensor) const {
if (!ParseTensorProtoToTensor(request_->send(i).tensor(), out_tensor)) {
return errors::InvalidArgument("Invalid TensorProto for feed value ", i);
} else {
return absl::OkStatus();
}
}
size_t ProtoRunGraphRequest::num_recvs() const {
return request_->recv_key_size();
}
const string& ProtoRunGraphRequest::recv_key(size_t i) const {
return request_->recv_key(i);
}
bool ProtoRunGraphRequest::is_partial() const { return request_->is_partial(); }
bool ProtoRunGraphRequest::is_last_partial_run() const {
return request_->is_last_partial_run();
}
bool ProtoRunGraphRequest::store_errors_in_response_body() const {
return request_->store_errors_in_response_body();
}
int64_t ProtoRunGraphRequest::request_id() const {
return request_->request_id();
}
const RunGraphRequest& ProtoRunGraphRequest::ToProto() const {
return *request_;
}
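// RunGraphResponse wrappers: InMemoryRunGraphResponse keeps recv tensors
// unserialized (and cannot produce a mutable proto); the Owned/NonOwned
// variants write into a RunGraphResponse they own or borrow, respectively.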
size_t InMemoryRunGraphResponse::num_recvs() const { return recvs_.size(); }
const string& InMemoryRunGraphResponse::recv_key(size_t i) const {
return recvs_[i].first;
}
Status InMemoryRunGraphResponse::RecvValue(size_t i, TensorProto* out_tensor) {
recvs_[i].second.AsProtoTensorContent(out_tensor);
return absl::OkStatus();
}
Status InMemoryRunGraphResponse::RecvValue(size_t i, Tensor* out_tensor) {
*out_tensor = recvs_[i].second;
return absl::OkStatus();
}
void InMemoryRunGraphResponse::AddRecv(const string& key, const Tensor& value) {
recvs_.emplace_back(key, value);
}
StepStats* InMemoryRunGraphResponse::mutable_step_stats() {
return &step_stats_;
}
CostGraphDef* InMemoryRunGraphResponse::mutable_cost_graph() {
return &cost_graph_;
}
Status InMemoryRunGraphResponse::status() const { return status_; }
errors::Code InMemoryRunGraphResponse::status_code() const {
return static_cast<errors::Code>(status_.code());
}
void InMemoryRunGraphResponse::set_status(const Status& status) {
status_ = status;
}
RunGraphResponse* InMemoryRunGraphResponse::get_proto() {
LOG(FATAL) << "Cannot get a mutable protobuf for an InMemoryRunGraphResponse";
return nullptr;
}
size_t InMemoryRunGraphResponse::num_partition_graphs() const {
return partition_graphs_.size();
}
GraphDef* InMemoryRunGraphResponse::mutable_partition_graph(size_t i) {
return &partition_graphs_[i];
}
void InMemoryRunGraphResponse::AddPartitionGraph(
const GraphDef& partition_graph) {
partition_graphs_.push_back(partition_graph);
}
size_t OwnedProtoRunGraphResponse::num_recvs() const {
return response_.recv_size();
}
const string& OwnedProtoRunGraphResponse::recv_key(size_t i) const {
return response_.recv(i).name();
}
Status OwnedProtoRunGraphResponse::RecvValue(size_t i,
TensorProto* out_tensor) {
out_tensor->Swap(response_.mutable_recv(i)->mutable_tensor());
return absl::OkStatus();
}
Status OwnedProtoRunGraphResponse::RecvValue(size_t i, Tensor* out_tensor) {
if (!ParseTensorProtoToTensor(response_.recv(i).tensor(), out_tensor)) {
return errors::InvalidArgument("Invalid TensorProto for recv value ", i);
} else {
return absl::OkStatus();
}
}
void OwnedProtoRunGraphResponse::AddRecv(const string& key,
const Tensor& value) {
NamedTensorProto* recv = response_.add_recv();
recv->set_name(key);
TensorProto* value_proto = recv->mutable_tensor();
value.AsProtoTensorContent(value_proto);
}
StepStats* OwnedProtoRunGraphResponse::mutable_step_stats() {
return response_.mutable_step_stats();
}
CostGraphDef* OwnedProtoRunGraphResponse::mutable_cost_graph() {
return response_.mutable_cost_graph();
}
Status OwnedProtoRunGraphResponse::status() const {
return Status(static_cast<absl::StatusCode>(response_.status_code()),
response_.status_error_message());
}
absl::StatusCode OwnedProtoRunGraphResponse::status_code() const {
return static_cast<absl::StatusCode>(response_.status_code());
}
void OwnedProtoRunGraphResponse::set_status(const Status& status) {
response_.set_status_code(static_cast<tsl::error::Code>(status.code()));
response_.set_status_error_message(absl::StatusMessageAsCStr(status));
}
RunGraphResponse* OwnedProtoRunGraphResponse::get_proto() { return &response_; }
size_t OwnedProtoRunGraphResponse::num_partition_graphs() const {
return response_.partition_graph_size();
}
GraphDef* OwnedProtoRunGraphResponse::mutable_partition_graph(size_t i) {
return response_.mutable_partition_graph(i);
}
void OwnedProtoRunGraphResponse::AddPartitionGraph(
const GraphDef& partition_graph) {
GraphDef* graph_def = response_.mutable_partition_graph()->Add();
*graph_def = partition_graph;
}
NonOwnedProtoRunGraphResponse::NonOwnedProtoRunGraphResponse(
RunGraphResponse* response)
: response_(response) {}
size_t NonOwnedProtoRunGraphResponse::num_recvs() const {
return response_->recv_size();
}
const string& NonOwnedProtoRunGraphResponse::recv_key(size_t i) const {
return response_->recv(i).name();
}
Status NonOwnedProtoRunGraphResponse::RecvValue(size_t i,
TensorProto* out_tensor) {
out_tensor->Swap(response_->mutable_recv(i)->mutable_tensor());
return absl::OkStatus();
}
Status NonOwnedProtoRunGraphResponse::RecvValue(size_t i, Tensor* out_tensor) {
if (!ParseTensorProtoToTensor(response_->recv(i).tensor(), out_tensor)) {
return errors::InvalidArgument("Invalid TensorProto for recv value ", i);
} else {
return absl::OkStatus();
}
}
void NonOwnedProtoRunGraphResponse::AddRecv(const string& key,
const Tensor& value) {
NamedTensorProto* recv = response_->add_recv();
recv->set_name(key);
TensorProto* value_proto = recv->mutable_tensor();
value.AsProtoTensorContent(value_proto);
}
StepStats* NonOwnedProtoRunGraphResponse::mutable_step_stats() {
return response_->mutable_step_stats();
}
CostGraphDef* NonOwnedProtoRunGraphResponse::mutable_cost_graph() {
return response_->mutable_cost_graph();
}
Status NonOwnedProtoRunGraphResponse::status() const {
return Status(static_cast<absl::StatusCode>(response_->status_code()),
response_->status_error_message());
}
absl::StatusCode NonOwnedProtoRunGraphResponse::status_code() const {
return static_cast<absl::StatusCode>(response_->status_code());
}
void NonOwnedProtoRunGraphResponse::set_status(const Status& status) {
response_->set_status_code(static_cast<tsl::error::Code>(status.code()));
response_->set_status_error_message(absl::StatusMessageAsCStr(status));
}
RunGraphResponse* NonOwnedProtoRunGraphResponse::get_proto() {
return response_;
}
size_t NonOwnedProtoRunGraphResponse::num_partition_graphs() const {
return response_->partition_graph_size();
}
GraphDef* NonOwnedProtoRunGraphResponse::mutable_partition_graph(size_t i) {
return response_->mutable_partition_graph(i);
}
void NonOwnedProtoRunGraphResponse::AddPartitionGraph(
const GraphDef& partition_graph) {
GraphDef* graph_def = response_->add_partition_graph();
*graph_def = partition_graph;
}
MutableRunStepResponseWrapper::~MutableRunStepResponseWrapper() {}
size_t InMemoryRunStepResponse::num_tensors() const { return tensors_.size(); }
const string& InMemoryRunStepResponse::tensor_name(size_t i) const {
return tensors_[i].first;
}
Status InMemoryRunStepResponse::TensorValue(size_t i,
Tensor* out_tensor) const {
*out_tensor = tensors_[i].second;
return absl::OkStatus();
}
const RunMetadata& InMemoryRunStepResponse::metadata() const {
return metadata_;
}
Status InMemoryRunStepResponse::AddTensorFromRunGraphResponse(
const string& name, MutableRunGraphResponseWrapper* wrapper, size_t i) {
Tensor tensor;
TF_RETURN_IF_ERROR(wrapper->RecvValue(i, &tensor));
tensors_.emplace_back(name, tensor);
return absl::OkStatus();
}
RunMetadata* InMemoryRunStepResponse::mutable_metadata() { return &metadata_; }
Status InMemoryRunStepResponse::status() const { return status_; }
errors::Code InMemoryRunStepResponse::status_code() const {
return static_cast<errors::Code>(status_.code());
}
void InMemoryRunStepResponse::set_status(const Status& status) {
status_ = status;
}
RunStepResponse* InMemoryRunStepResponse::get_proto() {
LOG(FATAL) << "Cannot get a mutable protobuf for an InMemoryRunStepResponse";
return nullptr;
}
size_t OwnedProtoRunStepResponse::num_tensors() const {
return response_.tensor_size();
}
const string& OwnedProtoRunStepResponse::tensor_name(size_t i) const {
return response_.tensor(i).name();
}
Status OwnedProtoRunStepResponse::TensorValue(size_t i,
Tensor* out_tensor) const {
if (!ParseTensorProtoToTensor(response_.tensor(i).tensor(), out_tensor)) {
return errors::InvalidArgument("Invalid TensorProto for fetch value ", i);
} else {
return absl::OkStatus();
}
}
const RunMetadata& OwnedProtoRunStepResponse::metadata() const {
return response_.metadata();
}
Status OwnedProtoRunStepResponse::AddTensorFromRunGraphResponse(
const string& name, MutableRunGraphResponseWrapper* run_graph_response,
size_t i) {
NamedTensorProto* response_tensor = response_.add_tensor();
response_tensor->set_name(name);
return run_graph_response->RecvValue(i, response_tensor->mutable_tensor());
}
RunMetadata* OwnedProtoRunStepResponse::mutable_metadata() {
return response_.mutable_metadata();
}
Status OwnedProtoRunStepResponse::status() const {
return Status(static_cast<absl::StatusCode>(response_.status_code()),
response_.status_error_message());
}
absl::StatusCode OwnedProtoRunStepResponse::status_code() const {
return static_cast<absl::StatusCode>(response_.status_code());
}
void OwnedProtoRunStepResponse::set_status(const Status& status) {
response_.set_status_code(static_cast<tsl::error::Code>(status.code()));
response_.set_status_error_message(absl::StatusMessageAsCStr(status));
}
RunStepResponse* OwnedProtoRunStepResponse::get_proto() { return &response_; }
NonOwnedProtoRunStepResponse::NonOwnedProtoRunStepResponse(
RunStepResponse* response)
: response_(response) {}
size_t NonOwnedProtoRunStepResponse::num_tensors() const {
return response_->tensor_size();
}
const string& NonOwnedProtoRunStepResponse::tensor_name(size_t i) const {
return response_->tensor(i).name();
}
Status NonOwnedProtoRunStepResponse::TensorValue(size_t i,
Tensor* out_tensor) const {
if (!ParseTensorProtoToTensor(response_->tensor(i).tensor(), out_tensor)) {
return errors::InvalidArgument("Invalid TensorProto for fetch value ", i);
} else {
return absl::OkStatus();
}
}
const RunMetadata& NonOwnedProtoRunStepResponse::metadata() const {
return response_->metadata();
}
Status NonOwnedProtoRunStepResponse::AddTensorFromRunGraphResponse(
const string& name, MutableRunGraphResponseWrapper* run_graph_response,
size_t i) {
NamedTensorProto* response_tensor = response_->add_tensor();
response_tensor->set_name(name);
return run_graph_response->RecvValue(i, response_tensor->mutable_tensor());
}
RunMetadata* NonOwnedProtoRunStepResponse::mutable_metadata() {
return response_->mutable_metadata();
}
Status NonOwnedProtoRunStepResponse::status() const {
return Status(static_cast<absl::StatusCode>(response_->status_code()),
response_->status_error_message());
}
absl::StatusCode NonOwnedProtoRunStepResponse::status_code() const {
return static_cast<absl::StatusCode>(response_->status_code());
}
void NonOwnedProtoRunStepResponse::set_status(const Status& status) {
response_->set_status_code(static_cast<tsl::error::Code>(status.code()));
response_->set_status_error_message(absl::StatusMessageAsCStr(status));
}
RunStepResponse* NonOwnedProtoRunStepResponse::get_proto() { return response_; }
} | #include "tensorflow/core/distributed_runtime/message_wrappers.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace tensorflow {
namespace {
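// The helpers below build a request/response through one wrapper
// implementation and then verify it through the others, checking that the
// in-memory and proto-backed wrappers round-trip identically.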
Tensor TensorA() {
Tensor a_tensor(DT_INT32, TensorShape({2, 2}));
test::FillValues<int32>(&a_tensor, {3, 2, -1, 0});
return a_tensor;
}
Tensor TensorB() {
Tensor b_tensor(DT_INT32, TensorShape({1, 2}));
test::FillValues<int32>(&b_tensor, {1, 2});
return b_tensor;
}
void BuildRunStepRequest(MutableRunStepRequestWrapper* request) {
request->set_session_handle("handle");
request->set_partial_run_handle("partial_handle");
request->add_feed("feed_a:0", TensorA());
request->add_feed("feed_b:0", TensorB());
request->add_fetch("fetch_x:0");
request->add_fetch("fetch_y:0");
request->add_target("target_i");
request->add_target("target_j");
request->mutable_options()->set_timeout_in_ms(37);
}
void CheckRunStepRequest(const RunStepRequestWrapper& request) {
EXPECT_EQ("handle", request.session_handle());
EXPECT_EQ("partial_handle", request.partial_run_handle());
EXPECT_EQ(2, request.num_feeds());
EXPECT_EQ("feed_a:0", request.feed_name(0));
EXPECT_EQ("feed_b:0", request.feed_name(1));
Tensor val;
TF_EXPECT_OK(request.FeedValue(0, &val));
test::ExpectTensorEqual<int32>(TensorA(), val);
TF_EXPECT_OK(request.FeedValue(1, &val));
test::ExpectTensorEqual<int32>(TensorB(), val);
EXPECT_EQ(2, request.num_fetches());
EXPECT_EQ("fetch_x:0", request.fetch_name(0));
EXPECT_EQ("fetch_y:0", request.fetch_name(1));
EXPECT_EQ("target_i", request.target_name(0));
EXPECT_EQ("target_j", request.target_name(1));
EXPECT_EQ(37, request.options().timeout_in_ms());
}
void BuildRunGraphRequest(const RunStepRequestWrapper& run_step_request,
MutableRunGraphRequestWrapper* run_graph_request) {
run_graph_request->set_graph_handle("graph_handle");
run_graph_request->set_step_id(13);
run_graph_request->mutable_exec_opts()->set_record_timeline(true);
TF_EXPECT_OK(run_graph_request->AddSendFromRunStepRequest(run_step_request, 0,
"send_0"));
TF_EXPECT_OK(run_graph_request->AddSendFromRunStepRequest(run_step_request, 1,
"send_1"));
run_graph_request->add_recv_key("recv_2");
run_graph_request->add_recv_key("recv_3");
run_graph_request->set_is_partial(true);
}
void CheckRunGraphRequest(const RunGraphRequestWrapper& request) {
EXPECT_EQ("graph_handle", request.graph_handle());
EXPECT_EQ(13, request.step_id());
EXPECT_FALSE(request.exec_opts().record_costs());
EXPECT_TRUE(request.exec_opts().record_timeline());
EXPECT_FALSE(request.exec_opts().record_partition_graphs());
EXPECT_EQ(2, request.num_sends());
Tensor val;
TF_EXPECT_OK(request.SendValue(0, &val));
test::ExpectTensorEqual<int32>(TensorA(), val);
TF_EXPECT_OK(request.SendValue(1, &val));
test::ExpectTensorEqual<int32>(TensorB(), val);
EXPECT_TRUE(request.is_partial());
EXPECT_FALSE(request.is_last_partial_run());
}
void BuildRunGraphResponse(MutableRunGraphResponseWrapper* run_graph_response) {
run_graph_response->AddRecv("recv_2", TensorA());
run_graph_response->AddRecv("recv_3", TensorB());
run_graph_response->mutable_step_stats()->add_dev_stats()->set_device(
"/cpu:0");
run_graph_response->mutable_cost_graph()->add_node()->set_name("cost_node");
GraphDef graph_def;
graph_def.mutable_versions()->set_producer(1234);
graph_def.mutable_versions()->set_min_consumer(1234);
run_graph_response->AddPartitionGraph(graph_def);
}
void CheckRunGraphResponse(MutableRunGraphResponseWrapper* response) {
ASSERT_EQ(2, response->num_recvs());
EXPECT_EQ("recv_2", response->recv_key(0));
EXPECT_EQ("recv_3", response->recv_key(1));
Tensor val;
TF_EXPECT_OK(response->RecvValue(0, &val));
test::ExpectTensorEqual<int32>(TensorA(), val);
TF_EXPECT_OK(response->RecvValue(1, &val));
test::ExpectTensorEqual<int32>(TensorB(), val);
ASSERT_EQ(1, response->mutable_step_stats()->dev_stats_size());
EXPECT_EQ("/cpu:0", response->mutable_step_stats()->dev_stats(0).device());
ASSERT_EQ(1, response->mutable_cost_graph()->node_size());
EXPECT_EQ("cost_node", response->mutable_cost_graph()->node(0).name());
ASSERT_EQ(1, response->num_partition_graphs());
EXPECT_EQ(1234, response->mutable_partition_graph(0)->versions().producer());
EXPECT_EQ(1234,
response->mutable_partition_graph(0)->versions().min_consumer());
}
void BuildRunStepResponse(MutableRunGraphResponseWrapper* run_graph_response,
MutableRunStepResponseWrapper* run_step_response) {
TF_EXPECT_OK(run_step_response->AddTensorFromRunGraphResponse(
"fetch_x:0", run_graph_response, 0));
TF_EXPECT_OK(run_step_response->AddTensorFromRunGraphResponse(
"fetch_y:0", run_graph_response, 1));
*run_step_response->mutable_metadata()->mutable_step_stats() =
*run_graph_response->mutable_step_stats();
protobuf::RepeatedPtrField<GraphDef>* partition_graph_defs =
run_step_response->mutable_metadata()->mutable_partition_graphs();
for (size_t i = 0; i < run_graph_response->num_partition_graphs(); i++) {
partition_graph_defs->Add()->Swap(
run_graph_response->mutable_partition_graph(i));
}
}
void CheckRunStepResponse(const MutableRunStepResponseWrapper& response) {
ASSERT_EQ(2, response.num_tensors());
EXPECT_EQ("fetch_x:0", response.tensor_name(0));
EXPECT_EQ("fetch_y:0", response.tensor_name(1));
Tensor val;
TF_EXPECT_OK(response.TensorValue(0, &val));
test::ExpectTensorEqual<int32>(TensorA(), val);
TF_EXPECT_OK(response.TensorValue(1, &val));
test::ExpectTensorEqual<int32>(TensorB(), val);
ASSERT_EQ(1, response.metadata().step_stats().dev_stats_size());
EXPECT_EQ("/cpu:0", response.metadata().step_stats().dev_stats(0).device());
ASSERT_EQ(1, response.metadata().partition_graphs_size());
EXPECT_EQ(1234,
response.metadata().partition_graphs(0).versions().producer());
EXPECT_EQ(1234,
response.metadata().partition_graphs(0).versions().min_consumer());
}
TEST(MessageWrappers, RunStepRequest_Basic) {
InMemoryRunStepRequest in_memory_request;
BuildRunStepRequest(&in_memory_request);
CheckRunStepRequest(in_memory_request);
MutableProtoRunStepRequest proto_request;
BuildRunStepRequest(&proto_request);
CheckRunStepRequest(proto_request);
CheckRunStepRequest(ProtoRunStepRequest(&in_memory_request.ToProto()));
CheckRunStepRequest(ProtoRunStepRequest(&proto_request.ToProto()));
}
TEST(MessageWrappers, RunGraphRequest_Basic) {
InMemoryRunStepRequest in_memory_run_step_request;
BuildRunStepRequest(&in_memory_run_step_request);
MutableProtoRunStepRequest mutable_proto_run_step_request;
BuildRunStepRequest(&mutable_proto_run_step_request);
ProtoRunStepRequest proto_run_step_request(
&mutable_proto_run_step_request.ToProto());
{
InMemoryRunGraphRequest request;
BuildRunGraphRequest(in_memory_run_step_request, &request);
CheckRunGraphRequest(request);
CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
}
{
InMemoryRunGraphRequest request;
BuildRunGraphRequest(mutable_proto_run_step_request, &request);
CheckRunGraphRequest(request);
CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
}
{
InMemoryRunGraphRequest request;
BuildRunGraphRequest(proto_run_step_request, &request);
CheckRunGraphRequest(request);
CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
}
{
MutableProtoRunGraphRequest request;
BuildRunGraphRequest(in_memory_run_step_request, &request);
CheckRunGraphRequest(request);
CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
}
{
MutableProtoRunGraphRequest request;
BuildRunGraphRequest(mutable_proto_run_step_request, &request);
CheckRunGraphRequest(request);
CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
}
{
MutableProtoRunGraphRequest request;
BuildRunGraphRequest(proto_run_step_request, &request);
CheckRunGraphRequest(request);
CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
}
}
TEST(MessageWrappers, RunGraphResponse_Basic) {
InMemoryRunGraphResponse in_memory_response;
BuildRunGraphResponse(&in_memory_response);
CheckRunGraphResponse(&in_memory_response);
OwnedProtoRunGraphResponse owned_proto_response;
BuildRunGraphResponse(&owned_proto_response);
CheckRunGraphResponse(&owned_proto_response);
RunGraphResponse response_proto;
NonOwnedProtoRunGraphResponse non_owned_proto_response(&response_proto);
BuildRunGraphResponse(&non_owned_proto_response);
CheckRunGraphResponse(&non_owned_proto_response);
}
TEST(MessageWrappers, RunStepResponse_Basic) {
{
InMemoryRunGraphResponse run_graph_response;
BuildRunGraphResponse(&run_graph_response);
InMemoryRunStepResponse response;
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
InMemoryRunGraphResponse run_graph_response;
BuildRunGraphResponse(&run_graph_response);
OwnedProtoRunStepResponse response;
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
InMemoryRunGraphResponse run_graph_response;
BuildRunGraphResponse(&run_graph_response);
RunStepResponse response_proto;
NonOwnedProtoRunStepResponse response(&response_proto);
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
OwnedProtoRunGraphResponse run_graph_response;
BuildRunGraphResponse(&run_graph_response);
InMemoryRunStepResponse response;
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
OwnedProtoRunGraphResponse run_graph_response;
BuildRunGraphResponse(&run_graph_response);
OwnedProtoRunStepResponse response;
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
OwnedProtoRunGraphResponse run_graph_response;
BuildRunGraphResponse(&run_graph_response);
RunStepResponse response_proto;
NonOwnedProtoRunStepResponse response(&response_proto);
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
RunGraphResponse run_graph_response_proto;
NonOwnedProtoRunGraphResponse run_graph_response(&run_graph_response_proto);
BuildRunGraphResponse(&run_graph_response);
InMemoryRunStepResponse response;
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
RunGraphResponse run_graph_response_proto;
NonOwnedProtoRunGraphResponse run_graph_response(&run_graph_response_proto);
BuildRunGraphResponse(&run_graph_response);
OwnedProtoRunStepResponse response;
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
RunGraphResponse run_graph_response_proto;
NonOwnedProtoRunGraphResponse run_graph_response(&run_graph_response_proto);
BuildRunGraphResponse(&run_graph_response);
RunStepResponse response_proto;
NonOwnedProtoRunStepResponse response(&response_proto);
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/message_wrappers.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/message_wrappers_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
029d686d-d1e9-4d27-9854-c4784353d924 | cpp | tensorflow/tensorflow | device_resolver_distributed | tensorflow/core/distributed_runtime/device_resolver_distributed.cc | tensorflow/core/distributed_runtime/device_resolver_distributed_test.cc | #include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
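// The resolver seeds its attribute table with the locally registered devices
// at construction time; attributes for devices on remote tasks are added
// later via UpdateDeviceAttributes() as they are discovered.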
DeviceResolverDistributed::DeviceResolverDistributed(const DeviceMgr* dev_mgr) {
mutex_lock l(mu_);
for (Device* device : dev_mgr->ListDevices()) {
attr_table_[device->name()] = device->attributes();
}
}
Status DeviceResolverDistributed::GetDeviceAttributes(
const string& device, DeviceAttributes* attributes) {
mutex_lock l(mu_);
auto it = attr_table_.find(device);
if (it == attr_table_.end()) {
return errors::NotFound(device, " not found");
}
*attributes = it->second;
return absl::OkStatus();
}
Status DeviceResolverDistributed::GetAllDeviceAttributes(
const string& task, std::vector<DeviceAttributes>* attributes) {
mutex_lock l(mu_);
attributes->clear();
for (const auto& it : attr_table_) {
const string& device_name = it.first;
if (DeviceNameUtils::IsSameAddressSpace(task, device_name)) {
attributes->push_back(it.second);
}
}
if (attributes->empty()) {
return errors::NotFound(task, " not found in the cache");
}
return absl::OkStatus();
}
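// Re-inserting an attribute that already exists is a no-op unless the
// incarnation differs: a changed incarnation means the remote worker
// restarted, invalidating any state negotiated with its old instance, so
// the update is rejected with FailedPrecondition.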
Status DeviceResolverDistributed::UpdateDeviceAttributes(
const std::vector<DeviceAttributes>& attributes) {
mutex_lock l(mu_);
for (const DeviceAttributes& attr : attributes) {
auto item = attr_table_.insert({attr.name(), attr});
auto it = item.first;
bool success = item.second;
if (!success && it->second.incarnation() != attr.incarnation()) {
return errors::FailedPrecondition(
attr.name(),
"exists in cache with a different incarnation. "
"This usually means the remote worker has restarted");
}
}
return absl::OkStatus();
}
} | #include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "absl/memory/memory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/distributed_runtime/test_utils.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace {
using ::testing::Property;
using ::testing::UnorderedElementsAre;
std::unique_ptr<Device> NewDevice(const string& type, const string& name) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr) : Device(nullptr, attr) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
};
DeviceAttributes attr;
attr.set_name(name);
attr.set_device_type(type);
attr.set_incarnation(random::New64());
return std::make_unique<FakeDevice>(attr);
}
class DeviceResDistTest : public ::testing::Test {
protected:
void SetUp() override {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
NewDevice("CPU", "/job:worker/replica:0/task:0/device:CPU:0"));
devices.push_back(
NewDevice("CPU", "/job:worker/replica:0/task:0/device:CPU:1"));
dev_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
dev_resolver_ =
std::make_unique<DeviceResolverDistributed>(dev_mgr_.get());
std::vector<DeviceAttributes> attributes;
attributes.push_back(
NewDevice("CPU", "/job:worker/replica:0/task:1/device:CPU:0")
->attributes());
attributes.push_back(
NewDevice("CPU", "/job:worker/replica:0/task:1/device:CPU:1")
->attributes());
TF_ASSERT_OK(dev_resolver_->UpdateDeviceAttributes(attributes));
}
std::unique_ptr<DeviceMgr> dev_mgr_;
std::unique_ptr<DeviceResolverDistributed> dev_resolver_;
};
TEST_F(DeviceResDistTest, GetDeviceAttributesLocal) {
DeviceAttributes attributes;
TF_ASSERT_OK(dev_resolver_->GetDeviceAttributes(
"/job:worker/replica:0/task:0/device:CPU:0", &attributes));
EXPECT_EQ(attributes.name(), "/job:worker/replica:0/task:0/device:CPU:0");
}
TEST_F(DeviceResDistTest, GetDeviceAttributesLocalUnknown) {
DeviceAttributes attributes;
EXPECT_TRUE(errors::IsNotFound(dev_resolver_->GetDeviceAttributes(
"/job:worker/replica:0/task:0/device:CPU:9", &attributes)));
}
TEST_F(DeviceResDistTest, GetAllDeviceAttributes) {
std::vector<DeviceAttributes> attributes;
TF_ASSERT_OK(dev_resolver_->GetAllDeviceAttributes(
"/job:worker/replica:0/task:0", &attributes));
EXPECT_THAT(attributes,
UnorderedElementsAre(
Property(&DeviceAttributes::name,
"/job:worker/replica:0/task:0/device:CPU:0"),
Property(&DeviceAttributes::name,
"/job:worker/replica:0/task:0/device:CPU:1")));
TF_ASSERT_OK(dev_resolver_->GetAllDeviceAttributes(
"/job:worker/replica:0/task:1", &attributes));
EXPECT_THAT(attributes,
UnorderedElementsAre(
Property(&DeviceAttributes::name,
"/job:worker/replica:0/task:1/device:CPU:0"),
Property(&DeviceAttributes::name,
"/job:worker/replica:0/task:1/device:CPU:1")));
}
TEST_F(DeviceResDistTest, GetAllDeviceAttributesUnknown) {
std::vector<DeviceAttributes> attributes;
EXPECT_TRUE(errors::IsNotFound(dev_resolver_->GetAllDeviceAttributes(
"/job:worker/replica:0/task:3", &attributes)));
}
TEST_F(DeviceResDistTest, UpdateDeviceAttributes) {
std::vector<DeviceAttributes> attributes;
attributes.push_back(
NewDevice("CPU", "/job:worker/replica:0/task:2/device:CPU:0")
->attributes());
attributes.push_back(
NewDevice("CPU", "/job:worker/replica:0/task:2/device:CPU:1")
->attributes());
TF_ASSERT_OK(dev_resolver_->UpdateDeviceAttributes(attributes));
TF_ASSERT_OK(dev_resolver_->GetAllDeviceAttributes(
"/job:worker/replica:0/task:2", &attributes));
EXPECT_THAT(attributes,
UnorderedElementsAre(
Property(&DeviceAttributes::name,
"/job:worker/replica:0/task:2/device:CPU:0"),
Property(&DeviceAttributes::name,
"/job:worker/replica:0/task:2/device:CPU:1")));
TF_ASSERT_OK(dev_resolver_->GetAllDeviceAttributes(
"/job:worker/replica:0/task:0", &attributes));
EXPECT_THAT(attributes,
UnorderedElementsAre(
Property(&DeviceAttributes::name,
"/job:worker/replica:0/task:0/device:CPU:0"),
Property(&DeviceAttributes::name,
"/job:worker/replica:0/task:0/device:CPU:1")));
}
TEST_F(DeviceResDistTest, UpdateDeviceAttributesExisting) {
std::vector<DeviceAttributes> attributes;
TF_ASSERT_OK(dev_resolver_->GetAllDeviceAttributes(
"/job:worker/replica:0/task:0", &attributes));
TF_ASSERT_OK(dev_resolver_->UpdateDeviceAttributes(attributes));
}
TEST_F(DeviceResDistTest, UpdateDeviceAttributesDifferentIncarnation) {
std::vector<DeviceAttributes> attributes;
attributes.push_back(
NewDevice("CPU", "/job:worker/replica:0/task:0/device:CPU:0")
->attributes());
attributes.push_back(
NewDevice("CPU", "/job:worker/replica:0/task:0/device:CPU:1")
->attributes());
EXPECT_TRUE(errors::IsFailedPrecondition(
dev_resolver_->UpdateDeviceAttributes(attributes)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/device_resolver_distributed.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/device_resolver_distributed_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7a63ecfb-0bf9-4b2d-bc5b-7ffc4cb555fa | cpp | tensorflow/tensorflow | recent_request_ids | tensorflow/core/distributed_runtime/recent_request_ids.cc | tensorflow/core/distributed_runtime/recent_request_ids_test.cc | #include "tensorflow/core/distributed_runtime/recent_request_ids.h"
#include <utility>
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
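// Request IDs are deduplicated with a sharded scheme: each shard owns a
// fixed-size circular buffer (the eviction order) plus a hash set (the
// membership test). A request id is assigned to shard (id % num_shards),
// so each shard tracks roughly num_tracked_request_ids / num_shards of the
// most recently seen ids.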
RecentRequestIds::RecentRequestIds(int num_tracked_request_ids, int num_shards)
: index_buckets_(num_shards > 0 ? num_shards : 1) {
  DCHECK_GE(num_tracked_request_ids, num_shards);
const int per_bucket_size = num_tracked_request_ids / index_buckets_.size();
for (auto& bucket : index_buckets_) {
mutex_lock l(bucket.mu);
bucket.circular_buffer.resize(per_bucket_size);
bucket.set.reserve(per_bucket_size);
}
}
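// Returns true if request_id was not already tracked; id 0 is reserved to
// mean "no dedup requested" and always succeeds. On a successful insert the
// oldest id in the shard's ring buffer is evicted to make room.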
bool RecentRequestIds::Insert(int64_t request_id) {
if (request_id == 0) {
return true;
}
const int bucket_index = request_id % index_buckets_.size();
auto& bucket = index_buckets_[bucket_index];
mutex_lock l(bucket.mu);
const bool inserted = bucket.set.insert(request_id).second;
if (!inserted) {
return false;
}
bucket.set.erase(bucket.circular_buffer[bucket.next_index]);
bucket.circular_buffer[bucket.next_index] = request_id;
bucket.next_index = (bucket.next_index + 1) % bucket.circular_buffer.size();
return true;
}
Status RecentRequestIds::TrackUnique(int64_t request_id,
const string& method_name,
const protobuf::Message& request) {
if (Insert(request_id)) {
return absl::OkStatus();
} else {
return errors::Aborted("The same ", method_name,
" request was received twice. ",
request.ShortDebugString());
}
}
} | #include "tensorflow/core/distributed_runtime/recent_request_ids.h"
#include <algorithm>
#include "tensorflow/core/distributed_runtime/request_id.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/worker.pb.h"
namespace tensorflow {
Status TrackUnique(int64_t request_id, RecentRequestIds* recent_request_ids) {
RecvTensorRequest request;
request.set_request_id(request_id);
return recent_request_ids->TrackUnique(request_id, "recent_request_ids_test",
request);
}
TEST(RecentRequestIds, Zero) {
RecentRequestIds recent_request_ids(1);
EXPECT_TRUE(TrackUnique(0, &recent_request_ids).ok());
EXPECT_TRUE(TrackUnique(0, &recent_request_ids).ok());
EXPECT_TRUE(TrackUnique(0, &recent_request_ids).ok());
}
TEST(RecentRequestIds, Unordered) {
RecentRequestIds recent_request_ids(6);
std::vector<int64_t> numbers = {53754, 23351, 164101, 7476,
162432, 130761, 164102};
for (int i = 0; i < 6; ++i) {
TF_EXPECT_OK(TrackUnique(numbers[i], &recent_request_ids));
for (int j = 0; j <= i; ++j) {
EXPECT_FALSE(TrackUnique(numbers[j], &recent_request_ids).ok())
<< "i=" << i << " j=" << j;
}
}
TF_EXPECT_OK(TrackUnique(numbers[6], &recent_request_ids));
for (int i = 1; i < 7; ++i) {
EXPECT_FALSE(TrackUnique(numbers[i], &recent_request_ids).ok())
<< "i=" << i;
}
TF_EXPECT_OK(TrackUnique(numbers[0], &recent_request_ids));
}
void TestOrdered(int num_request_ids, int num_shards) {
RecentRequestIds recent_request_ids(num_request_ids, num_shards);
for (int i = 1; i < 101; ++i) {
TF_EXPECT_OK(TrackUnique(i, &recent_request_ids));
for (int j = std::max(1, i - num_request_ids % num_shards + 1); j <= i;
++j) {
EXPECT_FALSE(TrackUnique(j, &recent_request_ids).ok())
<< "i=" << i << " j=" << j;
}
}
}
TEST(RecentRequestIds, Ordered2Shard1) { TestOrdered(2, 1); }
TEST(RecentRequestIds, Ordered3Shard1) { TestOrdered(3, 1); }
TEST(RecentRequestIds, Ordered4Shard1) { TestOrdered(4, 1); }
TEST(RecentRequestIds, Ordered5Shard1) { TestOrdered(5, 1); }
TEST(RecentRequestIds, Ordered10Shard3) { TestOrdered(10, 3); }
TEST(RecentRequestIds, Ordered11Shard3) { TestOrdered(11, 3); }
TEST(RecentRequestIds, Ordered12Shard4) { TestOrdered(12, 4); }
TEST(RecentRequestIds, Ordered100Shard8) { TestOrdered(100, 8); }
static void BM_TrackUnique(::testing::benchmark::State& state) {
RecentRequestIds recent_request_ids(100000);
RecvTensorRequest request;
for (auto s : state) {
TF_CHECK_OK(recent_request_ids.TrackUnique(GetUniqueRequestId(),
"BM_TrackUnique", request));
}
}
BENCHMARK(BM_TrackUnique);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/recent_request_ids.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/recent_request_ids_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e3460a20-4301-4749-99c9-483fa6ad0a48 | cpp | tensorflow/tensorflow | master | tensorflow/core/distributed_runtime/master.cc | tensorflow/core/distributed_runtime/master_test.cc | #include "tensorflow/core/distributed_runtime/master.h"
#include <memory>
#include <unordered_set>
#include <vector>
#include "xla/tsl/protobuf/rpc_options.pb.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/distributed_runtime/remote_device.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/distributed_runtime/worker_interface.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/regexp.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/cluster.pb.h"
#include "tensorflow/core/protobuf/master.pb.h"
#include "tensorflow/core/protobuf/worker.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace {
constexpr char kGrpcPrefixRegex[] = "^grpc.*://";
}
Master::Master(MasterEnv* env, double session_gc_seconds)
: env_(env),
last_1000_steps_(1000),
step_count_(0),
session_gc_seconds_(session_gc_seconds),
recent_request_ids_(10000, env_->experimental_num_shards) {
CHECK(!env->local_devices.empty());
DCHECK_GT(env_->experimental_num_shards, 0);
if (session_gc_seconds_ > 0.0) {
gc_thread_ = env_->env->StartThread(ThreadOptions(), "TF_master_GC",
[this]() { GC(); });
} else {
gc_thread_ = nullptr;
}
}
Master::~Master() {
if (gc_thread_) {
mutex_lock l(mu_);
shutdown_ = true;
shutdown_cv_.notify_all();
delete gc_thread_;
}
}
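// Background garbage collector: wakes up every 10 seconds and closes any
// master session whose last access time is older than session_gc_seconds_.
// The actual teardown is scheduled on the closure threadpool so the GC loop
// never blocks on session shutdown.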
void Master::GC() {
Env* env = Env::Default();
while (true) {
mutex_lock l(mu_);
const int kTimeoutMilliseconds = 10 * 1000;
WaitForMilliseconds(&l, &shutdown_cv_, kTimeoutMilliseconds);
if (shutdown_) {
break;
}
std::vector<string> handles;
const int64_t num_micros =
static_cast<int64_t>(session_gc_seconds_ * 1000000);
for (const auto& entry : sessions_) {
int64_t lat = entry.second->last_access_time_usec();
if (static_cast<int64_t>(env->NowMicros()) - lat > num_micros) {
handles.push_back(entry.first);
auto* sess = entry.second;
SchedClosure([this, sess]() {
LOG(WARNING) << "GC session " << sess->handle() << " after "
<< session_gc_seconds_ << " seconds. "
<< "Note that if you are starting multiple replicas "
<< "on a staggered delay, session_gc_seconds may need "
<< "to be raised.";
sess->GarbageCollect();
});
}
}
for (const auto& handle : handles) sessions_.erase(handle);
}
}
MasterSession* Master::FindMasterSession(const string& handle) {
MasterSession* session = nullptr;
{
mutex_lock l(mu_);
session = gtl::FindPtrOrNull(sessions_, handle);
if (session != nullptr) {
session->Ref();
}
}
return session;
}
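// DeviceFinder fans out NewRemoteDevices() queries to every worker that
// matches the session's device filters, then waits for all responses
// (logging stragglers every 10 seconds) before assembling the remote device
// list for the new master session.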
class DeviceFinder {
public:
static Status GetRemoteDevices(
const protobuf::RepeatedPtrField<string>& device_filters, MasterEnv* env,
WorkerCacheInterface* worker_cache,
std::vector<std::unique_ptr<Device>>* out_remote) {
DeviceFinder finder(device_filters, env, worker_cache);
finder.Start();
TF_RETURN_IF_ERROR(finder.Wait());
finder.GetRemoteDevices(env->local_devices, out_remote);
return absl::OkStatus();
}
static void GetRemoteWorkers(
const protobuf::RepeatedPtrField<string>& device_filters, MasterEnv* env,
WorkerCacheInterface* worker_cache, std::vector<string>* workers) {
DeviceFinder finder(device_filters, env, worker_cache);
*workers = finder.targets_;
}
private:
explicit DeviceFinder(
const protobuf::RepeatedPtrField<string>& device_filters, MasterEnv* env,
WorkerCacheInterface* worker_cache)
: env_(env), worker_cache_(worker_cache) {
CHECK(worker_cache) << "Worker cache was null!";
auto process_filter = [this](const string& filter) {
DeviceNameUtils::ParsedName parsed;
if (DeviceNameUtils::ParseFullName(filter, &parsed)) {
filters_.push_back(parsed);
} else {
LOG(FATAL) << "Skipping invalid filter: " << filter;
}
};
for (const string& filter : device_filters) {
process_filter(filter);
}
if (filters_.empty()) {
std::vector<string> workers;
worker_cache->ListWorkers(&workers);
std::swap(workers, targets_);
} else {
CHECK_GT(env_->local_devices.size(), 0) << "No local devices provided.";
const string& local_device_name = env_->local_devices[0]->name();
DeviceNameUtils::ParsedName local_parsed_name;
CHECK(DeviceNameUtils::ParseFullName(local_device_name,
&local_parsed_name));
bool all_filters_have_job = true;
std::unordered_set<string> filter_job_names({local_parsed_name.job});
for (const DeviceNameUtils::ParsedName& filter : filters_) {
all_filters_have_job = all_filters_have_job && filter.has_job;
if (filter.has_job) {
filter_job_names.insert(filter.job);
}
}
std::vector<string> workers;
if (all_filters_have_job) {
for (const string& job_name : filter_job_names) {
VLOG(2) << "Selectively listing workers in job: " << job_name;
std::vector<string> workers_in_job;
worker_cache->ListWorkersInJob(job_name, &workers_in_job);
workers.insert(workers.end(), workers_in_job.begin(),
workers_in_job.end());
}
} else {
VLOG(2) << "Listing workers in all jobs because some device "
<< "filter has no job specified. Filters were:";
if (device_filters.empty()) {
VLOG(2) << "- <NO FILTERS>";
} else {
for (const string& filter : device_filters) {
VLOG(2) << "- " << filter;
}
}
worker_cache->ListWorkers(&workers);
}
for (const string& name : workers) {
if (MatchFilters(name) ||
DeviceNameUtils::IsSameAddressSpace(name, local_device_name)) {
targets_.push_back(name);
}
}
}
seen_targets_.assign(targets_.size(), false);
}
~DeviceFinder() {
for (Device* dev : found_) delete dev;
}
void Start() {
LOG(INFO) << "Scanning workers for devices: " << targets_.size()
<< " total workers";
{
mutex_lock l(mu_);
num_pending_ = targets_.size();
if (num_pending_ == 0) {
pending_zero_.notify_all();
}
}
for (size_t i = 0; i < targets_.size(); ++i) {
NewRemoteDevices(
env_->env, worker_cache_, targets_[i],
[this, i](const Status& s, std::vector<Device*>* devices) {
WhenFound(i, s, devices);
});
}
}
const int32 kLoggingPeriodMs = 10 * 1000;
Status Wait() {
mutex_lock l(mu_);
while (num_pending_ != 0) {
pending_zero_.wait_for(l, std::chrono::milliseconds(kLoggingPeriodMs));
if (num_pending_ != 0) {
for (size_t i = 0; i < targets_.size(); ++i) {
if (!seen_targets_[i]) {
LOG(INFO)
<< "CreateSession still waiting for response from worker: "
<< targets_[i];
}
}
}
}
return status_;
}
void GetRemoteDevices(const std::vector<Device*>& local,
std::vector<std::unique_ptr<Device>>* remote) {
std::unordered_set<string> names(local.size());
for (Device* dev : local) names.insert(dev->name());
mutex_lock l(mu_);
for (Device* dev : found_) {
const string& name = dev->name();
if (names.insert(name).second && MatchFilters(name)) {
remote->push_back(std::unique_ptr<Device>(dev));
} else {
delete dev;
}
}
found_.clear();
}
typedef DeviceFinder ME;
const MasterEnv* env_;
WorkerCacheInterface* worker_cache_;
std::vector<DeviceNameUtils::ParsedName> filters_;
mutex mu_;
int num_pending_ TF_GUARDED_BY(mu_);
condition_variable pending_zero_;
std::vector<Device*> found_ TF_GUARDED_BY(mu_);
std::vector<string> targets_;
std::vector<bool> seen_targets_ TF_GUARDED_BY(mu_);
Status status_;
void WhenFound(int target_index, const Status& s,
std::vector<Device*>* devices) {
mutex_lock l(mu_);
seen_targets_[target_index] = true;
if (!s.ok()) {
LOG(ERROR) << "CreateSession failed because worker "
<< targets_[target_index] << " returned error: " << s;
status_.Update(s);
} else {
found_.insert(found_.end(), devices->begin(), devices->end());
devices->clear();
}
--num_pending_;
if (num_pending_ == 0) {
pending_zero_.notify_all();
}
}
bool Intersects(const DeviceNameUtils::ParsedName& x,
const DeviceNameUtils::ParsedName& y) {
return (!x.has_job || !y.has_job || x.job == y.job) &&
(!x.has_replica || !y.has_replica || x.replica == y.replica) &&
(!x.has_task || !y.has_task || x.task == y.task) &&
(!x.has_type || !y.has_type || x.type == y.type) &&
(!x.has_id || !y.has_id || x.id == y.id);
}
bool MatchFilters(const string& name) {
if (filters_.empty()) return true;
DeviceNameUtils::ParsedName x;
if (DeviceNameUtils::ParseFullName(name, &x)) {
for (const auto& filter : filters_) {
if (Intersects(x, filter)) return true;
}
}
return false;
}
DeviceFinder(const DeviceFinder&) = delete;
void operator=(const DeviceFinder&) = delete;
};
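// CreateSession runs asynchronously on the closure threadpool. If the
// request carries a ClusterDef, a session-specific worker cache is built for
// that cluster (and the master's own job/task is located within it by
// matching the normalized target); otherwise the process-wide worker cache
// and the local devices are used.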
void Master::CreateSession(const CreateSessionRequest* req,
CreateSessionResponse* resp, MyClosure done) {
SchedClosure([this, req, resp, done]() {
Status status;
WorkerCacheFactoryOptions worker_cache_factory_options;
auto call_done = gtl::MakeCleanup([&status, &done] { done(status); });
status = ValidateExternalGraphDefSyntax(req->graph_def());
if (!status.ok()) return;
WorkerCacheInterface* worker_cache = nullptr;
std::unique_ptr<WorkerCacheInterface> worker_cache_ptr;
std::unique_ptr<DeviceSet> device_set;
std::unique_ptr<std::vector<std::unique_ptr<Device>>> remote_devices(
new std::vector<std::unique_ptr<Device>>());
const ClusterDef& cluster_def = req->config().cluster_def();
if (!cluster_def.job().empty()) {
worker_cache_factory_options.cluster_def = cluster_def;
string normalized_string(req->target());
RE2::Replace(&normalized_string, kGrpcPrefixRegex, "");
for (auto&& job : cluster_def.job()) {
for (auto&& task : job.tasks()) {
if (task.second == normalized_string) {
if (!worker_cache_factory_options.job_name.empty()) {
status = errors::InvalidArgument(
"Found multiple matching tasks that correspond to "
"to the master. Master target: '",
req->target(),
"'. ClusterDef: ", cluster_def.ShortDebugString());
LOG(ERROR) << status;
return;
}
if (env_->local_devices[0]->parsed_name().job == job.name() &&
env_->local_devices[0]->parsed_name().task == task.first) {
status = errors::InvalidArgument(
"The ClusterSpec names the job and task index to be the same "
"names that were provided when the server booted. This is "
"currently not allowed. Job: ",
job.name(), ", task index: ", task.first);
return;
}
worker_cache_factory_options.job_name = job.name();
worker_cache_factory_options.task_index = task.first;
}
}
}
worker_cache_factory_options.rpc_options = req->config().rpc_options();
status = env_->worker_cache_factory(worker_cache_factory_options,
&worker_cache);
if (!status.ok()) return;
worker_cache_ptr = std::unique_ptr<WorkerCacheInterface>(worker_cache);
status =
DeviceFinder::GetRemoteDevices(req->config().device_filters(), env_,
worker_cache, remote_devices.get());
if (!status.ok()) return;
device_set = std::make_unique<DeviceSet>();
for (auto&& d : *remote_devices) {
device_set->AddDevice(d.get());
DeviceNameUtils::ParsedName name = d->parsed_name();
if (name.job == worker_cache_factory_options.job_name &&
name.task == worker_cache_factory_options.task_index &&
name.type == "CPU" && name.id == 0) {
device_set->set_client_device(d.get());
}
}
} else {
worker_cache = env_->worker_cache;
status =
DeviceFinder::GetRemoteDevices(req->config().device_filters(), env_,
worker_cache, remote_devices.get());
if (!status.ok()) return;
device_set = std::make_unique<DeviceSet>();
for (auto&& d : *remote_devices) {
device_set->AddDevice(d.get());
}
int num_local_devices = 0;
for (Device* d : env_->local_devices) {
device_set->AddDevice(d);
if (num_local_devices == 0) {
device_set->set_client_device(d);
}
num_local_devices++;
}
}
CHECK(device_set->client_device()) << "No client device found. Missing "
<< "CPU:0 device?";
SessionOptions options;
options.target = req->target();
options.config = req->config();
options.config.mutable_experimental()
->set_disable_optimize_for_static_graph(true);
std::vector<string> filtered_worker_list;
DeviceFinder::GetRemoteWorkers(req->config().device_filters(), env_,
worker_cache, &filtered_worker_list);
MasterSession* session = env_->master_session_factory(
options, env_, std::move(remote_devices), std::move(worker_cache_ptr),
std::move(device_set), std::move(filtered_worker_list));
GraphDef* gdef =
const_cast<CreateSessionRequest*>(req)->mutable_graph_def();
status = session->Create(std::move(*gdef), cluster_def);
if (!status.ok()) {
session->Close().IgnoreError();
session->Unref();
return;
}
resp->set_session_handle(session->handle());
{
mutex_lock l(mu_);
CHECK(sessions_.insert({session->handle(), session}).second);
}
});
}
void Master::ExtendSession(const ExtendSessionRequest* req,
ExtendSessionResponse* resp, MyClosure done) {
auto session = FindMasterSession(req->session_handle());
if (session == nullptr) {
done(errors::Aborted("Session ", req->session_handle(), " is not found."));
return;
}
SchedClosure([session, req, resp, done]() {
Status status = ValidateExternalGraphDefSyntax(req->graph_def());
if (status.ok()) {
status = session->Extend(req, resp);
}
session->Unref();
done(status);
});
}
void Master::PartialRunSetup(const PartialRunSetupRequest* req,
PartialRunSetupResponse* resp, MyClosure done) {
Status s = recent_request_ids_.TrackUnique(req->request_id(),
"PartialRunSetup (Master)", *req);
if (!s.ok()) {
done(s);
return;
}
auto session = FindMasterSession(req->session_handle());
if (session == nullptr) {
done(errors::Aborted("Session ", req->session_handle(), " is not found."));
return;
}
SchedClosure([session, req, resp, done]() {
Status s = session->PartialRunSetup(req, resp);
session->Unref();
done(s);
});
}
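// RunStep is deduplicated by request id so that a retried RPC cannot re-run
// the step; once the closure completes, the step's wall time is folded into
// a rolling window of recent step times.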
void Master::RunStep(CallOptions* opts, const RunStepRequestWrapper* req,
MutableRunStepResponseWrapper* resp, MyClosure done) {
Status s = recent_request_ids_.TrackUnique(req->request_id(),
"RunStep (Master)", req);
if (!s.ok()) {
done(s);
return;
}
auto start_time = env_->env->NowMicros();
auto session = FindMasterSession(req->session_handle());
if (session == nullptr) {
done(errors::Aborted("Session ", req->session_handle(), " is not found."));
return;
}
SchedClosure([this, start_time, session, opts, req, resp, done]() {
Status status = session->Run(opts, *req, resp);
session->Unref();
uint64 done_time = env_->env->NowMicros();
done(status);
mutex_lock l(mu_);
last_1000_steps_.AddValue((done_time - start_time) / 1e9);
++step_count_;
});
}
void Master::CloseSession(const CloseSessionRequest* req,
CloseSessionResponse* resp, MyClosure done) {
MasterSession* session = nullptr;
{
mu_.lock();
auto iter = sessions_.find(req->session_handle());
if (iter == sessions_.end()) {
mu_.unlock();
done(errors::Aborted(
"Session ", req->session_handle(),
" is not found. Possibly, this master has restarted."));
return;
}
session = iter->second;
sessions_.erase(iter);
mu_.unlock();
}
SchedClosure([session, done]() {
Status s = session->Close();
session->Unref();
done(s);
});
}
void Master::ListDevices(const ListDevicesRequest* req,
ListDevicesResponse* resp, MyClosure done) {
SchedClosure([this, req, resp, done]() {
if (!req->session_handle().empty()) {
auto session = FindMasterSession(req->session_handle());
if (session == nullptr) {
done(errors::InvalidArgument(
"Session ", req->session_handle(),
" is not found. Possibly, this master has restarted."));
return;
}
core::ScopedUnref ref(session);
Status s = session->ListDevices(resp);
done(s);
return;
}
std::vector<std::unique_ptr<Device>> remote_devices;
Status s = DeviceFinder::GetRemoteDevices({}, env_, env_->worker_cache,
&remote_devices);
if (s.ok()) {
for (Device* dev : env_->local_devices) {
*(resp->add_local_device()) = dev->attributes();
}
for (auto&& dev : remote_devices) {
*(resp->add_remote_device()) = dev->attributes();
}
}
done(s);
});
}
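// Reset support: broadcast CleanupAll (carrying the containers named in the
// reset request) to every worker matching the reset's device filters, and
// block until each worker has acknowledged, so per-session resource
// containers are dropped everywhere.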
void Master::CleanupWorkers(const ResetRequest& reset) {
std::vector<string> worker_names;
DeviceFinder::GetRemoteWorkers(reset.device_filters(), env_,
env_->worker_cache, &worker_names);
if (!worker_names.empty()) {
const int num_workers = worker_names.size();
std::vector<Notification> n(num_workers);
CleanupAllRequest req;
(*req.mutable_container()) = reset.container();
std::vector<CleanupAllResponse> resp(num_workers);
int c = 0;
for (int i = 0; i < num_workers; ++i) {
const string& worker_name = worker_names[i];
auto worker = env_->worker_cache->GetOrCreateWorker(worker_name);
if (worker) {
worker->CleanupAllAsync(
&req, &resp[i], [this, &n, worker_name, worker, c](Status s) {
if (!s.ok()) {
LOG(ERROR) << "Worker CleanupAll failed: " << s;
}
env_->worker_cache->ReleaseWorker(worker_name, worker);
n[c].Notify();
});
} else {
n[c].Notify();
}
++c;
}
for (size_t i = 0; i < n.size(); ++i) {
n[i].WaitForNotification();
}
}
}
void Master::Reset(const ResetRequest* req, ResetResponse* resp,
MyClosure done) {
std::vector<MasterSession*> sessions_to_close;
{
mutex_lock l(mu_);
for (const auto& entry : sessions_) {
sessions_to_close.push_back(entry.second);
}
sessions_.clear();
}
CleanupWorkers(*req);
SchedClosure([sessions_to_close, done]() {
Status s;
for (MasterSession* session : sessions_to_close) {
s.Update(session->Close());
session->Unref();
}
done(s);
});
}
void Master::MakeCallable(const MakeCallableRequest* req,
MakeCallableResponse* resp, MyClosure done) {
Status s = recent_request_ids_.TrackUnique(req->request_id(),
"MakeCallable (Master)", *req);
if (!s.ok()) {
done(s);
return;
}
auto session = FindMasterSession(req->session_handle());
if (session == nullptr) {
done(errors::Aborted("Session ", req->session_handle(), " is not found."));
return;
}
SchedClosure([session, req, resp, done = std::move(done)]() {
Status s = session->MakeCallable(*req, resp);
session->Unref();
done(s);
});
}
void Master::RunCallable(CallOptions* opts, const RunCallableRequest* req,
RunCallableResponse* resp, MyClosure done) {
Status s = recent_request_ids_.TrackUnique(req->request_id(),
"RunCallable (Master)", *req);
if (!s.ok()) {
done(s);
return;
}
auto session = FindMasterSession(req->session_handle());
if (session == nullptr) {
done(errors::Aborted("Session ", req->session_handle(), " is not found."));
return;
}
SchedClosure([session, opts, req, resp, done = std::move(done)]() {
Status s = session->RunCallable(opts, *req, resp);
session->Unref();
done(s);
});
}
void Master::ReleaseCallable(const ReleaseCallableRequest* req,
ReleaseCallableResponse* resp, MyClosure done) {
auto session = FindMasterSession(req->session_handle());
if (session == nullptr) {
done(errors::Aborted("Session ", req->session_handle(), " is not found."));
return;
}
SchedClosure([session, req, resp, done = std::move(done)]() {
Status s = session->ReleaseCallable(*req, resp);
session->Unref();
done(s);
});
}
} | #include "tensorflow/core/distributed_runtime/master.h"
#include <map>
#include <memory>
#include "grpcpp/grpcpp.h"
#include "Eigen/Core"
#include "tensorflow/core/distributed_runtime/rpc/grpc_channel.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_master_service_impl.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_testlib.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/master.pb.h"
namespace tensorflow {
class MasterTest : public ::testing::Test {
protected:
MasterTest() {
std::vector<string> targets;
SessionOptions options;
(*options.config.mutable_device_count())["CPU"] = 1;
(*options.config.mutable_device_count())["GPU"] = 0;
TF_CHECK_OK(test::TestCluster::MakeTestCluster(
test::TestClusterConfig().Options(options).Jobs(
{test::TestJob{"localhost", 2}}),
&cluster_));
SharedGrpcChannelPtr channel_ptr;
TF_CHECK_OK(NewHostPortGrpcChannel(
cluster_->targets()[0], &options.config.rpc_options(), &channel_ptr));
master_ = grpc::MasterService::NewStub(channel_ptr);
}
std::unique_ptr<test::TestCluster> cluster_;
std::unique_ptr<grpc::MasterService::Stub> master_;
Status CreateSession(const GraphDef& def, string* handle,
int64_t* initial_version) {
::grpc::ClientContext ctx;
CreateSessionRequest req;
*(req.mutable_graph_def()) = def;
req.mutable_config()->set_placement_period(1);
CreateSessionResponse resp;
const Status s = FromGrpcStatus(master_->CreateSession(&ctx, req, &resp));
if (s.ok()) {
*handle = resp.session_handle();
*initial_version = resp.graph_version();
}
return s;
}
Status ExtendSession(const string& handle, const GraphDef& def,
int64_t current_version, int64_t* new_version) {
::grpc::ClientContext ctx;
ExtendSessionRequest req;
req.set_session_handle(handle);
*(req.mutable_graph_def()) = def;
req.set_current_graph_version(current_version);
ExtendSessionResponse resp;
const Status s = FromGrpcStatus(master_->ExtendSession(&ctx, req, &resp));
if (s.ok()) {
*new_version = resp.new_graph_version();
}
return s;
}
Status RunStep(const string& handle,
const std::vector<std::pair<string, const Tensor*> >& feed,
const std::map<string, Tensor*>& fetch) {
::grpc::ClientContext ctx;
RunStepRequest req;
req.set_session_handle(handle);
for (const auto& p : feed) {
const string& feed_name = p.first;
const Tensor* feed_tensor = p.second;
auto f = req.add_feed();
f->set_name(feed_name);
feed_tensor->AsProtoTensorContent(f->mutable_tensor());
}
for (const auto& p : fetch) {
const string& fetch_name = p.first;
req.add_fetch(fetch_name);
}
RunStepResponse resp;
const Status s = FromGrpcStatus(master_->RunStep(&ctx, req, &resp));
if (s.ok()) {
for (const auto& fetch_resp : resp.tensor()) {
auto it = fetch.find(fetch_resp.name());
CHECK(it != fetch.end());
CHECK(it->second->FromProto(fetch_resp.tensor()));
}
}
return s;
}
Status CloseSession(const string& handle) {
::grpc::ClientContext ctx;
CloseSessionRequest req;
req.set_session_handle(handle);
CloseSessionResponse resp;
return FromGrpcStatus(master_->CloseSession(&ctx, req, &resp));
}
Status Reset() {
::grpc::ClientContext ctx;
ResetRequest req;
ResetResponse resp;
return FromGrpcStatus(master_->Reset(&ctx, req, &resp));
}
};
TEST_F(MasterTest, CreateClose) {
GraphDef def;
string handle;
int64_t initial_version;
TF_ASSERT_OK(CreateSession(def, &handle, &initial_version));
EXPECT_TRUE(errors::IsAborted(CloseSession("randombits")));
EXPECT_TRUE(CloseSession(handle).ok());
}
TEST_F(MasterTest, ListDevices) {
::grpc::ClientContext ctx;
ListDevicesRequest req;
ListDevicesResponse resp;
const Status s = FromGrpcStatus(master_->ListDevices(&ctx, req, &resp));
TF_EXPECT_OK(s);
EXPECT_EQ(1, resp.local_device_size());
EXPECT_EQ("CPU", resp.local_device(0).device_type());
}
TEST_F(MasterTest, Reset) {
GraphDef def;
string s1, s2;
int64_t initial_version1, initial_version2;
TF_ASSERT_OK(CreateSession(def, &s1, &initial_version1));
TF_ASSERT_OK(CreateSession(def, &s2, &initial_version2));
EXPECT_TRUE(Reset().ok());
EXPECT_TRUE(errors::IsAborted(CloseSession(s1)));
EXPECT_TRUE(errors::IsAborted(CloseSession(s2)));
}
TEST_F(MasterTest, Extend) {
GraphDef def_0;
string handle;
int64_t initial_version;
TF_ASSERT_OK(CreateSession(def_0, &handle, &initial_version));
Tensor A_expected(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&A_expected, {3.0, 2.0, -1.0, 0.0});
Tensor x_expected(DT_FLOAT, TensorShape({2, 1}));
test::FillValues<float>(&x_expected, {2.0, 2.0});
Graph graph_1(OpRegistry::Global());
test::graph::Constant(&graph_1, A_expected, "A");
GraphDef def_1;
test::graph::ToGraphDef(&graph_1, &def_1);
int64_t version_1;
TF_ASSERT_OK(ExtendSession(handle, def_1, initial_version, &version_1));
EXPECT_GT(version_1, initial_version);
Tensor A(DT_FLOAT, TensorShape({2, 2}));
TF_ASSERT_OK(RunStep(handle, {}, {{"A:0", &A}}));
test::ExpectTensorEqual<float>(A, A_expected);
Graph graph_2(OpRegistry::Global());
test::graph::Constant(&graph_2, x_expected, "x");
GraphDef def_2;
test::graph::ToGraphDef(&graph_2, &def_2);
int64_t version_2;
EXPECT_TRUE(errors::IsAborted(
ExtendSession("randombits", def_2, version_1, &version_2)));
TF_ASSERT_OK(ExtendSession(handle, def_2, version_1, &version_2));
EXPECT_GT(version_2, version_1);
Tensor x(DT_FLOAT, TensorShape({2, 1}));
TF_ASSERT_OK(RunStep(handle, {}, {{"A:0", &A}, {"x:0", &x}}));
test::ExpectTensorEqual<float>(A, A_expected);
test::ExpectTensorEqual<float>(x, x_expected);
TF_ASSERT_OK(CloseSession(handle));
}
TEST_F(MasterTest, ExtendUpdateStatefulFails) {
GraphDef def_0;
string handle;
int64_t initial_version;
TF_ASSERT_OK(CreateSession(def_0, &handle, &initial_version));
Graph graph_1(OpRegistry::Global());
test::graph::Var(&graph_1, DT_FLOAT, TensorShape({512}));
GraphDef def_1;
test::graph::ToGraphDef(&graph_1, &def_1);
int64_t version_1, version_2;
TF_ASSERT_OK(ExtendSession(handle, def_1, initial_version, &version_1));
EXPECT_GT(version_1, initial_version);
EXPECT_TRUE(errors::IsInvalidArgument(
ExtendSession(handle, def_1, version_1, &version_2)));
TF_ASSERT_OK(CloseSession(handle));
}
TEST_F(MasterTest, ExtendTwiceFails) {
GraphDef def_0;
string handle;
int64_t initial_version;
TF_ASSERT_OK(CreateSession(def_0, &handle, &initial_version));
Graph graph_1(OpRegistry::Global());
test::graph::Var(&graph_1, DT_FLOAT, TensorShape({512}));
GraphDef def_1;
test::graph::ToGraphDef(&graph_1, &def_1);
int64_t version_1;
TF_ASSERT_OK(ExtendSession(handle, def_1, initial_version, &version_1));
EXPECT_GT(version_1, initial_version);
EXPECT_TRUE(errors::IsAborted(
ExtendSession(handle, def_1, initial_version, &version_1)));
TF_ASSERT_OK(CloseSession(handle));
}
TEST_F(MasterTest, ConcurrentExtendOnlyOneSucceeds) {
GraphDef def_0;
string handle;
int64_t initial_version;
TF_ASSERT_OK(CreateSession(def_0, &handle, &initial_version));
Graph graph_1(OpRegistry::Global());
test::graph::Var(&graph_1, DT_FLOAT, TensorShape({512}));
GraphDef def_1;
test::graph::ToGraphDef(&graph_1, &def_1);
Notification n;
mutex mu;
int succeeded = 0;
int failed = 0;
auto extend_fn = [this, handle, def_1, initial_version, &n, &mu, &succeeded,
&failed]() {
n.WaitForNotification();
int64_t new_version;
Status s = ExtendSession(handle, def_1, initial_version, &new_version);
EXPECT_TRUE(s.ok() || errors::IsAborted(s));
{
mutex_lock l(mu);
if (s.ok()) {
++succeeded;
} else {
++failed;
}
}
};
{
thread::ThreadPool thread_pool(Env::Default(), "extend_pool", 100);
for (int i = 0; i < 100; ++i) {
thread_pool.Schedule(extend_fn);
}
n.Notify();
}
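  // Leaving this scope destroys the ThreadPool, which joins all 100 extend
  // attempts before the success/failure counters are checked.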
EXPECT_EQ(failed, 99);
EXPECT_EQ(succeeded, 1);
TF_ASSERT_OK(CloseSession(handle));
}
TEST_F(MasterTest, ConcurrentExtendAndRun) {
Graph graph_0(OpRegistry::Global());
Tensor a_tensor(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&a_tensor, {3, 2, -1, 0});
test::graph::Constant(&graph_0, a_tensor, "A");
GraphDef def_0;
test::graph::ToGraphDef(&graph_0, &def_0);
string handle;
int64_t initial_version;
TF_ASSERT_OK(CreateSession(def_0, &handle, &initial_version));
Graph graph_1(OpRegistry::Global());
Tensor b_tensor(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&b_tensor, {1, 0, 0, 1});
test::graph::Constant(&graph_1, b_tensor, "B");
GraphDef def_1;
test::graph::ToGraphDef(&graph_1, &def_1);
Notification extend_done;
Notification extend_can_start;
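  // Choreography: get_a_and_b_fn first observes that "B" is absent, then
  // unblocks extend_fn via extend_can_start; both fetch lambdas poll until
  // extend_done fires, after which "B" must be fetchable.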
auto get_a_fn = [this, handle, &extend_done]() {
Tensor A(DT_FLOAT, TensorShape({2, 2}));
while (!extend_done.HasBeenNotified()) {
TF_ASSERT_OK(RunStep(handle, {}, {{"A:0", &A}}));
}
TF_ASSERT_OK(RunStep(handle, {}, {{"A:0", &A}}));
};
auto get_a_and_b_fn = [this, handle, &extend_done, &extend_can_start]() {
Tensor A(DT_FLOAT, TensorShape({2, 2}));
Tensor B(DT_FLOAT, TensorShape({2, 2}));
EXPECT_TRUE(
errors::IsNotFound(RunStep(handle, {}, {{"A:0", &A}, {"B:0", &B}})));
extend_can_start.Notify();
while (!extend_done.HasBeenNotified()) {
Status s = RunStep(handle, {}, {{"A:0", &A}, {"B:0", &B}});
EXPECT_TRUE(errors::IsNotFound(s) || s.ok());
}
TF_ASSERT_OK(RunStep(handle, {}, {{"A:0", &A}, {"B:0", &B}}));
};
auto extend_fn = [this, handle, def_1, initial_version, &extend_done,
&extend_can_start]() {
extend_can_start.WaitForNotification();
int64_t version_1;
TF_ASSERT_OK(ExtendSession(handle, def_1, initial_version, &version_1));
extend_done.Notify();
};
{
thread::ThreadPool thread_pool(Env::Default(), "extend_pool", 3);
thread_pool.Schedule(get_a_fn);
thread_pool.Schedule(get_a_and_b_fn);
thread_pool.Schedule(extend_fn);
}
TF_ASSERT_OK(CloseSession(handle));
}
TEST_F(MasterTest, EigenProblem) {
Graph graph(OpRegistry::Global());
Tensor a_tensor(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&a_tensor, {3, 2, -1, 0});
Node* a_node = test::graph::Constant(&graph, a_tensor);
Tensor x_tensor(DT_FLOAT, TensorShape({2, 1}));
test::FillValues<float>(&x_tensor, {0, 0});
Node* x_node = test::graph::Constant(&graph, x_tensor);
Node* y_node = test::graph::Matmul(&graph, a_node, x_node, false, false);
GraphDef def;
test::graph::ToGraphDef(&graph, &def);
string handle;
int64_t initial_version;
TF_CHECK_OK(CreateSession(def, &handle, &initial_version));
const Eigen::array<Eigen::DenseIndex, 1> sum_along_dim{0};
const Eigen::array<Eigen::DenseIndex, 2> matrix_transpose{1, 0};
Tensor x(DT_FLOAT, TensorShape({2, 1}));
Tensor y(DT_FLOAT, TensorShape({2, 1}));
Eigen::Tensor<float, 1, Eigen::RowMajor> y_square_sum;
Eigen::Tensor<float, 2, Eigen::RowMajor> y_normalized(2, 1);
y_normalized.setRandom();
Eigen::Tensor<float, 1, Eigen::RowMajor> error_square_sum;
float lambda;
bool converged = false;
while (!converged) {
auto x_matrix = x.matrix<float>();
x_matrix = y_normalized;
TF_EXPECT_OK(
RunStep(handle, {{x_node->name(), &x}}, {{y_node->name() + ":0", &y}}));
auto y_matrix = y.matrix<float>();
{
lambda = y_matrix(0, 0) / x_matrix(0, 0);
y_square_sum = y.matrix<float>().square().sum(sum_along_dim);
const float norm = static_cast<float>(sqrt(y_square_sum(0)));
y_normalized = y_matrix * (1 / norm);
error_square_sum = (x_matrix - y_normalized).square().sum(sum_along_dim);
VLOG(1) << "x = [" << x_matrix.shuffle(matrix_transpose) << "] y = ["
<< y_matrix.shuffle(matrix_transpose) << "] lambda = " << lambda;
converged = sqrt(error_square_sum(0)) < 1e-10;
}
}
EXPECT_NEAR(lambda, 2.0, 0.01);
TF_EXPECT_OK(CloseSession(handle));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/master.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/master_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2a390141-a179-474f-84ec-21165faf17ec | cpp | tensorflow/tensorflow | request_id | tensorflow/core/distributed_runtime/request_id.cc | tensorflow/core/distributed_runtime/request_id_test.cc | #include "tensorflow/core/distributed_runtime/request_id.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
int64_t GetUniqueRequestId() {
int64_t request_id = 0;
while (request_id == 0) {
request_id = tsl::random::ThreadLocalNew64();
}
return request_id;
}
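// Illustrative usage (a sketch, not part of the original source): 0 is
// reserved as the "unset" request id, so the loop above guarantees a
// non-zero value that can be used to deduplicate retried RPCs, e.g.:
//   RecvBufRequest req;
//   req.set_request_id(GetUniqueRequestId());  // never 0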
} | #include "tensorflow/core/distributed_runtime/request_id.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(GetUniqueRequestId, Basic) {
for (int i = 0; i < 1000000; ++i) {
EXPECT_NE(GetUniqueRequestId(), 0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/request_id.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/request_id_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5054f737-a436-48d6-bda7-202ce4f02830 | cpp | tensorflow/tensorflow | session_mgr | tensorflow/core/distributed_runtime/session_mgr.cc | tensorflow/core/distributed_runtime/session_mgr_test.cc | #include "tensorflow/core/distributed_runtime/session_mgr.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "xla/tsl/distributed_runtime/coordination/coordination_service.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include "xla/tsl/protobuf/coordination_config.pb.h"
#include "xla/tsl/protobuf/coordination_service.pb.h"
#include "xla/tsl/protobuf/distributed_runtime_payloads.pb.h"
#include "tensorflow/core/activity_watcher/activity.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/renamed_device.h"
#include "tensorflow/core/distributed_runtime/cluster_function_library_runtime.h"
#include "tensorflow/core/distributed_runtime/error_payloads.h"
#include "tensorflow/core/distributed_runtime/graph_mgr.h"
#include "tensorflow/core/distributed_runtime/remote_device.h"
#include "tensorflow/core/distributed_runtime/worker_cache_wrapper.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/protobuf/cluster.pb.h"
#include "tensorflow/core/protobuf/tensorflow_server.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace {
bool IsMultiClientLeader(const ServerDef& server_def,
const CoordinationServiceConfig& config) {
DeviceNameUtils::ParsedName leader_pn;
DeviceNameUtils::ParseFullName(config.service_leader(), &leader_pn);
return server_def.job_name() == leader_pn.job &&
server_def.task_index() == leader_pn.task;
}
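// Example (illustrative): with service_leader() == "/job:worker/replica:0/task:0",
// the server whose ServerDef has job_name() == "worker" and task_index() == 0
// is the multi-client leader.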
void SetCoordinationServiceLeader(const ServerDef& server_def,
CoordinationServiceConfig* config) {
const std::string& collective_leader = server_def.default_session_config()
.experimental()
.collective_group_leader();
if (!collective_leader.empty()) {
config->set_service_leader(collective_leader);
LOG(INFO) << "No coordination leader is set, using the collective leader "
<< collective_leader;
} else {
const std::string& default_leader =
strings::StrCat("/job:", server_def.job_name(), "/replica:0/task:0");
config->set_service_leader(default_leader);
LOG(INFO) << "No coordination leader is set, using the default leader "
<< default_leader;
}
}
void SetCoordinatedJobList(const ServerDef& server_def,
CoordinationServiceConfig* config) {
for (const auto& job : server_def.cluster().job()) {
tensorflow::CoordinatedJob* coordinated_job =
config->mutable_coordinated_job_list()->Add();
coordinated_job->set_name(job.name());
coordinated_job->set_num_tasks(job.tasks().size());
}
}
}
SessionMgr::SessionMgr(
WorkerEnv* worker_env, const std::string& default_worker_name,
std::unique_ptr<WorkerCacheInterface> default_worker_cache,
WorkerCacheFactory worker_cache_factory,
tsl::CoordinationServiceRpcHandler* coordination_handler)
: worker_env_(worker_env),
default_worker_cache_(std::move(default_worker_cache)),
legacy_session_(WorkerSession::CreateWithBorrowedDeviceMgr(
"", default_worker_name,
std::unique_ptr<WorkerCacheInterface>(
new WorkerCacheWrapper(default_worker_cache_.get())),
worker_env->device_mgr,
std::make_unique<GraphMgr>(worker_env, worker_env->device_mgr),
          /*remote_device_mgr=*/nullptr,
[](WorkerSession* worker_session, bool create_worker_session_called,
DeviceMgr* remote_device_mgr)
-> std::unique_ptr<DistributedFunctionLibraryRuntime> {
return std::make_unique<ClusterFunctionLibraryRuntime>(
worker_session, create_worker_session_called,
remote_device_mgr);
})),
worker_cache_factory_(std::move(worker_cache_factory)),
coordination_handler_(coordination_handler) {}
std::string SessionMgr::WorkerNameFromServerDef(const ServerDef& server_def) {
return strings::StrCat("/job:", server_def.job_name(),
"/replica:", server_def.replica(),
"/task:", server_def.task_index());
}
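// For example, job_name="worker", replica=0, task_index=3 yields
// "/job:worker/replica:0/task:3" (exercised by the WorkerNameFromServerDef
// test below).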
Status SessionMgr::CreateSession(const std::string& session,
const ServerDef& server_def,
bool isolate_session_state,
StatusCallback coordination_error_callback) {
return CreateSession(session, server_def, {}, isolate_session_state,
"",
0, coordination_error_callback);
}
Status SessionMgr::CreateSession(
const std::string& session, const ServerDef& server_def,
const protobuf::RepeatedPtrField<DeviceAttributes>&
cluster_device_attributes,
bool isolate_session_state) {
return CreateSession(session, server_def, cluster_device_attributes,
isolate_session_state,
"",
0);
}
Status SessionMgr::CreateSession(
const std::string& session, const ServerDef& server_def,
const protobuf::RepeatedPtrField<DeviceAttributes>&
cluster_device_attributes,
bool isolate_session_state, std::string master_task,
int64_t master_incarnation, StatusCallback coordination_error_callback) {
mutex_lock l(mu_);
if (session.empty()) {
return errors::InvalidArgument("Session must be non-empty.");
}
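  // If this master task restarted with a new incarnation, garbage-collect the
  // WorkerSessions created by its previous incarnation before creating a new
  // one.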
if (!master_task.empty()) {
auto it_range = master_to_associated_sessions_.equal_range(master_task);
if (it_range.first != it_range.second &&
it_range.first->second.master_incarnation != master_incarnation) {
LOG(INFO) << "When creating WorkerSession for master task " << master_task
<< ", found old WorkerSessions created by the same master task "
<< "with a different incarnation. These sessions will "
<< "be garbage collected. Current WorkerSession count: "
<< sessions_.size();
auto it = it_range.first;
while (it != it_range.second) {
auto session_it = sessions_.find(it->second.session_handle);
if (session_it != sessions_.end()) {
sessions_.erase(session_it);
}
it = master_to_associated_sessions_.erase(it);
}
}
}
WorkerCacheInterface* worker_cache = nullptr;
std::string worker_name;
if (server_def.cluster().job().empty()) {
worker_cache = new WorkerCacheWrapper(default_worker_cache_.get());
worker_name = legacy_session_->worker_name();
} else {
TF_RETURN_IF_ERROR(worker_cache_factory_(server_def, &worker_cache));
worker_name = WorkerNameFromServerDef(server_def);
}
if (worker_cache != nullptr && default_worker_cache_ != nullptr) {
worker_cache->SetLogging(this->is_logging_active_);
}
CHECK(worker_env_->device_mgr)
<< "The WorkerEnv must have a device manager.";
std::vector<Device*> local_devices = worker_env_->device_mgr->ListDevices();
CHECK(!local_devices.empty())
<< "The WorkerEnv must have at least one device in `local_devices`.";
std::shared_ptr<WorkerSession> worker_session;
std::vector<std::unique_ptr<Device>> cluster_devices;
if (isolate_session_state || server_def.cluster().job_size()) {
if (server_def.cluster().job_size()) {
VLOG(1) << "ClusterSpec propagation is enabled.";
}
if (!isolate_session_state) {
VLOG(1) << "Session state isolation is disabled.";
}
std::vector<std::unique_ptr<Device>> renamed_devices;
renamed_devices.reserve(local_devices.size());
for (Device* d : local_devices) {
renamed_devices.push_back(RenamedDevice::NewRenamedDevice(
          worker_name, d, /*owns_underlying=*/false, isolate_session_state));
}
auto device_mgr =
std::make_unique<StaticDeviceMgr>(std::move(renamed_devices));
LookupLocalDevice cb = [&device_mgr](StringPiece name, Device** device) {
return device_mgr->LookupDevice(name, device);
};
AsRemoteDevices(worker_env_->env, cluster_device_attributes, cb,
&cluster_devices);
std::unique_ptr<DynamicDeviceMgr> remote_devices;
if (!cluster_device_attributes.empty()) {
remote_devices = std::make_unique<DynamicDeviceMgr>();
TF_RETURN_IF_ERROR(
remote_devices->AddDevices(std::move(cluster_devices)));
}
auto graph_mgr = std::make_unique<GraphMgr>(worker_env_, device_mgr.get());
worker_session.reset(new WorkerSession(
session, worker_name,
std::unique_ptr<WorkerCacheInterface>(worker_cache),
std::move(device_mgr), std::move(graph_mgr), std::move(remote_devices),
[](WorkerSession* worker_session, bool create_worker_session_called,
DeviceMgr* remote_device_mgr)
-> std::unique_ptr<DistributedFunctionLibraryRuntime> {
return std::make_unique<ClusterFunctionLibraryRuntime>(
worker_session, create_worker_session_called, remote_device_mgr);
}));
} else {
AsRemoteDevices(worker_env_->env, cluster_device_attributes, nullptr,
&cluster_devices);
std::unique_ptr<DynamicDeviceMgr> remote_devices;
if (!cluster_device_attributes.empty()) {
remote_devices = std::make_unique<DynamicDeviceMgr>();
TF_RETURN_IF_ERROR(
remote_devices->AddDevices(std::move(cluster_devices)));
}
auto graph_mgr =
std::make_unique<GraphMgr>(worker_env_, worker_env_->device_mgr);
worker_session = WorkerSession::CreateWithBorrowedDeviceMgr(
session, worker_name,
std::unique_ptr<WorkerCacheInterface>(worker_cache),
worker_env_->device_mgr, std::move(graph_mgr),
std::move(remote_devices),
[](WorkerSession* worker_session, bool create_worker_session_called,
DeviceMgr* remote_device_mgr)
-> std::unique_ptr<DistributedFunctionLibraryRuntime> {
return std::make_unique<ClusterFunctionLibraryRuntime>(
worker_session, create_worker_session_called, remote_device_mgr);
});
}
sessions_.insert(std::make_pair(session, std::move(worker_session)));
if (!master_task.empty()) {
MasterAssociatedSession s{master_incarnation, session};
master_to_associated_sessions_.emplace(master_task, s);
}
CoordinationServiceConfig coordination_config =
server_def.default_session_config().experimental().coordination_config();
if (!coordination_config.service_type().empty() &&
!coordination_config.force_disable() &&
coordination_service_agent_ == nullptr) {
std::unique_ptr<CoordinationClientCache> client_cache;
TF_RETURN_IF_ERROR(worker_cache->GetCoordinationClientCache(&client_cache));
if (coordination_config.service_leader().empty()) {
SetCoordinationServiceLeader(server_def, &coordination_config);
}
if (coordination_config.coordinated_job_list().empty()) {
SetCoordinatedJobList(server_def, &coordination_config);
}
if (IsMultiClientLeader(server_def, coordination_config)) {
coordination_service_ =
tsl::CoordinationServiceInterface::EnableCoordinationService(
worker_env_->env, coordination_config, std::move(client_cache));
if (coordination_handler_ != nullptr) {
coordination_handler_->SetServiceInstance(coordination_service_.get());
}
}
std::unique_ptr<CoordinationClientCache> agent_cache;
TF_RETURN_IF_ERROR(worker_cache->GetCoordinationClientCache(&agent_cache));
coordination_service_agent_ = tsl::CreateCoordinationServiceAgent();
TF_RETURN_IF_ERROR(coordination_service_agent_->Initialize(
worker_env_->env, server_def.job_name(), server_def.task_index(),
coordination_config,
agent_cache->GetOwnedClient(coordination_config.service_leader()),
std::move(coordination_error_callback)));
activity_watcher::MaybeEnableMultiWorkersWatching(
coordination_service_agent_.get());
}
return absl::OkStatus();
}
void SessionMgr::ResetDefaultWorkerCache(WorkerCacheInterface* worker_cache) {
default_worker_cache_.reset(worker_cache);
}
Status SessionMgr::UpdateSession(
const std::string& session, const ServerDef& server_def,
const protobuf::RepeatedPtrField<DeviceAttributes>&
cluster_device_attributes) {
mutex_lock l(mu_);
if (session.empty()) {
return errors::InvalidArgument("Session must be non-empty.");
}
auto it = sessions_.find(session);
if (it == sessions_.end()) {
return errors::InvalidArgument("Cannot update session ", session,
" because it does not exist.");
}
std::shared_ptr<WorkerSession> worker_session = it->second;
WorkerCacheInterface* worker_cache = nullptr;
if (server_def.cluster().job().empty()) {
worker_cache = new WorkerCacheWrapper(default_worker_cache_.get());
} else {
TF_RETURN_IF_ERROR(worker_cache_factory_(server_def, &worker_cache));
}
std::vector<std::string> updated_remote_workers;
worker_cache->ListWorkers(&updated_remote_workers);
std::vector<std::unique_ptr<Device>> cluster_devices;
const DeviceMgr* local_device_mgr = worker_session->device_mgr();
DeviceMgr* remote_device_mgr = worker_session->remote_device_mgr();
std::vector<Device*> curr_remote_devices = remote_device_mgr->ListDevices();
std::vector<std::unique_ptr<Device>> added_remote_devices;
std::vector<Device*> removed_remote_devices;
std::vector<DeviceAttributes> added_cluster_device_attrs;
for (const auto& da : cluster_device_attributes) {
Device* device;
if (!local_device_mgr->LookupDevice(da.name(), &device).ok() &&
!remote_device_mgr->LookupDevice(da.name(), &device).ok()) {
added_cluster_device_attrs.emplace_back(da);
} else if (device != nullptr &&
device->attributes().incarnation() != da.incarnation()) {
removed_remote_devices.emplace_back(device);
added_cluster_device_attrs.emplace_back(da);
}
}
for (Device* device : curr_remote_devices) {
std::string task_name;
DeviceNameUtils::GetTaskName(device->parsed_name(), &task_name);
if (std::find(updated_remote_workers.begin(), updated_remote_workers.end(),
task_name) == updated_remote_workers.end()) {
removed_remote_devices.emplace_back(device);
}
}
protobuf::RepeatedPtrField<DeviceAttributes> added_cluster_device_attrs_pb(
added_cluster_device_attrs.begin(), added_cluster_device_attrs.end());
AsRemoteDevices(worker_env_->env, added_cluster_device_attrs_pb, nullptr,
&added_remote_devices);
TF_RETURN_IF_ERROR(worker_session->UpdateWorkerCacheAndDevices(
std::unique_ptr<WorkerCacheInterface>(worker_cache),
std::move(added_remote_devices), removed_remote_devices));
return absl::OkStatus();
}
Status SessionMgr::DeleteSession(const std::string& session) {
mutex_lock l(mu_);
auto it = sessions_.find(session);
if (it != sessions_.end()) {
sessions_.erase(it);
}
return absl::OkStatus();
}
Status SessionMgr::DeleteAllSessions() {
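  // Swap the session map out while holding the lock, then release the
  // WorkerSessions afterwards so their teardown does not run under mu_.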
std::map<std::string, std::shared_ptr<WorkerSession>> tmp_sessions;
{
mutex_lock l(mu_);
swap(sessions_, tmp_sessions);
}
for (auto& session : tmp_sessions) {
session.second.reset();
}
return absl::OkStatus();
}
Status SessionMgr::WorkerSessionForSessionLocked(
const std::string& session_handle,
std::shared_ptr<WorkerSession>* out_session) {
if (session_handle.empty()) {
*out_session = legacy_session_;
} else {
auto it = sessions_.find(session_handle);
if (it == sessions_.end()) {
return errors::AbortedWithPayloads(
strings::StrCat("Session handle is not found: ", session_handle,
". Possibly this worker (\"",
legacy_session_->worker_name(),
"\") just restarted."),
{{kWorkerPossiblyRestarted,
distributed_runtime::WorkerPossiblyRestarted()
.SerializeAsString()}});
} else {
*out_session = it->second;
}
}
return absl::OkStatus();
}
Status SessionMgr::WorkerSessionForSession(
const std::string& session_handle,
std::shared_ptr<WorkerSession>* out_session) {
mutex_lock l(mu_);
return WorkerSessionForSessionLocked(session_handle, out_session);
}
std::shared_ptr<WorkerSession> SessionMgr::LegacySession() {
return legacy_session_;
}
tsl::CoordinationServiceAgent* SessionMgr::GetCoordinationServiceAgent() {
return coordination_service_agent_.get();
}
void SessionMgr::SetLogging(bool active) {
mutex_lock l(mu_);
this->is_logging_active_ = active;
if (legacy_session_) {
auto* worker_cache = legacy_session_->worker_cache();
if (worker_cache) {
worker_cache->SetLogging(active);
}
}
for (const auto& session_kv : sessions_) {
auto session = session_kv.second.get();
if (session) {
auto* worker_cache = session->worker_cache();
if (worker_cache) {
worker_cache->SetLogging(active);
}
}
}
}
void SessionMgr::RetrieveLogs(int64_t step_id, LoggingResponse* response) {
mutex_lock l(mu_);
if (legacy_session_) {
auto* worker_cache = legacy_session_->worker_cache();
if (worker_cache) {
auto step_stats = StepStats();
if (worker_cache->RetrieveLogs(step_id, &step_stats)) {
auto* labeled_step_stats = response->add_step();
labeled_step_stats->set_step_id(step_id);
labeled_step_stats->mutable_step_stats()->Swap(&step_stats);
}
}
}
for (const auto& session_kv : sessions_) {
auto session = session_kv.second.get();
if (session) {
auto* worker_cache = session->worker_cache();
if (worker_cache) {
auto step_stats = StepStats();
if (worker_cache->RetrieveLogs(step_id, &step_stats)) {
auto* labeled_step_stats = response->add_step();
labeled_step_stats->set_step_id(step_id);
labeled_step_stats->mutable_step_stats()->Swap(&step_stats);
}
}
}
}
}
void SessionMgr::ClearLogs() {
mutex_lock l(mu_);
if (legacy_session_) {
auto* worker_cache = legacy_session_->worker_cache();
if (worker_cache) {
worker_cache->ClearLogs();
}
}
for (const auto& session_kv : sessions_) {
auto session = session_kv.second.get();
if (session) {
auto* worker_cache = session->worker_cache();
if (worker_cache) {
worker_cache->ClearLogs();
}
}
}
}
void SessionMgr::TeardownCoordinationService() {
coordination_service_ = nullptr;
}
void SessionMgr::TeardownCoordinationServiceAgent() {
coordination_service_agent_ = nullptr;
}
} | #include "tensorflow/core/distributed_runtime/session_mgr.h"
#include <string>
#include "absl/status/status.h"
#include "tensorflow/core/distributed_runtime/error_payloads.h"
#include "tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.h"
#include "tensorflow/core/distributed_runtime/worker_env.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/cluster.pb.h"
namespace tensorflow {
class FakeDevice : public Device {
private:
explicit FakeDevice(const DeviceAttributes& device_attributes)
: Device(nullptr, device_attributes) {}
public:
Status Sync() override { return errors::Unimplemented("FakeDevice::Sync()"); }
Allocator* GetAllocator(AllocatorAttributes attr) override { return nullptr; }
static std::unique_ptr<Device> MakeCPU(const std::string& name) {
DeviceAttributes device_attributes;
device_attributes.set_name(name);
device_attributes.set_device_type(DeviceType("FakeCPU").type());
return std::unique_ptr<Device>(new FakeDevice(device_attributes));
}
};
class SessionMgrTest : public ::testing::Test {
protected:
SessionMgrTest()
: mgr_(&env_, "/job:mnist/replica:0/task:0",
std::unique_ptr<WorkerCacheInterface>(), factory_,
             /*coordination_handler=*/nullptr) {
device_mgr_ = std::make_unique<DynamicDeviceMgr>(
FakeDevice::MakeCPU("/job:mnist/replica:0/task:0/device:fakecpu:0"));
env_.device_mgr = device_mgr_.get();
}
std::unique_ptr<DeviceMgr> device_mgr_;
WorkerEnv env_;
SessionMgr::WorkerCacheFactory factory_ =
[](const ServerDef& server_def, WorkerCacheInterface** worker_cache) {
*worker_cache = nullptr;
return absl::OkStatus();
};
SessionMgr mgr_;
};
TEST_F(SessionMgrTest, CreateSessionSimple) {
ServerDef server_def;
server_def.set_job_name("worker");
server_def.set_task_index(3);
std::string session_handle = "test_session_handle";
TF_EXPECT_OK(mgr_.CreateSession(session_handle, server_def, true));
std::shared_ptr<WorkerSession> session;
TF_EXPECT_OK(mgr_.WorkerSessionForSession(session_handle, &session));
  EXPECT_NE(nullptr, session) << "Session for " << session_handle << " was null";
EXPECT_NE(mgr_.LegacySession(), session);
TF_EXPECT_OK(mgr_.DeleteSession(session_handle));
}
TEST_F(SessionMgrTest, CreateSessionClusterDefWorkerName) {
ServerDef server_def;
server_def.set_job_name("worker");
server_def.set_task_index(3);
auto job = server_def.mutable_cluster()->add_job();
job->set_name("worker");
job->mutable_tasks()->insert({3, "localhost:3333"});
protobuf::RepeatedPtrField<DeviceAttributes> cluster_device_attributes;
DeviceAttributes* local_cpu = cluster_device_attributes.Add();
local_cpu->set_name("/job:worker/replica:0/task:3/device:fakecpu:0");
DeviceAttributes* remote_cpu = cluster_device_attributes.Add();
remote_cpu->set_name("/job:coordinator/replica:0/task:0/device:fakecpu:0");
std::string session_handle = "test_session_handle";
TF_EXPECT_OK(mgr_.CreateSession(session_handle, server_def,
cluster_device_attributes, true));
std::shared_ptr<WorkerSession> session;
TF_EXPECT_OK(mgr_.WorkerSessionForSession(session_handle, &session));
Device* device;
TF_EXPECT_OK(
session->remote_device_mgr()->LookupDevice(local_cpu->name(), &device));
EXPECT_TRUE(device->IsLocal());
  EXPECT_NE(nullptr, session) << "Session for " << session_handle << " was null";
EXPECT_EQ("/job:worker/replica:0/task:3", session->worker_name());
TF_EXPECT_OK(mgr_.DeleteSession(session_handle));
}
TEST_F(SessionMgrTest, CreateSessionDefaultWorkerName) {
ServerDef server_def;
std::string session_handle = "test_session_handle";
TF_EXPECT_OK(mgr_.CreateSession(session_handle, server_def, true));
std::shared_ptr<WorkerSession> session;
TF_EXPECT_OK(mgr_.WorkerSessionForSession(session_handle, &session));
  EXPECT_NE(nullptr, session) << "Session for " << session_handle << " was null";
EXPECT_EQ("/job:mnist/replica:0/task:0", session->worker_name());
TF_EXPECT_OK(mgr_.DeleteSession(session_handle));
}
TEST_F(SessionMgrTest, CreateSessionIsolateSessionState) {
ServerDef server_def;
server_def.set_job_name("worker");
server_def.set_task_index(3);
TF_EXPECT_OK(mgr_.CreateSession("handle_1", server_def, false));
std::shared_ptr<WorkerSession> session_1;
TF_EXPECT_OK(mgr_.WorkerSessionForSession("handle_1", &session_1));
std::vector<Device*> devices_1 = session_1->device_mgr()->ListDevices();
EXPECT_EQ(1, devices_1.size());
TF_EXPECT_OK(mgr_.CreateSession("handle_2", server_def, false));
std::shared_ptr<WorkerSession> session_2;
TF_EXPECT_OK(mgr_.WorkerSessionForSession("handle_2", &session_2));
std::vector<Device*> devices_2 = session_2->device_mgr()->ListDevices();
EXPECT_EQ(1, devices_2.size());
TF_EXPECT_OK(mgr_.CreateSession("handle_3", server_def, true));
std::shared_ptr<WorkerSession> session_3;
TF_EXPECT_OK(mgr_.WorkerSessionForSession("handle_3", &session_3));
std::vector<Device*> devices_3 = session_3->device_mgr()->ListDevices();
EXPECT_EQ(1, devices_3.size());
TF_EXPECT_OK(mgr_.CreateSession("handle_4", server_def, true));
std::shared_ptr<WorkerSession> session_4;
TF_EXPECT_OK(mgr_.WorkerSessionForSession("handle_4", &session_4));
std::vector<Device*> devices_4 = session_4->device_mgr()->ListDevices();
EXPECT_EQ(1, devices_4.size());
EXPECT_EQ(devices_1[0]->resource_manager(), devices_2[0]->resource_manager());
EXPECT_NE(devices_1[0]->resource_manager(), devices_3[0]->resource_manager());
EXPECT_NE(devices_1[0]->resource_manager(), devices_4[0]->resource_manager());
EXPECT_NE(devices_3[0]->resource_manager(), devices_4[0]->resource_manager());
}
TEST_F(SessionMgrTest, CreateSessionWithMasterName) {
ServerDef server_def;
server_def.set_job_name("worker");
server_def.set_task_index(3);
auto job = server_def.mutable_cluster()->add_job();
job->set_name("worker");
job->mutable_tasks()->insert({3, "localhost:3333"});
protobuf::RepeatedPtrField<DeviceAttributes> cluster_device_attributes;
const std::string master_name = "/job:master/replica:0/task:1";
const int64_t old_incarnation = random::New64();
const int64_t new_incarnation = random::New64();
std::string sess_handle1 = "test_session_handle_1";
TF_EXPECT_OK(mgr_.CreateSession(sess_handle1, server_def,
cluster_device_attributes, true, master_name,
old_incarnation));
std::string sess_handle2 = "test_session_handle_2";
TF_EXPECT_OK(mgr_.CreateSession(sess_handle2, server_def,
cluster_device_attributes, true, master_name,
old_incarnation));
std::shared_ptr<WorkerSession> session;
TF_EXPECT_OK(mgr_.WorkerSessionForSession(sess_handle1, &session));
  EXPECT_NE(nullptr, session) << "Session for " << sess_handle1 << " was null";
TF_EXPECT_OK(mgr_.WorkerSessionForSession(sess_handle2, &session));
  EXPECT_NE(nullptr, session) << "Session for " << sess_handle2 << " was null";
std::string sess_handle3 = "test_session_handle_3";
TF_EXPECT_OK(mgr_.CreateSession(sess_handle3, server_def,
cluster_device_attributes, true, master_name,
new_incarnation));
EXPECT_NE(mgr_.WorkerSessionForSession(sess_handle1, &session),
absl::OkStatus())
<< "Session for " << sess_handle1
<< " should have been garbage collected.";
EXPECT_NE(mgr_.WorkerSessionForSession(sess_handle2, &session),
absl::OkStatus())
<< "Session for " << sess_handle2
<< " should have been garbage collected.";
TF_EXPECT_OK(mgr_.WorkerSessionForSession(sess_handle3, &session));
  EXPECT_NE(nullptr, session) << "Session for " << sess_handle3 << " was null";
TF_EXPECT_OK(mgr_.DeleteSession(sess_handle2));
TF_EXPECT_OK(mgr_.DeleteSession(sess_handle3));
}
TEST_F(SessionMgrTest, CreateSessionWithoutMasterName) {
ServerDef server_def;
server_def.set_job_name("worker");
server_def.set_task_index(3);
auto job = server_def.mutable_cluster()->add_job();
job->set_name("worker");
job->mutable_tasks()->insert({3, "localhost:3333"});
protobuf::RepeatedPtrField<DeviceAttributes> cluster_device_attributes;
std::string sess_handle1 = "test_session_handle_no_master_1";
TF_EXPECT_OK(mgr_.CreateSession(sess_handle1, server_def,
cluster_device_attributes, true, "", 0));
std::string sess_handle2 = "test_session_handle_no_master_2";
TF_EXPECT_OK(mgr_.CreateSession(sess_handle2, server_def,
cluster_device_attributes, true, "", 0));
std::shared_ptr<WorkerSession> session;
TF_EXPECT_OK(mgr_.WorkerSessionForSession(sess_handle1, &session));
  EXPECT_NE(nullptr, session) << "Session for " << sess_handle1 << " was null";
TF_EXPECT_OK(mgr_.WorkerSessionForSession(sess_handle2, &session));
  EXPECT_NE(nullptr, session) << "Session for " << sess_handle2 << " was null";
TF_EXPECT_OK(mgr_.DeleteSession(sess_handle1));
TF_EXPECT_OK(mgr_.DeleteSession(sess_handle2));
}
TEST_F(SessionMgrTest, LegacySession) {
std::string session_handle = "";
std::shared_ptr<WorkerSession> session;
TF_EXPECT_OK(mgr_.WorkerSessionForSession(session_handle, &session));
EXPECT_EQ(mgr_.LegacySession(), session);
TF_EXPECT_OK(mgr_.DeleteSession(session_handle));
}
TEST_F(SessionMgrTest, UnknownSessionHandle) {
std::string session_handle = "unknown_session_handle";
std::shared_ptr<WorkerSession> session;
Status s = mgr_.WorkerSessionForSession(session_handle, &session);
EXPECT_TRUE(absl::IsAborted(s));
EXPECT_TRUE(absl::StrContains(s.message(), "Session handle is not found"));
EXPECT_TRUE(s.GetPayload(kWorkerPossiblyRestarted).has_value());
}
TEST_F(SessionMgrTest, WorkerNameFromServerDef) {
ServerDef server_def;
server_def.set_job_name("worker");
server_def.set_task_index(3);
std::string worker_name = SessionMgr::WorkerNameFromServerDef(server_def);
EXPECT_EQ("/job:worker/replica:0/task:3", worker_name);
}
TEST_F(SessionMgrTest, DeleteLegacySession) {
TF_EXPECT_OK(mgr_.DeleteSession(""));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/session_mgr.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/session_mgr_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e7903eb4-9985-4b6c-b0e5-c2348dbcc8ef | cpp | tensorflow/tensorflow | remote_device | tensorflow/core/distributed_runtime/remote_device.cc | tensorflow/core/distributed_runtime/remote_device_test.cc | #include "tensorflow/core/distributed_runtime/remote_device.h"
#include <stdlib.h>
#include <vector>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/common_runtime/renamed_device.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/distributed_runtime/worker_interface.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/protobuf/worker.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
class RemoteDevice : public Device {
public:
RemoteDevice(Env* env, const DeviceAttributes& da)
: Device(env, da),
local_dev_name_(DeviceNameUtils::LocalName(da.name())) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes attr) override { return nullptr; }
ResourceMgr* resource_manager() override {
LOG(FATAL) << "Accessing the resource manager of a remote device is not "
<< "supported.";
std::abort();
}
bool IsLocal() const override { return false; }
bool IsRemoteCallAllowed() const override { return true; }
private:
const string local_dev_name_;
RemoteDevice(const RemoteDevice&) = delete;
void operator=(const RemoteDevice&) = delete;
};
void AsRemoteDevices(
Env* env,
const protobuf::RepeatedPtrField<DeviceAttributes>& device_attributes,
LookupLocalDevice lookup_local_device,
std::vector<std::unique_ptr<Device>>* remote_devices) {
for (const auto& da : device_attributes) {
Device* local_device;
if (lookup_local_device != nullptr &&
lookup_local_device(da.name(), &local_device).ok()) {
remote_devices->emplace_back(RenamedDevice::NewRenamedDevice(
local_device->name(), local_device, false, false));
} else {
auto d = new RemoteDevice(env, da);
remote_devices->emplace_back(d);
}
}
}
void NewRemoteDevices(Env* env, WorkerCacheInterface* worker_cache,
const string& worker_name, NewRemoteDevicesDone done) {
WorkerInterface* wi = worker_cache->GetOrCreateWorker(worker_name);
if (wi == nullptr) {
std::vector<Device*> empty;
done(errors::NotFound("Device ", worker_name, " is not found."), &empty);
return;
}
struct Call {
GetStatusRequest req;
GetStatusResponse resp;
};
Call* call = new Call;
auto cb = [env, worker_cache, worker_name, done, wi,
call](const Status& status) {
Status s = status;
std::vector<Device*> remote_devices;
auto cleanup = gtl::MakeCleanup(
[&worker_cache, &worker_name, &wi, &done, &remote_devices, &s, call] {
worker_cache->ReleaseWorker(worker_name, wi);
done(s, &remote_devices);
delete call;
});
if (!s.ok()) {
return;
}
DeviceNameUtils::ParsedName worker_name_parsed;
if (!DeviceNameUtils::ParseFullName(worker_name, &worker_name_parsed) ||
!worker_name_parsed.has_job || !worker_name_parsed.has_replica ||
!worker_name_parsed.has_task) {
s = errors::InvalidArgument("Could not parse worker name: ", worker_name);
LOG(WARNING) << s;
return;
}
remote_devices.reserve(call->resp.device_attributes_size());
for (const DeviceAttributes& da : call->resp.device_attributes()) {
DeviceNameUtils::ParsedName device_name_parsed;
CHECK(DeviceNameUtils::ParseFullName(da.name(), &device_name_parsed))
<< "Device attribute name '" << da.name() << "' could not be "
<< "parsed. Device Attribute: " << da.DebugString();
if (device_name_parsed.job == worker_name_parsed.job &&
device_name_parsed.replica == worker_name_parsed.replica &&
device_name_parsed.task == worker_name_parsed.task) {
auto d = new RemoteDevice(env, da);
remote_devices.push_back(d);
} else {
DeviceAttributes da_rewritten = da;
da_rewritten.set_name(DeviceNameUtils::FullName(
worker_name_parsed.job, worker_name_parsed.replica,
worker_name_parsed.task, device_name_parsed.type,
device_name_parsed.id));
auto d = new RemoteDevice(env, da_rewritten);
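        // With TPU_NO_POPULATE_DEVICE_LIST_FROM_CLUSTER_SPEC set, only keep
        // devices from the "worker" job or devices whose type is not TPU;
        // otherwise keep every rewritten device.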
if (getenv("TPU_NO_POPULATE_DEVICE_LIST_FROM_CLUSTER_SPEC") !=
nullptr) {
if (worker_name_parsed.job == "worker" ||
device_name_parsed.type.find("TPU") == std::string::npos) {
remote_devices.push_back(d);
}
} else {
remote_devices.push_back(d);
}
}
}
};
  wi->GetStatusAsync(/*opts=*/nullptr, &call->req, &call->resp,
                     /*fail_fast=*/false, cb);
}
std::unique_ptr<Device> NewRemoteDevice(Env* env,
DeviceAttributes device_attribute) {
return std::make_unique<RemoteDevice>(env, device_attribute);
}
} | #include "tensorflow/core/distributed_runtime/remote_device.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_channel.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_testlib.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.h"
#include "tensorflow/core/distributed_runtime/worker_interface.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
const char* const kSession = "remote_session";
class RemoteDeviceTest : public ::testing::Test {
protected:
string remote_name_;
std::unique_ptr<WorkerCacheInterface> worker_cache_;
WorkerInterface* wi_;
std::vector<Device*> devices_;
std::unique_ptr<test::TestCluster> cluster_;
std::unique_ptr<GrpcWorkerEnv> grpc_worker_env_;
RemoteDeviceTest() {
SessionOptions options;
(*options.config.mutable_device_count())["CPU"] = 2;
TF_CHECK_OK(test::TestCluster::MakeTestCluster(options, 1, &cluster_));
const string& hostport = cluster_->targets()[0];
GrpcChannelSpec spec;
TF_CHECK_OK(spec.AddHostPortsJob("localhost", {hostport}));
ChannelCreationFunction channel_func =
ConvertToChannelCreationFunction(NewHostPortGrpcChannel);
std::shared_ptr<GrpcChannelCache> channel_cache(
NewGrpcChannelCache(spec, channel_func));
grpc_worker_env_.reset(CreateGrpcWorkerEnv());
worker_cache_.reset(
NewGrpcWorkerCache(channel_cache, grpc_worker_env_.get()));
remote_name_ = "/job:localhost/replica:0/task:0";
wi_ = worker_cache_->GetOrCreateWorker(remote_name_);
}
~RemoteDeviceTest() override {
worker_cache_->ReleaseWorker(remote_name_, wi_);
}
void SetUp() override {
Notification n;
NewRemoteDevices(Env::Default(), worker_cache_.get(), remote_name_,
[&n, this](const Status& s, std::vector<Device*>* found) {
TF_CHECK_OK(s);
devices_ = *found;
n.Notify();
});
n.WaitForNotification();
EXPECT_EQ(devices_.size(), 2);
std::sort(devices_.begin(), devices_.end(), [](Device* a, Device* b) {
return a->name().compare(b->name()) < 0;
});
}
void TearDown() override {
for (auto d : devices_) delete d;
}
};
TEST_F(RemoteDeviceTest, GetStatus) {
EXPECT_EQ(devices_[0]->name(),
strings::StrCat(remote_name_, "/device:CPU:0"));
EXPECT_EQ(devices_[0]->attributes().device_type(),
DeviceType(DEVICE_CPU).type());
EXPECT_EQ(devices_[0]->attributes().memory_limit(), 256 << 20);
EXPECT_EQ(devices_[1]->name(),
strings::StrCat(remote_name_, "/device:CPU:1"));
EXPECT_EQ(devices_[1]->attributes().memory_limit(), 256 << 20);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/remote_device.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/remote_device_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ed544b03-b54a-491f-ae4b-4e50be0c4758 | cpp | tensorflow/tensorflow | partial_run_mgr | tensorflow/core/distributed_runtime/partial_run_mgr.cc | tensorflow/core/distributed_runtime/partial_run_mgr_test.cc | #include "tensorflow/core/distributed_runtime/partial_run_mgr.h"
namespace tensorflow {
bool PartialRunMgr::FindOrCreate(int step_id,
CancellationManager** cancellation_manager) {
mutex_lock l(mu_);
auto it = step_id_to_partial_run_.find(step_id);
if (it != step_id_to_partial_run_.end()) {
*cancellation_manager = it->second->cancellation_manager.get();
return false;
}
std::unique_ptr<PartialRunState> partial_run =
std::make_unique<PartialRunState>();
partial_run->cancellation_manager = std::make_unique<CancellationManager>();
*cancellation_manager = partial_run->cancellation_manager.get();
step_id_to_partial_run_[step_id] = std::move(partial_run);
return true;
}
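// Typical caller pattern (a sketch, not from the original source):
//   CancellationManager* cm = nullptr;
//   if (partial_run_mgr->FindOrCreate(step_id, &cm)) {
//     // First request for this step: start the executor, wiring in `cm`.
//   }
//   // Either way, `cm` can be used to cancel the in-flight partial run.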
void PartialRunMgr::ExecutorDone(int step_id, const Status& executor_status) {
StatusCallback done;
Status callback_status;
{
mutex_lock l(mu_);
auto run_it = step_id_to_partial_run_.find(step_id);
if (run_it == step_id_to_partial_run_.end()) {
return;
}
done = std::move(run_it->second->final_callback);
if (!executor_status.ok()) {
run_it->second->final_status = executor_status;
}
callback_status = run_it->second->final_status;
run_it->second->executor_done = true;
}
if (done != nullptr) {
done(callback_status);
mutex_lock l(mu_);
step_id_to_partial_run_.erase(step_id);
}
}
void PartialRunMgr::PartialRunDone(int step_id, StatusCallback done,
const Status& status) {
Status callback_status;
{
mutex_lock l(mu_);
auto run_it = step_id_to_partial_run_.find(step_id);
if (run_it == step_id_to_partial_run_.end()) {
return;
}
run_it->second->final_status.Update(status);
if (!run_it->second->executor_done) {
run_it->second->final_callback = std::move(done);
return;
}
callback_status = run_it->second->final_status;
}
done(callback_status);
mutex_lock l(mu_);
step_id_to_partial_run_.erase(step_id);
}
} | #include "tensorflow/core/distributed_runtime/partial_run_mgr.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(PartialRunMgrFindOrCreate, Create) {
PartialRunMgr partial_run_mgr;
int step_id = 1;
CancellationManager* cancellation_manager;
partial_run_mgr.FindOrCreate(step_id, &cancellation_manager);
EXPECT_TRUE(cancellation_manager != nullptr);
}
TEST(PartialRunMgrFindOrCreate, Find) {
PartialRunMgr partial_run_mgr;
int step_id = 1;
CancellationManager* cancellation_manager;
partial_run_mgr.FindOrCreate(step_id, &cancellation_manager);
CancellationManager* found_cancellation_manager;
partial_run_mgr.FindOrCreate(step_id, &found_cancellation_manager);
EXPECT_EQ(cancellation_manager, found_cancellation_manager);
}
TEST(PartialRunMgrFindOrCreate, NewCreate) {
PartialRunMgr partial_run_mgr;
int step_id = 1;
CancellationManager* cancellation_manager;
partial_run_mgr.FindOrCreate(step_id, &cancellation_manager);
int new_step_id = 2;
CancellationManager* new_cancellation_manager;
partial_run_mgr.FindOrCreate(new_step_id, &new_cancellation_manager);
EXPECT_NE(cancellation_manager, new_cancellation_manager);
}
TEST(PartialRunMgr, PartialRunRemoved) {
PartialRunMgr partial_run_mgr;
int step_id = 1;
CancellationManager* cancellation_manager;
partial_run_mgr.FindOrCreate(step_id, &cancellation_manager);
int called = 0;
partial_run_mgr.PartialRunDone(
step_id, [&called](Status status) { called++; }, absl::OkStatus());
partial_run_mgr.ExecutorDone(step_id, absl::OkStatus());
partial_run_mgr.PartialRunDone(
step_id, [&called](Status status) { called++; }, absl::OkStatus());
partial_run_mgr.ExecutorDone(step_id, absl::OkStatus());
EXPECT_EQ(1, called);
}
struct StatusTestParam {
Status executor_status;
Status partial_run_status;
Status expected_status;
};
class StatusPropagationTest : public ::testing::TestWithParam<StatusTestParam> {
protected:
PartialRunMgr partial_run_mgr_;
Notification invoked_;
Status status_;
void set_status(const Status& status) {
status_ = status;
invoked_.Notify();
}
Status status() {
invoked_.WaitForNotification();
return status_;
}
};
TEST_P(StatusPropagationTest, ExecutorDoneFirst) {
StatusTestParam param = GetParam();
int step_id = 1;
CancellationManager* cancellation_manager;
partial_run_mgr_.FindOrCreate(step_id, &cancellation_manager);
partial_run_mgr_.ExecutorDone(step_id, param.executor_status);
partial_run_mgr_.PartialRunDone(step_id,
[this](Status status) { set_status(status); },
param.partial_run_status);
EXPECT_EQ(status(), param.expected_status);
}
TEST_P(StatusPropagationTest, PartialRunDoneFirst) {
StatusTestParam param = GetParam();
int step_id = 1;
CancellationManager* cancellation_manager;
partial_run_mgr_.FindOrCreate(step_id, &cancellation_manager);
partial_run_mgr_.PartialRunDone(step_id,
[this](Status status) { set_status(status); },
param.partial_run_status);
partial_run_mgr_.ExecutorDone(step_id, param.executor_status);
EXPECT_EQ(status(), param.expected_status);
}
Status ExecutorError() { return errors::Internal("executor error"); }
Status PartialRunError() { return errors::Internal("partial run error"); }
INSTANTIATE_TEST_SUITE_P(
PartialRunMgr, StatusPropagationTest,
::testing::Values(
StatusTestParam{absl::OkStatus(), absl::OkStatus(), absl::OkStatus()},
StatusTestParam{ExecutorError(), absl::OkStatus(), ExecutorError()},
StatusTestParam{absl::OkStatus(), PartialRunError(), PartialRunError()},
StatusTestParam{ExecutorError(), PartialRunError(), ExecutorError()}));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/partial_run_mgr.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/partial_run_mgr_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
09bea60d-dbf0-4bb3-b457-acc4a46d50db | cpp | tensorflow/tensorflow | collective_rma_distributed | tensorflow/core/distributed_runtime/collective_rma_distributed.cc | tensorflow/core/distributed_runtime/collective_rma_distributed_test.cc | #include "tensorflow/core/distributed_runtime/collective_rma_distributed.h"
#include <memory>
#include "absl/status/status.h"
#include "tensorflow/core/common_runtime/base_collective_executor.h"
#include "tensorflow/core/common_runtime/copy_tensor.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/distributed_runtime/call_options.h"
#include "tensorflow/core/distributed_runtime/cancellable_call.h"
#include "tensorflow/core/distributed_runtime/request_id.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/protobuf_internal.h"
#include "tensorflow/core/profiler/lib/scoped_memory_debug_annotation.h"
#include "tensorflow/core/protobuf/transport_options.pb.h"
#include "tensorflow/core/protobuf/worker.pb.h"
namespace tensorflow {
namespace {
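// RecvBufCall wraps a single RecvBuf RPC to the peer task's worker so that an
// in-flight request can be cancelled through the shared CancellationManager.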
class RecvBufCall : public CancellableCall {
public:
RecvBufCall(int64_t step_id, const string& peer_device,
const string& peer_task, const string& key, Device* to_device,
DeviceContext* to_device_ctx,
const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,
const DeviceLocality& client_locality,
const DeviceAttributes& server_attributes,
CancellationManager* cancel_mgr, WorkerCacheInterface* wc)
: CancellableCall(cancel_mgr, peer_task, wc) {
req_.set_step_id(step_id);
req_.set_buf_rendezvous_key(key);
*req_.mutable_client_locality() = client_locality;
*req_.mutable_server_locality() = server_attributes.locality();
req_.set_num_bytes(to_tensor->TotalBytes());
req_.set_buf_ptr(reinterpret_cast<int64_t>(DMAHelper::base(to_tensor)));
req_.set_src_device(peer_device);
req_.set_src_incarnation(server_attributes.incarnation());
req_.set_dst_device(to_device->name());
req_.set_request_id(GetUniqueRequestId());
}
~RecvBufCall() override {}
void IssueCall(const StatusCallback& done) override {
wi_->RecvBufAsync(&opts_, &req_, &resp_, done);
}
RecvBufRequest req_;
RecvBufResponse resp_;
};
void PopulateTensorFromExtra(const RecvBufRespExtra& extra,
Tensor* cpu_tensor) {
char* head = reinterpret_cast<char*>(DMAHelper::base(cpu_tensor));
for (const auto& tensor_content_chunk : extra.tensor_content()) {
memcpy(head, std::string(tensor_content_chunk).data(),
tensor_content_chunk.size());
head += tensor_content_chunk.size();
}
}
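// Reassembles a tensor whose bytes arrived in the response's
// transport_options as one or more content chunks, as opposed to having been
// written directly into the pre-registered buf_ptr by the transport.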
Status PopulateTensorFromResponse(const RecvBufResponse& response,
Tensor* cpu_tensor) {
const bool has_transport_options = response.has_transport_options();
if (!has_transport_options) return absl::OkStatus();
const int64_t total_bytes = cpu_tensor->TotalBytes();
int64_t num_bytes = 0;
RecvBufRespExtra extra;
response.transport_options().UnpackTo(&extra);
for (const auto& chunk : extra.tensor_content()) {
num_bytes += chunk.size();
}
if (num_bytes != total_bytes) {
return errors::Internal("Tensor Size Mismatch: RecvBufResponse returned ",
num_bytes,
" bytes, expected: ", cpu_tensor->TotalBytes());
}
PopulateTensorFromExtra(extra, cpu_tensor);
return absl::OkStatus();
}
}
void CollectiveRemoteAccessDistributed::RecvFromPeer(
const string& peer_device, const string& peer_task, bool peer_is_local,
const string& key, Device* to_device, DeviceContext* to_device_ctx,
const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,
const DeviceLocality& client_locality, int dev_to_dev_stream_index,
CancellationManager* cancellation_manager, const StatusCallback& done) {
if (peer_is_local) {
CollectiveRemoteAccessLocal::RecvFromPeer(
peer_device, peer_task, peer_is_local, key, to_device, to_device_ctx,
to_alloc_attr, to_tensor, client_locality, dev_to_dev_stream_index,
cancellation_manager, done);
return;
}
struct State {
DeviceAttributes server_attributes;
std::unique_ptr<RecvBufCall> call;
std::unique_ptr<Tensor> cpu_tensor;
};
State* state = new State;
Status s = dev_resolver_->GetDeviceAttributes(peer_device,
&state->server_attributes);
if (!s.ok()) {
delete state;
done(s);
return;
}
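  // When receiving onto an accelerator device, land the bytes in a
  // GPU-compatible host tensor first; the callback below then DMA-copies the
  // staged tensor to `to_tensor` on the device.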
Tensor* dst_tensor = nullptr;
Device* cpu_dev = nullptr;
if (to_device->tensorflow_accelerator_device_info()) {
Status status = dev_mgr_->LookupDevice("CPU:0", &cpu_dev);
if (!status.ok()) {
delete state;
      done(status);
return;
}
AllocatorAttributes cpu_attr;
cpu_attr.set_gpu_compatible(true);
tsl::profiler::ScopedMemoryDebugAnnotation op_annotation(
"CollectiveRemoteAccessDistributed::RecvFromPeer"
"::recv_buf_callback",
step_id_, "dynamic", to_tensor->dtype(),
[to_tensor]() { return to_tensor->shape().DebugString(); });
state->cpu_tensor =
std::make_unique<Tensor>(cpu_dev->GetAllocator(cpu_attr),
to_tensor->dtype(), to_tensor->shape());
dst_tensor = state->cpu_tensor.get();
} else {
dst_tensor = to_tensor;
}
auto recv_buf_callback =
[this, state, to_device, to_alloc_attr, to_device_ctx, to_tensor, cpu_dev,
dev_to_dev_stream_index, dst_tensor, done](const Status& s) {
if (s.ok()) {
Status status =
PopulateTensorFromResponse(state->call->resp_, dst_tensor);
if (!status.ok()) {
done(status);
delete state;
return;
}
if (to_device->tensorflow_accelerator_device_info()) {
AllocatorAttributes cpu_attr;
cpu_attr.set_gpu_compatible(true);
CopyTensor::ViaDMA("",
nullptr , to_device_ctx, cpu_dev,
to_device, cpu_attr, to_alloc_attr, dst_tensor,
to_tensor, dev_to_dev_stream_index,
[this, state, done](const Status& s) {
delete state;
work_queue_->Schedule([s, done] { done(s); });
});
return;
}
}
delete state;
done(s);
};
state->call = std::make_unique<RecvBufCall>(
step_id_, peer_device, peer_task, key, to_device, to_device_ctx,
to_alloc_attr, dst_tensor, client_locality, state->server_attributes,
cancellation_manager, worker_cache_);
CancellationToken abortion_token =
abortion_cancel_mgr_.get_cancellation_token();
bool already_aborted = !abortion_cancel_mgr_.RegisterCallback(
abortion_token, [state] { state->call->Cancel(); });
if (already_aborted) {
recv_buf_callback(errors::Cancelled("collective ops already aborted"));
} else {
state->call->Start([this, abortion_token,
done = std::move(recv_buf_callback)](const Status& s) {
abortion_cancel_mgr_.DeregisterCallback(abortion_token);
done(s);
});
}
}
void CollectiveRemoteAccessDistributed::CheckPeerHealth(
const string& peer_task, int64_t timeout_in_ms,
const StatusCallback& done) {
if (peer_task == task_name_) {
done(absl::OkStatus());
return;
}
WorkerInterface* wi = worker_cache_->GetOrCreateWorker(peer_task);
if (wi == nullptr) {
done(errors::InvalidArgument(peer_task,
" not found. It's probably invalid. The "
"valid form is /job:xxx/replica:0/task:N"));
return;
}
auto opts = new CallOptions();
opts->SetTimeout(timeout_in_ms);
auto req = new GetStatusRequest();
auto resp = new GetStatusResponse();
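  // Probe the peer with GetStatus and compare the device incarnations it
  // reports against our cached ones; a missing incarnation means the peer has
  // restarted since its devices were resolved. NotFound from the resolver
  // simply means nothing is cached yet, which is treated as healthy.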
wi->GetStatusAsync(
      opts, req, resp, /*fail_fast=*/true,
[this, opts, req, resp, wi, peer_task, done](Status s) {
std::vector<DeviceAttributes> cached_attrs;
if (s.ok()) {
s = dev_resolver_->GetAllDeviceAttributes(peer_task, &cached_attrs);
}
if (s.ok()) {
absl::flat_hash_set<uint64> remote_incarnations;
for (const DeviceAttributes& da : resp->device_attributes()) {
remote_incarnations.insert(da.incarnation());
}
for (const DeviceAttributes& attr : cached_attrs) {
if (!remote_incarnations.contains(attr.incarnation())) {
s = errors::FailedPrecondition(
attr.name(), " with incarnation ", attr.incarnation(),
" is not available. This usually means ", peer_task,
" has restarted");
break;
}
}
} else if (absl::IsNotFound(s)) {
s = absl::OkStatus();
}
delete opts;
delete req;
delete resp;
worker_cache_->ReleaseWorker(peer_task, wi);
done(s);
});
}
void CollectiveRemoteAccessDistributed::StartAbort(const Status& s) {
CollectiveRemoteAccessLocal::StartAbort(s);
abortion_cancel_mgr_.StartCancel();
}
} | #include "tensorflow/core/distributed_runtime/collective_rma_distributed.h"
#include <memory>
#include "google/protobuf/any.pb.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/test_utils.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/transport_options.pb.h"
#include "tensorflow/core/protobuf/worker.pb.h"
namespace tensorflow {
namespace {
class FakeAllocator : public Allocator {
public:
string Name() override { return "fake"; }
void* AllocateRaw(size_t alignment, size_t num_bytes) override {
return port::AlignedMalloc(num_bytes, alignment);
}
void DeallocateRaw(void* ptr) override { return port::AlignedFree(ptr); }
};
static std::unique_ptr<Device> NewDevice(const string& type, const string& name,
Allocator* allocator) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr, Allocator* allocator)
: Device(nullptr, attr), allocator_(allocator) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return allocator_; }
private:
Allocator* const allocator_;
};
DeviceAttributes attr;
attr.set_name(name);
attr.set_device_type(type);
attr.mutable_locality()->set_numa_node(3);
attr.set_incarnation(random::New64());
return std::make_unique<FakeDevice>(attr, allocator);
}
static int64_t kStepId = 123;
class FakeWorker : public TestWorkerInterface {
public:
FakeWorker(const string& name, DeviceMgr* dev_mgr,
DeviceResolverDistributed* dres, bool is_failed,
bool set_tensor_in_extra)
: name_(name),
device_mgr_(dev_mgr),
device_resolver_(dres),
buf_rendezvous_(kStepId, dev_mgr),
is_failed_(is_failed),
set_tensor_in_extra_(set_tensor_in_extra) {}
BufRendezvous* buf_rendezvous() { return &buf_rendezvous_; }
void GetStatusAsync(CallOptions* opts, const GetStatusRequest* request,
GetStatusResponse* response, bool fail_fast,
StatusCallback done) override {
if (is_failed_) {
done(errors::Unavailable("peer down"));
return;
}
std::vector<DeviceAttributes> dev_attr;
device_mgr_->ListDeviceAttributes(&dev_attr);
for (const auto& da : dev_attr) {
*response->add_device_attributes() = da;
}
done(absl::OkStatus());
}
void RecvBufAsync(CallOptions* opts, const RecvBufRequest* request,
RecvBufResponse* response, StatusCallback done) override {
if (is_failed_) {
done(errors::Unavailable("peer down"));
return;
}
opts->SetCancelCallback([this]() {
SchedClosure([this]() {
Env::Default()->SleepForMicroseconds(100);
buf_rendezvous_.StartAbort(errors::Internal("Cancelled"));
});
});
VLOG(2) << "ConsumeBuf key=" << request->buf_rendezvous_key()
<< " src_device=" << request->src_device()
<< " src_incarnation=" << request->src_incarnation();
buf_rendezvous_.ConsumeBuf(
request->buf_rendezvous_key(), request->src_device(),
request->src_incarnation(),
[this, opts, request, response, done](const Status& status,
BufRendezvous::Hook* h) {
Status s = status;
if (s.ok()) {
opts->ClearCancelCallback();
int64_t num_bytes = h->prod_value->TotalBytes();
if (set_tensor_in_extra_) {
RecvBufRespExtra extra;
extra.add_tensor_content(string(
reinterpret_cast<const char*>(DMAHelper::base(h->prod_value)),
num_bytes));
response->mutable_transport_options()->PackFrom(extra);
} else {
if (request->num_bytes() != num_bytes) {
s = errors::Internal("Tensor Size Mismatch.");
} else {
memcpy(reinterpret_cast<void*>(request->buf_ptr()),
DMAHelper::base(h->prod_value), num_bytes);
}
}
}
done(s);
if (h) BufRendezvous::DoneWithHook(h);
},
/*cancellation_manager=*/nullptr);
}
private:
string name_;
DeviceMgr* device_mgr_;
DeviceResolverDistributed* device_resolver_;
BufRendezvous buf_rendezvous_;
bool is_failed_;
const bool set_tensor_in_extra_;
};
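// Worker cache that answers device-locality queries by asking the fake
// workers registered with it.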
class FakeCache : public TestWorkerCache {
public:
bool GetDeviceLocalityNonBlocking(const string& device,
DeviceLocality* locality) override {
return false;
}
void GetDeviceLocalityAsync(const string& device, DeviceLocality* locality,
StatusCallback done) override {
string task_name;
string dev_part;
if (!DeviceNameUtils::SplitDeviceName(device, &task_name, &dev_part)) {
done(errors::Internal("failed to parse device name"));
return;
}
auto it = workers_.find(task_name);
if (it == workers_.end()) {
done(errors::Internal("failed to find worker ", task_name));
return;
}
WorkerInterface* wi = it->second;
GetStatusRequest req;
GetStatusResponse resp;
Status status = wi->GetStatus(&req, &resp);
if (!status.ok()) {
done(status);
return;
}
for (const auto& it : resp.device_attributes()) {
if (it.name() == device) {
*locality = it.locality();
done(absl::OkStatus());
return;
}
}
done(errors::Internal("device not found: ", device));
}
};
enum TEST_PARAM_DEVICE_TYPE {
TEST_PARAM_DEVICE_TYPE_CPU = 0,
TEST_PARAM_DEVICE_TYPE_GPU,
};
enum TEST_PARAM_TENSOR_LOC {
TEST_PARAM_TENSOR_LOC_AT_BUF_PTR = 0,
TEST_PARAM_TENSOR_LOC_IN_EXTRA,
};
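// The fixture is parameterized over the destination device type (CPU or GPU)
// and over where the received tensor is delivered: at the request's buf_ptr
// or inside the RecvBufRespExtra transport options.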
class CollRMADistTest
: public ::testing::TestWithParam<
std::tuple<TEST_PARAM_DEVICE_TYPE, TEST_PARAM_TENSOR_LOC>> {
protected:
CollRMADistTest()
: work_queue_(
std::make_shared<UnboundedWorkQueue>(Env::Default(), "test")) {}
~CollRMADistTest() override {
for (DeviceMgr* dm : device_mgrs_) {
delete dm;
}
for (auto it : dev_resolvers_) {
delete it.second;
}
for (FakeWorker* w : workers_) {
delete w;
}
}
void SetUp() override {
const int num_workers = 2;
const int num_devices = 1;
string device_type = "CPU";
string dev0_worker_name;
for (int w = 0; w < num_workers; ++w) {
string name = strings::StrCat("/job:worker/replica:0/task:", w);
if (w == 0) {
dev0_worker_name = name;
}
DefineWorker(name, device_type, num_devices);
}
rma_ = std::make_unique<CollectiveRemoteAccessDistributed>(
device_mgrs_[0], dev_resolvers_[dev0_worker_name], work_queue_, &wc_,
kStepId, "/job:worker/replica:0/task:0");
const int kNumElts = 8;
expected_value_ = Tensor(DT_FLOAT, {kNumElts});
to_tensor_ = Tensor(DT_FLOAT, {kNumElts});
large_response_ = Tensor(DT_FLOAT, {2 * kNumElts});
auto exp_alias = expected_value_.flat<float>();
auto to_alias = to_tensor_.flat<float>();
auto large_response_alias = large_response_.flat<float>();
for (int i = 0; i < kNumElts; ++i) {
exp_alias(i) = i;
to_alias(i) = -1;
}
for (int i = 0; i < 2 * kNumElts; ++i) {
large_response_alias(i) = -2;
}
}
void ResolveDeviceAttributes() {
for (auto& dev_resolver_item : dev_resolvers_) {
DeviceResolverDistributed* dev_resolver = dev_resolver_item.second;
for (const auto& item : dev_by_task_) {
TF_CHECK_OK(dev_resolver->UpdateDeviceAttributes(item.second));
}
}
}
void DefineWorker(const string& worker_name, const string& device_type,
int num_devices, bool is_failed = false) {
std::vector<std::unique_ptr<Device>> devices;
for (int i = 0; i < num_devices; ++i) {
devices.push_back(NewDevice(
device_type,
strings::StrCat(worker_name, "/device:", device_type, ":", i),
&fake_allocator_));
}
DeviceMgr* dev_mgr = new StaticDeviceMgr(std::move(devices));
device_mgrs_.push_back(dev_mgr);
std::vector<DeviceAttributes>* dv = &dev_by_task_[worker_name];
dv->clear();
for (auto d : dev_mgr->ListDevices()) {
dv->push_back(d->attributes());
}
DeviceResolverDistributed* dev_res = new DeviceResolverDistributed(dev_mgr);
dev_resolvers_[worker_name] = dev_res;
FakeWorker* fw =
new FakeWorker(worker_name, dev_mgr, dev_res, is_failed,
std::get<TEST_PARAM_TENSOR_LOC>(GetParam()) ==
TEST_PARAM_TENSOR_LOC_IN_EXTRA);
workers_.push_back(fw);
wc_.AddWorker(worker_name, fw);
}
void RestartWorker(const string& worker_name, const string& device_type,
int num_devices, bool is_failed = false) {
auto it = dev_resolvers_.find(worker_name);
if (it != dev_resolvers_.end()) {
delete it->second;
dev_resolvers_.erase(it);
}
DefineWorker(worker_name, device_type, num_devices, is_failed);
}
void ValidateResultTensor() {
ASSERT_EQ(expected_value_.NumElements(), to_tensor_.NumElements());
for (int i = 0; i < to_tensor_.NumElements(); ++i) {
EXPECT_FLOAT_EQ(expected_value_.flat<float>()(i),
to_tensor_.flat<float>()(i));
}
}
void ValidateResultTensorUnchanged() {
for (int i = 0; i < to_tensor_.NumElements(); ++i) {
EXPECT_FLOAT_EQ(-1, to_tensor_.flat<float>()(i));
}
}
void MaybeSetGPUDevice(Device* dst_device) {
if (std::get<TEST_PARAM_DEVICE_TYPE>(GetParam()) ==
TEST_PARAM_DEVICE_TYPE_GPU) {
dst_device->set_tensorflow_accelerator_device_info(
&accelerator_device_info_);
}
}
FakeCache wc_;
CancellationManager cm_;
std::vector<DeviceMgr*> device_mgrs_;
std::unordered_map<string, DeviceResolverDistributed*> dev_resolvers_;
std::unordered_map<string, std::vector<DeviceAttributes>> dev_by_task_;
std::shared_ptr<UnboundedWorkQueue> work_queue_;
std::vector<FakeWorker*> workers_;
std::unique_ptr<CollectiveRemoteAccessDistributed> rma_;
mutex mu_;
int num_done_ TF_GUARDED_BY(mu_);
condition_variable done_;
CallOptions opts_;
DeviceLocality device_locality_;
AllocatorAttributes alloc_attr_;
FakeAllocator fake_allocator_;
DeviceBase::AcceleratorDeviceInfo accelerator_device_info_;
Tensor expected_value_;
Tensor large_response_;
Tensor to_tensor_;
};
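// The producer provides the buffer before the consumer calls RecvFromPeer.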
TEST_P(CollRMADistTest, ProdFirstOK) {
ResolveDeviceAttributes();
Notification consumer_note;
Notification producer_note;
Status consumer_status;
Status producer_status;
FakeWorker* wi = workers_[1];
const string kBufKey = "fake_buf_key";
wi->buf_rendezvous()->ProvideBuf(
kBufKey, /*device=*/nullptr, /*dev_ctx=*/nullptr, &expected_value_,
AllocatorAttributes(),
[&producer_note, &producer_status](const Status& s) {
producer_status.Update(s);
producer_note.Notify();
},
/*cancellation_manager=*/nullptr);
Device* dst_device = nullptr;
string dev_name = "CPU:0";
TF_EXPECT_OK(device_mgrs_[0]->LookupDevice(dev_name, &dst_device));
DeviceContext* to_device_ctx = nullptr;
MaybeSetGPUDevice(dst_device);
rma_->RecvFromPeer(
"/job:worker/replica:0/task:1/device:" + dev_name,
"/job:worker/replica:0/task:1",
/*peer_is_local=*/false,
kBufKey, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
device_locality_, /*dev_to_dev_stream_index=*/0,
/*cancellation_manager=*/nullptr,
[&consumer_status, &consumer_note](const Status& s) {
consumer_status = s;
consumer_note.Notify();
});
consumer_note.WaitForNotification();
TF_EXPECT_OK(consumer_status);
producer_note.WaitForNotification();
TF_EXPECT_OK(producer_status);
ValidateResultTensor();
}
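// The consumer calls RecvFromPeer before the producer provides the buffer.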
TEST_P(CollRMADistTest, ConsFirstOK) {
ResolveDeviceAttributes();
Notification consumer_note;
Notification producer_note;
Status consumer_status;
Status producer_status;
FakeWorker* wi = workers_[1];
const string kBufKey = "fake_buf_key";
Device* dst_device = nullptr;
string dev_name = "CPU:0";
TF_EXPECT_OK(device_mgrs_[0]->LookupDevice(dev_name, &dst_device));
MaybeSetGPUDevice(dst_device);
DeviceContext* to_device_ctx = nullptr;
rma_->RecvFromPeer(
"/job:worker/replica:0/task:1/device:" + dev_name,
"/job:worker/replica:0/task:1",
/*peer_is_local=*/false,
kBufKey, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
device_locality_, /*dev_to_dev_stream_index=*/0,
/*cancellation_manager=*/nullptr,
[&consumer_status, &consumer_note](const Status& s) {
consumer_status = s;
consumer_note.Notify();
});
wi->buf_rendezvous()->ProvideBuf(
kBufKey, /*device=*/nullptr, /*dev_ctx=*/nullptr, &expected_value_,
AllocatorAttributes(),
[&producer_note, &producer_status](const Status& s) {
producer_status.Update(s);
producer_note.Notify();
},
/*cancellation_manager=*/nullptr);
consumer_note.WaitForNotification();
TF_EXPECT_OK(consumer_status);
producer_note.WaitForNotification();
TF_EXPECT_OK(producer_status);
ValidateResultTensor();
}
TEST_P(CollRMADistTest, ConsFirstAbort) {
ResolveDeviceAttributes();
Notification consumer_note;
Status consumer_status;
const string kBufKey = "fake_buf_key";
Device* dst_device = nullptr;
string dev_name = "CPU:0";
TF_EXPECT_OK(device_mgrs_[0]->LookupDevice(dev_name, &dst_device));
MaybeSetGPUDevice(dst_device);
DeviceContext* to_device_ctx = nullptr;
rma_->RecvFromPeer(
"/job:worker/replica:0/task:1/device:" + dev_name,
"/job:worker/replica:0/task:1",
/*peer_is_local=*/false,
kBufKey, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
device_locality_, /*dev_to_dev_stream_index=*/0,
/*cancellation_manager=*/nullptr,
[&consumer_status, &consumer_note](const Status& s) {
consumer_status = s;
consumer_note.Notify();
});
rma_->StartAbort(errors::Internal("Deliberate Failure"));
consumer_note.WaitForNotification();
EXPECT_EQ(consumer_status.message(), "Cancelled");
}
TEST_P(CollRMADistTest, ResponseTooLarge) {
ResolveDeviceAttributes();
Notification consumer_note;
Notification producer_note;
Status consumer_status;
Status producer_status;
FakeWorker* wi = workers_[1];
const string kBufKey = "fake_buf_key";
wi->buf_rendezvous()->ProvideBuf(
kBufKey, /*device=*/nullptr, /*dev_ctx=*/nullptr, &large_response_,
AllocatorAttributes(),
[&producer_note, &producer_status](const Status& s) {
producer_status.Update(s);
producer_note.Notify();
},
/*cancellation_manager=*/nullptr);
Device* dst_device = nullptr;
string dev_name = "CPU:0";
TF_EXPECT_OK(device_mgrs_[0]->LookupDevice(dev_name, &dst_device));
DeviceContext* to_device_ctx = nullptr;
MaybeSetGPUDevice(dst_device);
rma_->RecvFromPeer(
"/job:worker/replica:0/task:1/device:" + dev_name,
"/job:worker/replica:0/task:1",
/*peer_is_local=*/false,
kBufKey, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
device_locality_, /*dev_to_dev_stream_index=*/0,
/*cancellation_manager=*/nullptr,
[&consumer_status, &consumer_note](const Status& s) {
consumer_status = s;
consumer_note.Notify();
});
consumer_note.WaitForNotification();
EXPECT_THAT(consumer_status.message(),
::testing::HasSubstr("Tensor Size Mismatch"));
producer_note.WaitForNotification();
TF_EXPECT_OK(producer_status);
ValidateResultTensorUnchanged();
}
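// A first exchange succeeds; after the peer restarts (new incarnation), a
// second RecvFromPeer must fail with FailedPrecondition.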
TEST_P(CollRMADistTest, WorkerRestart) {
ResolveDeviceAttributes();
Notification consumer_note;
Notification producer_note;
Status consumer_status;
Status producer_status;
FakeWorker* wi = workers_[1];
const string buf_key = "fake_buf_key";
Device* dst_device = nullptr;
string dev_name = "CPU:0";
TF_EXPECT_OK(device_mgrs_[0]->LookupDevice(dev_name, &dst_device));
MaybeSetGPUDevice(dst_device);
DeviceContext* to_device_ctx = nullptr;
rma_->RecvFromPeer(
"/job:worker/replica:0/task:1/device:" + dev_name,
"/job:worker/replica:0/task:1",
/*peer_is_local=*/false,
buf_key, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
device_locality_, /*dev_to_dev_stream_index=*/0,
/*cancellation_manager=*/nullptr,
[&consumer_status, &consumer_note](const Status& s) {
consumer_status = s;
consumer_note.Notify();
});
wi->buf_rendezvous()->ProvideBuf(
buf_key, /*device=*/nullptr, /*dev_ctx=*/nullptr, &expected_value_,
AllocatorAttributes(),
[&producer_note, &producer_status](const Status& s) {
producer_status.Update(s);
producer_note.Notify();
},
/*cancellation_manager=*/nullptr);
consumer_note.WaitForNotification();
TF_EXPECT_OK(consumer_status);
producer_note.WaitForNotification();
TF_EXPECT_OK(producer_status);
ValidateResultTensor();
RestartWorker("/job:worker/replica:0/task:1", "CPU", 1);
Notification post_restart_note;
rma_->RecvFromPeer(
"/job:worker/replica:0/task:1/device:" + dev_name,
"/job:worker/replica:0/task:1",
/*peer_is_local=*/false,
buf_key, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
device_locality_, /*dev_to_dev_stream_index=*/0,
/*cancellation_manager=*/nullptr,
[&consumer_status, &post_restart_note](const Status& s) {
consumer_status = s;
post_restart_note.Notify();
});
post_restart_note.WaitForNotification();
EXPECT_TRUE(errors::IsFailedPrecondition(consumer_status));
}
TEST_P(CollRMADistTest, CheckHealthOKWithCachedAttr) {
ResolveDeviceAttributes();
Status check_health_status;
Notification check_health_done;
rma_->CheckPeerHealth(
"/job:worker/replica:0/task:1", 0,
[&check_health_status, &check_health_done](const Status s) {
check_health_status = s;
check_health_done.Notify();
});
check_health_done.WaitForNotification();
TF_EXPECT_OK(check_health_status);
}
TEST_P(CollRMADistTest, CheckHealthOKWithoutCachedAttr) {
Status check_health_status;
Notification check_health_done;
rma_->CheckPeerHealth(
"/job:worker/replica:0/task:1", 0,
[&check_health_status, &check_health_done](const Status s) {
check_health_status = s;
check_health_done.Notify();
});
check_health_done.WaitForNotification();
EXPECT_TRUE(check_health_status.ok());
}
TEST_P(CollRMADistTest, CheckHealthRestarted) {
ResolveDeviceAttributes();
RestartWorker("/job:worker/replica:0/task:1", "CPU", 1);
Status check_health_status;
Notification check_health_done;
rma_->CheckPeerHealth(
"/job:worker/replica:0/task:1", 0,
[&check_health_status, &check_health_done](const Status s) {
check_health_status = s;
check_health_done.Notify();
});
check_health_done.WaitForNotification();
EXPECT_TRUE(errors::IsFailedPrecondition(check_health_status));
}
TEST_P(CollRMADistTest, CheckHealthFailedPeer) {
ResolveDeviceAttributes();
RestartWorker("/job:worker/replica:0/task:1", "CPU", 1,
/*is_failed=*/true);
Status check_health_status;
Notification check_health_done;
rma_->CheckPeerHealth(
"/job:worker/replica:0/task:1", 0,
[&check_health_status, &check_health_done](const Status s) {
check_health_status = s;
check_health_done.Notify();
});
check_health_done.WaitForNotification();
EXPECT_TRUE(errors::IsUnavailable(check_health_status));
}
TEST_P(CollRMADistTest, CheckHealthRestartedWithDifferentDevices) {
ResolveDeviceAttributes();
RestartWorker("/job:worker/replica:0/task:1", "GPU", 1);
Status check_health_status;
Notification check_health_done;
rma_->CheckPeerHealth(
"/job:worker/replica:0/task:1", 0,
[&check_health_status, &check_health_done](const Status s) {
check_health_status = s;
check_health_done.Notify();
});
check_health_done.WaitForNotification();
EXPECT_TRUE(errors::IsFailedPrecondition(check_health_status));
}
INSTANTIATE_TEST_SUITE_P(
TensorInBufPtrOrExtra, CollRMADistTest,
::testing::Combine(::testing::Values(TEST_PARAM_TENSOR_LOC_AT_BUF_PTR,
TEST_PARAM_TENSOR_LOC_IN_EXTRA),
::testing::Values(TEST_PARAM_DEVICE_TYPE_CPU,
TEST_PARAM_DEVICE_TYPE_GPU)));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/collective_rma_distributed.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/collective_rma_distributed_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f7e1b2b6-c751-40a9-8248-b14670cbf6d6 | cpp | tensorflow/tensorflow | cluster_function_library_runtime | tensorflow/core/distributed_runtime/eager/cluster_function_library_runtime.cc | tensorflow/core/distributed_runtime/cluster_function_library_runtime_test.cc | #include "tensorflow/core/distributed_runtime/eager/cluster_function_library_runtime.h"
#include <map>
#include <memory>
#include <utility>
#include <variant>
#include <vector>
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/common_runtime/eager/eager_operation.h"
#include "tensorflow/core/distributed_runtime/call_options.h"
#include "tensorflow/core/distributed_runtime/eager/eager_client.h"
#include "tensorflow/core/distributed_runtime/worker_session.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_def_util.h"
namespace tensorflow {
namespace eager {
namespace {
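// Strips attrs that match the op registry defaults so the serialized
// function library stays compatible with workers running older op defs.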
void StripDefaultAttributesInRegisterFunctionOp(
RegisterFunctionOp* register_function) {
StripDefaultAttributes(
*OpRegistry::Global(),
register_function->mutable_function_def()->mutable_node_def());
for (auto& function :
*register_function->mutable_library()->mutable_function()) {
StripDefaultAttributes(*OpRegistry::Global(), function.mutable_node_def());
}
}
}
void EagerClusterFunctionLibraryRuntime::Instantiate(
const string& function_name, const FunctionLibraryDefinition& lib_def,
AttrSlice attrs, const FunctionLibraryRuntime::InstantiateOptions& options,
FunctionLibraryRuntime::LocalHandle* handle,
FunctionLibraryRuntime::DoneCallback done) {
auto target = options.target;
auto released_op = std::make_unique<EagerOperation>(ctx_);
Status s =
released_op->Reset(function_name.c_str(), target.c_str(), /*remote=*/true,
/*executor=*/nullptr);
if (!s.ok()) {
done(s);
return;
}
if (!released_op->is_function()) {
done(errors::Internal(function_name, " is not a function."));
return;
}
VLOG(1) << "CFLR::Instantiate: " << function_name << " on " << target
<< " (this: " << this << ")";
core::RefCountPtr<eager::EagerClient> eager_client;
s = ctx_->GetClient(target, &eager_client);
if (!s.ok()) {
done(s);
return;
}
if (eager_client == nullptr) {
done(errors::InvalidArgument("Could not find eager client for target: ",
target));
return;
}
const FunctionLibraryDefinition& func_lib_def =
options.lib_def ? *options.lib_def : lib_def;
auto request = std::make_shared<EnqueueRequest>();
auto response = std::make_shared<EnqueueResponse>();
request->set_context_id(context_id_);
RegisterFunctionOp* register_function =
request->add_queue()->mutable_register_function();
*register_function->mutable_function_def() =
*func_lib_def.Find(function_name);
register_function->set_is_component_function(true);
*register_function->mutable_library() =
func_lib_def.ReachableDefinitions(register_function->function_def())
.ToProto();
StripDefaultAttributesInRegisterFunctionOp(register_function);
const absl::optional<std::vector<int>>& ret_indices = options.ret_indices;
eager_client->EnqueueAsync(
/*call_opts=*/nullptr, request.get(), response.get(),
[this, request, response, handle, released_op = released_op.release(),
target, ret_indices, eager_client = eager_client.get(),
done](const Status& s) {
{
mutex_lock l(mu_);
*handle = function_data_.size();
function_data_.emplace_back(target, ret_indices, eager_client,
absl::WrapUnique(released_op));
}
done(s);
});
}
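// Tensor-only overload: wraps the args as FunctionArgs, runs the component
// function, and rejects shape-only (TensorShape) results.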
void EagerClusterFunctionLibraryRuntime::Run(
const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::LocalHandle handle, absl::Span<const Tensor> args,
std::vector<Tensor>* rets, FunctionLibraryRuntime::DoneCallback done) {
std::vector<FunctionArg> function_args;
for (const auto& tensor : args) {
function_args.push_back(tensor);
}
std::vector<FunctionRet>* function_rets = new std::vector<FunctionRet>;
Run(opts, handle, function_args, function_rets,
[rets, function_rets, done = std::move(done)](const Status& s) {
Status status = s;
if (status.ok()) {
for (const auto& t : *function_rets) {
if (t.index() == 0) {
rets->push_back(std::get<Tensor>(t));
} else {
status.Update(
errors::Internal("Expect a Tensor as a remote function "
"output but got a TensorShape."));
break;
}
}
}
delete function_rets;
done(status);
});
}
void EagerClusterFunctionLibraryRuntime::Run(
const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::LocalHandle handle,
absl::Span<const FunctionArg> args, std::vector<FunctionRet>* rets,
FunctionLibraryRuntime::DoneCallback done) {
FunctionData* function_data = nullptr;
{
mutex_lock l(mu_);
DCHECK_LE(handle, function_data_.size());
function_data = &function_data_[handle];
}
EagerClient* eager_client = function_data->eager_client.get();
if (eager_client == nullptr) {
done(errors::Internal("Could not find eager client"));
return;
}
EagerOperation* op = function_data->op.get();
if (!op->Inputs().empty()) {
done(errors::Internal("Inputs should not be set during instantiation."));
return;
}
auto request = std::make_shared<RunComponentFunctionRequest>();
auto response = std::make_shared<RunComponentFunctionResponse>();
request->set_context_id(context_id_);
eager::Operation* remote_op = request->mutable_operation();
if (function_data->ret_indices.has_value()) {
for (const int ret_index : function_data->ret_indices.value()) {
request->add_output_num(ret_index);
}
}
for (const auto& arg : args) {
if (arg.index() == 0) {
std::get<Tensor>(arg).AsProtoTensorContent(
remote_op->add_op_inputs()->mutable_tensor());
} else {
remote_op->add_op_inputs()->mutable_remote_handle()->Swap(
std::get<RemoteTensorHandle*>(arg));
}
}
if (opts.op_id.has_value()) {
remote_op->set_id(opts.op_id.value());
} else {
remote_op->set_id(kInvalidOpId);
}
remote_op->set_is_function(true);
remote_op->set_is_component_function(true);
remote_op->set_func_step_id(opts.step_id);
remote_op->set_name(op->Name());
op->Attrs().FillAttrValueMap(remote_op->mutable_attrs());
remote_op->set_device(function_data->target);
CancellationManager* cm = opts.cancellation_manager;
CancellationToken token = 0;
auto call_opts = std::make_shared<CallOptions>();
call_opts->SetTimeout(
ctx_->session_options().config.operation_timeout_in_ms());
if (cm != nullptr) {
token = cm->get_cancellation_token();
const bool already_cancelled = !cm->RegisterCallback(
token,
[call_opts, request, response, done]() { call_opts->StartCancel(); });
if (already_cancelled) {
done(errors::Cancelled("EagerClusterFunctionLibraryRuntime::Run"));
return;
}
}
eager_client->RunComponentFunctionAsync(
call_opts.get(), request.get(), response.get(),
[request, response, rets, call_opts, cm, token,
done = std::move(done)](const Status& s) {
if (cm != nullptr) {
cm->TryDeregisterCallback(token);
}
if (!s.ok()) {
done(s);
return;
}
if (!response->shape().empty() && !response->tensor().empty()) {
done(errors::Internal(
"Both shape and tensor are specified in the same response"));
return;
}
for (const auto& shape : response->shape()) {
rets->push_back(shape);
}
for (const auto& tensor_proto : response->tensor()) {
Tensor t;
if (t.FromProto(tensor_proto)) {
rets->push_back(std::move(t));
} else {
done(errors::Internal("Could not convert tensor proto: ",
tensor_proto.DebugString()));
return;
}
}
done(absl::OkStatus());
});
}
void EagerClusterFunctionLibraryRuntime::CleanUp(
uint64 step_id, FunctionLibraryRuntime::LocalHandle handle,
FunctionLibraryRuntime::DoneCallback done) {
FunctionData* function_data = nullptr;
{
mutex_lock l(mu_);
DCHECK_LE(handle, function_data_.size());
function_data = &function_data_[handle];
}
EagerClient* eager_client = function_data->eager_client.get();
if (eager_client == nullptr) {
done(errors::Internal("Could not find eager client"));
return;
}
auto request = std::make_shared<EnqueueRequest>();
auto response = std::make_shared<EnqueueResponse>();
request->set_context_id(context_id_);
CleanupFunctionOp* cleanup_function =
request->add_queue()->mutable_cleanup_function();
cleanup_function->set_step_id(step_id);
eager_client->EnqueueAsync(
/*call_opts=*/nullptr, request.get(), response.get(),
[request, response, done](const Status& status) { done(status); });
}
DistributedFunctionLibraryRuntime* CreateClusterFLR(
const uint64 context_id, EagerContext* ctx, WorkerSession* worker_session) {
return new EagerClusterFunctionLibraryRuntime(
context_id, ctx, worker_session->remote_device_mgr());
}
}
} | #include "tensorflow/core/distributed_runtime/cluster_function_library_runtime.h"
#include <map>
#include <memory>
#include "tensorflow/core/common_runtime/function_testlib.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_channel.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_testlib.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.h"
#include "tensorflow/core/distributed_runtime/worker_session.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/equal_graph_def.h"
namespace tensorflow {
class ClusterFunctionLibraryRuntimeTest : public ::testing::Test {
public:
ClusterFunctionLibraryRuntimeTest() {
SessionOptions options;
TF_CHECK_OK(test::TestCluster::MakeTestCluster(
test::TestClusterConfig().Options(options).Jobs(
{test::TestJob{"localhost", 2}}),
&cluster_));
GrpcChannelSpec spec;
std::map<int, string> host_ports;
int i = 0;
for (const auto& target : cluster_->targets("localhost")) {
host_ports[i++] = target;
}
TF_CHECK_OK(spec.AddHostPortsJob("localhost", host_ports));
ChannelCreationFunction channel_func =
ConvertToChannelCreationFunction(NewHostPortGrpcChannel);
grpc_worker_env_.reset(CreateGrpcWorkerEnv());
std::shared_ptr<GrpcChannelCache> channel_cache(
NewGrpcChannelCache(spec, channel_func));
std::unique_ptr<WorkerCacheInterface> worker_cache(
NewGrpcWorkerCache(channel_cache, grpc_worker_env_.get()));
worker_session_ = std::make_unique<WorkerSession>(
"cluster_test_session", "/job:localhost/replica:0/task:0",
std::move(worker_cache), std::unique_ptr<DeviceMgr>(),
std::unique_ptr<GraphMgr>(), nullptr,
[](WorkerSession* worker_session, bool called,
DeviceMgr* remote_device_mgr) { return nullptr; });
cluster_flr_ = std::make_unique<ClusterFunctionLibraryRuntime>(
worker_session_.get(), /*create_worker_session_called=*/true,
/*remote_device_mgr=*/nullptr);
}
Status ConstructFunctionGraphHelper(
const OpDef& sig, test::function::Attrs attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
const FunctionLibraryDefinition& lib_def, GraphDef* g,
std::vector<string>* send_keys, std::vector<string>* recv_keys) {
return ClusterFunctionLibraryRuntime::ConstructFunctionGraph(
sig, attrs, options, lib_def, g, send_keys, recv_keys);
}
void Instantiate(const string& function_name,
const FunctionLibraryDefinition& lib_def,
test::function::Attrs attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
FunctionLibraryRuntime::LocalHandle* local_handle,
FunctionLibraryRuntime::DoneCallback done) {
cluster_flr_->Instantiate(function_name, lib_def, attrs, options,
local_handle, done);
}
Status InstantiateAndRun(
const string& function_name, const FunctionLibraryDefinition& lib_def,
test::function::Attrs attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
const std::vector<Tensor>& args, std::vector<Tensor*> rets) {
FunctionLibraryRuntime::LocalHandle handle;
Status status;
Notification instantiate_done;
cluster_flr_->Instantiate(function_name, lib_def, attrs, options, &handle,
[&status, &instantiate_done](const Status& s) {
status = s;
instantiate_done.Notify();
});
instantiate_done.WaitForNotification();
if (!status.ok()) {
return status;
}
Notification done;
FunctionLibraryRuntime::Options opts;
std::vector<Tensor> out;
cluster_flr_->Run(opts, handle, args, &out,
[&status, &done](const Status& s) {
status = s;
done.Notify();
});
done.WaitForNotification();
if (!status.ok()) {
return status;
}
CHECK_EQ(rets.size(), out.size());
for (size_t i = 0; i < rets.size(); ++i) {
*rets[i] = out[i];
}
return absl::OkStatus();
}
protected:
std::unique_ptr<test::TestCluster> cluster_;
std::unique_ptr<WorkerSession> worker_session_;
std::unique_ptr<ClusterFunctionLibraryRuntime> cluster_flr_;
std::unique_ptr<GrpcWorkerEnv> grpc_worker_env_;
};
TEST_F(ClusterFunctionLibraryRuntimeTest, ConstructFunctionGraph) {
GraphDef actual;
std::vector<string> send_keys, recv_keys;
FunctionDefLibrary proto;
*(proto.add_function()) = test::function::Swap();
FunctionLibraryDefinition lib_def(OpRegistry::Global(), proto);
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:a/replica:0/task:0/device:CPU:0";
TF_CHECK_OK(ConstructFunctionGraphHelper(
test::function::Swap().signature(), {{"T", DT_FLOAT}}, instantiate_opts,
lib_def, &actual, &send_keys, &recv_keys));
GraphDef expected;
protobuf::TextFormat::ParseFromString(R"(
node {
name: "_recv_i0_0"
op: "_Recv"
device: "/job:a/replica:0/task:0/device:CPU:0"
attr {
key: "client_terminated"
value {
b: true
}
}
attr {
key: "recv_device"
value {
s: "/job:a/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device"
value {
s: "/job:a/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device_incarnation"
value {
i: 1
}
}
attr {
key: "tensor_name"
value {
s: "i0"
}
}
attr {
key: "tensor_type"
value {
type: DT_FLOAT
}
}
}
node {
name: "_recv_i1_1"
op: "_Recv"
device: "/job:a/replica:0/task:0/device:CPU:0"
attr {
key: "client_terminated"
value {
b: true
}
}
attr {
key: "recv_device"
value {
s: "/job:a/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device"
value {
s: "/job:a/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device_incarnation"
value {
i: 1
}
}
attr {
key: "tensor_name"
value {
s: "i1"
}
}
attr {
key: "tensor_type"
value {
type: DT_FLOAT
}
}
}
node {
name: "Func/Swap/input/_0"
op: "Identity"
input: "_recv_i0_0"
device: "/job:a/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "Func/Swap/input/_1"
op: "Identity"
input: "_recv_i1_1"
device: "/job:a/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "Swap/o0"
op: "Identity"
input: "Func/Swap/input/_1"
device: "/job:a/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "Swap/o1"
op: "Identity"
input: "Func/Swap/input/_0"
device: "/job:a/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "Func/Swap/output/_2"
op: "Identity"
input: "Swap/o0"
device: "/job:a/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "Func/Swap/output/_3"
op: "Identity"
input: "Swap/o1"
device: "/job:a/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "_send_o0_0"
op: "_Send"
input: "Func/Swap/output/_2"
device: "/job:a/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "client_terminated"
value {
b: true
}
}
attr {
key: "recv_device"
value {
s: "/job:a/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device"
value {
s: "/job:a/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device_incarnation"
value {
i: 1
}
}
attr {
key: "tensor_name"
value {
s: "o0"
}
}
}
node {
name: "_send_o1_1"
op: "_Send"
input: "Func/Swap/output/_3"
device: "/job:a/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "client_terminated"
value {
b: true
}
}
attr {
key: "recv_device"
value {
s: "/job:a/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device"
value {
s: "/job:a/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device_incarnation"
value {
i: 1
}
}
attr {
key: "tensor_name"
value {
s: "o1"
}
}
}
)",
&expected);
TF_EXPECT_GRAPH_EQ(expected, actual);
}
TEST_F(ClusterFunctionLibraryRuntimeTest, DISABLED_InstantiateAndRun) {
FunctionDefLibrary proto;
*(proto.add_function()) = test::function::XTimesTwoInt32();
FunctionLibraryDefinition lib_def(OpRegistry::Global(), proto);
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:localhost/replica:0/task:1/cpu:0";
Tensor y;
auto x = test::AsTensor<int32>({1, 2, 3, 4});
TF_EXPECT_OK(InstantiateAndRun("XTimesTwoInt32", lib_def, {},
instantiate_opts, {x}, {&y}));
test::ExpectTensorEqual<int32>(y, test::AsTensor<int32>({2, 4, 6, 8}));
}
TEST_F(ClusterFunctionLibraryRuntimeTest,
DISABLED_InstantiateAndRunAttrSubstitution) {
FunctionDefLibrary proto;
*(proto.add_function()) = test::function::Swap();
FunctionLibraryDefinition lib_def(OpRegistry::Global(), proto);
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:localhost/replica:0/task:1/cpu:0";
Tensor y1, y2;
auto x1 = test::AsTensor<float>({1, 2, 3, 4});
auto x2 = test::AsTensor<float>({4, 3, 2, 1});
TF_EXPECT_OK(InstantiateAndRun("Swap", lib_def, {{"T", DT_FLOAT}},
instantiate_opts, {x1, x2}, {&y1, &y2}));
test::ExpectTensorEqual<float>(y1, test::AsTensor<float>({4, 3, 2, 1}));
test::ExpectTensorEqual<float>(y2, test::AsTensor<float>({1, 2, 3, 4}));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/eager/cluster_function_library_runtime.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/cluster_function_library_runtime_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2e44108f-0ab0-4cba-9936-a5afa356d05c | cpp | tensorflow/tensorflow | collective_param_resolver_distributed | tensorflow/core/distributed_runtime/collective_param_resolver_distributed.cc | tensorflow/core/distributed_runtime/collective_param_resolver_distributed_test.cc | #include "tensorflow/core/distributed_runtime/collective_param_resolver_distributed.h"
#include "absl/strings/escaping.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/distributed_runtime/cancellable_call.h"
#include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace {
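// Cancellable RPC wrappers that forward group and instance resolution to the
// group leader's worker service.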
class CompleteGroupCall : public CancellableCall {
public:
CompleteGroupCall(const CollGroupParams& group,
const DeviceAttributes& device,
CancellationManager* cancel_mgr,
const string& remote_worker, WorkerCacheInterface* wc)
: CancellableCall(cancel_mgr, remote_worker, wc) {
req_.set_group_key(group.group_key);
req_.set_group_size(group.group_size);
req_.set_device_type(group.device_type.type_string());
*req_.mutable_device_attributes() = device;
}
~CompleteGroupCall() override {}
void IssueCall(const StatusCallback& done) override {
wi_->CompleteGroupAsync(&opts_, &req_, &resp_, done);
}
CompleteGroupRequest req_;
CompleteGroupResponse resp_;
};
class CompleteInstanceCall : public CancellableCall {
public:
CompleteInstanceCall(const CollGroupParams& group,
const CollInstanceParams& instance,
const string& node_name, const string& device_name,
bool is_source, CancellationManager* cancel_mgr,
const string& remote_worker, WorkerCacheInterface* wc)
: CancellableCall(cancel_mgr, remote_worker, wc) {
req_.set_name(node_name);
req_.set_type(instance.type);
req_.set_step_id(instance.step_id);
req_.set_data_type(instance.data_type);
instance.shape.AsProto(req_.mutable_shape());
req_.set_group_key(group.group_key);
req_.set_group_size(group.group_size);
req_.set_instance_key(instance.instance_key);
req_.set_device_type(group.device_type.type_string());
for (int32_t offset : instance.impl_details.subdiv_offsets) {
req_.add_subdiv_offset(offset);
}
req_.set_device(device_name);
req_.set_is_source(is_source);
}
~CompleteInstanceCall() override {}
void IssueCall(const StatusCallback& done) override {
wi_->CompleteInstanceAsync(&opts_, &req_, &resp_, done);
}
CompleteInstanceRequest req_;
CompleteInstanceResponse resp_;
};
}
CollectiveParamResolverDistributed::CollectiveParamResolverDistributed(
const ConfigProto& config, const DeviceMgr* dev_mgr,
DeviceResolverDistributed* dev_resolver,
NcclCommunicatorInterface* nccl_communicator,
WorkerCacheInterface* worker_cache, const string& task_name)
: CollectiveParamResolverLocal(config, dev_mgr, dev_resolver,
nccl_communicator, task_name),
worker_cache_(worker_cache),
group_leader_(task_name == config.experimental().collective_group_leader()
? ""
: config.experimental().collective_group_leader()) {
VLOG(1) << "CompleteParamResolverDistributed ctor task={" << task_name
<< "} config.collective_group_leader={"
<< config.experimental().collective_group_leader() << "}"
<< " config.collective_nccl={"
<< config.experimental().collective_nccl() << "}";
}
void CollectiveParamResolverDistributed::CompleteParamsAsync(
const DeviceAttributes& device, CollectiveParams* cp,
CancellationManager* cancel_mgr, const StatusCallback& done) {
VLOG(1) << "CompleteParams distributed " << device.name() << " for " << cp
<< ": " << cp->ToString();
if (cp->run_group_initialization) {
CompleteGroupDistributed(
device, &cp->group, cancel_mgr,
[this, device, cp, cancel_mgr, done](Status s) {
if (s.ok()) {
std::vector<DeviceAttributes> devices;
devices.reserve(cp->group.group_size);
for (const CollGroupMember& m : cp->group.members) {
devices.push_back(m.device);
}
s = dev_resolver_->UpdateDeviceAttributes(devices);
}
if (s.ok()) {
CompleteInstanceDistributed(device.name(), cp, cancel_mgr, done);
} else {
done(s);
}
});
} else {
auto s = LookupGroup(cp->group.group_key, &cp->group);
if (s.ok()) {
CompleteInstanceDistributed(device.name(), cp, cancel_mgr, done);
} else {
done(s);
}
}
}
void CollectiveParamResolverDistributed::CompleteGroupAsync(
const DeviceAttributes& device, CollGroupParams* group_params,
CancellationManager* cancel_mgr, const StatusCallback& done) {
CompleteGroupDistributed(device, group_params, cancel_mgr, done);
}
void CollectiveParamResolverDistributed::CompleteInstanceAsync(
const CompleteInstanceRequest* request, CompleteInstanceResponse* response,
CancellationManager* cancel_mgr, const StatusCallback& done) {
GroupRec* gr = GetCachedGroup(request->group_key());
if (gr == nullptr) {
done(errors::FailedPrecondition(
"group ", request->group_key(),
" not found. This normally means the server has restarted"));
return;
}
CollectiveParams* cp = new CollectiveParams;
{
mutex_lock l(gr->mu);
if (!gr->status.ok()) {
done(gr->status);
return;
} else if (gr->group.members.size() != gr->group.group_size) {
done(errors::FailedPrecondition(
"group ", request->group_key(),
" failed to resolve. This normally means the server has restarted"));
return;
}
cp->group = gr->group;
}
cp->name = request->name();
cp->instance.type = CollectiveType(request->type());
cp->instance.instance_key = request->instance_key();
cp->instance.step_id = request->step_id();
cp->instance.data_type = request->data_type();
cp->instance.shape = TensorShape(request->shape());
cp->is_source = request->is_source();
for (int32_t offset : request->subdiv_offset()) {
cp->instance.impl_details.subdiv_offsets.push_back(offset);
}
StatusCallback done_and_cleanup = [cp, done](const Status& s) {
done(s);
cp->Unref();
};
CompleteInstanceDistributed(
request->device(), cp, cancel_mgr,
[this, cp, response, done_and_cleanup](Status status) {
if (status.ok()) {
bool created_irec;
InstanceRec* ir = GetOrCreateInstanceRec(cp, &created_irec);
{
mutex_lock l(ir->mu);
status = ir->status;
if (ir->status.ok()) {
response->set_instance_key(cp->instance.instance_key);
response->set_source_rank(ir->source_rank);
}
}
}
done_and_cleanup(status);
});
}
CollectiveParamResolverDistributed::GroupRec*
CollectiveParamResolverDistributed::GetCachedGroup(int32_t group_key) {
mutex_lock l(group_mu_);
auto it = group_table_.find(group_key);
if (it == group_table_.end()) {
return nullptr;
}
return it->second.get();
}
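// Installs the leader's CompleteGroupResponse into the local group table,
// or, if another thread installed the group concurrently, verifies that the
// cached communicator key matches the response.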
Status CollectiveParamResolverDistributed::UpdateGroupCache(
const CompleteGroupResponse& resp) {
std::unique_ptr<GroupRec> gr(new GroupRec);
{
mutex_lock grl(gr->mu);
gr->group.device_type = DeviceType(resp.device_type());
gr->group.group_key = resp.group_key();
gr->group.group_size = resp.group_size();
gr->group.num_tasks = resp.num_tasks();
if (resp.device_attributes().empty()) {
return errors::Internal(
"CompleteGroupResponse device_attributes is empty. Make sure you're "
"running the same version of Tensorflow on all workers.");
}
if (resp.device_attributes_size() != gr->group.group_size) {
return errors::Internal(
"CompleteGroupResponse group_size doesn't match device_name list");
}
gr->group.members.reserve(resp.device_attributes().size());
for (const DeviceAttributes& device : resp.device_attributes()) {
CollGroupMember member;
member.device = device;
gr->group.members.push_back(std::move(member));
gr->incarnations_by_device_name[device.name()] = device.incarnation();
}
gr->group.runtime_details.communicator_key = resp.communicator_key();
FinishGroup(gr.get());
}
GroupRec* previous_gr = nullptr;
{
mutex_lock l(group_mu_);
auto it = group_table_.find(resp.group_key());
if (it == group_table_.end()) {
VLOG(2) << "UpdateGroupCache: communicator_key="
<< absl::CEscape(resp.communicator_key());
group_table_[gr->group.group_key] = std::move(gr);
} else {
previous_gr = it->second.get();
}
}
if (previous_gr != nullptr) {
mutex_lock grl(previous_gr->mu);
if (previous_gr->group.runtime_details.communicator_key !=
resp.communicator_key()) {
return errors::Internal(
"UpdateGroupCache: CompleteGroupResponse for group ",
resp.group_key(),
" gives communicator_key=", absl::CEscape(resp.communicator_key()),
" but cache already holds communicator_key=",
absl::CEscape(previous_gr->group.runtime_details.communicator_key));
}
}
return absl::OkStatus();
}
void CollectiveParamResolverDistributed::CompleteGroupDistributed(
const DeviceAttributes& device, CollGroupParams* group_params,
CancellationManager* cancel_mgr, const StatusCallback& done) {
VLOG(1) << "CompleteGroupDistributed group_key=" << group_params->group_key
<< " dev: " << device.name()
<< " is_leader=" << (group_leader_.empty());
if (group_leader_.empty()) {
return CompleteGroupLocal(device, group_params, cancel_mgr, done);
} else if (GetCachedGroup(group_params->group_key) == nullptr) {
CompleteGroupCall* call = new CompleteGroupCall(
*group_params, device, cancel_mgr, group_leader_, worker_cache_);
CancellationToken abortion_token =
abortion_cancel_mgr_.get_cancellation_token();
bool already_aborted = !abortion_cancel_mgr_.RegisterCallback(
abortion_token, [call] { call->Cancel(); });
if (already_aborted) {
done(errors::Cancelled("collective ops already aborted"));
delete call;
return;
}
call->Start([this, device, group_params, call, cancel_mgr, abortion_token,
done](const Status& s) {
abortion_cancel_mgr_.DeregisterCallback(abortion_token);
if (s.ok()) {
Status status = UpdateGroupCache(call->resp_);
if (status.ok()) {
CompleteGroupLocal(device, group_params, cancel_mgr, done);
} else {
done(status);
}
} else {
done(s);
}
delete call;
});
return;
} else {
return CompleteGroupLocal(device, group_params, cancel_mgr, done);
}
}
bool CollectiveParamResolverDistributed::InstanceIsCached(
int32_t group_key, const CollInstanceParams& instance) {
mutex_lock l(instance_mu_);
auto group_it = instance_table_.find(group_key);
if (group_it == instance_table_.end()) {
return false;
}
auto instance_it =
group_it->second.find({instance.step_id, instance.instance_key});
return instance_it != group_it->second.end();
}
Status CollectiveParamResolverDistributed::UpdateInstanceCache(
CollectiveParams* cp, const CompleteInstanceResponse& resp) {
int32_t source_rank = resp.source_rank();
bool created_irec;
InstanceRec* ir = GetOrCreateInstanceRec(cp, &created_irec);
mutex_lock l(ir->mu);
if (!ir->status.ok()) {
return ir->status;
}
if (ir->source_rank != source_rank) {
if (ir->source_rank >= 0) {
ir->status = errors::Internal(
"UpdateInstanceCache: CompleteInstanceResponse for instance ",
cp->instance.instance_key, " gives source_rank=", source_rank,
" but cache already holds value=", ir->source_rank);
return ir->status;
}
ir->source_rank = source_rank;
}
if (ir->known_count < cp->group.group_size) {
ir->known_count = cp->group.group_size;
const int ir_known_size = ir->known.size();
if (ir_known_size != cp->group.group_size) {
ir->status = errors::Internal(
"UpdateInstanceCache:: CompleteInstanceResponse for instance ",
cp->instance.instance_key, " has known.size()=", ir->known.size(),
" < group_size=", cp->group.group_size);
return ir->status;
}
for (int i = 0; i < ir_known_size; ++i) {
ir->known[i] = true;
}
}
return ir->status;
}
void CollectiveParamResolverDistributed::CompleteInstanceDistributed(
const string& device, CollectiveParams* cp, CancellationManager* cancel_mgr,
const StatusCallback& done) {
if (group_leader_.empty()) {
return CompleteInstanceLocal(device, cp, done);
} else if (InstanceIsCached(cp->group.group_key, cp->instance)) {
return CompleteInstanceLocal(device, cp, done);
} else {
CompleteInstanceCall* call = new CompleteInstanceCall(
cp->group, cp->instance, cp->name, device, cp->is_source, cancel_mgr,
group_leader_, worker_cache_);
CancellationToken abortion_token =
abortion_cancel_mgr_.get_cancellation_token();
bool already_aborted = !abortion_cancel_mgr_.RegisterCallback(
abortion_token, [call] { call->Cancel(); });
if (already_aborted) {
done(errors::Cancelled("collective ops already aborted"));
delete call;
return;
}
call->Start([this, device, cp, call, abortion_token, done](Status s) {
abortion_cancel_mgr_.DeregisterCallback(abortion_token);
if (s.ok()) {
s = UpdateInstanceCache(cp, call->resp_);
}
if (s.ok()) {
CompleteInstanceLocal(device, cp, done);
} else {
done(s);
}
delete call;
});
return;
}
}
void CollectiveParamResolverDistributed::StartAbort(const Status& s) {
{
mutex_lock l(status_mu_);
if (!status_.ok()) {
VLOG(2) << "CollectiveParamResolverDistributed already aborted. Ignoring "
"subsequent abortion with status: "
<< s;
return;
}
status_ = s;
}
StartAbortLocal(s);
abortion_cancel_mgr_.StartCancel();
}
} | #include "tensorflow/core/distributed_runtime/collective_param_resolver_distributed.h"
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/test_collective_executor_mgr.h"
#include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/test_utils.h"
#include "tensorflow/core/distributed_runtime/worker.h"
#include "tensorflow/core/distributed_runtime/worker_env.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace {
static std::unique_ptr<Device> NewDevice(const string& type,
const string& name) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr) : Device(nullptr, attr) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
};
DeviceAttributes attr;
attr.set_name(name);
attr.set_device_type(type);
attr.mutable_locality()->set_numa_node(3);
attr.set_incarnation(random::New64());
return std::make_unique<FakeDevice>(attr);
}
class FakeCache : public TestWorkerCache {
public:
bool GetDeviceLocalityNonBlocking(const string& device,
DeviceLocality* locality) override {
return false;
}
void GetDeviceLocalityAsync(const string& device, DeviceLocality* locality,
StatusCallback done) override {
string task_name;
string dev_part;
if (!DeviceNameUtils::SplitDeviceName(device, &task_name, &dev_part)) {
done(errors::Internal("failed to parse device name"));
return;
}
auto it = workers_.find(task_name);
if (it == workers_.end()) {
done(errors::Internal("failed to find worker ", task_name));
return;
}
WorkerInterface* wi = it->second;
GetStatusRequest req;
GetStatusResponse resp;
Status status = wi->GetStatus(&req, &resp);
if (!status.ok()) {
done(status);
return;
}
for (const auto& it : resp.device_attributes()) {
if (it.name() == device) {
*locality = it.locality();
done(absl::OkStatus());
return;
}
}
done(errors::Internal("device not found: ", device));
}
};
class FakeNcclCommunicator : public NcclCommunicatorInterface {
public:
string GenerateCommunicatorKey() override { return "mock-communicator-key"; }
void Enqueue(std::shared_ptr<CollectiveContext> col_ctx,
StatusCallback done) override {
done(absl::OkStatus());
}
void StartAbort(const Status& s) override {}
};
class DeviceResDistTest : public ::testing::Test {
public:
~DeviceResDistTest() override {
for (auto& name_param : cp_) {
name_param.second->Unref();
}
}
protected:
void DefineWorkers(int num_workers, int num_devices,
const string& device_type, bool nccl) {
for (int w = 0; w < num_workers; ++w) {
string name = strings::StrCat("/job:worker/replica:0/task:", w);
DefineWorker(name, device_type, num_devices, nccl);
}
}
void DefineWorker(const string& worker_name, const string& device_type,
int num_devices, bool nccl) {
ConfigProto config;
config.mutable_experimental()->set_collective_group_leader(
"/job:worker/replica:0/task:0");
config.mutable_experimental()->set_collective_nccl(nccl);
std::vector<std::unique_ptr<Device>> devices;
for (int i = 0; i < num_devices; ++i) {
devices.push_back(NewDevice(
device_type,
strings::StrCat(worker_name, "/device:", device_type, ":", i)));
}
device_mgrs_[worker_name] =
std::make_unique<StaticDeviceMgr>(std::move(devices));
std::vector<string>* dv = &dev_by_task_[worker_name];
dv->clear();
for (auto* d : device_mgrs_[worker_name]->ListDevices()) {
dv->push_back(d->name());
}
dev_resolvers_[worker_name] = std::make_unique<DeviceResolverDistributed>(
device_mgrs_[worker_name].get());
cp_resolvers_[worker_name] =
std::make_unique<CollectiveParamResolverDistributed>(
config, device_mgrs_[worker_name].get(),
dev_resolvers_[worker_name].get(), &nccl_communicator_, &wc_,
worker_name);
auto worker_env = std::make_unique<WorkerEnv>();
worker_env->env = Env::Default();
worker_env->device_mgr = device_mgrs_[worker_name].get();
worker_env->collective_executor_mgr =
std::make_unique<TestCollectiveExecutorMgr>(
cp_resolvers_[worker_name].get(), nullptr);
workers_[worker_name] = std::make_unique<Worker>(worker_env.get());
worker_envs_[worker_name] = std::move(worker_env);
wc_.AddWorker(worker_name, workers_[worker_name].get());
}
void DefineCollectiveParams(int num_workers, int num_devices,
const string& device_type,
CollectiveType coll_type = REDUCTION_COLLECTIVE,
int source_rank = 0) {
for (int wi = 0; wi < num_workers; ++wi) {
string task_name = strings::StrCat("/job:worker/replica:0/task:", wi);
for (int di = 0; di < num_devices; ++di) {
int idx = wi * num_devices + di;
string device_name =
strings::StrCat(task_name, "/device:", device_type, ":", di);
cp_[device_name] =
CreateCollectiveParams(num_workers, num_devices, device_type,
coll_type, idx == source_rank);
}
}
}
CollectiveParams* CreateCollectiveParams(int num_workers, int num_devices,
const string& device_type,
CollectiveType coll_type,
bool is_source) {
const int kGroupKey = 5;
const int kInstanceKey = 3;
auto* cp = new CollectiveParams();
cp->is_source = is_source;
cp->group.group_key = kGroupKey;
cp->group.group_size = num_workers * num_devices;
cp->group.device_type = DeviceType(device_type);
cp->group.num_tasks = num_workers;
cp->instance.instance_key = kInstanceKey;
cp->instance.type = coll_type;
cp->instance.data_type = DT_FLOAT;
cp->instance.shape = TensorShape({64});
cp->instance.impl_details.subdiv_offsets.push_back(0);
return cp;
}
void IssueRequests(int num_workers, int num_devices) {
{
mutex_lock l(mu_);
num_done_ = 0;
}
int group_size = num_workers * num_devices;
for (int wi = 0; wi < num_workers; ++wi) {
string task_name = strings::StrCat("/job:worker/replica:0/task:", wi);
for (int di = 0; di < num_devices; ++di) {
string device_name = strings::StrCat(task_name, "/device:CPU:", di);
IssueRequest(task_name, device_name, group_size);
}
}
}
void IssueRequest(const string& task_name, const string& device_name,
int group_size) {
Device* device = nullptr;
TF_CHECK_OK(device_mgrs_[task_name]->LookupDevice(device_name, &device));
CollectiveParams* cp = cp_[device_name];
CollectiveParamResolverDistributed* cp_res = cp_resolvers_[task_name].get();
CHECK(cp_res);
cp_res->CompleteParamsAsync(
device->attributes(), cp, &cm_,
[this, device_name, group_size](const Status& s) {
status_[device_name] = s;
{
mutex_lock l(mu_);
++num_done_;
if (num_done_ == group_size) {
done_.notify_all();
}
}
});
}
void ValidateCollectiveParams(int num_workers, int num_devices) {
int device_count = num_workers * num_devices;
{
mutex_lock l(mu_);
if (num_done_ < device_count) {
done_.wait(l);
}
}
const int dev_count = num_workers * num_devices;
string dev0 = "/job:worker/replica:0/task:0/device:CPU:0";
for (int wi = 0; wi < num_workers; ++wi) {
string task_name = strings::StrCat("/job:worker/replica:0/task:", wi);
for (int di = 0; di < num_devices; ++di) {
string device_name = strings::StrCat(task_name, "/device:CPU:", di);
int idx = wi * num_devices + di;
TF_ASSERT_OK(status_[device_name]);
EXPECT_EQ(cp_[device_name]->default_rank, idx);
EXPECT_EQ(cp_[device_name]->group.members.size(), dev_count);
EXPECT_EQ(cp_[device_name]->group.members[idx].device.name(),
device_name);
EXPECT_EQ(cp_[device_name]->group.members[idx].task, task_name);
ValidateDeviceResolver(*cp_[device_name], task_name);
if (idx > 0) {
EXPECT_EQ(cp_[dev0]->group.runtime_details.communicator_key,
cp_[device_name]->group.runtime_details.communicator_key);
for (int i = 0; i < dev_count; ++i) {
EXPECT_EQ(cp_[dev0]->group.members[i].device.name(),
cp_[device_name]->group.members[i].device.name());
EXPECT_EQ(cp_[dev0]->group.members[i].task,
cp_[device_name]->group.members[i].task);
}
}
}
}
}
void ValidateDeviceResolver(const CollectiveParams& cp, const string& task) {
for (const CollGroupMember& member : cp.group.members) {
DeviceAttributes attributes;
TF_ASSERT_OK(dev_resolvers_[task]->GetDeviceAttributes(
member.device.name(), &attributes));
}
}
void RestartWorker(int worker_idx, int num_workers, int num_devices,
const string& device_type, bool nccl,
CollectiveType coll_type = REDUCTION_COLLECTIVE,
bool is_source = false) {
string worker_name =
strings::StrCat("/job:worker/replica:0/task:", worker_idx);
DefineWorker(worker_name, device_type, num_devices, nccl);
for (int i = 0; i < num_devices; ++i) {
string device_name =
strings::StrCat(worker_name, "/device:", device_type, ":", i);
if (cp_.find(device_name) != cp_.end()) {
cp_[device_name]->Unref();
}
cp_[device_name] = CreateCollectiveParams(
num_workers, num_devices, device_type, coll_type, is_source);
status_.erase(device_name);
}
}
FakeCache wc_;
FakeNcclCommunicator nccl_communicator_;
CancellationManager cm_;
absl::flat_hash_map<string, std::unique_ptr<DeviceMgr>> device_mgrs_;
absl::flat_hash_map<string, std::unique_ptr<DeviceResolverDistributed>>
dev_resolvers_;
absl::flat_hash_map<string,
std::unique_ptr<CollectiveParamResolverDistributed>>
cp_resolvers_;
absl::flat_hash_map<string, std::vector<string>> dev_by_task_;
absl::flat_hash_map<string, std::unique_ptr<WorkerEnv>> worker_envs_;
absl::flat_hash_map<string, std::unique_ptr<Worker>> workers_;
absl::flat_hash_map<string, CollectiveParams*> cp_;
absl::flat_hash_map<string, Status> status_;
mutex mu_;
int num_done_ TF_GUARDED_BY(mu_);
condition_variable done_;
};
TEST_F(DeviceResDistTest, Workers1Devices1) {
const int num_workers = 1;
const int num_devices = 1;
DefineWorkers(num_workers, num_devices, "CPU", false);
DefineCollectiveParams(num_workers, num_devices, "CPU");
IssueRequests(num_workers, num_devices);
ValidateCollectiveParams(num_workers, num_devices);
}
TEST_F(DeviceResDistTest, Workers2Devices2) {
const int num_workers = 2;
const int num_devices = 2;
DefineWorkers(num_workers, num_devices, "CPU", false);
DefineCollectiveParams(num_workers, num_devices, "CPU");
IssueRequests(num_workers, num_devices);
ValidateCollectiveParams(num_workers, num_devices);
}
TEST_F(DeviceResDistTest, DifferentIncarnation) {
const int num_workers = 2;
const int num_devices = 1;
DefineWorkers(num_workers, num_devices, "CPU", false);
DefineCollectiveParams(num_workers, num_devices, "CPU");
IssueRequests(num_workers, num_devices);
RestartWorker(1, num_workers, num_devices, "CPU", false);
const string task_name = "/job:worker/replica:0/task:1";
const string device_name = absl::StrCat(task_name, "/device:CPU:0");
IssueRequest(task_name, device_name, num_workers * num_devices);
EXPECT_TRUE(errors::IsFailedPrecondition(status_[device_name]));
}
TEST_F(DeviceResDistTest, BroadcastSourceRank0) {
const int num_workers = 2;
const int num_devices = 2;
const int source_rank = 0;
DefineWorkers(num_workers, num_devices, "CPU", false);
DefineCollectiveParams(num_workers, num_devices, "CPU", BROADCAST_COLLECTIVE,
source_rank);
IssueRequests(num_workers, num_devices);
ValidateCollectiveParams(num_workers, num_devices);
}
TEST_F(DeviceResDistTest, BroadcastSourceRank3) {
const int num_workers = 2;
const int num_devices = 2;
const int source_rank = 3;
DefineWorkers(num_workers, num_devices, "CPU", false);
DefineCollectiveParams(num_workers, num_devices, "CPU", BROADCAST_COLLECTIVE,
source_rank);
IssueRequests(num_workers, num_devices);
ValidateCollectiveParams(num_workers, num_devices);
}
TEST_F(DeviceResDistTest, Workers4Devices3) {
const int num_workers = 4;
const int num_devices = 3;
DefineWorkers(num_workers, num_devices, "CPU", true);
DefineCollectiveParams(num_workers, num_devices, "CPU");
IssueRequests(num_workers, num_devices);
ValidateCollectiveParams(num_workers, num_devices);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/collective_param_resolver_distributed.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/collective_param_resolver_distributed_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
79cf9640-53d5-4de9-930d-2b402a98d224 | cpp | tensorflow/tensorflow | rpc_collective_executor_mgr | tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.cc | tensorflow/core/distributed_runtime/rpc_collective_executor_mgr_test.cc | #include "tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.h"
#include "tensorflow/core/common_runtime/base_collective_executor.h"
#include "tensorflow/core/common_runtime/collective_executor_mgr.h"
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/distributed_runtime/collective_param_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/collective_rma_distributed.h"
#include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
RpcCollectiveExecutorMgr::RpcCollectiveExecutorMgr(
const ConfigProto& config, const DeviceMgr* dev_mgr,
std::unique_ptr<DeviceResolverDistributed> dev_resolver,
std::unique_ptr<CollectiveParamResolverDistributed> param_resolver,
std::unique_ptr<NcclCommunicatorInterface> nccl_communicator,
WorkerCacheInterface* worker_cache, const string& task_name)
: CollectiveExecutorMgr(config, dev_mgr, std::move(dev_resolver),
std::move(param_resolver),
std::move(nccl_communicator)),
worker_cache_(worker_cache),
task_name_(task_name) {
group_leader_ = (task_name == config.experimental().collective_group_leader())
? ""
: config.experimental().collective_group_leader();
}
RpcCollectiveExecutorMgr::~RpcCollectiveExecutorMgr() {
for (auto it : sequence_table_) {
delete it.second;
}
}
CollectiveExecutor* RpcCollectiveExecutorMgr::Create(int64_t step_id) {
CollectiveRemoteAccessDistributed* rma =
new CollectiveRemoteAccessDistributed(dev_mgr_, dev_resolver_.get(),
work_queue_, worker_cache_, step_id,
task_name_);
return new BaseCollectiveExecutor(this, rma, step_id, dev_mgr_, work_queue_);
}
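// Step ids are masked down to the low 57 bits, so a randomly drawn id is
// always non-negative and the +1 in RetireStepId wraps within the same range.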
namespace {
static const int64_t kStepIdMask = (((1uLL << 56) - 1) | (1uLL << 56));
int64_t NewRandomStepId() {
int64_t step_id = random::New64();
step_id &= kStepIdMask;
return step_id;
}
}
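// If this task is the group leader (group_leader_ is empty), install a fresh
// random step id locally; otherwise ask the leader for its current step
// sequence over RPC and update the local table from the response.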
void RpcCollectiveExecutorMgr::RefreshStepIdSequenceAsync(
int64_t graph_key, const StatusCallback& done) {
if (group_leader_.empty()) {
mutex_lock l(sequence_mu_);
GraphKeySequence* gks = nullptr;
auto it = sequence_table_.find(graph_key);
if (it == sequence_table_.end()) {
gks = new GraphKeySequence(graph_key);
sequence_table_[graph_key] = gks;
} else {
gks = it->second;
}
gks->next_step_id_ = NewRandomStepId();
done(absl::OkStatus());
} else {
WorkerInterface* wi = worker_cache_->GetOrCreateWorker(group_leader_);
GetStepSequenceRequest* req = new GetStepSequenceRequest;
GetStepSequenceResponse* resp = new GetStepSequenceResponse;
req->add_graph_key(graph_key);
wi->GetStepSequenceAsync(
req, resp, [this, req, resp, done](const Status& s) {
if (!s.ok()) {
LOG(ERROR) << "Bad response [" << s
<< "] from GetStepSequenceAsync call to "
<< group_leader_;
done(s);
} else {
done(UpdateStepSequences(*resp));
}
delete req;
delete resp;
});
}
}
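// Serves step sequences to other tasks; only the group leader may answer.
// A sequence entry is created lazily the first time a graph_key is requested.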
void RpcCollectiveExecutorMgr::GetStepSequenceAsync(
const GetStepSequenceRequest* request, GetStepSequenceResponse* response,
const StatusCallback& done) {
if (!group_leader_.empty()) {
LOG(ERROR) << "GetStepSequence called at non-group-leader";
done(errors::Internal("GetStepSequenceAsync called at non-group-leader"));
} else {
mutex_lock l(sequence_mu_);
for (int64_t graph_key : request->graph_key()) {
auto it = sequence_table_.find(graph_key);
GraphKeySequence* gks = nullptr;
if (it == sequence_table_.end()) {
gks = new GraphKeySequence(graph_key);
gks->next_step_id_ = NewRandomStepId();
sequence_table_[graph_key] = gks;
} else {
gks = it->second;
}
StepSequence* ss = response->add_step_sequence();
ss->set_graph_key(graph_key);
ss->set_next_step_id(gks->next_step_id_);
}
done(absl::OkStatus());
}
}
Status RpcCollectiveExecutorMgr::UpdateStepSequences(
const GetStepSequenceResponse& resp) {
mutex_lock l(sequence_mu_);
for (const StepSequence& ss : resp.step_sequence()) {
GraphKeySequence* gks = nullptr;
auto it = sequence_table_.find(ss.graph_key());
if (it == sequence_table_.end()) {
gks = new GraphKeySequence(ss.graph_key());
sequence_table_[ss.graph_key()] = gks;
} else {
gks = it->second;
}
gks->next_step_id_ = ss.next_step_id();
}
return absl::OkStatus();
}
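// Returns kInvalidId until a sequence has been established for graph_key,
// either locally via RefreshStepIdSequenceAsync or from the leader's response.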
int64_t RpcCollectiveExecutorMgr::NextStepId(int64_t graph_key) {
mutex_lock l(sequence_mu_);
auto it = sequence_table_.find(graph_key);
if (it != sequence_table_.end()) {
return it->second->next_step_id_;
}
return CollectiveExecutor::kInvalidId;
}
void RpcCollectiveExecutorMgr::RetireStepId(int64_t graph_key,
int64_t step_id) {
mutex_lock l(sequence_mu_);
auto it = sequence_table_.find(graph_key);
if (it != sequence_table_.end()) {
if (step_id == it->second->next_step_id_) {
it->second->next_step_id_ = (it->second->next_step_id_ + 1) & kStepIdMask;
} else {
it->second->next_step_id_ = CollectiveExecutor::kInvalidId;
}
} else {
LOG(ERROR) << "Failed to find graph_key " << graph_key << " to retire.";
}
}
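// Convenience builder that wires a DeviceResolverDistributed and a
// CollectiveParamResolverDistributed into the executor manager.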
std::unique_ptr<RpcCollectiveExecutorMgr> CreateProdRpcCollectiveExecutorMgr(
const ConfigProto& config, const DeviceMgr* device_mgr,
std::unique_ptr<NcclCommunicatorInterface> nccl_communicator,
WorkerCacheInterface* worker_cache, const string& default_worker_name) {
auto dev_resolver = std::make_unique<DeviceResolverDistributed>(device_mgr);
auto param_resolver = std::make_unique<CollectiveParamResolverDistributed>(
config, device_mgr, dev_resolver.get(), nccl_communicator.get(),
worker_cache, default_worker_name);
return std::make_unique<RpcCollectiveExecutorMgr>(
config, device_mgr, std::move(dev_resolver), std::move(param_resolver),
std::move(nccl_communicator), worker_cache, default_worker_name);
}
} | #include "tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.h"
#include <stdlib.h>
#include <string>
#include <vector>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/distributed_runtime/collective_param_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/nccl/collective_communicator.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/worker.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
#define NUM_DEVS 3
class RpcCollectiveExecutorMgrTest : public ::testing::Test {
protected:
RpcCollectiveExecutorMgrTest() {
string task_name = "/job:localhost/replica:0/task:0";
SessionOptions options;
options.config.mutable_experimental()->set_collective_group_leader(
task_name);
WorkerCacheInterface* worker_cache = nullptr;
auto* device_count = options.config.mutable_device_count();
device_count->insert({"CPU", NUM_DEVS});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::AddDevices(options, task_name, &devices));
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
std::unique_ptr<DeviceResolverDistributed> dr(
new DeviceResolverDistributed(device_mgr_.get()));
std::unique_ptr<CollectiveParamResolverDistributed> cpr(
new CollectiveParamResolverDistributed(
options.config, device_mgr_.get(), dr.get(),
nullptr, worker_cache, task_name));
cme_.reset(new RpcCollectiveExecutorMgr(
options.config, device_mgr_.get(), std::move(dr), std::move(cpr),
MaybeCreateNcclCommunicator(options.config), worker_cache, task_name));
}
std::unique_ptr<RpcCollectiveExecutorMgr> cme_;
std::unique_ptr<DeviceMgr> device_mgr_;
};
TEST_F(RpcCollectiveExecutorMgrTest, FindOrCreate) {
CollectiveExecutor::Handle* h =
new CollectiveExecutor::Handle(cme_->FindOrCreate(1), true);
EXPECT_TRUE(h->get());
CollectiveExecutor::Handle* h2 =
new CollectiveExecutor::Handle(cme_->FindOrCreate(1), true);
EXPECT_EQ(h->get(), h2->get());
CollectiveExecutor* ce = h->get();
delete h;
delete h2;
CollectiveExecutor* ce2 = cme_->FindOrCreate(1);
EXPECT_EQ(ce, ce2);
ce2->Unref();
cme_->Cleanup(1);
}
TEST_F(RpcCollectiveExecutorMgrTest, NextStepId) {
int64_t x = cme_->NextStepId(7);
EXPECT_EQ(x, CollectiveExecutor::kInvalidId);
{
Notification note;
Status status;
cme_->RefreshStepIdSequenceAsync(7,
[this, &status, ¬e](const Status& s) {
status = s;
note.Notify();
});
EXPECT_TRUE(status.ok());
}
x = cme_->NextStepId(7);
EXPECT_NE(x, CollectiveExecutor::kInvalidId);
EXPECT_EQ(x, cme_->NextStepId(7));
EXPECT_EQ(x, cme_->NextStepId(7));
cme_->RetireStepId(6, x);
EXPECT_EQ(x, cme_->NextStepId(7));
cme_->RetireStepId(7, x);
int64_t y = cme_->NextStepId(7);
EXPECT_EQ((x + 1) & (((1uLL << 56) - 1) | (1uLL << 56)), y);
{
Notification note;
Status status;
cme_->RefreshStepIdSequenceAsync(7,
[this, &status, ¬e](const Status& s) {
status = s;
note.Notify();
});
note.WaitForNotification();
EXPECT_TRUE(status.ok());
}
int64_t z = cme_->NextStepId(7);
EXPECT_NE(y, z);
EXPECT_GT(llabs(y - z), 3);
}
TEST_F(RpcCollectiveExecutorMgrTest, GetStepSequence) {
int64_t x = cme_->NextStepId(3);
EXPECT_EQ(x, CollectiveExecutor::kInvalidId);
int64_t y = cme_->NextStepId(4);
EXPECT_EQ(y, CollectiveExecutor::kInvalidId);
GetStepSequenceRequest request;
GetStepSequenceResponse response;
request.add_graph_key(3);
request.add_graph_key(4);
{
Notification note;
Status status;
cme_->GetStepSequenceAsync(&request, &response,
[this, &status, ¬e](const Status& s) {
status = s;
note.Notify();
});
note.WaitForNotification();
EXPECT_TRUE(status.ok());
}
ASSERT_EQ(2, response.step_sequence_size());
std::unordered_map<int64_t, int64_t> values;
for (const auto& ss : response.step_sequence()) {
values[ss.graph_key()] = ss.next_step_id();
}
EXPECT_NE(values[3], CollectiveExecutor::kInvalidId);
EXPECT_NE(values[4], CollectiveExecutor::kInvalidId);
response.Clear();
{
Notification note;
Status status;
cme_->GetStepSequenceAsync(&request, &response,
[this, &status, ¬e](const Status& s) {
status = s;
note.Notify();
});
note.WaitForNotification();
EXPECT_TRUE(status.ok());
}
ASSERT_EQ(2, response.step_sequence_size());
for (const auto& ss : response.step_sequence()) {
EXPECT_EQ(values[ss.graph_key()], ss.next_step_id());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc_collective_executor_mgr_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3afdaf8b-caec-4d41-8aa6-2983d81f5758 | cpp | tensorflow/tensorflow | coordination_service_barrier_proxy | tensorflow/core/distributed_runtime/coordination/coordination_service_barrier_proxy.cc | tensorflow/core/distributed_runtime/coordination/coordination_service_barrier_proxy_test.cc | #include "tensorflow/core/distributed_runtime/coordination/coordination_service_barrier_proxy.h"
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include "xla/tsl/protobuf/coordination_service.pb.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/profiler/lib/traceme.h"
#include "tsl/profiler/lib/traceme_encode.h"
namespace tensorflow {
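// Blocks until all num_local_threads_ callers have entered. The last local
// thread to arrive performs the cross-task WaitAtBarrier (skipped when only
// one task participates) and broadcasts the resulting status; a thread that
// times out instead cancels the barrier. Returns the shared status and
// whether this caller was the last one to exit.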
std::pair<Status, bool> BarrierProxy::Wait() {
mutex_lock l(mu_);
if (status_set_) {
return std::make_pair(
absl::FailedPreconditionError(absl::StrCat(
"The barrier has already passed or timed out. key=", key_)),
false);
}
if (num_entered_ >= num_local_threads_) {
return std::make_pair(absl::FailedPreconditionError(absl::StrCat(
"Wait() called too many (>", num_local_threads_,
") times. key=", key_)),
false);
}
++num_entered_;
++num_to_exit_;
VLOG(1) << "BarrierProxy " << key_ << " enter: num_entered_=" << num_entered_
<< ", num_to_exit_=" << num_to_exit_;
if (num_entered_ == num_local_threads_) {
if (tasks_.size() != 1) {
tsl::profiler::TraceMe traceme("BarrierProxy::Wait::WaitAtBarrier");
status_ = agent_->WaitAtBarrier(key_, timeout_, tasks_);
} else {
status_ = absl::OkStatus();
}
status_set_ = true;
cv_.notify_all();
} else if (WaitForMilliseconds(&l, &cv_, timeout_ / absl::Milliseconds(1)) ==
kCond_Timeout) {
if (!status_set_) {
if (tasks_.size() != 1) {
agent_->CancelBarrier(key_).IgnoreError();
}
status_ = absl::DeadlineExceededError(
absl::StrCat("BarrierProxy timeout: key=", key_));
status_set_ = true;
cv_.notify_all();
}
} else {
CHECK(status_set_);
}
--num_to_exit_;
VLOG(1) << "BarrierProxy " << key_ << " enter: num_entered_=" << num_entered_
<< ", num_to_exit=" << num_to_exit_;
return std::make_pair(status_, num_to_exit_ == 0);
}
size_t BarrierProxyManager::size() const {
mutex_lock l(mu_);
return barriers_.size();
}
Status BarrierProxyManager::Wait(tsl::CoordinationServiceAgent* agent,
const std::vector<CoordinatedTask>& tasks,
int num_local_threads, absl::string_view key,
absl::Duration timeout) {
if (tasks.size() == 1 && num_local_threads <= 1) return absl::OkStatus();
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode(
"BarrierProxyManager::Wait",
{
{"num_tasks", tasks.size()},
{"num_local_threads", num_local_threads},
});
});
std::shared_ptr<BarrierProxy> barrier;
{
mutex_lock l(mu_);
auto [iter, inserted] = barriers_.try_emplace(key);
if (inserted) {
iter->second = std::make_shared<BarrierProxy>(
agent, tasks, num_local_threads, key, timeout);
VLOG(1) << "BarrierProxy key=" << key << " created.";
}
barrier = iter->second;
}
CHECK(barrier);
auto [status, last_exit] = barrier->Wait();
if (last_exit) {
mutex_lock l(mu_);
barriers_.erase(key);
VLOG(1) << "BarrierProxy key=" << key << " removed.";
}
return status;
}
} | #include "tensorflow/core/distributed_runtime/coordination/coordination_service_barrier_proxy.h"
#include <atomic>
#include <cstdint>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "xla/tsl/distributed_runtime/call_options.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_client.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include "xla/tsl/protobuf/coordination_config.pb.h"
#include "xla/tsl/protobuf/coordination_service.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/threadpool.h"
namespace tensorflow {
namespace {
using ::testing::_;
using ::testing::Return;
using tsl::CallOptions;
using tsl::CoordinationClient;
using tsl::CoordinationServiceAgent;
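// A full mock of CoordinationServiceAgent; only WaitAtBarrier and
// CancelBarrier matter for the tests below.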
class MockCoordinationServiceAgent : public CoordinationServiceAgent {
public:
MOCK_METHOD(Status, WaitAtBarrier,
(std::string_view barrier_id, absl::Duration timeout,
const std::vector<CoordinatedTask>& tasks),
(override));
MOCK_METHOD(Status, CancelBarrier, (std::string_view barrier_id), (override));
MOCK_METHOD(Status, Initialize,
(Env * env, std::string_view job_name, int task_id,
const CoordinationServiceConfig& configs,
std::unique_ptr<CoordinationClient> leader_client,
StatusCallback error_fn),
(override));
MOCK_METHOD(Status, Initialize,
(Env * env, const CoordinatedTask& task,
const CoordinationServiceConfig& configs,
std::unique_ptr<CoordinationClient> leader_client,
StatusCallback error_fn),
(override));
MOCK_METHOD(bool, IsInitialized, (), (override));
MOCK_METHOD(bool, IsConnected, (), (override));
MOCK_METHOD(bool, IsError, (), (override));
MOCK_METHOD(Status, Connect, (), (override));
MOCK_METHOD(Status, WaitForAllTasks, (const DeviceInfo& local_devices),
(override));
MOCK_METHOD(const DeviceInfo&, GetClusterDeviceInfo, (), (override));
MOCK_METHOD(absl::StatusOr<CoordinatedTask>, GetOwnTask, (), (override));
MOCK_METHOD(absl::StatusOr<std::vector<CoordinatedTaskStateInfo>>,
GetTaskState, (const std::vector<CoordinatedTask>& task),
(override));
MOCK_METHOD(Status, ReportError, (const Status& error), (override));
MOCK_METHOD(Status, Shutdown, (), (override));
MOCK_METHOD(Status, Reset, (), (override));
MOCK_METHOD(absl::StatusOr<std::string>, GetKeyValue, (std::string_view key),
(override));
MOCK_METHOD(absl::StatusOr<std::string>, GetKeyValue,
(std::string_view key, absl::Duration timeout), (override));
MOCK_METHOD(std::shared_ptr<CallOptions>, GetKeyValueAsync,
(std::string_view key, StatusOrValueCallback done), (override));
MOCK_METHOD(absl::StatusOr<std::string>, TryGetKeyValue,
(std::string_view key), (override));
MOCK_METHOD(absl::StatusOr<std::vector<KeyValueEntry>>, GetKeyValueDir,
(std::string_view key), (override));
MOCK_METHOD(void, GetKeyValueDirAsync,
(std::string_view key, StatusOrValueDirCallback done),
(override));
MOCK_METHOD(Status, InsertKeyValue,
(std::string_view key, std::string_view value), (override));
MOCK_METHOD(Status, InsertKeyValue,
(std::string_view key, std::string_view value,
bool allow_overwrite),
(override));
MOCK_METHOD(Status, DeleteKeyValue, (std::string_view key), (override));
MOCK_METHOD(Status, UpdateKeyValue,
(std::string_view key, std::string_view value), (override));
MOCK_METHOD(Status, StartWatchKey,
(std::string_view key, ChangedKeyValuesCallback on_change),
(override));
MOCK_METHOD(Status, StopWatchKey, (std::string_view key), (override));
MOCK_METHOD(void, WaitAtBarrierAsync,
(std::string_view barrier_id, absl::Duration timeout,
const std::vector<CoordinatedTask>& tasks, StatusCallback done),
(override));
MOCK_METHOD(void, CancelBarrierAsync,
(std::string_view barrier_id, StatusCallback done), (override));
MOCK_METHOD(absl::StatusOr<Env*>, GetEnv, (), (override));
MOCK_METHOD(void, SetError, (const Status& error), (override));
MOCK_METHOD(Status, ActivateWatch,
(std::string_view key,
(const std::map<std::string, std::string>&)),
(override));
};
constexpr auto kTestKey = "test_key";
constexpr auto kTestTimeout = absl::Seconds(1);
const int kThreadPoolSize = 32;
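// Drives num_threads_entered threads into a BarrierProxy configured for
// num_threads_planned participants, then checks how many observed an OK
// status and that exactly one thread reported being the last to exit.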
void TestBarrierProxyWait(
int num_tasks, int num_threads_planned, int num_threads_entered,
int expected_ok_count, std::optional<Status> agent_wait_status,
std::optional<Status> expected_same_exit_status_for_all_threads) {
auto agent = std::make_unique<MockCoordinationServiceAgent>();
const std::vector<CoordinatedTask> tasks(num_tasks);
BarrierProxy barrier(agent.get(), tasks, num_threads_planned, kTestKey,
kTestTimeout);
std::atomic<int> last_exit_count = 0;
std::atomic<int> actual_ok_count = 0;
if (agent_wait_status.has_value()) {
EXPECT_CALL(*agent, WaitAtBarrier(kTestKey, kTestTimeout, _))
.WillOnce(Return(agent_wait_status.value()));
} else {
EXPECT_CALL(*agent, WaitAtBarrier(kTestKey, kTestTimeout, _)).Times(0);
}
{
thread::ThreadPool pool(Env::Default(), "TestPool",
kThreadPoolSize);
for (int i = 0; i < num_threads_entered; ++i) {
pool.Schedule([&]() {
auto [status, last_exit] = barrier.Wait();
if (expected_same_exit_status_for_all_threads.has_value()) {
ASSERT_EQ(status, expected_same_exit_status_for_all_threads.value());
}
actual_ok_count += status.ok();
last_exit_count += last_exit;
});
}
}
ASSERT_EQ(actual_ok_count, expected_ok_count);
ASSERT_EQ(last_exit_count, 1);
}
TEST(BarrierProxyTest, AllThreadsExitBarrier) {
TestBarrierProxyWait(
2,
8,
8,
8,
absl::OkStatus(),
absl::OkStatus());
}
TEST(BarrierProxyTest, AgentErrorBroadcastedToAllThreads) {
TestBarrierProxyWait(
2,
8,
8,
0,
errors::Internal(""),
errors::Internal(""));
}
TEST(BarrierProxyTest, AgentIsIgnoredIfThereIsOnlyOneTask) {
TestBarrierProxyWait(
1,
8,
8,
8,
{},
absl::OkStatus());
}
TEST(BarrierProxyTest, TimeoutIfNotEnoughThreadEntered) {
TestBarrierProxyWait(
2,
8,
7,
0,
{},
errors::DeadlineExceeded("BarrierProxy timeout: key=", kTestKey));
}
TEST(BarrierProxyTest, ExtraThreadsEnteringTheBarrierGetErrors) {
TestBarrierProxyWait(
2,
8,
10,
8,
absl::OkStatus(),
{});
}
void TestBarrierProxyManagerWaitSingleKey(
int num_threads_planned, int num_threads_entered,
std::optional<Status> agent_wait_status, int expected_ok_count) {
auto agent = std::make_unique<MockCoordinationServiceAgent>();
const std::vector<CoordinatedTask> tasks;
BarrierProxyManager mgr;
std::atomic<int> actual_ok_count = 0;
if (agent_wait_status.has_value()) {
EXPECT_CALL(*agent, WaitAtBarrier(kTestKey, kTestTimeout, _))
.WillOnce(Return(agent_wait_status.value()));
}
{
thread::ThreadPool pool(Env::Default(), "TestPool",
num_threads_planned);
for (int i = 0; i < num_threads_entered; ++i) {
pool.Schedule([&]() {
actual_ok_count += mgr.Wait(agent.get(), tasks, num_threads_planned,
kTestKey, kTestTimeout)
.ok();
});
}
}
ASSERT_EQ(actual_ok_count, expected_ok_count);
ASSERT_EQ(mgr.size(), 0);
}
TEST(BarrierProxyManagerTest, AllThreadExited) {
TestBarrierProxyManagerWaitSingleKey(
8,
8,
absl::OkStatus(),
8);
}
TEST(BarrierProxyManagerTest, AllThreadTimedOut) {
TestBarrierProxyManagerWaitSingleKey(
8,
7,
{},
0);
}
TEST(BarrierProxyManagerTest, CoordinationServiceError) {
TestBarrierProxyManagerWaitSingleKey(
8,
8,
errors::Internal(""),
0);
}
TEST(BarrierProxyManagerTest, ExtraThreadsEnteringTheSameKeyGetErrors) {
TestBarrierProxyManagerWaitSingleKey(
8,
10,
absl::OkStatus(),
8);
}
TEST(BarrierProxyManagerTest, DifferentKeysDoNotInterfereWithEachOther) {
constexpr int kNumThreads = 8;
auto agent = std::make_unique<MockCoordinationServiceAgent>();
const std::vector<CoordinatedTask> tasks;
BarrierProxyManager mgr;
EXPECT_CALL(*agent, WaitAtBarrier("key0", kTestTimeout, _))
.WillOnce(Return(absl::OkStatus()));
EXPECT_CALL(*agent, WaitAtBarrier("key1", kTestTimeout, _))
.WillOnce(Return(absl::OkStatus()));
{
thread::ThreadPool pool(Env::Default(), "TestPool",
kThreadPoolSize);
for (int i = 0; i < kNumThreads * 2; ++i) {
pool.Schedule([&, key = absl::StrCat("key", i % 2)]() {
ASSERT_EQ(mgr.Wait(agent.get(), tasks, kNumThreads, key, kTestTimeout),
absl::OkStatus());
});
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/coordination/coordination_service_barrier_proxy.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/coordination/coordination_service_barrier_proxy_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
02f756a9-dd1d-4fcf-a26f-3d0ce3a6c096 | cpp | tensorflow/tensorflow | grpc_session | tensorflow/core/distributed_runtime/rpc/grpc_session.cc | tensorflow/core/distributed_runtime/rpc/grpc_session_test.cc | #include "tensorflow/core/distributed_runtime/rpc/grpc_session.h"
#include <unordered_map>
#include "tensorflow/core/common_runtime/session_factory.h"
#include "tensorflow/core/distributed_runtime/call_options.h"
#include "tensorflow/core/distributed_runtime/local_master.h"
#include "tensorflow/core/distributed_runtime/master_interface.h"
#include "tensorflow/core/distributed_runtime/request_id.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_channel.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_remote_master.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/protobuf/master.pb.h"
namespace tensorflow {
const char* const kSchemePrefix = "grpc:
const size_t kSchemePrefixLength = strlen(kSchemePrefix);
GrpcSession::GrpcSession(const SessionOptions& options)
: options_(options), current_graph_version_(-1) {}
GrpcSession::~GrpcSession() {}
Status GrpcSession::Create(const SessionOptions& options,
std::unique_ptr<GrpcSession>* out_session) {
std::unique_ptr<GrpcSession> session(new GrpcSession(options));
std::unique_ptr<MasterInterface> master;
if (!options.config.rpc_options().use_rpc_for_inprocess_master()) {
master = LocalMaster::Lookup(options.target);
}
if (!master) {
SharedGrpcChannelPtr master_channel;
TF_RETURN_IF_ERROR(
NewHostPortGrpcChannel(options.target.substr(kSchemePrefixLength),
&options.config.rpc_options(), &master_channel));
master.reset(NewGrpcMaster(master_channel));
} else {
session->is_local_ = true;
}
session->SetRemoteMaster(std::move(master));
*out_session = std::move(session);
return absl::OkStatus();
}
namespace {
void ReEncodeConsts(GraphDef* gdef) {
for (NodeDef& ndef : *(gdef->mutable_node())) {
if (ndef.op() == "Const") {
TensorProto* proto = nullptr;
for (auto& attr : *ndef.mutable_attr()) {
if (attr.first == "value") {
proto = attr.second.mutable_tensor();
}
}
if (proto != nullptr && proto->tensor_content().empty() &&
proto->ByteSizeLong() > 64) {
Tensor parsed(proto->dtype());
if (parsed.FromProto(*proto)) {
parsed.AsProtoTensorContent(proto);
}
}
}
}
}
}
void GrpcSession::SetHandleAndGraphVersion(string handle,
int64_t graph_version) {
mutex_lock l(mu_);
handle_ = std::move(handle);
current_graph_version_ = graph_version;
}
Status GrpcSession::Handle(string* out_handle) {
mutex_lock l(mu_);
if (handle_.empty()) {
return errors::InvalidArgument("A session is not created yet....");
}
*out_handle = handle_;
return absl::OkStatus();
}
Status GrpcSession::CreateImpl(CallOptions* call_options, GraphDef graph) {
{
mutex_lock l(mu_);
if (!handle_.empty()) {
return errors::InvalidArgument("A session is alive.");
}
}
CreateSessionRequest req;
*req.mutable_config() = options_.config;
req.mutable_graph_def()->Swap(&graph);
req.set_target(options_.target);
ReEncodeConsts(req.mutable_graph_def());
CreateSessionResponse resp;
Status s = master_->CreateSession(call_options, &req, &resp);
if (s.ok()) {
SetHandleAndGraphVersion(resp.session_handle(), resp.graph_version());
}
return s;
}
Status GrpcSession::Create(const GraphDef& graph) {
return Create(GraphDef(graph));
}
Status GrpcSession::Create(const RunOptions& run_options,
const GraphDef& graph) {
return Create(run_options, GraphDef(graph));
}
Status GrpcSession::Create(GraphDef&& graph) {
CallOptions call_options;
call_options.SetTimeout(options_.config.operation_timeout_in_ms());
return CreateImpl(&call_options, std::move(graph));
}
Status GrpcSession::Create(const RunOptions& run_options, GraphDef&& graph) {
CallOptions call_options;
call_options.SetTimeout(run_options.timeout_in_ms());
return CreateImpl(&call_options, std::move(graph));
}
Status GrpcSession::ExtendImpl(CallOptions* call_options, GraphDef graph) {
bool handle_is_empty;
{
mutex_lock l(mu_);
handle_is_empty = handle_.empty();
}
if (handle_is_empty) {
return Create(std::move(graph));
}
mutex_lock l(mu_);
ExtendSessionRequest req;
req.set_session_handle(handle_);
req.mutable_graph_def()->Swap(&graph);
req.set_current_graph_version(current_graph_version_);
ExtendSessionResponse resp;
Status s = master_->ExtendSession(call_options, &req, &resp);
if (s.ok()) {
current_graph_version_ = resp.new_graph_version();
}
return s;
}
Status GrpcSession::Extend(const GraphDef& graph) {
return Extend(GraphDef(graph));
}
Status GrpcSession::Extend(const RunOptions& run_options,
const GraphDef& graph) {
return Extend(run_options, GraphDef(graph));
}
Status GrpcSession::Extend(GraphDef&& graph) {
CallOptions call_options;
call_options.SetTimeout(options_.config.operation_timeout_in_ms());
return ExtendImpl(&call_options, std::move(graph));
}
Status GrpcSession::Extend(const RunOptions& run_options, GraphDef&& graph) {
CallOptions call_options;
call_options.SetTimeout(run_options.timeout_in_ms());
return ExtendImpl(&call_options, std::move(graph));
}
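// Shared implementation behind Run and PRun: builds the RunStep request,
// de-duplicates fetch names, issues the RPC, then scatters the returned
// tensors back into the positions the caller asked for.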
Status GrpcSession::RunHelper(
const RunOptions& run_options,
const std::vector<std::pair<string, Tensor>>& inputs,
const std::vector<string>& output_tensor_names,
const std::vector<string>& target_node_names, std::vector<Tensor>* outputs,
RunMetadata* run_metadata, const string& prun_handle) {
std::unique_ptr<MutableRunStepRequestWrapper> req(
master_->CreateRunStepRequest());
std::unique_ptr<MutableRunStepResponseWrapper> resp(
master_->CreateRunStepResponse());
*req->mutable_options() = run_options;
if (run_options.timeout_in_ms() == 0) {
req->mutable_options()->set_timeout_in_ms(
options_.config.operation_timeout_in_ms());
}
if (!prun_handle.empty()) {
req->set_partial_run_handle(prun_handle);
}
for (const auto& it : inputs) {
req->add_feed(it.first, it.second);
}
req->set_store_errors_in_response_body(true);
std::unordered_map<string, int> output_name_to_offset;
for (int i = 0, end = output_tensor_names.size(); i < end; ++i) {
const string& name = output_tensor_names[i];
if (output_name_to_offset.insert(std::make_pair(name, i)).second) {
req->add_fetch(name);
}
}
for (const string& target : target_node_names) {
req->add_target(target);
}
CallOptions call_options;
call_options.SetTimeout(req->options().timeout_in_ms());
TF_RETURN_IF_ERROR(RunProto(&call_options, req.get(), resp.get()));
if (resp->status_code() != absl::StatusCode::kOk) {
return resp->status();
}
if (!output_tensor_names.empty()) {
outputs->resize(output_tensor_names.size());
}
for (size_t i = 0; i < resp->num_tensors(); ++i) {
auto fetch_it = output_name_to_offset.find(resp->tensor_name(i));
if (fetch_it == output_name_to_offset.end()) {
return errors::Internal("Received response for unrequested fetch: ",
resp->tensor_name(i));
}
Tensor output;
TF_RETURN_IF_ERROR(resp->TensorValue(i, &output));
(*outputs)[fetch_it->second] = output;
}
if (output_name_to_offset.size() != output_tensor_names.size()) {
for (int i = 0, end = output_tensor_names.size(); i < end; ++i) {
const string& name = output_tensor_names[i];
int offset = output_name_to_offset[name];
if (offset != i) {
(*outputs)[i] = (*outputs)[offset];
}
}
}
if (run_metadata) {
run_metadata->Swap(resp->mutable_metadata());
}
return absl::OkStatus();
}
Status GrpcSession::Run(const RunOptions& run_options,
const std::vector<std::pair<string, Tensor>>& inputs,
const std::vector<string>& output_tensor_names,
const std::vector<string>& target_node_names,
std::vector<Tensor>* outputs,
RunMetadata* run_metadata) {
return RunHelper(run_options, inputs, output_tensor_names, target_node_names,
outputs, run_metadata, "");
}
Status GrpcSession::Run(const std::vector<std::pair<string, Tensor>>& inputs,
const std::vector<string>& output_tensor_names,
const std::vector<string>& target_node_names,
std::vector<Tensor>* outputs) {
RunOptions run_options;
run_options.set_timeout_in_ms(options_.config.operation_timeout_in_ms());
return Run(run_options, inputs, output_tensor_names, target_node_names,
outputs, nullptr);
}
Status GrpcSession::RunProto(CallOptions* call_options,
MutableRunStepRequestWrapper* req,
MutableRunStepResponseWrapper* resp) {
string handle;
TF_RETURN_IF_ERROR(Handle(&handle));
req->set_session_handle(handle);
return master_->RunStep(call_options, req, resp);
}
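// Registers the feeds, fetches and targets for a partial run with the master
// and returns the handle that subsequent PRun calls must pass back.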
Status GrpcSession::PRunSetup(const std::vector<string>& input_names,
const std::vector<string>& output_names,
const std::vector<string>& target_nodes,
string* handle) {
PartialRunSetupRequest req;
PartialRunSetupResponse resp;
CallOptions call_options;
TF_RETURN_IF_ERROR(Handle(req.mutable_session_handle()));
for (const string& feed : input_names) {
req.add_feed(feed);
}
for (const string& fetch : output_names) {
req.add_fetch(fetch);
}
for (const string& target : target_nodes) {
req.add_target(target);
}
if (!is_local_) req.set_request_id(GetUniqueRequestId());
call_options.SetTimeout(options_.config.operation_timeout_in_ms());
TF_RETURN_IF_ERROR(master_->PartialRunSetup(&call_options, &req, &resp));
*handle = resp.partial_run_handle();
return absl::OkStatus();
}
Status GrpcSession::PRun(const string& handle,
const std::vector<std::pair<string, Tensor>>& inputs,
const std::vector<string>& output_names,
std::vector<Tensor>* outputs) {
RunOptions run_options;
run_options.set_timeout_in_ms(options_.config.operation_timeout_in_ms());
return RunHelper(run_options, inputs, output_names, {}, outputs,
nullptr, handle);
}
Status GrpcSession::Close() {
CloseSessionRequest req;
{
mutex_lock l(mu_);
if (handle_.empty()) {
return absl::OkStatus();
}
req.set_session_handle(handle_);
handle_.clear();
}
CloseSessionResponse resp;
CallOptions call_options;
call_options.SetTimeout(options_.config.operation_timeout_in_ms());
return master_->CloseSession(&call_options, &req, &resp);
}
Status GrpcSession::ListDevices(std::vector<DeviceAttributes>* response) {
ListDevicesRequest req;
{
mutex_lock l(mu_);
req.set_session_handle(handle_);
}
if (req.session_handle().empty()) {
LOG(WARNING) << "GrpcSession::ListDevices will initialize the session with "
"an empty graph and other defaults because the session has "
"not yet been created.";
GraphDef graph_def;
TF_RETURN_IF_ERROR(Create(graph_def));
{
mutex_lock l(mu_);
req.set_session_handle(handle_);
}
}
ListDevicesResponse resp;
CallOptions call_options;
call_options.SetTimeout(options_.config.operation_timeout_in_ms());
Status s = master_->ListDevices(&call_options, &req, &resp);
if (!s.ok()) {
LOG(ERROR) << "Could not list devices: " << s;
return s;
}
response->clear();
response->reserve(resp.local_device_size() + resp.remote_device_size());
for (const auto& device_attr : resp.local_device()) {
response->emplace_back(device_attr);
}
for (const auto& device_attr : resp.remote_device()) {
response->emplace_back(device_attr);
}
return absl::OkStatus();
}
void GrpcSession::SetRemoteMaster(std::unique_ptr<MasterInterface> master) {
master_ = std::move(master);
}
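// Resets the target's containers through a one-shot master connection; note
// the channel is created with null RPC options rather than options.config.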
Status GrpcSession::Reset(const SessionOptions& options,
const std::vector<string>& containers) {
SharedGrpcChannelPtr master_channel;
TF_RETURN_IF_ERROR(
NewHostPortGrpcChannel(options.target.substr(kSchemePrefixLength),
nullptr, &master_channel));
auto master = NewGrpcMaster(master_channel);
ResetRequest req;
req.mutable_container()->Reserve(containers.size());
for (const auto& c : containers) req.add_container(c);
ResetResponse resp;
CallOptions call_options;
call_options.SetTimeout(options.config.operation_timeout_in_ms());
Status ret = master->Reset(&call_options, &req, &resp);
delete master;
return ret;
}
Status GrpcSession::MakeCallable(const CallableOptions& callable_options,
CallableHandle* out_handle) {
MakeCallableRequest req;
TF_RETURN_IF_ERROR(Handle(req.mutable_session_handle()));
*req.mutable_options() = callable_options;
if (!is_local_) req.set_request_id(GetUniqueRequestId());
MakeCallableResponse resp;
CallOptions call_options;
call_options.SetTimeout(options_.config.operation_timeout_in_ms());
TF_RETURN_IF_ERROR(master_->MakeCallable(&call_options, &req, &resp));
*out_handle = resp.handle();
return absl::OkStatus();
}
Status GrpcSession::RunCallable(CallableHandle handle,
const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors,
RunMetadata* run_metadata) {
RunCallableRequest req;
TF_RETURN_IF_ERROR(Handle(req.mutable_session_handle()));
req.set_handle(handle);
if (!is_local_) req.set_request_id(GetUniqueRequestId());
for (const Tensor& feed : feed_tensors) {
feed.AsProtoTensorContent(req.mutable_feed()->Add());
}
RunCallableResponse resp;
CallOptions call_options;
call_options.SetTimeout(options_.config.operation_timeout_in_ms());
TF_RETURN_IF_ERROR(master_->RunCallable(&call_options, &req, &resp));
for (const TensorProto& fetch : resp.fetch()) {
Tensor fetch_tensor;
if (!fetch_tensor.FromProto(cpu_allocator(), fetch)) {
return errors::Internal(
"Could not parse fetched tensor data in response from master.");
}
fetch_tensors->push_back(std::move(fetch_tensor));
}
return absl::OkStatus();
}
Status GrpcSession::ReleaseCallable(CallableHandle handle) {
ReleaseCallableRequest req;
TF_RETURN_IF_ERROR(Handle(req.mutable_session_handle()));
req.set_handle(handle);
ReleaseCallableResponse resp;
CallOptions call_options;
call_options.SetTimeout(options_.config.operation_timeout_in_ms());
return master_->ReleaseCallable(&call_options, &req, &resp);
}
class GrpcSessionFactory : public SessionFactory {
public:
bool AcceptsOptions(const SessionOptions& options) override {
return absl::StartsWith(options.target, kSchemePrefix);
}
Status NewSession(const SessionOptions& options,
Session** out_session) override {
std::unique_ptr<GrpcSession> session;
TF_RETURN_IF_ERROR(GrpcSession::Create(options, &session));
*out_session = session.release();
return absl::OkStatus();
}
Status Reset(const SessionOptions& options,
const std::vector<string>& containers) override {
return GrpcSession::Reset(options, containers);
}
};
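// Registers GrpcSession for targets with the "grpc://" scheme. A minimal
// client-side sketch (the address below is hypothetical):
//   SessionOptions options;
//   options.target = "grpc://localhost:2222";
//   std::unique_ptr<Session> session(NewSession(options));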
class GrpcSessionRegistrar {
public:
GrpcSessionRegistrar() {
SessionFactory::Register("GRPC_SESSION", new GrpcSessionFactory());
}
};
static GrpcSessionRegistrar registrar;
} | #include "tensorflow/core/distributed_runtime/rpc/grpc_session.h"
#include <string>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/default_device.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/port.h"
namespace tensorflow {
static SessionOptions Devices(int num_cpus, int num_gpus) {
SessionOptions result;
(*result.config.mutable_device_count())["CPU"] = num_cpus;
(*result.config.mutable_device_count())["GPU"] = num_gpus;
return result;
}
void CreateGraphDef(GraphDef* graph_def, string node_names[3]) {
Graph graph(OpRegistry::Global());
Tensor a_tensor(DT_FLOAT, TensorShape({1, 2}));
test::FillValues<float>(&a_tensor, {1, 2});
Node* a = test::graph::Constant(&graph, a_tensor);
node_names[0] = a->name();
Tensor b_tensor(DT_FLOAT, TensorShape({2, 1}));
test::FillValues<float>(&b_tensor, {2, 1});
Node* b = test::graph::Constant(&graph, b_tensor);
node_names[1] = b->name();
Node* c = test::graph::Matmul(&graph, a, b, false, false);
node_names[2] = c->name();
test::graph::ToGraphDef(&graph, graph_def);
}
static void IsSingleFloatValue(const Tensor& val, float expected_val) {
ASSERT_EQ(val.dtype(), DT_FLOAT);
ASSERT_EQ(val.NumElements(), 1);
ASSERT_EQ(val.flat<float>()(0), expected_val);
}
static SessionOptions Options(const string& target, int placement_period) {
SessionOptions options;
options.target = strings::StrCat("grpc:
options.config.set_isolate_session_state(false);
options.config.set_placement_period(placement_period);
options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_opt_level(OptimizerOptions::L0);
return options;
}
static Session* NewRemote(const SessionOptions& options) {
LOG(INFO) << "Connecting to " << options.target;
return CHECK_NOTNULL(NewSession(options));
}
using test::TestClusterConfig;
using test::TestJob;
TEST(GrpcSessionTest, BasicNonProtoAPI) {
GraphDef graph;
string node_names[3];
CreateGraphDef(&graph, node_names);
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
std::unique_ptr<Session> session(
NewRemote(Options(cluster->targets()[0], 1)));
ASSERT_TRUE(session != nullptr);
for (int iters = 0; iters < 25; ++iters) {
TF_ASSERT_OK(session->Create(graph));
{
std::vector<std::pair<string, Tensor>> inputs;
std::vector<string> targets = {node_names[2]};
TF_ASSERT_OK(session->Run(inputs, {}, targets, nullptr));
}
{
std::vector<std::pair<string, Tensor>> inputs;
std::vector<string> names = {node_names[2] + ":0"};
std::vector<string> targets = {node_names[1]};
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run(inputs, names, targets, &outputs));
ASSERT_TRUE(outputs[0].IsInitialized());
ASSERT_EQ(4.0, outputs[0].flat<float>()(0));
}
TF_ASSERT_OK(session->Close());
}
}
TEST(GrpcSessionTest, BasicCallable) {
GraphDef graph;
string node_names[3];
CreateGraphDef(&graph, node_names);
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
std::unique_ptr<Session> session(
NewRemote(Options(cluster->targets()[0], 1)));
ASSERT_TRUE(session != nullptr);
for (int iters = 0; iters < 25; ++iters) {
TF_ASSERT_OK(session->Create(graph));
{
CallableOptions opts;
opts.add_target(node_names[2]);
Session::CallableHandle handle;
TF_ASSERT_OK(session->MakeCallable(opts, &handle));
TF_ASSERT_OK(session->RunCallable(handle, {}, nullptr, nullptr));
TF_ASSERT_OK(session->ReleaseCallable(handle));
}
{
CallableOptions opts;
opts.add_target(node_names[1]);
opts.add_fetch(node_names[2] + ":0");
Session::CallableHandle handle;
TF_ASSERT_OK(session->MakeCallable(opts, &handle));
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->RunCallable(handle, {}, &outputs, nullptr));
ASSERT_EQ(1, outputs.size());
ASSERT_TRUE(outputs[0].IsInitialized());
ASSERT_EQ(4.0, outputs[0].flat<float>()(0));
TF_ASSERT_OK(session->ReleaseCallable(handle));
}
TF_ASSERT_OK(session->Close());
}
}
TEST(GrpcSessionTest, CallableWithOnDeviceFeedsAndFetches) {
GraphDef graph;
string node_names[3];
CreateGraphDef(&graph, node_names);
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
std::unique_ptr<Session> session(
NewRemote(Options(cluster->targets()[0], 1)));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(graph));
std::vector<DeviceAttributes> devices;
TF_ASSERT_OK(session->ListDevices(&devices));
ASSERT_GT(devices.size(), 0);
const string device_name = devices.back().name();
CallableOptions opts;
const string fetch = node_names[2] + ":0";
opts.add_fetch(fetch);
opts.mutable_fetch_devices()->insert({fetch, device_name});
Session::CallableHandle handle;
Status status = session->MakeCallable(opts, &handle);
EXPECT_EQ(error::UNIMPLEMENTED, status.code());
TF_ASSERT_OK(session->Close());
}
TEST(GrpcSessionTest, BasicNonProtoAPIConsistentOrder) {
GraphDef graph;
string node_names[3];
CreateGraphDef(&graph, node_names);
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
std::unique_ptr<Session> session(
NewRemote(Options(cluster->targets()[0], 1)));
ASSERT_TRUE(session != nullptr);
ASSERT_TRUE(session->Create(graph).ok());
std::vector<std::pair<string, Tensor>> inputs;
std::vector<string> names = {node_names[2] + ":0", node_names[0] + ":0",
node_names[1] + ":0"};
std::vector<string> target_ops = {node_names[1]};
std::vector<Tensor> outputs;
ASSERT_TRUE(session->Run(inputs, names, target_ops, &outputs).ok());
ASSERT_TRUE(outputs[0].IsInitialized());
ASSERT_EQ(4.0, outputs[0].flat<float>()(0));
ASSERT_TRUE(outputs[1].IsInitialized());
ASSERT_EQ(1.0, outputs[1].flat<float>()(0));
ASSERT_TRUE(outputs[2].IsInitialized());
ASSERT_EQ(2.0, outputs[2].flat<float>()(0));
ASSERT_TRUE(session->Close().ok());
}
TEST(GrpcSessionTest, NonLocalWithFilters) {
GraphDef graph;
string node_names[3];
CreateGraphDef(&graph, node_names);
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
SessionOptions options;
options.target = strings::StrCat("grpc:
options.config.add_device_filters(cluster->devices()[0].name());
std::unique_ptr<Session> session(NewRemote(options));
ASSERT_TRUE(session != nullptr);
{
GraphDef graph_copy(graph);
graph::SetDefaultDevice(cluster->devices()[0].name(), &graph_copy);
TF_ASSERT_OK(session->Create(graph_copy));
TF_ASSERT_OK(session->Run({}, {}, {node_names[2]}, nullptr));
TF_ASSERT_OK(session->Close());
}
{
GraphDef graph_copy(graph);
graph::SetDefaultDevice(cluster->devices()[1].name(), &graph_copy);
auto status = session->Create(graph_copy);
EXPECT_EQ(absl::StatusCode::kInvalidArgument, status.code());
}
}
TEST(GrpcSessionTest, FetchMultipleTimes) {
GraphDef graph;
string node_names[3];
CreateGraphDef(&graph, node_names);
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
std::unique_ptr<Session> session(
NewRemote(Options(cluster->targets()[0], 1)));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(graph));
const std::vector<std::pair<string, Tensor>> inputs;
std::vector<Tensor> outputs;
const string node = node_names[2] + ":0";
TF_ASSERT_OK(session->Run(inputs, {node, node}, {}, &outputs));
EXPECT_EQ(2, outputs.size());
for (int i = 0; i < outputs.size(); ++i) {
const Tensor& t = outputs[i];
ASSERT_TRUE(t.IsInitialized()) << i;
ASSERT_EQ(4.0, t.flat<float>()(0)) << i;
}
TF_ASSERT_OK(session->Close());
}
TEST(GrpcSessionTest, DisableOutputPartitionGraphs) {
GraphDef graph;
string node_names[3];
CreateGraphDef(&graph, node_names);
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
SessionOptions options = Options(cluster->targets()[0], 1);
options.config.mutable_experimental()->set_disable_output_partition_graphs(
true);
std::unique_ptr<Session> session(NewRemote(options));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(graph));
{
TF_ASSERT_OK(session->Run({}, {}, {node_names[2]}, nullptr));
}
{
RunOptions run_options;
run_options.set_output_partition_graphs(true);
RunMetadata run_metadata;
Status s = session->Run(run_options, {}, {}, {node_names[2]}, nullptr,
&run_metadata);
EXPECT_TRUE(errors::IsInvalidArgument(s));
EXPECT_TRUE(
absl::StrContains(s.message(), "disable_output_partition_graphs"));
}
TF_ASSERT_OK(session->Close());
}
void FindMaxEigen(const string& target) {
Graph graph(OpRegistry::Global());
Tensor a_tensor(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&a_tensor, {3, 2, -1, 0});
Node* a = test::graph::Constant(&graph, a_tensor);
Tensor x_tensor(DT_FLOAT, TensorShape({2, 1}));
test::FillValues<float>(&x_tensor, {0, 0});
Node* x = test::graph::Constant(&graph, x_tensor);
Node* y = test::graph::Matmul(&graph, a, x, false, false);
Node* y2 = test::graph::Unary(&graph, "Square", y);
Tensor rdim_tensor(DT_INT32, TensorShape({}));
rdim_tensor.scalar<int32>()() = 0;
Node* rdim = test::graph::Constant(&graph, rdim_tensor);
Node* y2_sum = test::graph::Reduce(&graph, "Sum", y2, rdim);
Node* y_norm = test::graph::Unary(&graph, "Sqrt", y2_sum);
Node* y_normalized = test::graph::Binary(&graph, "Div", y, y_norm);
GraphDef def;
test::graph::ToGraphDef(&graph, &def);
std::unique_ptr<Session> session(NewRemote(Options(target, 1)));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def));
float lambda;
Tensor feed_value(DT_FLOAT, TensorShape({2, 1}));
feed_value.matrix<float>()(0, 0) = -3.1415;
feed_value.matrix<float>()(1, 0) = +2.7183;
for (int i = 0; i < 25; ++i) {
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run({{x->name(), feed_value}},
{y->name(), y_normalized->name()}, {}, &outputs));
const Tensor& y = outputs[0];
const Tensor& y_normalized = outputs[1];
CHECK_EQ(2, feed_value.NumElements());
CHECK_EQ(2, y.NumElements());
lambda = y.flat<float>()(0) / feed_value.flat<float>()(0);
printf("%06d lambda = %8.6f x = [%8.6f %8.6f] y = [%8.6f %8.6f]\n", i,
lambda, feed_value.flat<float>()(0), feed_value.flat<float>()(1),
y.flat<float>()(0), y.flat<float>()(1));
feed_value = y_normalized;
}
EXPECT_NEAR(2.0, lambda, 1e-6);
}
TEST(FindMaxEigenTest, RemoteDevice) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
FindMaxEigen(cluster->targets()[0]);
}
void SetDevice(GraphDef* graph, const string& name, const string& dev) {
for (int i = 0; i < graph->node_size(); ++i) {
if (graph->node(i).name() == name) {
graph->mutable_node(i)->set_device(dev);
return;
}
}
LOG(FATAL) << "Name '" << name << "' not found.";
}
TEST(GrpcSessionTest, DISABLED_MultiDevices) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
Graph graph(OpRegistry::Global());
const int kSize = 1048576;
Tensor a_tensor(DT_FLOAT, TensorShape({1, kSize}));
Tensor b_tensor(DT_FLOAT, TensorShape({kSize, 1}));
for (int i = 0; i < kSize; ++i) {
a_tensor.flat<float>()(i) = 2;
b_tensor.flat<float>()(i) = 3;
}
Node* a = test::graph::Constant(&graph, a_tensor);
Node* b = test::graph::Constant(&graph, b_tensor);
Node* c = test::graph::Matmul(&graph, a, b, false, false);
GraphDef def;
test::graph::ToGraphDef(&graph, &def);
for (const auto& a_dev : cluster->devices()) {
for (const auto& b_dev : cluster->devices()) {
for (const auto& c_dev : cluster->devices()) {
LOG(INFO) << "a: " << a_dev.name() << " b: " << b_dev.name()
<< " c: " << c_dev.name();
SetDevice(&def, a->name(), a_dev.name());
SetDevice(&def, b->name(), b_dev.name());
SetDevice(&def, c->name(), c_dev.name());
std::unique_ptr<Session> session(
NewRemote(Options(cluster->targets()[0], 1000)));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def));
{
std::vector<Tensor> outputs;
RunOptions options;
options.set_trace_level(RunOptions::FULL_TRACE);
RunMetadata metadata;
TF_ASSERT_OK(
session->Run(options, {}, {c->name()}, {}, &outputs, &metadata));
ASSERT_EQ(1, outputs.size());
IsSingleFloatValue(outputs[0], 6.0 * kSize);
const StepStats& ss = metadata.step_stats();
bool c_placed_correctly = false;
for (const auto& dev : ss.dev_stats()) {
for (const auto& node : dev.node_stats()) {
if (node.node_name() == c->name() &&
dev.device() == c_dev.name()) {
c_placed_correctly = true;
}
}
}
ASSERT_TRUE(c_placed_correctly);
}
TF_ASSERT_OK(session->Close());
}
}
}
}
TEST(GrpcSessionTest, LargeTensorSend) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
Graph graph(OpRegistry::Global());
Tensor fill_shape_tensor(DT_INT32, TensorShape({4}));
fill_shape_tensor.vec<int32>()(0) = 1;
fill_shape_tensor.vec<int32>()(1) = 256;
fill_shape_tensor.vec<int32>()(2) = 1024;
fill_shape_tensor.vec<int32>()(3) = 1024;
Node* fill_shape_node = test::graph::Constant(&graph, fill_shape_tensor);
Tensor fill_val_tensor(DT_FLOAT, TensorShape({}));
fill_val_tensor.flat<float>()(0) = 1.0;
Node* fill_val_node = test::graph::Constant(&graph, fill_val_tensor);
Node* fill_node =
test::graph::Binary(&graph, "Fill", fill_shape_node, fill_val_node);
Tensor max_axes_tensor(DT_INT32, TensorShape({4}));
max_axes_tensor.vec<int32>()(0) = 0;
max_axes_tensor.vec<int32>()(1) = 1;
max_axes_tensor.vec<int32>()(2) = 2;
max_axes_tensor.vec<int32>()(3) = 3;
Node* max_axes_node = test::graph::Constant(&graph, max_axes_tensor);
Node* max_node = test::graph::Reduce(&graph, "Max", fill_node, max_axes_node);
GraphDef def;
test::graph::ToGraphDef(&graph, &def);
SetDevice(&def, fill_node->name(), cluster->devices()[0].name());
SetDevice(&def, fill_node->name(), cluster->devices()[1].name());
std::unique_ptr<Session> session(
NewRemote(Options(cluster->targets()[0], 1000)));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def));
{
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run({}, {max_node->name()}, {}, &outputs));
ASSERT_EQ(1, outputs.size());
IsSingleFloatValue(outputs[0], 1.0);
}
TF_ASSERT_OK(session->Close());
}
TEST(GrpcSessionTest, MultiDevices_String) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 1))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
std::unique_ptr<Session> session(
NewRemote(Options(cluster->targets()[0], 1000)));
ASSERT_TRUE(session != nullptr);
Graph graph(OpRegistry::Global());
Tensor a_tensor(DT_STRING, TensorShape({2, 2}));
for (int i = 0; i < 4; ++i) {
a_tensor.flat<tstring>()(i) = "hello, world";
}
Node* a = test::graph::Constant(&graph, a_tensor);
Node* b = test::graph::Identity(&graph, a);
GraphDef def;
test::graph::ToGraphDef(&graph, &def);
for (const auto& a_dev : cluster->devices()) {
for (const auto& b_dev : cluster->devices()) {
LOG(INFO) << "a: " << a_dev.name() << " b: " << b_dev.name();
SetDevice(&def, a->name(), a_dev.name());
SetDevice(&def, b->name(), b_dev.name());
Status s = session->Create(def);
if (s.ok()) {
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run({}, {b->name()}, {}, &outputs));
ASSERT_EQ(1, outputs.size());
ASSERT_EQ(outputs[0].dtype(), DT_STRING);
ASSERT_EQ(outputs[0].NumElements(), 4);
for (int i = 0; i < outputs[0].NumElements(); ++i) {
EXPECT_EQ(outputs[0].flat<tstring>()(i), "hello, world");
}
TF_ASSERT_OK(session->Close());
} else {
LOG(ERROR) << "Error: " << s;
ASSERT_TRUE((a_dev.device_type() == DEVICE_GPU) ||
(b_dev.device_type() == DEVICE_GPU));
ASSERT_FALSE(s.ok());
}
}
}
}
TEST(GrpcSessionTest, SendRecv_Node_Naming) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 3}}),
&cluster));
std::unique_ptr<Session> session(
NewRemote(Options(cluster->targets()[0], 1)));
ASSERT_TRUE(session != nullptr);
CHECK_GE(cluster->devices().size(), 3);
const DeviceAttributes& src = cluster->devices()[0];
const DeviceAttributes& dst0 = cluster->devices()[1];
const DeviceAttributes& dst1 = cluster->devices()[2];
LOG(INFO) << "src = " << src.name() << " dst0 = " << dst0.name()
<< " dst1 = " << dst1.name();
Graph graph(OpRegistry::Global());
Tensor a_tensor(DT_FLOAT, TensorShape({1, 1}));
a_tensor.flat<float>()(0) = 100;
Node* a = test::graph::Constant(&graph, a_tensor);
Node* b = test::graph::Identity(&graph, a);
Node* c = test::graph::Identity(&graph, a);
GraphDef def;
test::graph::ToGraphDef(&graph, &def);
SetDevice(&def, a->name(), src.name());
SetDevice(&def, b->name(), dst0.name());
SetDevice(&def, c->name(), dst1.name());
TF_ASSERT_OK(session->Create(def));
{
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run({}, {b->name()}, {}, &outputs));
ASSERT_EQ(1, outputs.size());
IsSingleFloatValue(outputs[0], 100);
}
{
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run({}, {c->name()}, {}, &outputs));
ASSERT_EQ(1, outputs.size());
IsSingleFloatValue(outputs[0], 100);
}
TF_ASSERT_OK(session->Close());
}
TEST(GrpcSessionTest, Error) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
auto master = cluster->targets()[0];
const string& dev_a = cluster->devices()[0].name();
const string& dev_b = cluster->devices()[1].name();
LOG(INFO) << "master " << master << "dev_a " << dev_a << "dev_b " << dev_b;
GraphDef gdef;
std::vector<string> fetches;
{
Graph g(OpRegistry::Global());
auto a = test::graph::Constant(&g, Tensor());
a->set_assigned_device_name(dev_a);
auto a_err = test::graph::Error(&g, a, "fantasia!");
a_err->set_assigned_device_name(dev_a);
auto a2 = test::graph::Add(&g, a, a_err);
a2->set_assigned_device_name(dev_a);
fetches.push_back(a2->name());
auto b = test::graph::Constant(&g, Tensor());
b->set_assigned_device_name(dev_b);
auto b_delay = test::graph::Delay(&g, b, Microseconds(1000000));
b_delay->set_assigned_device_name(dev_b);
auto b2 = test::graph::Add(&g, b, b_delay);
b2->set_assigned_device_name(dev_b);
fetches.push_back(b2->name());
test::graph::ToGraphDef(&g, &gdef);
}
std::unique_ptr<Session> session(NewRemote(Options(master, 1)));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(gdef));
{
Status status = session->Run({}, fetches, {}, nullptr);
EXPECT_FALSE(status.ok());
EXPECT_NE(status.ToString().find("fantasia!"), string::npos);
}
TF_ASSERT_OK(session->Close());
Env::Default()->SleepForMicroseconds(2000000);
}
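// Same scenario as Error above, but the Error op is created with logging
// enabled, so the returned status should also carry the "ErrorOp:" prefix.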
TEST(GrpcSessionTest, ErrorStatusLog) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
auto master = cluster->targets()[0];
const string& dev_a = cluster->devices()[0].name();
const string& dev_b = cluster->devices()[1].name();
LOG(INFO) << "master " << master << "dev_a " << dev_a << "dev_b " << dev_b;
GraphDef gdef;
std::vector<string> fetches;
{
Graph g(OpRegistry::Global());
auto a = test::graph::Constant(&g, Tensor());
a->set_assigned_device_name(dev_a);
auto a_err = test::graph::Error(&g, a, "fantasia!", true);
a_err->set_assigned_device_name(dev_a);
auto a2 = test::graph::Add(&g, a, a_err);
a2->set_assigned_device_name(dev_a);
fetches.push_back(a2->name());
auto b = test::graph::Constant(&g, Tensor());
b->set_assigned_device_name(dev_b);
auto b_delay = test::graph::Delay(&g, b, Microseconds(1000000));
b_delay->set_assigned_device_name(dev_b);
auto b2 = test::graph::Add(&g, b, b_delay);
b2->set_assigned_device_name(dev_b);
fetches.push_back(b2->name());
g.ToGraphDef(&gdef);
}
std::unique_ptr<Session> session(NewRemote(Options(master, 1)));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(gdef));
{
Status status = session->Run({}, fetches, {}, nullptr);
EXPECT_FALSE(status.ok());
std::cerr << status << "\n";
EXPECT_NE(status.ToString().find("fantasia!"), string::npos);
EXPECT_NE(status.ToString().find("ErrorOp: fantasia!"), string::npos);
}
TF_ASSERT_OK(session->Close());
Env::Default()->SleepForMicroseconds(2000000);
}
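// A ~1MB error message must survive the trip back through the master with
// the original "fantasia!" text still present.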
TEST(GrpcSessionTest, LongErrorMessage) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
auto master = cluster->targets()[0];
const string& dev_a = cluster->devices()[0].name();
const string& dev_b = cluster->devices()[1].name();
LOG(INFO) << "master " << master << "dev_a " << dev_a << "dev_b " << dev_b;
GraphDef gdef;
std::vector<string> fetches;
{
Graph g(OpRegistry::Global());
auto a = test::graph::Constant(&g, Tensor());
a->set_assigned_device_name(dev_a);
std::vector<char> long_string_buffer(1024 * 1024, 'x');
StringPiece long_string(long_string_buffer.data(), 1024 * 1024);
string name = strings::StrCat(long_string, "fantasia!");
auto a_err = test::graph::Error(&g, a, name);
a_err->set_assigned_device_name(dev_a);
auto a2 = test::graph::Add(&g, a, a_err);
a2->set_assigned_device_name(dev_a);
fetches.push_back(a2->name());
auto b = test::graph::Constant(&g, Tensor());
b->set_assigned_device_name(dev_b);
auto b_delay = test::graph::Delay(&g, b, Microseconds(1000000));
b_delay->set_assigned_device_name(dev_b);
auto b2 = test::graph::Add(&g, b, b_delay);
b2->set_assigned_device_name(dev_b);
fetches.push_back(b2->name());
test::graph::ToGraphDef(&g, &gdef);
}
std::unique_ptr<Session> session(NewRemote(Options(master, 1)));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(gdef));
{
Status status = session->Run({}, fetches, {}, nullptr);
EXPECT_FALSE(status.ok());
EXPECT_NE(status.ToString().find("fantasia!"), string::npos);
}
TF_ASSERT_OK(session->Close());
Env::Default()->SleepForMicroseconds(2000000);
}
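// A variable initialized through one session must keep its value across
// later sessions on the same cluster: each iteration increments the variable
// in one session and reads the updated value back in another.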
TEST(SessionTest, SharedVar) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 1}}),
&cluster));
const string master = cluster->targets()[0];
CHECK_EQ(cluster->devices().size(), 1);
GraphDef gdef;
string init_name;
string inc_name;
string get_name;
{
Graph g(OpRegistry::Global());
Tensor one(DT_FLOAT, TensorShape({}));
one.scalar<float>()() = 1.0;
Node* var = test::graph::Var(&g, DT_FLOAT, one.shape());
Node* init = test::graph::Assign(&g, var, test::graph::Constant(&g, one));
init_name = init->name();
Node* update = test::graph::Assign(
&g, var, test::graph::Add(&g, var, test::graph::Constant(&g, one)));
inc_name = update->name();
get_name = var->name();
test::graph::ToGraphDef(&g, &gdef);
}
{
Session* sess = NewRemote(Options(master, 1));
TF_ASSERT_OK(sess->Create(gdef));
std::vector<std::pair<string, Tensor>> inp;
TF_ASSERT_OK(sess->Run(inp, {}, {init_name}, nullptr));
TF_ASSERT_OK(sess->Close());
delete sess;
}
for (int rep = 1; rep < 10; ++rep) {
{
Session* sess = NewRemote(Options(master, 1));
TF_ASSERT_OK(sess->Create(gdef));
std::vector<std::pair<string, Tensor>> inp;
TF_ASSERT_OK(sess->Run(inp, {}, {inc_name}, nullptr));
TF_ASSERT_OK(sess->Close());
delete sess;
}
{
Session* sess = NewRemote(Options(master, 1));
TF_ASSERT_OK(sess->Create(gdef));
std::vector<std::pair<string, Tensor>> inp;
std::vector<Tensor> ret;
TF_ASSERT_OK(sess->Run(inp, {get_name}, {}, &ret));
ASSERT_EQ(ret.size(), 1);
EXPECT_EQ(ret[0].scalar<float>()(), 1.0 * (1 + rep));
TF_ASSERT_OK(sess->Close());
delete sess;
}
}
}
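// Two learner replicas share one variable hosted on a separate variable
// server job; increments issued from either replica must be visible when the
// variable is read back from both.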
TEST(SessionTest, SharedVarWithMultipleLearnerReplicas) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"variable_server", 1},
TestJob{"learner", 2, 2}}),
&cluster));
for (const auto& device : cluster->devices()) {
LOG(INFO) << device.DebugString();
}
ASSERT_EQ(cluster->devices().size(), 3);
GraphDef gdef;
string init_name;
string inc_name;
string get_name;
std::string var_server_device = "/job:variable_server/replica:0/task:0";
std::string learner_0_device = "/job:learner/replica:0/task:0";
std::string learner_1_device = "/job:learner/replica:1/task:0";
LOG(INFO) << "Learners: " << absl::StrJoin(cluster->targets("learner"), "; ");
{
Graph g(OpRegistry::Global());
Tensor one(DT_FLOAT, TensorShape({}));
one.scalar<float>()() = 1.0;
Node* var = test::graph::Var(&g, DT_FLOAT, one.shape());
var->mutable_def()->set_device(var_server_device);
Node* init = test::graph::Assign(&g, var, test::graph::Constant(&g, one));
init_name = init->name();
Node* update = test::graph::Assign(
&g, var, test::graph::Add(&g, var, test::graph::Constant(&g, one)));
inc_name = update->name();
get_name = var->name();
test::graph::ToGraphDef(&g, &gdef);
}
Session* learner0 = NewRemote(Options(cluster->targets("learner")[0], 1));
{
TF_ASSERT_OK(learner0->Create(gdef));
std::vector<std::pair<string, Tensor>> inp;
TF_ASSERT_OK(learner0->Run(inp, {}, {init_name}, nullptr));
}
for (int rep = 1; rep < 10; ++rep) {
std::vector<std::pair<string, Tensor>> inp;
TF_ASSERT_OK(learner0->Run(inp, {}, {inc_name}, nullptr));
}
Session* learner1 = NewRemote(Options(cluster->targets("learner")[1], 1));
TF_ASSERT_OK(learner1->Create(gdef));
for (int rep = 1; rep < 10; ++rep) {
std::vector<std::pair<string, Tensor>> inp;
TF_ASSERT_OK(learner1->Run(inp, {}, {inc_name}, nullptr));
}
std::vector<std::pair<string, Tensor>> inp;
std::vector<Tensor> ret;
TF_ASSERT_OK(learner0->Run(inp, {get_name}, {}, &ret));
ASSERT_EQ(ret.size(), 1);
EXPECT_EQ(ret[0].scalar<float>()(), 1.0 * 19);
TF_ASSERT_OK(learner1->Run(inp, {get_name}, {}, &ret));
ASSERT_EQ(ret.size(), 1);
EXPECT_EQ(ret[0].scalar<float>()(), 1.0 * 19);
TF_ASSERT_OK(learner0->Close());
TF_ASSERT_OK(learner1->Close());
}
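// Helper: creating a session from the given (malformed) GraphDef must fail
// with a status message containing `error_substring`.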
void CreateInvalidGraph(const string& graph_def_ascii,
const string& error_substring) {
GraphDef graph;
CHECK(protobuf::TextFormat::ParseFromString(graph_def_ascii, &graph));
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
std::unique_ptr<Session> session(
NewRemote(Options(cluster->targets()[0], 1)));
Status s = session->Create(graph);
ASSERT_FALSE(s.ok());
EXPECT_NE(s.message().find(error_substring), string::npos);
}
TEST(SessionTest, InvalidOpName) {
CreateInvalidGraph(R"(
node {
name: 'a:b' op: 'Const'
attr { key: 'dtype' value { type: DT_FLOAT } }
attr { key: 'value' value {
tensor { dtype: DT_FLOAT tensor_shape { dim [{size:1}, {size:1}] }
float_val: [100] }
} }
}
)",
"Illegal op name");
CreateInvalidGraph(R"(
node {
name: 'a:0' op: 'Const'
attr { key: 'dtype' value { type: DT_FLOAT } }
attr { key: 'value' value {
tensor { dtype: DT_FLOAT tensor_shape { dim [{size:1}, {size:1}] }
float_val: [100] }
} }
}
)",
"Illegal op name");
CreateInvalidGraph(R"(
node {
name: '_a' op: 'Const'
attr { key: 'dtype' value { type: DT_FLOAT } }
attr { key: 'value' value {
tensor { dtype: DT_FLOAT tensor_shape { dim [{size:1}, {size:1}] }
float_val: [100] }
} }
}
)",
"Illegal op name");
}
TEST(SessionTest, InvalidOpInputName) {
CreateInvalidGraph(R"(
node {
name: 'a' op: 'const'
attr { key: 'dtype' value { type: DT_FLOAT } }
attr { key: 'value' value {
tensor { dtype: DT_FLOAT tensor_shape { dim [{size:1}, {size:1}] }
float_val: [100] }
} }
}
node {
name:'b' op:'MatMul' input:'a:first' input:'a'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'transpose_a' value { b: false } }
attr { key: 'transpose_b' value { b: false } }
}
)",
"Illegal op input name");
CreateInvalidGraph(R"(
node {
name: 'a' op: 'const'
attr { key: 'dtype' value { type: DT_FLOAT } }
attr { key: 'value' value {
tensor { dtype: DT_FLOAT tensor_shape { dim [{size:1}, {size:1}] }
float_val: [100] }
} }
}
node {
name:'b' op:'MatMul' input:'_a' input:'a'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'transpose_a' value { b: false } }
attr { key: 'transpose_b' value { b: false } }
}
)",
"Illegal op input name");
CreateInvalidGraph(R"(
node {
name: 'a' op: 'const'
attr { key: 'dtype' value { type: DT_FLOAT } }
attr { key: 'value' value {
tensor { dtype: DT_FLOAT tensor_shape { dim [{size:1}, {size:1}] }
float_val: [100] }
} }
}
node {
name:'b' op:'MatMul' input:'_a:0' input:'a'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'transpose_a' value { b: false } }
attr { key: 'transpose_b' value { b: false } }
}
)",
"Illegal op input name");
CreateInvalidGraph(R"(
node {
name: 'a' op: 'const'
attr { key: 'dtype' value { type: DT_FLOAT } }
attr { key: 'value' value {
tensor { dtype: DT_FLOAT tensor_shape { dim [{size:1}, {size:1}] }
float_val: [100] }
} }
}
node {
name:'b' op:'MatMul' input:'a:01' input:'a'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'transpose_a' value { b: false } }
attr { key: 'transpose_b' value { b: false } }
}
)",
"Illegal op input name");
}
TEST(SessionTest, ExtendValidation) {
GraphDef graph;
bool success = protobuf::TextFormat::ParseFromString(R"(
node {
name: 'a' op: 'Const'
attr { key: 'dtype' value { type: DT_FLOAT } }
attr { key: 'value' value {
tensor { dtype: DT_FLOAT tensor_shape { dim [{size:1}, {size:1}] }
float_val: [100] }
} }
}
)",
&graph);
ASSERT_TRUE(success);
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
std::unique_ptr<Session> session(
NewRemote(Options(cluster->targets()[0], 1)));
TF_ASSERT_OK(session->Create(graph));
GraphDef extension;
success = protobuf::TextFormat::ParseFromString(R"(
node {
name:'b' op:'MatMul' input:'a:first' input:'a'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'transpose_a' value { b: false } }
attr { key: 'transpose_b' value { b: false } }
}
)",
&extension);
ASSERT_TRUE(success);
Status s = session->Extend(extension);
ASSERT_FALSE(s.ok());
EXPECT_NE(s.message().find("Illegal op input name"), string::npos);
success = protobuf::TextFormat::ParseFromString(R"(
node {
name:'b' op:'MatMul' input:'a' input:'a'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'transpose_a' value { b: false } }
attr { key: 'transpose_b' value { b: false } }
}
)",
&extension);
ASSERT_TRUE(success);
TF_ASSERT_OK(session->Extend(extension));
success = protobuf::TextFormat::ParseFromString(R"(
node {
name:'b' op:'MatMul' input:'a' input:'a'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'transpose_a' value { b: false } }
attr { key: 'transpose_b' value { b: false } }
}
)",
&extension);
ASSERT_TRUE(success);
s = session->Extend(extension);
ASSERT_FALSE(s.ok());
EXPECT_NE(s.message().find("'b', which was created by a previous call"),
string::npos);
}
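// The next two tests target Create() timeouts: example.org:2222 is assumed
// unreachable, so Create() should fail with DEADLINE_EXCEEDED (or
// UNAVAILABLE, depending on how the channel reports the failure).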
TEST(SessionTest, CreateTimeoutWithSessionOptions) {
SessionOptions options = Options("example.org:2222", 1);
options.config.set_operation_timeout_in_ms(100);
std::unique_ptr<Session> session(NewRemote(options));
Graph graph(OpRegistry::Global());
Node* b = test::graph::Constant(&graph, Tensor());
test::graph::Delay(&graph, b, Microseconds(1000000));
GraphDef gdef;
test::graph::ToGraphDef(&graph, &gdef);
Status status = session->Create(gdef);
EXPECT_TRUE(error::DEADLINE_EXCEEDED == status.code() ||
error::UNAVAILABLE == status.code());
}
TEST(SessionTest, CreateTimeoutWithRunOptions) {
SessionOptions options = Options("example.org:2222", 1);
std::unique_ptr<Session> session(NewRemote(options));
Graph graph(OpRegistry::Global());
Node* b = test::graph::Constant(&graph, Tensor());
test::graph::Delay(&graph, b, Microseconds(1000000));
GraphDef gdef;
test::graph::ToGraphDef(&graph, &gdef);
RunOptions run_options;
run_options.set_timeout_in_ms(20);
Status status = session->Create(run_options, gdef);
EXPECT_TRUE(error::DEADLINE_EXCEEDED == status.code() ||
error::UNAVAILABLE == status.code());
}
TEST(SessionTest, RunTimeoutWithSessionOptions) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 1}}),
&cluster));
SessionOptions options = Options(cluster->targets()[0], 100);
options.config.set_operation_timeout_in_ms(1);
std::unique_ptr<Session> session(NewRemote(options));
Graph graph(OpRegistry::Global());
Node* b = test::graph::Constant(&graph, Tensor());
Node* b_delay = test::graph::Delay(&graph, b, Microseconds(2000000));
GraphDef gdef;
test::graph::ToGraphDef(&graph, &gdef);
RunOptions run_options;
TF_ASSERT_OK(session->Create(run_options, gdef));
std::vector<std::pair<string, Tensor>> inputs;
Status status = session->Run(inputs, {}, {b_delay->name()}, nullptr);
EXPECT_TRUE(error::DEADLINE_EXCEEDED == status.code() ||
error::INTERNAL == status.code());
}
TEST(SessionTest, RunTimeoutWithRunOptions) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 1}}),
&cluster));
SessionOptions options = Options(cluster->targets()[0], 1);
std::unique_ptr<Session> session(NewRemote(options));
Graph graph(OpRegistry::Global());
Node* b = test::graph::Constant(&graph, Tensor());
Node* b_delay = test::graph::Delay(&graph, b, Microseconds(1000000));
GraphDef gdef;
test::graph::ToGraphDef(&graph, &gdef);
TF_ASSERT_OK(session->Create(gdef));
std::vector<std::pair<string, Tensor>> inputs;
RunOptions run_options;
run_options.set_timeout_in_ms(100);
Status status = session->Run(run_options, inputs, {}, {b_delay->name()},
nullptr, nullptr);
EXPECT_TRUE(error::DEADLINE_EXCEEDED == status.code() ||
error::INTERNAL == status.code());
}
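// Sanity-checks that enabling gRPC deflate compression on the channel still
// round-trips a tensor value intact.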
TEST(SessionTest, TestCompression) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 1}}),
&cluster));
SessionOptions options = Options(cluster->targets()[0], 100);
RPCOptions* rpc_options = options.config.mutable_rpc_options();
rpc_options->set_compression_algorithm("deflate");
rpc_options->set_compression_level(GRPC_COMPRESS_LEVEL_HIGH);
std::unique_ptr<Session> session(NewRemote(options));
static const float kTestValue = 409.1934f;
Graph graph(OpRegistry::Global());
Tensor tensor(DT_FLOAT, TensorShape({1, 1}));
tensor.flat<float>()(0) = kTestValue;
Node* b = test::graph::Constant(&graph, tensor);
GraphDef gdef;
graph.ToGraphDef(&gdef);
RunOptions run_options;
TF_ASSERT_OK(session->Create(run_options, gdef));
std::vector<std::pair<string, Tensor>> inputs;
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run(inputs, {b->name()}, {}, &outputs));
ASSERT_EQ(1, outputs.size());
IsSingleFloatValue(outputs[0], kTestValue);
}
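// The ErrorAggregation tests verify that when one or more workers fail in a
// step, the client sees the original INTERNAL error text rather than a
// derived Cancelled/Aborted status produced while tearing down peers.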
TEST(GrpcSessionTest, ErrorAggregationTwoWorkersTwoErrors) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(1, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
auto& devs = cluster->devices();
auto master = cluster->targets()[0];
const string w1_dev1 = devs[0].name();
const string w2_dev1 = devs[1].name();
LOG(INFO) << "master " << master << "w1_dev1 " << w1_dev1 << " w2_dev1 "
<< w2_dev1;
GraphDef gdef;
std::vector<string> fetches;
{
Graph g(OpRegistry::Global());
auto a = test::graph::Constant(&g, Tensor(1));
a->set_assigned_device_name(w1_dev1);
auto a_err = test::graph::Error(&g, a, "fantasia1!");
a_err->set_assigned_device_name(w1_dev1);
fetches.push_back(a_err->name());
auto b = test::graph::Constant(&g, Tensor(1));
b->set_assigned_device_name(w2_dev1);
auto b_err = test::graph::Error(&g, b, "fantasia2!");
b_err->set_assigned_device_name(w2_dev1);
fetches.push_back(b_err->name());
g.ToGraphDef(&gdef);
}
std::unique_ptr<Session> session(NewRemote(Options(master, 1)));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(gdef));
{
std::vector<Tensor> outputs;
Status status = session->Run({}, fetches, {}, &outputs);
LOG(INFO) << status;
EXPECT_FALSE(status.ok());
EXPECT_NE(status.ToString().find("fantasia"), string::npos);
EXPECT_EQ(status.code(), error::Code::INTERNAL);
}
TF_ASSERT_OK(session->Close());
Env::Default()->SleepForMicroseconds(2000000);
}
TEST(GrpcSessionTest, ErrorAggregationTwoWorkerRace) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(2, 0))
.Jobs({TestJob{"localhost", 2}}),
&cluster));
auto& devs = cluster->devices();
auto master = cluster->targets()[0];
const string w1_dev1 = devs[0].name();
const string w1_dev2 = devs[1].name();
const string w2_dev1 = devs[2].name();
LOG(INFO) << "master " << master << "w1_dev1 " << w1_dev1 << " w1_dev2 "
<< w1_dev2 << " w2_dev1 " << w2_dev1;
GraphDef gdef;
std::vector<string> fetches;
std::vector<string> targets;
{
Graph g(OpRegistry::Global());
auto a = test::graph::Constant(&g, Tensor(1));
a->set_assigned_device_name(w1_dev1);
auto a_err = test::graph::Error(&g, a, "fantasia!");
a_err->set_assigned_device_name(w1_dev1);
auto a_delay = test::graph::Delay(&g, a, Microseconds(5000000));
a_delay->set_assigned_device_name(w1_dev2);
targets.push_back(a_delay->name());
fetches.push_back(a_err->name());
auto b = test::graph::Constant(&g, Tensor(3));
b->set_assigned_device_name(w2_dev1);
auto b2 = test::graph::Add(&g, b, a_err);
b2->set_assigned_device_name(w2_dev1);
fetches.push_back(b2->name());
g.ToGraphDef(&gdef);
}
std::unique_ptr<Session> session(NewRemote(Options(master, 1)));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(gdef));
{
std::vector<Tensor> outputs;
Status status = session->Run({}, fetches, targets, &outputs);
LOG(INFO) << status;
EXPECT_FALSE(status.ok());
EXPECT_NE(status.ToString().find("fantasia!"), string::npos);
EXPECT_EQ(status.ToString().find("Cancelled"), string::npos);
EXPECT_EQ(status.code(), error::Code::INTERNAL);
}
TF_ASSERT_OK(session->Close());
Env::Default()->SleepForMicroseconds(2000000);
}
TEST(GrpcSessionTest, ErrorAggregationThreeWorkerRaceVariant1) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(2, 0))
.Jobs({TestJob{"localhost", 3}}),
&cluster));
auto& devs = cluster->devices();
auto master = cluster->targets()[0];
const string w1_dev1 = devs[0].name();
const string w1_dev2 = devs[1].name();
const string w2_dev1 = devs[2].name();
const string w3_dev1 = devs[4].name();
LOG(INFO) << "master " << master << "w1_dev1 " << w1_dev1 << " w1_dev2 "
<< w1_dev2 << " w2_dev1 " << w2_dev1 << " w3_dev1 " << w3_dev1;
GraphDef gdef;
std::vector<string> fetches;
std::vector<string> targets;
{
Graph g(OpRegistry::Global());
auto a = test::graph::Constant(&g, Tensor(1));
a->set_assigned_device_name(w1_dev1);
auto a_err = test::graph::Error(&g, a, "fantasia!");
a_err->set_assigned_device_name(w1_dev1);
auto a_delay = test::graph::Delay(&g, a, Microseconds(5000000));
a_delay->set_assigned_device_name(w1_dev2);
targets.push_back(a_delay->name());
fetches.push_back(a_err->name());
auto b = test::graph::Constant(&g, Tensor(3));
b->set_assigned_device_name(w2_dev1);
auto b2 = test::graph::Add(&g, b, a_err);
b2->set_assigned_device_name(w2_dev1);
fetches.push_back(b2->name());
auto c = test::graph::Constant(&g, Tensor(3));
c->set_assigned_device_name(w3_dev1);
auto c_delay = test::graph::Delay(&g, c, Microseconds(4000000));
c_delay->set_assigned_device_name(w3_dev1);
targets.push_back(c_delay->name());
g.ToGraphDef(&gdef);
}
std::unique_ptr<Session> session(NewRemote(Options(master, 1)));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(gdef));
{
std::vector<Tensor> outputs;
Status status = session->Run({}, fetches, targets, &outputs);
LOG(INFO) << status;
EXPECT_FALSE(status.ok());
EXPECT_NE(status.ToString().find("fantasia!"), string::npos);
EXPECT_EQ(status.ToString().find("Cancelled"), string::npos);
EXPECT_EQ(status.ToString().find("Aborted"), string::npos);
EXPECT_EQ(status.code(), error::Code::INTERNAL);
}
TF_ASSERT_OK(session->Close());
Env::Default()->SleepForMicroseconds(2000000);
}
TEST(GrpcSessionTest, ErrorAggregationThreeWorkerRaceVariant2) {
std::unique_ptr<test::TestCluster> cluster;
TF_ASSERT_OK(test::TestCluster::MakeTestCluster(
TestClusterConfig()
.Options(Devices(2, 0))
.Jobs({TestJob{"localhost", 3}}),
&cluster));
auto& devs = cluster->devices();
auto master = cluster->targets()[0];
const string w1_dev1 = devs[0].name();
const string w1_dev2 = devs[1].name();
const string w2_dev1 = devs[2].name();
const string w3_dev1 = devs[4].name();
LOG(INFO) << "master " << master << "w1_dev1 " << w1_dev1 << " w1_dev2 "
<< w1_dev2 << " w2_dev1 " << w2_dev1 << " w3_dev1 " << w3_dev1;
GraphDef gdef;
std::vector<string> fetches;
std::vector<string> targets;
{
Graph g(OpRegistry::Global());
auto a = test::graph::Constant(&g, Tensor(1));
a->set_assigned_device_name(w1_dev1);
auto a_err = test::graph::Error(&g, a, "fantasia!");
a_err->set_assigned_device_name(w1_dev1);
auto a_delay = test::graph::Delay(&g, a, Microseconds(5000000));
a_delay->set_assigned_device_name(w1_dev2);
targets.push_back(a_delay->name());
fetches.push_back(a_err->name());
auto b = test::graph::Constant(&g, Tensor(3));
b->set_assigned_device_name(w2_dev1);
auto b2 = test::graph::Add(&g, b, a_err);
b2->set_assigned_device_name(w2_dev1);
fetches.push_back(b2->name());
auto c = test::graph::Constant(&g, Tensor(3));
c->set_assigned_device_name(w3_dev1);
auto c_delay = test::graph::Delay(&g, c, Microseconds(4000000));
c_delay->set_assigned_device_name(w3_dev1);
fetches.push_back(c_delay->name());
g.ToGraphDef(&gdef);
}
std::unique_ptr<Session> session(NewRemote(Options(master, 1)));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(gdef));
{
std::vector<Tensor> outputs;
Status status = session->Run({}, fetches, targets, &outputs);
LOG(INFO) << status;
EXPECT_FALSE(status.ok());
EXPECT_NE(status.ToString().find("fantasia!"), string::npos);
EXPECT_EQ(status.ToString().find("Cancelled"), string::npos);
EXPECT_EQ(status.ToString().find("Aborted"), string::npos);
EXPECT_EQ(status.code(), error::Code::INTERNAL);
}
TF_ASSERT_OK(session->Close());
Env::Default()->SleepForMicroseconds(2000000);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc/grpc_session.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc/grpc_session_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7bab9db0-18bd-4eb9-8086-4bfa1cf88e70 | cpp | tensorflow/tensorflow | rpc_rendezvous_mgr | tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.cc | tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr_test.cc | #include "tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/distributed_runtime/request_id.h"
#include "tensorflow/core/distributed_runtime/tensor_coding.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/distributed_runtime/worker_interface.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
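// RemoteRendezvous implementation that fetches tensors from other workers
// via the gRPC RecvTensor RPC.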
class RpcRemoteRendezvous : public BaseRemoteRendezvous {
public:
RpcRemoteRendezvous(const WorkerEnv* env, int64_t step_id)
: BaseRemoteRendezvous(env, step_id) {}
protected:
void RecvFromRemoteAsync(const Rendezvous::ParsedKey& parsed,
const Rendezvous::Args& args,
DoneCallback done) override;
private:
~RpcRemoteRendezvous() override {}
RpcRemoteRendezvous(const RpcRemoteRendezvous&) = delete;
void operator=(const RpcRemoteRendezvous&) = delete;
};
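// Tracks a single in-flight RecvTensor RPC: it owns the request/response
// protos, the CallOptions used for cancellation, and the done callback that
// fires once the tensor (or an error) arrives. Instances are recycled
// through a free list (below) rather than heap-allocated per Recv.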
class RpcRecvTensorCall : public BaseRecvTensorCall {
public:
RpcRecvTensorCall() : wi_(nullptr), dst_device_(nullptr) {}
void Init(WorkerInterface* wi, int64_t step_id, StringPiece key,
AllocatorAttributes alloc_attrs, Device* dst_device,
const Rendezvous::Args& recv_args, Rendezvous::DoneCallback done) {
wi_ = wi;
alloc_attrs_ = alloc_attrs;
dst_device_ = dst_device;
recv_args_ = recv_args;
done_ = std::move(done);
req_.set_step_id(step_id);
req_.set_rendezvous_key(key.data(), key.size());
req_.set_request_id(GetUniqueRequestId());
}
void Reset() {
DCHECK_EQ(static_cast<WorkerInterface*>(nullptr), wi_)
<< "Leaking WorkerInterface in RpcRecvTensorCall::Reset().";
alloc_attrs_ = AllocatorAttributes();
dst_device_ = nullptr;
req_.Clear();
resp_.Clear();
{
mutex_lock l(mu_);
status_ = absl::OkStatus();
}
done_ = nullptr;
}
~RpcRecvTensorCall() override {
CHECK_EQ(static_cast<WorkerInterface*>(nullptr), wi_)
<< "Leaking WorkerInterface in RpcRecvTensorCall destructor.";
}
void Start(std::function<void()> recv_done) override {
StartRTCall(std::move(recv_done));
}
void StartAbort(const Status& s) override {
{
mutex_lock l(mu_);
status_.Update(s);
}
opts_.StartCancel();
}
Status status() const override {
mutex_lock l(mu_);
return status_;
}
void ReleaseWorker(WorkerCacheInterface* worker_cache) {
DCHECK_NE(static_cast<WorkerInterface*>(nullptr), wi_)
<< "RpcRecvTensorCall::ReleaseWorker() called twice.";
worker_cache->ReleaseWorker(src_worker_, wi_);
wi_ = nullptr;
}
const Tensor& tensor() const { return resp_.tensor(); }
bool is_dead() const { return resp_.metadata().is_dead(); }
Device* dst_device() const { return dst_device_; }
const Rendezvous::Args& recv_args() const { return recv_args_; }
const Rendezvous::DoneCallback& done() const { return done_; }
private:
friend class RpcRemoteRendezvous;
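  // Issues the RecvTensor RPC, then re-checks status_ in case StartAbort()
  // raced with the call setup. The abort_checked notification keeps the
  // completion callback from observing state before that check finishes.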
void StartRTCall(std::function<void()> recv_done) {
resp_.InitAlloc(dst_device_, alloc_attrs_);
auto abort_checked = std::make_shared<Notification>();
auto cb = [this, abort_checked,
recv_done = std::move(recv_done)](const Status& s) {
abort_checked->WaitForNotification();
if (!s.ok()) {
mutex_lock l(mu_);
status_.Update(s);
}
recv_done();
};
wi_->RecvTensorAsync(&opts_, &req_, &resp_, std::move(cb));
Status s;
{
mutex_lock l(mu_);
s = status_;
}
if (!s.ok()) {
opts_.StartCancel();
}
abort_checked->Notify();
}
string src_worker_;
string src_rel_device_;
WorkerInterface* wi_;
AllocatorAttributes alloc_attrs_;
Device* dst_device_;
CallOptions opts_;
RecvTensorRequest req_;
TensorResponse resp_;
Rendezvous::Args recv_args_;
Rendezvous::DoneCallback done_;
mutable mutex mu_;
Status status_ TF_GUARDED_BY(mu_);
RpcRecvTensorCall(const RpcRecvTensorCall&) = delete;
void operator=(const RpcRecvTensorCall&) = delete;
};
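// A simple free list that recycles RpcRecvTensorCall objects, keeping at
// most kMaxObjects of them, to avoid an allocation on every remote Recv.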
class RpcRecvTensorFreeList {
public:
RpcRecvTensorFreeList() {}
~RpcRecvTensorFreeList() {
for (size_t i = 0; i < objects_.size(); i++) {
delete objects_[i];
}
}
RpcRecvTensorCall* New() {
{
mutex_lock l(mu_);
if (!objects_.empty()) {
RpcRecvTensorCall* result = objects_.back();
objects_.pop_back();
return result;
}
}
return new RpcRecvTensorCall;
}
void Release(RpcRecvTensorCall* obj) {
obj->Reset();
{
mutex_lock l(mu_);
if (objects_.size() < kMaxObjects) {
objects_.push_back(obj);
return;
}
}
delete obj;
}
private:
static constexpr int kMaxObjects = 1000;
mutex mu_;
std::vector<RpcRecvTensorCall*> objects_ TF_GUARDED_BY(mu_);
};
static RpcRecvTensorFreeList* get_call_freelist() {
static RpcRecvTensorFreeList* call_freelist = new RpcRecvTensorFreeList();
return call_freelist;
}
void RpcRemoteRendezvous::RecvFromRemoteAsync(
const Rendezvous::ParsedKey& parsed, const Rendezvous::Args& recv_args,
DoneCallback done) {
CHECK(is_initialized());
Status s;
RpcRecvTensorCall* call = get_call_freelist()->New();
if (!DeviceNameUtils::SplitDeviceName(parsed.src_device, &call->src_worker_,
&call->src_rel_device_)) {
s = errors::Internal(parsed.src_device,
" is invalid remote source device.");
}
WorkerSession* sess = session();
std::shared_ptr<WorkerCacheInterface> worker_cache =
sess->GetSharedWorkerCache();
WorkerInterface* rwi = worker_cache->GetOrCreateWorker(call->src_worker_);
if (s.ok() && rwi == nullptr) {
s = errors::Internal("No worker known as ", call->src_worker_);
}
Device* dst_device;
if (s.ok()) {
s = sess->device_mgr()->LookupDevice(parsed.dst_device, &dst_device);
}
if (!s.ok()) {
if (rwi != nullptr) {
sess->worker_cache()->ReleaseWorker(call->src_worker_, rwi);
}
get_call_freelist()->Release(call);
done(s, Args(), recv_args, Tensor{}, false);
return;
}
call->Init(rwi, step_id_, parsed.FullKey(), recv_args.alloc_attrs, dst_device,
recv_args, std::move(done));
RegisterCall(call, recv_args);
if (!call->status().ok()) {
DeregisterCall(call, recv_args);
call->ReleaseWorker(sess->worker_cache());
call->done()(call->status(), Args(), Args(), Tensor(), false);
get_call_freelist()->Release(call);
return;
}
Ref();
call->Start([this, call, recv_args, worker_cache]() {
DeregisterCall(call, recv_args);
Status s = call->status();
call->ReleaseWorker(session()->worker_cache());
call->done()(s, Args(), call->recv_args(), call->tensor(), call->is_dead());
get_call_freelist()->Release(call);
Unref();
});
}
}
RpcRendezvousMgr::RpcRendezvousMgr(const WorkerEnv* env)
: BaseRendezvousMgr(env) {}
tsl::core::RefCountPtr<BaseRemoteRendezvous> RpcRendezvousMgr::Create(
int64_t step_id, const WorkerEnv* worker_env) {
return tsl::core::RefCountPtr<BaseRemoteRendezvous>(
new RpcRemoteRendezvous(worker_env, step_id));
}
} | #include "tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/distributed_runtime/test_utils.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/control_flow.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
Tensor V(const string& content) {
Tensor tensor(DT_STRING, TensorShape({}));
tensor.scalar<tstring>()() = content;
return tensor;
}
string V(const Tensor& tensor) {
CHECK_EQ(tensor.dtype(), DT_STRING);
CHECK(TensorShapeUtils::IsScalar(tensor.shape()));
return tensor.scalar<tstring>()();
}
Rendezvous::ParsedKey MakeKey(const string& s) {
Rendezvous::ParsedKey key;
CHECK(Rendezvous::ParseKey(s, &key).ok());
return key;
}
namespace {
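// Fake worker whose RecvTensorAsync always succeeds after a random delay of
// up to 100ms, standing in for a real remote worker in these tests.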
class DummyWorker : public TestWorkerInterface {
public:
void RecvTensorAsync(CallOptions* opts, const RecvTensorRequest* request,
TensorResponse* response, StatusCallback done) override {
SchedClosure([done = std::move(done)]() {
const int64_t t_us = random::New64() % 100 * 1000;
Env::Default()->SleepForMicroseconds(t_us);
done(absl::OkStatus());
});
}
};
class DummyWorkerCache : public WorkerCacheInterface {
void ListWorkers(std::vector<string>* workers) const override {}
void ListWorkersInJob(const string& job_name,
std::vector<string>* workers) const override {}
WorkerInterface* GetOrCreateWorker(const string& target) override {
if (dummy_remote_worker_ == nullptr) {
dummy_remote_worker_ = new DummyWorker;
}
return dummy_remote_worker_;
}
Status GetEagerClientCache(
std::unique_ptr<eager::EagerClientCache>* eager_client_cache) override {
return errors::Unimplemented("Unimplemented.");
}
Status GetCoordinationClientCache(
std::unique_ptr<CoordinationClientCache>* coord_client_cache) override {
return errors::Unimplemented("Unimplemented.");
}
bool GetDeviceLocalityNonBlocking(const string& device,
DeviceLocality* locality) override {
return false;
}
void GetDeviceLocalityAsync(const string& device, DeviceLocality* locality,
StatusCallback done) override {}
private:
DummyWorker* dummy_remote_worker_ = nullptr;
};
static Device* CreateDevice(const char* type, const char* name) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr) : Device(nullptr, attr) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
};
DeviceAttributes attr;
attr.set_name(name);
attr.set_device_type(type);
return new FakeDevice(attr);
}
static DeviceMgr* CreateDeviceMgr() {
std::unique_ptr<Device> d0(
CreateDevice("CPU", "/job:mnist/replica:1/task:2/cpu:1"));
std::vector<std::unique_ptr<Device>> devices;
devices.emplace_back(std::move(d0));
return new StaticDeviceMgr(std::move(devices));
}
}
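// Fixture that wires a DummyWorkerCache and a single fake CPU device into a
// WorkerSession for the RpcRendezvousMgr under test.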
class RpcRendezvousMgrTest : public ::testing::Test {
protected:
RpcRendezvousMgrTest()
: cache_(new DummyWorkerCache),
worker_session_("rpc_session", "/job:mnist/replica:1/task:2",
std::unique_ptr<WorkerCacheInterface>(cache_),
std::unique_ptr<DeviceMgr>(CreateDeviceMgr()),
std::unique_ptr<GraphMgr>(), nullptr,
[](WorkerSession* worker_session, bool called,
DeviceMgr* remote_device_mgr) { return nullptr; }),
rmgr_(&env) {
env.env = Env::Default();
}
DummyWorkerCache* cache_;
WorkerEnv env;
WorkerSession worker_session_;
RpcRendezvousMgr rmgr_;
};
TEST_F(RpcRendezvousMgrTest, LocalSendRecv) {
const int64_t step_id = 123;
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
{
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
Rendezvous::Args args;
TF_ASSERT_OK(rendez->Send(key, args, V("peach"), false));
}
{
Tensor val(DT_FLOAT);
bool val_dead = false;
TF_ASSERT_OK(rmgr_.RecvLocal(step_id, key, &val, &val_dead));
EXPECT_EQ(V(val), "peach");
}
rmgr_.Cleanup(step_id);
}
TEST_F(RpcRendezvousMgrTest, LocalAbort) {
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
{
const int64_t step_id = 123;
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
SchedClosure([this, rendez = rendez.GetNewRef()]() {
env.env->SleepForMicroseconds(100 * 1000);
rendez->StartAbort(errors::Aborted(""));
});
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
EXPECT_TRUE(errors::IsAborted(rendez->Recv(key, args, &val, &val_dead)));
}
{
const int64_t step_id = 321;
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
SchedClosure([this, step_id]() {
env.env->SleepForMicroseconds(100 * 1000);
rmgr_.Cleanup(step_id);
});
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
EXPECT_TRUE(errors::IsAborted(rendez->Recv(key, args, &val, &val_dead)));
}
}
TEST_F(RpcRendezvousMgrTest, LocalCancel) {
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
auto* cm = new CancellationManager();
const int64_t step_id = 123;
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
Notification n;
SchedClosure([this, cm, &n]() {
env.env->SleepForMicroseconds(100 * 1000);
cm->StartCancel();
n.Notify();
});
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
args.cancellation_manager = cm;
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
EXPECT_TRUE(errors::IsCancelled(rendez->Recv(key, args, &val, &val_dead)));
n.WaitForNotification();
delete cm;
}
TEST_F(RpcRendezvousMgrTest, CancelAfterReceived) {
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
auto* cm = new CancellationManager();
const int64_t step_id = 123;
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
Notification n;
SchedClosure([this, rendez = rendez.get(), key, cm, &n]() {
env.env->SleepForMicroseconds(100 * 1000);
TF_ASSERT_OK(rendez->Send(key, Rendezvous::Args(), V("peach"), false));
cm->StartCancel();
n.Notify();
});
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
args.cancellation_manager = cm;
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
TF_ASSERT_OK(rendez->Recv(key, args, &val, &val_dead));
EXPECT_EQ(V(val), "peach");
n.WaitForNotification();
delete cm;
}
namespace {
class DummyDeviceContext : public DeviceContext {
public:
explicit DummyDeviceContext(int stream_id) : stream_id_(stream_id) {}
~DummyDeviceContext() override {}
int stream_id() const { return stream_id_; }
private:
const int stream_id_;
};
}
TEST_F(RpcRendezvousMgrTest, TransferDummyDeviceContext) {
DummyDeviceContext* dc = new DummyDeviceContext(123);
const int64_t step_id = 123;
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
{
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
Rendezvous::Args args;
args.device_context = dc;
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
TF_ASSERT_OK(rendez->Send(key, args, V("peach"), false));
}
{
Notification n;
rmgr_.RecvLocalAsync(
step_id, key,
[&n](const Status& s, const Rendezvous::Args send_args,
const Rendezvous::Args recv_args, const Tensor& val,
bool is_dead) {
auto send_dev_context =
static_cast<DummyDeviceContext*>(send_args.device_context);
CHECK_EQ(123, send_dev_context->stream_id());
CHECK_EQ(V(val), "peach");
n.Notify();
});
n.WaitForNotification();
}
rmgr_.Cleanup(step_id);
dc->Unref();
}
TEST_F(RpcRendezvousMgrTest, RemoteRecvOne) {
const int64_t step_id = 123;
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:worker/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
{
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
Rendezvous::Args args;
Tensor val(DT_STRING);
bool val_dead = false;
TF_ASSERT_OK(rendez->Recv(key, args, &val, &val_dead));
}
rmgr_.Cleanup(step_id);
}
TEST_F(RpcRendezvousMgrTest, RemoteRecvAsyncMany) {
const int64_t step_id = 123;
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:worker/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
{
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
Rendezvous::Args args;
int num_requests = 10000;
Tensor val(DT_STRING);
mutex mu_;
Status status = absl::OkStatus();
BlockingCounter counter(num_requests);
for (int i = 0; i < num_requests; i++) {
rendez->RecvAsync(
key, args,
[&mu_, &status, &counter](const Status& s, const Rendezvous::Args&,
const Rendezvous::Args&, const Tensor&,
const bool) {
{
mutex_lock l(mu_);
status.Update(s);
}
counter.DecrementCount();
});
}
counter.Wait();
TF_ASSERT_OK(status);
}
rmgr_.Cleanup(step_id);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4e72922f-72c7-4080-9b76-440f602382cd | cpp | tensorflow/tensorflow | grpc_tensor_coding | tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.cc | tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding_test.cc | #include "tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.h"
#include "grpcpp/support/byte_buffer.h"
#include "grpcpp/support/slice.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_reference.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/io/proto_encode_helper.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/protobuf/worker.pb.h"
namespace tensorflow {
namespace grpc {
void EncodeRecvTensorResponseToByteBuffer(const RecvTensorResponse& proto,
::grpc::ByteBuffer* result) {
::grpc::Slice slice(proto.ByteSizeLong());
proto.SerializeWithCachedSizesToArray(
const_cast<uint8*>(reinterpret_cast<const uint8*>(slice.begin())));
::grpc::ByteBuffer tmp(&slice, 1);
result->Swap(&tmp);
}
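// The helpers below hand-encode the RecvTensorResponse wire format so that,
// for dtypes whose representation is a flat byte array, the tensor contents
// can be appended (or shared by reference) without first copying them into a
// TensorProto. The output must stay byte-compatible with protobuf encoding.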
static int VarLengthEncodingSize(uint32 tag, size_t bytes) {
return core::VarintLength(tag << 3) + core::VarintLength(bytes) + bytes;
}
static int SkeletonEncodingSizeUpperBound(const Tensor& val) {
  static const int kVarintMax64 = 10;  // Maximum byte length of a varint64.
  const int ndims = val.shape().dims();
  return (2 * kVarintMax64) +            // dtype tag and value.
         (ndims * (4 * kVarintMax64));   // Shape: up to 4 varints per dim.
}
static void EncodeSkeleton(const Tensor& val, io::ProtoEncodeHelper* e) {
e->WriteUint64(TensorProto::kDtypeFieldNumber, val.dtype());
const int ndims = val.shape().dims();
int tensor_shape_bytes = 0;
for (int d = 0; d < ndims; d++) {
int64_t dim_size = val.shape().dim_size(d);
    tensor_shape_bytes +=
        2 +                            // TensorShapeProto.dim tag and length.
        1 +                            // TensorShapeProto.Dim.size tag.
        core::VarintLength(dim_size);  // The dimension size value itself.
}
if (tensor_shape_bytes > 0) {
e->WriteVarlengthBeginning(TensorProto::kTensorShapeFieldNumber,
tensor_shape_bytes);
for (int d = 0; d < ndims; d++) {
int64_t dim_size = val.shape().dim_size(d);
      int64_t dim_varlen = 1 +                            // Dim.size tag.
                           core::VarintLength(dim_size);  // Dim.size value.
e->WriteVarlengthBeginning(TensorShapeProto::kDimFieldNumber, dim_varlen);
e->WriteUint64(TensorShapeProto_Dim::kSizeFieldNumber, dim_size);
}
}
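// Debug-mode-only check that the hand-rolled skeleton encoding above parses
// back into the same proto as one built through the generated protobuf API.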
#ifndef NDEBUG
{
TensorProto skeleton;
skeleton.set_dtype(val.dtype());
val.shape().AsProto(skeleton.mutable_tensor_shape());
string tensor_except_contents;
skeleton.AppendToString(&tensor_except_contents);
TensorProto skeleton2;
skeleton2.ParseFromString(string(e->data(), e->size()));
    string out;
    skeleton2.AppendToString(&out);  // Reserialize the hand-rolled encoding.
DCHECK_EQ(tensor_except_contents, out) << skeleton.DebugString() << " vs\n"
<< skeleton2.DebugString();
}
#endif
}
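// Serializes a RecvTensorResponse for `val` into `result`. For dtypes that
// can be memcpy'd, the header is hand-encoded, and tensor contents larger
// than kLargeTensorBytes are handed to gRPC as a second slice holding a
// reference on the tensor's buffer, avoiding a copy of the payload.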
void EncodeTensorToByteBuffer(bool is_dead, const Tensor& val, bool require_ack,
::grpc::ByteBuffer* result) {
  // Tensor contents larger than this are shared with gRPC as a separate
  // slice instead of being copied into the serialized buffer.
  const int kLargeTensorBytes = 1024;
  // Protocol buffers cannot represent messages of 2GB or more.
  const int64_t kProtoBufLimitBytes = 1LL << 31;
if (val.TotalBytes() > kProtoBufLimitBytes) {
size_t exceeded_bytes = val.TotalBytes() - kProtoBufLimitBytes;
LOG(FATAL) << "Cannot encode a Tensor that exceeds the 2GB protobuf limit. "
"Exceeded bytes: "
<< exceeded_bytes
<< ", tensor shape: " << val.shape().AsProto().DebugString();
}
RecvTensorResponse response;
if (is_dead) {
response.set_is_dead(is_dead);
}
response.set_require_ack(require_ack);
response.set_send_start_micros(Env::Default()->NowMicros());
if (!DataTypeCanUseMemcpy(val.dtype())) {
val.AsProtoTensorContent(response.mutable_tensor());
EncodeRecvTensorResponseToByteBuffer(response, result);
} else {
absl::InlinedVector<char, 128UL> skeleton(
SkeletonEncodingSizeUpperBound(val));
io::ProtoEncodeHelper e_skeleton(skeleton.data(), skeleton.size());
EncodeSkeleton(val, &e_skeleton);
StringPiece tdata = val.tensor_data();
uint32 overall_tensor_proto_bytesize =
(e_skeleton.size() +
VarLengthEncodingSize(TensorProto::kTensorContentFieldNumber,
tdata.size()));
string header;
response.AppendToString(&header);
size_t expected_size =
(header.size() +
VarLengthEncodingSize(RecvTensorResponse::kTensorFieldNumber,
overall_tensor_proto_bytesize));
bool share_tensor_slice_memory = (tdata.size() > kLargeTensorBytes);
size_t encoder_size = expected_size - tdata.size();
absl::InlinedVector<char, 1024UL> space(encoder_size);
io::ProtoEncodeHelper e(space.data(), space.size());
e.WriteRawBytes(header);
e.WriteVarlengthBeginning(RecvTensorResponse::kTensorFieldNumber,
overall_tensor_proto_bytesize);
e.WriteRawBytes(StringPiece(e_skeleton.data(), e_skeleton.size()));
e.WriteVarlengthBeginning(TensorProto::kTensorContentFieldNumber,
tdata.size());
::grpc::Slice slices[2];
int num_slices = 0;
{
size_t slice_len =
e.size() + (share_tensor_slice_memory ? 0 : tdata.size());
slices[0] = ::grpc::Slice(slice_len);
memcpy(const_cast<uint8_t*>(slices[0].begin()), e.data(), e.size());
if (!share_tensor_slice_memory) {
memcpy(const_cast<uint8_t*>(slices[0].begin()) + e.size(), tdata.data(),
tdata.size());
}
num_slices += 1;
}
if (share_tensor_slice_memory) {
const TensorBuffer* buf = DMAHelper::buffer(&val);
buf->Ref();
slices[1] = ::grpc::Slice(
const_cast<void*>(static_cast<const void*>(tdata.data())),
tdata.size(),
[](void* backing) { static_cast<TensorBuffer*>(backing)->Unref(); },
const_cast<TensorBuffer*>(buf));
num_slices += 1;
}
size_t total_bytes = 0;
for (int i = 0; i < num_slices; i++) {
total_bytes += slices[i].size();
}
CHECK_EQ(total_bytes, expected_size);
::grpc::ByteBuffer tmp(&slices[0], num_slices);
result->Swap(&tmp);
}
}
}
} | #include "tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.h"
#include "grpcpp/support/byte_buffer.h"
#include "grpcpp/support/slice.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/worker.pb.h"
namespace tensorflow {
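// Round-trips tensors through EncodeTensorToByteBuffer and verifies the
// bytes parse back into an equivalent RecvTensorResponse.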
class GrpcTensorCodingTest : public ::testing::Test {
public:
void Validate(const Tensor& t, bool is_dead) {
::grpc::ByteBuffer buf;
grpc::EncodeTensorToByteBuffer(is_dead, t, false, &buf);
std::vector<::grpc::Slice> slices;
(void)buf.Dump(&slices);
string tmp;
for (const auto& s : slices) {
tmp.append(reinterpret_cast<const char*>(s.begin()), s.size());
}
RecvTensorResponse response;
EXPECT_TRUE(response.ParseFromString(tmp));
EXPECT_EQ(response.is_dead(), is_dead);
Tensor result_tensor;
EXPECT_TRUE(result_tensor.FromProto(response.tensor()));
EXPECT_EQ(t.dtype(), result_tensor.dtype());
EXPECT_EQ(t.shape().DebugString(), result_tensor.shape().DebugString());
EXPECT_EQ(t.DebugString(), result_tensor.DebugString());
}
template <typename T>
void DoTest(DataType dt) {
gtl::InlinedVector<T, 4> v;
for (int elems = 0; elems <= 10000; elems++) {
if (elems < 100 || (elems % 1000 == 0)) {
Tensor a(dt, TensorShape({1, static_cast<int64_t>(v.size())}));
test::FillValues<T>(&a, v);
Validate(a, (elems == 0));
}
v.push_back(static_cast<T>(elems));
}
}
void DoTestForStrings(DataType dt) {
absl::InlinedVector<tstring, 4UL> v;
for (int elems = 0; elems <= 10000; elems++) {
if (elems < 100 || (elems % 1000 == 0)) {
Tensor a(dt, TensorShape({1, static_cast<int64_t>(v.size())}));
test::FillValues<tstring>(&a, v);
Validate(a, (elems == 0));
}
v.push_back(strings::StrCat("This is string ", elems));
}
}
};
TEST_F(GrpcTensorCodingTest, Simple) {
DoTest<float>(DT_FLOAT);
DoTest<double>(DT_DOUBLE);
DoTest<int32>(DT_INT32);
DoTest<uint16>(DT_UINT16);
DoTest<uint8>(DT_UINT8);
DoTest<int16>(DT_INT16);
DoTest<int8>(DT_INT8);
DoTest<complex64>(DT_COMPLEX64);
DoTest<complex128>(DT_COMPLEX128);
DoTest<int64_t>(DT_INT64);
DoTest<bool>(DT_BOOL);
DoTest<qint8>(DT_QINT8);
DoTest<quint8>(DT_QUINT8);
DoTest<qint16>(DT_QINT16);
DoTest<quint16>(DT_QUINT16);
DoTest<qint32>(DT_QINT32);
DoTest<bfloat16>(DT_BFLOAT16);
DoTest<Eigen::half>(DT_HALF);
}
TEST_F(GrpcTensorCodingTest, StringTensor) { DoTestForStrings(DT_STRING); }
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0ef74f11-3523-4202-9791-2c1beb9227af | cpp | tensorflow/tensorflow | grpc_worker_cache | tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.cc | tensorflow/core/distributed_runtime/rpc/grpc_worker_cache_test.cc | #include "tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.h"
#include "tensorflow/core/distributed_runtime/rpc/coordination/grpc_coordination_client.h"
#include "tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/distributed_runtime/worker_cache_logger.h"
#include "tensorflow/core/distributed_runtime/worker_cache_partial.h"
#include "tensorflow/core/distributed_runtime/worker_interface.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
namespace {
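// WorkerCache that hands out gRPC remote workers. The in-process
// local_worker_ is returned for local_target_; every remote target is
// assigned to one of the completion queues round-robin.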
class GrpcWorkerCache : public WorkerCachePartial {
public:
explicit GrpcWorkerCache(std::shared_ptr<GrpcChannelCache> channel_cache,
WorkerInterface* local_worker,
const string& local_target,
GrpcWorkerEnv* worker_env)
: local_target_(local_target),
local_worker_(local_worker),
channel_cache_(channel_cache),
worker_env_(worker_env),
next_round_robin_assignment_(0) {}
void ListWorkers(std::vector<string>* workers) const override {
channel_cache_->ListWorkers(workers);
}
void ListWorkersInJob(const string& job_name,
std::vector<string>* workers) const override {
channel_cache_->ListWorkersInJob(job_name, workers);
}
WorkerInterface* GetOrCreateWorker(const string& target) override {
if (target == local_target_) {
return local_worker_;
} else {
SharedGrpcChannelPtr channel = channel_cache_->FindWorkerChannel(target);
if (!channel) {
return nullptr;
}
size_t index = AssignWorkerToThread(target);
return NewGrpcRemoteWorker(
channel, worker_env_->GetCompletionQueue(index),
worker_env_->GetThreadPool(), &logger_, target);
}
}
void ReleaseWorker(const string& target, WorkerInterface* worker) override {
if (target == local_target_) {
CHECK_EQ(worker, local_worker_)
<< "Releasing a worker that was not returned by this WorkerCache";
} else {
WorkerCacheInterface::ReleaseWorker(target, worker);
}
}
Status GetEagerClientCache(
std::unique_ptr<eager::EagerClientCache>* eager_client_cache) override {
eager_client_cache->reset(eager::NewGrpcEagerClientCache(channel_cache_));
return absl::OkStatus();
}
Status GetCoordinationClientCache(std::unique_ptr<CoordinationClientCache>*
coordination_client_cache) override {
coordination_client_cache->reset(
NewGrpcCoordinationClientCache(channel_cache_));
return absl::OkStatus();
}
void SetLogging(bool v) override { logger_.SetLogging(v); }
void ClearLogs() override { logger_.ClearLogs(); }
bool RetrieveLogs(int64_t step_id, StepStats* ss) override {
return logger_.RetrieveLogs(step_id, ss);
}
private:
size_t AssignWorkerToThread(const string& target) {
mutex_lock lock(assignment_mu_);
auto it = target_assignments_.find(target);
if (it == target_assignments_.end()) {
it = target_assignments_
.insert(std::make_pair(target,
(next_round_robin_assignment_++) %
worker_env_->CompletionQueueSize()))
.first;
}
return it->second;
}
const string local_target_;
WorkerInterface* const local_worker_;
std::shared_ptr<GrpcChannelCache> channel_cache_;
WorkerCacheLogger logger_;
GrpcWorkerEnv* worker_env_;
mutex assignment_mu_;
std::unordered_map<std::string, size_t> target_assignments_
TF_GUARDED_BY(assignment_mu_);
size_t next_round_robin_assignment_ TF_GUARDED_BY(assignment_mu_);
};
}
GrpcWorkerEnv::GrpcWorkerEnv(size_t num_completion_queues, size_t num_threads)
: threadpool_(new thread::ThreadPool(
Env::Default(), ThreadOptions(), "GrpcWorkerEnvQueues", num_threads,
false, nullptr)),
threads_(num_completion_queues) {}
GrpcWorkerEnv::~GrpcWorkerEnv() { threads_.clear(); }
GrpcWorkerEnv::GrpcWorkerCacheThread::GrpcWorkerCacheThread() {
thread_.reset(Env::Default()->StartThread(
ThreadOptions(), "GrpcWorkerEnvPool", [this]() {
void* tag;
bool ok;
while (completion_queue_.Next(&tag, &ok)) {
GrpcClientCQTag* callback_tag = static_cast<GrpcClientCQTag*>(tag);
callback_tag->OnCompleted(ok);
}
}));
}
GrpcWorkerEnv::GrpcWorkerCacheThread::~GrpcWorkerCacheThread() {
completion_queue_.Shutdown();
thread_.reset();
}
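// Builds a GrpcWorkerEnv sized by TF_GRPC_WORKER_CACHE_QUEUES (default: 64
// completion queues) and TF_GRPC_WORKER_CACHE_THREADS (default: the number
// of schedulable CPUs).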
GrpcWorkerEnv* CreateGrpcWorkerEnv() {
int num_cpus = port::NumSchedulableCPUs();
int64_t num_completion_queues;
Status status = ReadInt64FromEnvVar("TF_GRPC_WORKER_CACHE_QUEUES", 64,
&num_completion_queues);
if (!status.ok()) {
LOG(ERROR) << "Error parsing TF_GRPC_WORKER_CACHE_QUEUES: " << status;
}
int64_t num_threads;
status = ReadInt64FromEnvVar("TF_GRPC_WORKER_CACHE_THREADS", num_cpus,
&num_threads);
if (!status.ok()) {
LOG(ERROR) << "Error parsing TF_GRPC_WORKER_CACHE_THREADS: " << status;
}
return new GrpcWorkerEnv(num_completion_queues, num_threads);
}
WorkerCacheInterface* NewGrpcWorkerCache(std::shared_ptr<GrpcChannelCache> cc,
GrpcWorkerEnv* worker_env) {
  return new GrpcWorkerCache(cc, /*local_worker=*/nullptr, /*local_target=*/"",
                             worker_env);
}
WorkerCacheInterface* NewGrpcWorkerCacheWithLocalWorker(
std::shared_ptr<GrpcChannelCache> cc, GrpcWorkerEnv* worker_env,
WorkerInterface* local_worker, const string& local_target) {
return new GrpcWorkerCache(cc, local_worker, local_target, worker_env);
}
} | #include "tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_channel.h"
#include "tensorflow/core/distributed_runtime/test_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/threadpool.h"
namespace tensorflow {
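// Looks up workers against a three-task job spec: known targets must yield a
// worker, an out-of-range task must yield nullptr, and the local target must
// return exactly the worker pointer supplied by the caller.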
TEST(GrpcWorkerCacheTest, NewGrpcWorkerCache) {
GrpcChannelSpec spec;
TF_ASSERT_OK(
spec.AddHostPortsJob("worker", {{0, "a:0"}, {1, "b:1"}, {2, "c:2"}}));
ChannelCreationFunction channel_func =
ConvertToChannelCreationFunction(NewHostPortGrpcChannel);
auto channel_cache = std::shared_ptr<GrpcChannelCache>(
NewGrpcChannelCache(spec, channel_func));
std::unique_ptr<GrpcWorkerEnv> grpc_worker_env(CreateGrpcWorkerEnv());
std::unique_ptr<WorkerCacheInterface> worker_cache(
NewGrpcWorkerCache(channel_cache, grpc_worker_env.get()));
WorkerInterface* wi;
wi = worker_cache->GetOrCreateWorker("/job:worker/replica:0/task:0");
EXPECT_NE(wi, nullptr);
worker_cache->ReleaseWorker("/job:worker/replica:0/task:0", wi);
wi = worker_cache->GetOrCreateWorker("/job:worker/replica:0/task:1");
EXPECT_NE(wi, nullptr);
worker_cache->ReleaseWorker("/job:worker/replica:0/task:1", wi);
wi = worker_cache->GetOrCreateWorker("/job:worker/replica:0/task:2");
EXPECT_NE(wi, nullptr);
worker_cache->ReleaseWorker("/job:worker/replica:0/task:2", wi);
wi = worker_cache->GetOrCreateWorker("/job:worker/replica:0/task:3");
EXPECT_EQ(wi, nullptr);
std::unique_ptr<TestWorkerInterface> local_wi;
worker_cache.reset(NewGrpcWorkerCacheWithLocalWorker(
channel_cache, grpc_worker_env.get(), local_wi.get(), "local_target"));
wi = worker_cache->GetOrCreateWorker("local_target");
EXPECT_EQ(wi, local_wi.get());
}
TEST(GrpcWorkerCacheTest, DestructWorkerCacheInThreadPool) {
GrpcChannelSpec spec;
TF_ASSERT_OK(
spec.AddHostPortsJob("worker", {{0, "a:0"}, {1, "b:1"}, {2, "c:2"}}));
ChannelCreationFunction channel_func =
ConvertToChannelCreationFunction(NewHostPortGrpcChannel);
auto channel_cache = std::shared_ptr<GrpcChannelCache>(
NewGrpcChannelCache(spec, channel_func));
std::unique_ptr<GrpcWorkerEnv> grpc_worker_env(CreateGrpcWorkerEnv());
WorkerCacheInterface* worker_cache =
NewGrpcWorkerCache(channel_cache, grpc_worker_env.get());
thread::ThreadPool* tp = grpc_worker_env->GetThreadPool();
Notification n;
tp->Schedule([worker_cache, &n] {
delete worker_cache;
n.Notify();
});
n.WaitForNotification();
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc/grpc_worker_cache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e680641a-da49-449a-bb3e-3767970a1e11 | cpp | tensorflow/tensorflow | grpc_eager_client | tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.cc | tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client_test.cc | #include "tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.h"
#include <cstdint>
#include <string>
#include "grpcpp/generic/generic_stub.h"
#include "xla/tsl/distributed_runtime/call_options.h"
#include "tensorflow/core/distributed_runtime/call_options.h"
#include "tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_client_cq_tag.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_state.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/protobuf/core_platform_payloads.pb.h"
#include "tensorflow/core/protobuf/eager_service.pb.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
namespace eager {
namespace {
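// Streaming enqueue is on by default and can be disabled by setting
// TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE=false.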
bool EnableStreaming() {
bool result;
TF_CHECK_OK(ReadBoolFromEnvVar("TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE",
true, &result));
return result;
}
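// Ref-counted completion-queue poller. The constructor takes a self-reference
// so the polling thread can outlive external owners; once the thread observes
// that it holds the last reference, it shuts the queue down and releases
// itself on a scheduled closure.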
class GrpcEagerClientThread : public core::RefCounted {
public:
GrpcEagerClientThread() {
Ref();
thread_.reset(Env::Default()->StartThread(
ThreadOptions(), "eager_client_thread", [this]() {
void* tag;
bool ok;
while (completion_queue_.Next(&tag, &ok)) {
VLOG(4) << "GrpcEagerClientThread got next tag";
GrpcClientCQTag* callback_tag = static_cast<GrpcClientCQTag*>(tag);
callback_tag->OnCompleted(ok);
VLOG(4) << "GrpcEagerClientThread blocking for next tag";
if (RefCountIsOne()) {
break;
}
}
VLOG(4) << "GrpcEagerClientThread exiting";
completion_queue_.Shutdown();
Env::Default()->SchedClosure([this]() { this->Unref(); });
}));
}
~GrpcEagerClientThread() override {}
::grpc::CompletionQueue* completion_queue() { return &completion_queue_; }
private:
::grpc::CompletionQueue completion_queue_;
std::unique_ptr<Thread> thread_;
};
class GrpcEagerClient : public EagerClient {
public:
GrpcEagerClient(const tensorflow::SharedGrpcChannelPtr& channel,
GrpcEagerClientThread* thread, const string& target)
: stub_(channel), thread_(thread), target_(target) {
thread_->Ref();
cq_ = thread->completion_queue();
}
~GrpcEagerClient() override { thread_->Unref(); }
bool allow_multiple_pending_requests() const override {
return EnableStreaming();
}
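  // Generates the simple unary RPC stubs. Each call allocates an RPCState
  // that is driven by the shared completion queue until the reply (or error)
  // arrives.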
#define CLIENT_METHOD(method) \
void method##Async(const method##Request* request, \
method##Response* response, StatusCallback done) \
override { \
StatusCallback done_wrapped = callback_wrapper(std::move(done)); \
new RPCState<protobuf::Message>( \
&stub_, cq_, "/tensorflow.eager.EagerService/" #method, *request, \
        response, std::move(done_wrapped), /*call_opts=*/nullptr,            \
        /*threadpool=*/nullptr, /*max_retries=*/0, /*fail_fast=*/true,       \
        &target_);                                                           \
}
CLIENT_METHOD(CreateContext);
CLIENT_METHOD(UpdateContext);
CLIENT_METHOD(WaitQueueDone);
CLIENT_METHOD(KeepAlive);
#undef CLIENT_METHOD
#define CLIENT_METHOD_WITH_TIMEOUT_AND_RETRIES(method) \
void method##Async(const method##Request* request, \
method##Response* response, StatusCallback done, \
int64_t init_timeout_in_ms, int retries) override { \
CallOptions* call_ops = nullptr; \
StatusCallback done_wrapped; \
if (init_timeout_in_ms > 0) { \
call_ops = new CallOptions; \
call_ops->SetTimeout(init_timeout_in_ms); \
auto new_done = [call_ops, done = std::move(done)](const Status& s) { \
done(s); \
delete call_ops; \
}; \
done_wrapped = callback_wrapper(new_done); \
} else { \
done_wrapped = callback_wrapper(std::move(done)); \
} \
new RPCState<protobuf::Message>( \
&stub_, cq_, "/tensorflow.eager.EagerService/" #method, *request, \
        response, std::move(done_wrapped), call_ops, /*threadpool=*/nullptr, \
        /*max_retries=*/retries, /*fail_fast=*/true, &target_);              \
}
CLIENT_METHOD_WITH_TIMEOUT_AND_RETRIES(CreateContext);
#undef CLIENT_METHOD_WITH_TIMEOUT_AND_RETRIES
#define CLIENT_CANCELABLE_METHOD(method) \
void method##Async(CallOptions* call_opts, const method##Request* request, \
method##Response* response, StatusCallback done) \
override { \
StatusCallback done_wrapped = callback_wrapper(std::move(done)); \
new RPCState<protobuf::Message>( \
&stub_, cq_, "/tensorflow.eager.EagerService/" #method, *request, \
        response, std::move(done_wrapped), call_opts, /*threadpool=*/nullptr, \
        /*max_retries=*/0, /*fail_fast=*/true, &target_);                     \
}
CLIENT_CANCELABLE_METHOD(Enqueue);
CLIENT_CANCELABLE_METHOD(RunComponentFunction);
#undef CLIENT_CANCELABLE_METHOD
void CloseContextAsync(const CloseContextRequest* request,
CloseContextResponse* response,
StatusCallback done) override {
StatusCallback done_wrapped = callback_wrapper(std::move(done));
new RPCState<protobuf::Message>(
&stub_, cq_, "/tensorflow.eager.EagerService/CloseContext", *request,
        response, std::move(done_wrapped), /*call_opts=*/nullptr,
        /*threadpool=*/nullptr, /*max_retries=*/0, /*fail_fast=*/true,
        &target_);
VLOG(1) << "Sending RPC to close remote eager context "
<< request->DebugString();
mutex_lock l(mu_);
const auto& it = enqueue_dispatchers_.find(request->context_id());
if (it != enqueue_dispatchers_.end()) {
it->second.CancelCall();
enqueue_dispatchers_.erase(it);
} else if (EnableStreaming()) {
LOG(ERROR) << "Remote EagerContext with id " << request->context_id()
<< " does not seem to exist.";
}
}
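  // When streaming is enabled, enqueue requests for a context are multiplexed
  // onto a single StreamingRPCDispatcher keyed by context id; otherwise each
  // request falls back to a blocking unary EnqueueAsync call.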
void StreamingEnqueueAsync(bool enable_streaming_enqueue,
CallOptions* call_opts,
const EnqueueRequest* request,
EnqueueResponse* response,
StatusCallback done) override {
StatusCallback done_wrapped = callback_wrapper(std::move(done));
if (EnableStreaming() && enable_streaming_enqueue) {
mutex_lock l(mu_);
auto it = enqueue_dispatchers_.find(request->context_id());
if (it == enqueue_dispatchers_.end()) {
auto it_and_bool = enqueue_dispatchers_.emplace(
std::piecewise_construct,
std::forward_as_tuple(request->context_id()),
std::forward_as_tuple(
&stub_, cq_,
"/tensorflow.eager.EagerService/StreamingEnqueue"));
it = it_and_bool.first;
}
it->second.SendNextRequest(*request, response, std::move(done_wrapped));
} else {
Notification n;
Status status;
EnqueueAsync(call_opts, request, response,
[&n, &status](const Status& s) {
status.Update(s);
n.Notify();
});
n.WaitForNotification();
done_wrapped(status);
}
}
private:
::grpc::GenericStub stub_;
const GrpcEagerClientThread* thread_;
const string target_;
::grpc::CompletionQueue* cq_;
mutable mutex mu_;
std::unordered_map<uint64, StreamingRPCDispatcher<EnqueueResponse>>
enqueue_dispatchers_ TF_GUARDED_BY(mu_);
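  // Wraps `done` so the client stays referenced for the duration of the RPC
  // and error statuses are recorded in the eager client error metrics.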
StatusCallback callback_wrapper(StatusCallback done) {
Ref();
return [this, done = std::move(done)](const Status& status) {
done(status);
this->Unref();
if (TF_PREDICT_FALSE(!status.ok())) {
auto error_source_payload = status.GetPayload(kErrorSource);
if (error_source_payload.has_value()) {
tensorflow::core::platform::ErrorSourceProto error_source_proto;
error_source_proto.ParseFromString(
std::string(*error_source_payload));
metrics::UpdateEagerClientErrorCounter(
error_source_proto.ErrorSource_Name(
error_source_proto.error_source()),
absl::StatusCodeToString(status.code()));
} else {
metrics::UpdateEagerClientErrorCounter(
"unknown", absl::StatusCodeToString(status.code()));
}
}
};
}
};
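// Caches one GrpcEagerClient per target and spreads targets round-robin
// across a fixed pool of four completion-queue threads.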
class GrpcEagerClientCache : public EagerClientCache {
public:
explicit GrpcEagerClientCache(
std::shared_ptr<tensorflow::GrpcChannelCache> cache)
: next_round_robin_assignment_(0), cache_(cache), threads_(4) {
for (int i = 0, end = threads_.size(); i < end; i++) {
threads_[i].reset(new GrpcEagerClientThread());
}
}
~GrpcEagerClientCache() override { threads_.clear(); }
Status GetClient(const string& target,
core::RefCountPtr<EagerClient>* client) override {
mutex_lock l(clients_mu_);
auto it = clients_.find(target);
if (it == clients_.end()) {
tensorflow::SharedGrpcChannelPtr shared =
cache_->FindWorkerChannel(target);
if (shared == nullptr) {
return errors::InvalidArgument("Client for target ", target,
" not found.");
}
int assigned_index = AssignClientToThread(target);
GrpcEagerClientThread* thread = threads_[assigned_index].get();
core::RefCountPtr<EagerClient> worker(
new GrpcEagerClient(shared, thread, target));
it = clients_.emplace(target, std::move(worker)).first;
}
it->second->Ref();
client->reset(it->second.get());
return absl::OkStatus();
}
private:
mutex assignment_mu_;
std::unordered_map<std::string, size_t> target_assignments_
TF_GUARDED_BY(assignment_mu_);
size_t next_round_robin_assignment_ TF_GUARDED_BY(assignment_mu_);
size_t AssignClientToThread(const string& target) {
mutex_lock lock(assignment_mu_);
auto it = target_assignments_.find(target);
if (it == target_assignments_.end()) {
it = target_assignments_
.insert(std::make_pair(
target, (next_round_robin_assignment_++) % threads_.size()))
.first;
}
return it->second;
}
std::shared_ptr<tensorflow::GrpcChannelCache> cache_;
mutable mutex clients_mu_;
std::unordered_map<string, core::RefCountPtr<EagerClient>> clients_
TF_GUARDED_BY(clients_mu_);
std::vector<core::RefCountPtr<GrpcEagerClientThread>> threads_;
};
}  // namespace
EagerClientCache* NewGrpcEagerClientCache(
std::shared_ptr<tensorflow::GrpcChannelCache> channel) {
return new GrpcEagerClientCache(channel);
}
}  // namespace eager
} | #include "tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_channel.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace eager {
TEST(GrpcEagerClientCache, TestGetClientThreadSafety) {
GrpcChannelSpec spec;
TF_ASSERT_OK(spec.AddHostPortsJob("worker", {{0, "a:1"},
{1, "b:2"},
{2, "c:3"},
{3, "d:4"},
{4, "e:5"},
{5, "f:6"}}));
ChannelCreationFunction channel_func =
ConvertToChannelCreationFunction(NewHostPortGrpcChannel);
auto channel_cache = std::shared_ptr<GrpcChannelCache>(
NewGrpcChannelCache(spec, channel_func));
std::unique_ptr<EagerClientCache> client_cache(
NewGrpcEagerClientCache(channel_cache));
const int num_calls = 10;
BlockingCounter counter(num_calls);
for (int i = 0; i < num_calls; i++) {
Env::Default()->SchedClosure([&client_cache, i, &counter]() {
string target = strings::StrCat("/job:worker/replica:0/task:", i);
core::RefCountPtr<EagerClient> eager_client;
Status s = client_cache->GetClient(target, &eager_client);
error::Code expected_code = i <= 5 ? error::OK : error::INVALID_ARGUMENT;
EXPECT_EQ(expected_code, s.code());
counter.DecrementCount();
});
}
counter.Wait();
}
}  // namespace eager
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5b273aa4-9a13-4db4-af7d-902975da9160 | cpp | tensorflow/tensorflow | eager_service_impl | tensorflow/core/distributed_runtime/eager/eager_service_impl.cc | tensorflow/core/distributed_runtime/eager/eager_service_impl_test.cc | #include "tensorflow/core/distributed_runtime/eager/eager_service_impl.h"
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/container/fixed_array.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include "tensorflow/c/eager/immediate_execution_distributed_manager.h"
#include "xla/tsl/distributed_runtime/preemption/preemption_notifier.h"
#include "xla/tsl/protobuf/coordination_config.pb.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/common_runtime/eager/context_distributed_manager.h"
#include "tensorflow/core/common_runtime/eager/eager_operation.h"
#include "tensorflow/core/common_runtime/eager/execute.h"
#include "tensorflow/core/distributed_runtime/eager/cluster_function_library_runtime.h"
#include "tensorflow/core/distributed_runtime/eager/remote_mgr.h"
#include "tensorflow/core/distributed_runtime/eager/remote_tensor_handle.h"
#include "tensorflow/core/distributed_runtime/message_wrappers.h"
#include "tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.h"
#include "tensorflow/core/distributed_runtime/session_mgr.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/distributed_runtime/worker_env.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/rendezvous.h"
#include "tensorflow/core/nccl/collective_communicator.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/host_info.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/traceme.h"
namespace tensorflow {
namespace eager {
namespace {
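// Computes how many return values `op_name` produces by walking its OpDef:
// output args sized by a number_attr or type_list_attr are expanded using the
// attribute values supplied in `attrs`.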
Status GetNumRetvals(FunctionLibraryDefinition* func_lib_def,
const string& op_name,
const google::protobuf::Map<string, tensorflow::AttrValue>& attrs,
int* num_retvals) {
const tensorflow::OpRegistrationData* op_reg_data = nullptr;
auto status = tensorflow::OpRegistry::Global()->LookUp(op_name, &op_reg_data);
if (absl::IsNotFound(status)) {
status = func_lib_def->LookUp(op_name, &op_reg_data);
}
TF_RETURN_IF_ERROR(status);
const tensorflow::OpDef& op_def = op_reg_data->op_def;
for (const auto& output_arg : op_def.output_arg()) {
if (!output_arg.number_attr().empty()) {
auto iter = attrs.find(output_arg.number_attr());
if (iter == attrs.end()) {
return errors::InvalidArgument("Unable to find number_attr ",
output_arg.number_attr(),
" for Op: ", op_name);
}
*num_retvals += iter->second.i();
} else if (!output_arg.type_list_attr().empty()) {
auto iter = attrs.find(output_arg.type_list_attr());
if (iter == attrs.end()) {
return errors::InvalidArgument("Unable to find type_list_attr ",
output_arg.type_list_attr(),
" for Op: ", op_name);
}
*num_retvals += iter->second.list().type_size();
} else {
*num_retvals += 1;
}
}
return absl::OkStatus();
}
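// Translates a wire-format Operation into an EagerOperation: inputs arrive
// either as remote tensor handles (resolved through the RemoteMgr) or as
// inline TensorProtos, and attributes are copied over verbatim.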
Status GetEagerOperationAndNumRetvals(const Operation& operation,
EagerContext* eager_context,
EagerExecutor* eager_executor,
EagerOperation* eager_op,
int* num_retvals) {
const char* name = operation.name().c_str();
std::optional<tensorflow::EagerFunctionParams> remote_func_params =
std::nullopt;
FunctionLibraryDefinition* func_lib_def;
if (operation.is_function()) {
if (operation.is_component_function()) {
func_lib_def =
eager_context->GetComponentFunctionFunctionLibraryDefinition(
operation.name());
if (func_lib_def == nullptr) {
return absl::InternalError(
absl::StrCat("Could not find function library for registered "
"component function: ",
operation.name()));
}
      remote_func_params = {operation.id(), /*is_component_function=*/true,
                            operation.func_step_id(), func_lib_def};
} else {
func_lib_def = eager_context->FuncLibDef();
      remote_func_params = {operation.id(), /*is_component_function=*/false,
                            std::nullopt, /*func_lib_def=*/nullptr};
}
} else {
func_lib_def = eager_context->FuncLibDef();
}
  TF_RETURN_IF_ERROR(eager_op->Reset(name, operation.device().c_str(),
                                     /*remote=*/false, eager_executor,
                                     remote_func_params));
{
tsl::profiler::TraceMe activity("EagerService:RemoteTensorHandleInternal",
tsl::profiler::TraceMeLevel::kVerbose);
for (const auto& input : operation.op_inputs()) {
tensorflow::TensorHandle* handle;
if (input.has_remote_handle()) {
TF_RETURN_IF_ERROR(
eager_context->RemoteMgr()->DeserializeRemoteTensorHandle(
input.remote_handle(), &handle));
TF_RETURN_IF_ERROR(eager_op->AddInput(handle));
} else {
Tensor tensor;
if (!ParseTensorProtoToTensor(input.tensor(), &tensor)) {
return errors::InvalidArgument("Invalid TensorProto: ",
input.tensor().DebugString());
} else {
handle = TensorHandle::CreateLocalHandle(std::move(tensor), nullptr,
nullptr, eager_context);
TF_RETURN_IF_ERROR(eager_op->AddInput(handle));
}
}
handle->Unref();
}
}
for (const auto& attr : operation.attrs()) {
eager_op->MutableAttrs()->Set(attr.first, attr.second);
}
return GetNumRetvals(func_lib_def, operation.name(), operation.attrs(),
num_retvals);
}
Status TensorHandleProto(TensorHandle* handle, TensorProto* proto) {
const tensorflow::Tensor* t = nullptr;
TF_RETURN_IF_ERROR(handle->Tensor(&t));
t->AsProtoTensorContent(proto);
return absl::OkStatus();
}
Status TensorHandleShape(TensorHandle* handle, TensorShapeProto* proto) {
const tensorflow::Tensor* t = nullptr;
if (handle->Type() == TensorHandle::LOCAL) {
TF_RETURN_IF_ERROR(handle->Tensor(&t));
t->shape().AsProto(proto);
} else {
TensorShape shape;
TF_RETURN_IF_ERROR(handle->Shape(&shape));
shape.AsProto(proto);
}
return absl::OkStatus();
}
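// Serializes op outputs into the response. For ops without a valid op id the
// full tensors are returned; otherwise only shapes (and optionally device
// names) are returned, and non-remote handles are retained in the RemoteMgr
// so later requests can reference them by (op_id, output_num).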
Status AddOpRetvalsToResponse(
EagerContext* eager_context, int op_id, int num_retvals,
const std::vector<int32>& output_nums, TensorHandle** retvals,
std::function<TensorProto*()> add_tensor_proto_fn,
std::function<TensorShapeProto*()> add_shape_proto_fn,
std::function<string*()> add_device_fn = nullptr) {
StatusGroup sg;
if (op_id == kInvalidOpId) {
for (int i = 0; i < num_retvals; i++) {
sg.Update(TensorHandleProto(retvals[i], add_tensor_proto_fn()));
retvals[i]->Unref();
}
} else {
for (int i = 0; i < num_retvals; i++) {
sg.Update(TensorHandleShape(retvals[i], add_shape_proto_fn()));
if (add_device_fn) {
Device* device = retvals[i]->device();
*add_device_fn() = device ? device->name() : "";
}
if (retvals[i]->Type() == TensorHandle::REMOTE) {
retvals[i]->Unref();
} else {
const int output_num = output_nums.empty() ? i : output_nums.at(i);
eager_context->RemoteMgr()->AddOperationOutput(retvals[i], op_id,
output_num);
}
}
}
return sg.as_summary_status();
}
Status ResetAgentAndConnectToCoordinationService(
tsl::CoordinationServiceAgent* coord_agent) {
if (coord_agent->IsError()) {
const Status s = coord_agent->Reset();
if (!s.ok()) {
LOG(ERROR) << "Coordination Service agent reset failed " << s;
return s;
}
}
if (!coord_agent->IsConnected()) {
const Status s = coord_agent->Connect();
if (!s.ok()) {
LOG(ERROR) << "Coordination Service agent connect failed " << s;
return s;
}
}
return absl::OkStatus();
}
}  // namespace
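// Creates the per-context server state: a worker session, a remote
// rendezvous, and an EagerContext wired to the cluster's remote workers,
// optionally registering with the coordination service when it is configured.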
Status EagerServiceImpl::CreateContext(const CreateContextRequest* request,
CreateContextResponse* response) {
bool update_collective_executor_mgr = false;
{
mutex_lock l(contexts_mu_);
if (contexts_.empty()) {
update_collective_executor_mgr = true;
} else {
auto context_it = contexts_.find(request->context_id());
if (context_it != contexts_.end()) {
if (request->context_view_id() <
context_it->second->Context()->GetContextViewId()) {
return errors::InvalidArgument("EagerService:CreateContext failed. ",
"Context id: <", request->context_id(),
"> already exists.");
} else {
context_it->second->Unref();
contexts_.erase(context_it);
}
}
}
}
if (env_ == nullptr || env_->rendezvous_mgr == nullptr) {
return tensorflow::errors::Internal(
"invalid eager env_ or env_->rendezvous_mgr.");
}
if (request->clear_existing_contexts()) {
for (auto* device : env_->device_mgr->ListDevices()) {
device->ClearResourceMgr();
}
env_->rendezvous_mgr->CleanupAll();
env_->collective_executor_mgr->CleanupAll();
TF_RETURN_IF_ERROR(env_->session_mgr->DeleteAllSessions());
std::unordered_map<uint64, ServerContext*> tmp_contexts;
{
mutex_lock l(contexts_mu_);
if (!contexts_.empty()) {
std::swap(tmp_contexts, contexts_);
}
}
for (auto& context : tmp_contexts) {
context.second->Unref();
}
}
tsl::core::RefCountPtr<RemoteRendezvous> r =
env_->rendezvous_mgr->Find(request->context_id());
auto session_name =
tensorflow::strings::StrCat("eager_", request->context_id());
if (VLOG_IS_ON(2)) {
VLOG(2) << "Creating context on /job:" << request->server_def().job_name()
<< "/task:" << request->server_def().task_index();
for (const auto& da : request->cluster_device_attributes()) {
VLOG(2) << " " << da.name();
}
}
TF_RETURN_IF_ERROR(env_->session_mgr->CreateSession(
session_name, request->server_def(), request->cluster_device_attributes(),
request->server_def().default_session_config().isolate_session_state()));
int64_t context_id = request->context_id();
std::function<void()> session_destroyer = [this, context_id, session_name]() {
env_->rendezvous_mgr->Cleanup(context_id);
auto s = env_->session_mgr->DeleteSession(session_name);
if (!s.ok()) {
LOG(WARNING) << "Failed to destroy worker session '" << session_name
<< "' due to " << s.message();
}
};
std::shared_ptr<WorkerSession> worker_session;
TF_RETURN_IF_ERROR(env_->session_mgr->WorkerSessionForSession(
session_name, &worker_session));
tensorflow::DeviceMgr* device_mgr = worker_session->device_mgr();
TF_RETURN_IF_ERROR(r->Initialize(worker_session.get()));
r->SetRemoteEagerContextDefault();
std::function<tsl::core::RefCountPtr<Rendezvous>(const int64_t)>
rendezvous_creator = [worker_session, this](const int64_t step_id) {
tsl::core::RefCountPtr<RemoteRendezvous> r =
env_->rendezvous_mgr->Find(step_id);
r->Initialize(worker_session.get()).IgnoreError();
return r;
};
LOG(INFO) << "Creating " << (request->async() ? "async" : "sync")
<< " eager service context with rendezvous_id on host "
<< port::Hostname() << " " << worker_session->worker_name();
SessionOptions opts;
opts.config = request->server_def().default_session_config();
LOG(INFO) << "SessionOptions: " << opts.config.DebugString();
if (update_collective_executor_mgr) {
env_->collective_executor_mgr = CreateProdRpcCollectiveExecutorMgr(
opts.config, device_mgr, MaybeCreateNcclCommunicator(opts.config),
worker_session->worker_cache(), worker_session->worker_name());
}
tensorflow::EagerContext* ctx = new tensorflow::EagerContext(
opts, tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
      request->async(), device_mgr, /*device_mgr_owned=*/false, std::move(r),
worker_session->cluster_flr(), env_->collective_executor_mgr.get());
core::ScopedUnref unref_ctx(ctx);
std::vector<string> remote_workers;
worker_session->worker_cache()->ListWorkers(&remote_workers);
remote_workers.erase(std::remove(remote_workers.begin(), remote_workers.end(),
worker_session->worker_name()),
remote_workers.end());
std::unique_ptr<tensorflow::eager::EagerClientCache> remote_eager_workers;
TF_RETURN_IF_ERROR(worker_session->worker_cache()->GetEagerClientCache(
&remote_eager_workers));
DistributedFunctionLibraryRuntime* cluster_flr =
eager::CreateClusterFLR(request->context_id(), ctx, worker_session.get());
  auto remote_mgr =
      std::make_unique<tensorflow::eager::RemoteMgr>(/*is_master=*/false, ctx);
Status s = ctx->InitializeRemoteWorker(
std::move(remote_eager_workers), worker_session->remote_device_mgr(),
remote_workers, request->context_id(), request->context_view_id(),
std::move(rendezvous_creator), cluster_flr, std::move(remote_mgr),
std::move(session_destroyer));
if (!s.ok()) {
VLOG(1) << "EagerContext::InitializeRemoteWorker failed with "
<< s.ToString();
return s;
}
#if !defined(IS_MOBILE_PLATFORM)
const auto& config = request->server_def().default_session_config();
const bool enable_coordination =
!config.experimental().coordination_config().service_type().empty();
if (enable_coordination) {
auto dist_mgr = std::make_unique<EagerContextDistributedManager>(ctx);
auto coord_agent = env_->session_mgr->GetCoordinationServiceAgent();
dist_mgr->SetCoordinationServiceAgent(coord_agent);
if (config.experimental().coordination_config().enable_health_check()) {
TF_RETURN_IF_ERROR(
ResetAgentAndConnectToCoordinationService(coord_agent));
}
auto preemption_notifier =
tsl::PreemptionNotifier::CreatePreemptionNotifier("sigterm",
Env::Default());
preemption_notifier->WillBePreemptedAtAsync(
[coord_agent](absl::StatusOr<absl::Time> time_or_status) {
if (time_or_status.ok()) {
const auto coord_task = coord_agent->GetOwnTask().value();
Status s = coord_agent->InsertKeyValue(
"TF_DEFAULT_PREEMPTION_NOTICE_KEY",
absl::StrCat("/job:", coord_task.job_name(),
"/task:", coord_task.task_id()));
if (!s.ok()) {
VLOG(3) << "Preemption not exported to coordination service: "
<< s;
}
}
});
dist_mgr->SetPreemptionNotifier(std::move(preemption_notifier));
ctx->SetDistributedManager(std::move(dist_mgr));
}
#endif
std::vector<DeviceAttributes> device_attributes;
device_mgr->ListDeviceAttributes(&device_attributes);
for (const auto& da : device_attributes) {
*response->add_device_attributes() = da;
}
{
mutex_lock l(contexts_mu_);
auto context_it = contexts_.find(request->context_id());
if (context_it != contexts_.end()) {
return errors::InvalidArgument("EagerService:CreateContext failed. ",
"Context id: <", request->context_id(),
"> already exists.");
}
contexts_.emplace(request->context_id(),
new ServerContext(ctx, request->keep_alive_secs(), env_));
}
return absl::OkStatus();
}
Status EagerServiceImpl::UpdateContext(const UpdateContextRequest* request,
UpdateContextResponse* response) {
if (env_ == nullptr || env_->rendezvous_mgr == nullptr) {
return tensorflow::errors::Internal(
"invalid eager env_ or env_->rendezvous_mgr.");
}
ServerContext* server_context = nullptr;
TF_RETURN_IF_ERROR(GetServerContext(request->context_id(), &server_context));
core::ScopedUnref context_unref(server_context);
tensorflow::EagerContext* ctx = server_context->Context();
if (request->context_view_id() != ctx->GetContextViewId() + 1) {
return errors::InvalidArgument(
"EagerService:UpdateContext failed. Context id: <",
request->context_id(), "> currently at view #", ctx->GetContextViewId(),
" but received update request at view #", request->context_view_id(),
". View id should only be continuously incremented.");
}
if (request->cluster_device_attributes_size() == 0) {
ctx->IncrementContextViewId();
VLOG(1) << "Processing simplified UpdateContextRequest on "
<< ctx->HostCPU()->name();
return absl::OkStatus();
}
auto session_name =
tensorflow::strings::StrCat("eager_", request->context_id());
TF_RETURN_IF_ERROR(
env_->session_mgr->UpdateSession(session_name, request->server_def(),
request->cluster_device_attributes()));
std::shared_ptr<WorkerSession> worker_session;
TF_RETURN_IF_ERROR(env_->session_mgr->WorkerSessionForSession(
session_name, &worker_session));
const tensorflow::DeviceMgr* device_mgr = worker_session->device_mgr();
std::vector<string> remote_workers;
worker_session->worker_cache()->ListWorkers(&remote_workers);
remote_workers.erase(std::remove(remote_workers.begin(), remote_workers.end(),
worker_session->worker_name()),
remote_workers.end());
VLOG(1) << "On existing server " << worker_session->worker_name()
<< " updating remote workers";
if (VLOG_IS_ON(2)) {
for (const string& rw : remote_workers) {
VLOG(2) << "Remote worker " << rw;
}
}
std::unique_ptr<tensorflow::eager::EagerClientCache> remote_eager_workers;
TF_RETURN_IF_ERROR(worker_session->worker_cache()->GetEagerClientCache(
&remote_eager_workers));
ctx->ClearCachesAndThreadExecutors();
Status s = ctx->UpdateRemoteWorker(std::move(remote_eager_workers),
remote_workers, request->context_id());
if (!s.ok()) {
VLOG(1) << "EagerContext::UpdateRemoteWorker failed with " << s.ToString();
return s;
}
#if !defined(IS_MOBILE_PLATFORM)
const auto& config = request->server_def().default_session_config();
const bool should_connect =
!config.experimental().coordination_config().service_type().empty() &&
config.experimental().coordination_config().enable_health_check();
if (should_connect) {
auto coord_agent = env_->session_mgr->GetCoordinationServiceAgent();
TF_RETURN_IF_ERROR(ResetAgentAndConnectToCoordinationService(coord_agent));
}
#endif
std::vector<DeviceAttributes> device_attributes;
device_mgr->ListDeviceAttributes(&device_attributes);
for (const auto& da : device_attributes) {
*response->add_device_attributes() = da;
}
return absl::OkStatus();
}
Status EagerServiceImpl::CreateMasterContext(
const tensorflow::uint64 context_id, EagerContext* context) {
{
mutex_lock l(contexts_mu_);
auto iter = contexts_.find(context_id);
if (iter != contexts_.end()) {
return errors::InvalidArgument(
"EagerService:CreateMasterContext failed. ", "Context id: <",
context_id, "> already exists.");
}
}
ServerContext* server_context =
ServerContext::CreateMasterContext(context, env_);
mutex_lock l(contexts_mu_);
contexts_.emplace(context_id, server_context);
return absl::OkStatus();
}
void EagerServiceImpl::RunComponentFunction(
CallOptions* call_opts, const RunComponentFunctionRequest* request,
RunComponentFunctionResponse* response, StatusCallback done) {
ServerContext* context = nullptr;
Status s = GetServerContext(request->context_id(), &context);
if (!s.ok()) {
done(s);
return;
}
core::ScopedUnref context_unref(context);
auto& operation = request->operation();
if (!operation.is_function() || !operation.is_component_function()) {
done(errors::Internal(
"RunComponentFunction request can only be used to execute "
"component functions."));
return;
}
EagerContext* eager_context = context->Context();
EagerExecutor* eager_executor = &eager_context->Executor();
EagerOperation* op = new EagerOperation(eager_context);
int* num_retvals = new int(0);
s = GetEagerOperationAndNumRetvals(operation, eager_context, eager_executor,
op, num_retvals);
if (!s.ok()) {
delete num_retvals;
delete op;
done(s);
return;
}
if (!op->IsLocal()) {
delete num_retvals;
delete op;
done(errors::Internal(
"Received RunComponentFunction request with remote function device. "));
return;
}
s = op->SetAttrBool("is_component_function", true);
if (!s.ok()) {
delete num_retvals;
delete op;
done(errors::Internal("Error setting is_component_function attribute: ",
s.message()));
return;
}
auto* retvals = new absl::FixedArray<TensorHandle*>(*num_retvals);
VLOG(3) << "ServerContext: Calling EagerLocalExecuteAsync for op "
<< operation.id();
std::vector<int32> output_nums;
for (const int32_t output_num : request->output_num()) {
output_nums.push_back(output_num);
}
auto cm = std::make_shared<CancellationManager>();
op->SetCancellationManager(cm.get());
call_opts->SetCancelCallback([cm] { cm->StartCancel(); });
context->Ref();
EagerLocalExecuteAsync(
op, retvals->data(), num_retvals,
[op, op_id = operation.id(), num_retvals, retvals, output_nums, cm,
call_opts, response, eager_context, context,
done = std::move(done)](const Status& status) {
call_opts->ClearCancelCallback();
auto wrapped_done = [&](const Status& status) {
context->Unref();
done(status);
delete op;
delete num_retvals;
delete retvals;
};
if (!status.ok()) {
wrapped_done(status);
return;
}
wrapped_done(AddOpRetvalsToResponse(
eager_context, op_id, *num_retvals, output_nums, retvals->data(),
[response] { return response->add_tensor(); },
[response] { return response->add_shape(); }));
});
}
Status EagerServiceImpl::ExecuteOp(CallOptions* call_opts,
const Operation& operation,
EagerContext* eager_context,
EagerExecutor* eager_executor,
QueueResponse* queue_response) {
tensorflow::EagerOperation op(eager_context);
int num_retvals = 0;
TF_RETURN_IF_ERROR(GetEagerOperationAndNumRetvals(
operation, eager_context, eager_executor, &op, &num_retvals));
auto cm = std::make_shared<CancellationManager>();
if (call_opts) {
op.SetCancellationManager(cm.get());
call_opts->SetCancelCallback([cm] { cm->StartCancel(); });
}
absl::FixedArray<tensorflow::TensorHandle*> retvals(num_retvals);
VLOG(3) << "ServerContext: Calling EagerExecute for op " << operation.id();
TF_RETURN_IF_ERROR(op.Execute(
absl::MakeSpan(
reinterpret_cast<tensorflow::AbstractTensorHandle**>(retvals.data()),
num_retvals),
&num_retvals));
std::function<string*()> add_device_fn = nullptr;
if (op.is_function()) {
add_device_fn = [queue_response] { return queue_response->add_device(); };
}
return AddOpRetvalsToResponse(
eager_context, operation.id(), num_retvals, {},
retvals.data(), [queue_response] { return queue_response->add_tensor(); },
[queue_response] { return queue_response->add_shape(); },
std::move(add_device_fn));
}
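// Processes each queued item in order: op execution, remote handle decrefs,
// tensor and packed-handle sends, function register/remove/cleanup, or a
// barrier that waits for all pending nodes. On failure, a per-stream executor
// (if any) is deleted before the error is returned.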
Status EagerServiceImpl::Enqueue(CallOptions* call_opts,
const EnqueueRequest* request,
EnqueueResponse* response, uint64 stream_id) {
tsl::profiler::TraceMe activity(
[&] {
return absl::StrCat(
"EagerService:Enqueue#debug_str=", request->DebugString(), "#");
},
tsl::profiler::TraceMeLevel::kInfo);
ServerContext* context = nullptr;
TF_RETURN_IF_ERROR(GetServerContext(request->context_id(), &context));
core::ScopedUnref context_unref(context);
EagerExecutor& executor =
stream_id == kInvalidStreamId
? context->Context()->Executor()
: context->Context()->RemoteMgr()->GetOrCreateExecutorForStream(
stream_id);
Status s;
for (const auto& item : request->queue()) {
auto* queue_response = response->add_queue_response();
if (item.has_operation()) {
s = ExecuteOp(call_opts, item.operation(), context->Context(), &executor,
queue_response);
} else if (item.has_handle_to_decref()) {
auto handle_to_decref = std::make_unique<RemoteTensorHandleInternal>(
item.handle_to_decref());
auto node = std::make_unique<ClientTensorHandleDeleteNode>(
context, std::move(handle_to_decref));
s = context->Context()->Executor().AddOrExecute(std::move(node));
} else if (item.has_send_tensor()) {
s = SendTensor(item.send_tensor(), context->Context());
} else if (item.has_send_packed_handle()) {
s = SendPackedHandle(item.send_packed_handle(), context->Context());
} else if (item.has_register_function()) {
s = RegisterFunction(item.register_function(), context->Context());
} else if (item.has_remove_function()) {
s = RemoveFunction(item.remove_function(), context->Context());
} else if (item.has_cleanup_function()) {
s = CleanupFunction(item.cleanup_function());
} else {
DCHECK(item.has_sync_remote_executor_for_stream());
s = executor.WaitForAllPendingNodes();
}
if (!s.ok()) {
if (stream_id != kInvalidStreamId) {
context->Context()->RemoteMgr()->DeleteExecutorForStream(stream_id);
}
return s;
}
}
return absl::OkStatus();
}
Status EagerServiceImpl::WaitQueueDone(const WaitQueueDoneRequest* request,
WaitQueueDoneResponse* response) {
ServerContext* context = nullptr;
TF_RETURN_IF_ERROR(GetServerContext(request->context_id(), &context));
core::ScopedUnref context_unref(context);
if (request->op_id_size() > 0) {
return errors::Unimplemented(
"EagerServiceImpl::WaitQueueDone is not "
"implemented for particular op IDs.");
}
return context->Context()->Executor().WaitForAllPendingNodes();
}
Status EagerServiceImpl::KeepAlive(const KeepAliveRequest* request,
KeepAliveResponse* response) {
ServerContext* context = nullptr;
TF_RETURN_IF_ERROR(GetServerContext(request->context_id(), &context));
core::ScopedUnref context_unref(context);
tensorflow::EagerContext* ctx = context->Context();
response->set_context_view_id(ctx->GetContextViewId());
return absl::OkStatus();
}
Status EagerServiceImpl::CloseContext(const CloseContextRequest* request,
CloseContextResponse* response) {
ServerContext* context = nullptr;
if (!GetServerContext(request->context_id(), &context).ok()) {
return absl::OkStatus();
}
core::ScopedUnref context_unref(context);
if (request->context_view_id() < context->Context()->GetContextViewId()) {
LOG(INFO) << "Ignoring CloseContext request with a stale context_view_id "
<< request->context_view_id() << " for context_id "
<< request->context_id() << ". The current context_view_id is "
<< context->Context()->GetContextViewId() << ".";
return absl::OkStatus();
}
mutex_lock l(contexts_mu_);
contexts_.erase(request->context_id());
context->Unref();
return absl::OkStatus();
}
Status EagerServiceImpl::RegisterFunction(
const RegisterFunctionOp& register_function, EagerContext* eager_context) {
if (register_function.is_component_function()) {
return eager_context->AddComponentFunction(register_function.function_def(),
register_function.library());
} else {
return eager_context->AddFunctionDef(register_function.function_def(),
register_function.library(),
                                         /*add_to_local_only=*/false);
}
}
Status EagerServiceImpl::RemoveFunction(const RemoveFunctionOp& remove_function,
EagerContext* eager_context) {
return eager_context->RemoveFunction(remove_function.function_name());
}
Status EagerServiceImpl::CleanupFunction(
const CleanupFunctionOp& cleanup_function) {
env_->rendezvous_mgr->Cleanup(cleanup_function.step_id());
return absl::OkStatus();
}
Status EagerServiceImpl::SendTensor(const SendTensorOp& send_tensor,
EagerContext* eager_context) {
absl::InlinedVector<tensorflow::TensorHandle*, 2UL> tensors;
for (const auto& tensor_proto : send_tensor.tensors()) {
Tensor tensor;
if (!tensor.FromProto(tensor_proto)) {
return errors::InvalidArgument("Unable to parse tensor proto");
}
TensorHandle* tensor_handle = TensorHandle::CreateLocalHandle(
std::move(tensor), nullptr, nullptr, eager_context);
TensorHandle* copied_handle = nullptr;
Device* device;
TF_RETURN_IF_ERROR(eager_context->FindDeviceFromName(
send_tensor.device_name().c_str(), &device));
TF_RETURN_IF_ERROR(EagerCopyToDevice(tensor_handle, eager_context,
&eager_context->Executor(), device,
                                         /*mirror=*/false, &copied_handle));
tensors.push_back(copied_handle);
tensor_handle->Unref();
}
eager_context->RemoteMgr()->AddOperationOutputs(tensors, send_tensor.op_id());
return absl::OkStatus();
}
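// Reassembles a packed TensorHandle from a mix of inline tensors and remote
// handles, then registers it with the RemoteMgr under the sender's op id.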
Status EagerServiceImpl::SendPackedHandle(
const SendPackedHandleOp& send_packed_handle, EagerContext* eager_context) {
if (send_packed_handle.handles().empty()) {
return errors::InvalidArgument("Handles should not be empty.");
}
std::vector<tensorflow::TensorHandle*> handles;
handles.resize(send_packed_handle.handles_size());
for (int i = 0; i < send_packed_handle.handles_size(); ++i) {
const auto& item = send_packed_handle.handles(i);
if (item.has_local_handle()) {
Tensor tensor;
if (!ParseTensorProtoToTensor(item.local_handle().tensor(), &tensor)) {
return errors::InvalidArgument(
"Invalid TensorProto: ",
item.local_handle().tensor().DebugString());
}
Device* op_device = nullptr;
TF_RETURN_IF_ERROR(eager_context->FindDeviceFromName(
item.local_handle().device().c_str(), &op_device));
handles[i] = TensorHandle::CreateLocalHandle(
std::move(tensor), nullptr, op_device, eager_context);
} else {
TF_RETURN_IF_ERROR(
eager_context->RemoteMgr()->DeserializeRemoteTensorHandle(
item.remote_handle(), &handles[i]));
}
}
tensorflow::TensorHandle* packed_handle = nullptr;
std::vector<tensorflow::TensorHandle*> handles_to_pack = handles;
TF_RETURN_IF_ERROR(TensorHandle::CreatePackedHandle(
std::move(handles_to_pack), handles.at(0)->dtype, TensorShape(),
send_packed_handle.device_name(), eager_context, &packed_handle));
for (auto* h : handles) {
h->Unref();
}
eager_context->RemoteMgr()->AddOperationOutputs({packed_handle},
send_packed_handle.op_id());
return absl::OkStatus();
}
tensorflow::Status EagerServiceImpl::GetServerContext(
uint64 context_id, ServerContext** server_context) {
tf_shared_lock l(contexts_mu_);
auto iter = contexts_.find(context_id);
if (iter == contexts_.end()) {
*server_context = nullptr;
return errors::Aborted(strings::Printf(
"Unable to find a context_id matching the specified one "
"(%llu). Perhaps the worker was restarted, or the context was GC'd?",
static_cast<unsigned long long>(context_id)));
}
*server_context = iter->second;
(*server_context)->Ref();
(*server_context)->RecordAccess();
return absl::OkStatus();
}
}  // namespace eager
} | #include "tensorflow/core/distributed_runtime/eager/eager_service_impl.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <unordered_map>
#include <utility>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/c/tf_tensor_internal.h"
#include "tensorflow/core/common_runtime/eager/kernel_and_device.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/distributed_runtime/eager/cluster_function_library_runtime.h"
#include "tensorflow/core/distributed_runtime/eager/remote_mgr.h"
#include "tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.h"
#include "tensorflow/core/distributed_runtime/session_mgr.h"
#include "tensorflow/core/distributed_runtime/test_utils.h"
#include "tensorflow/core/distributed_runtime/worker_env.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/eager_service.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/protobuf/remote_tensor_handle.pb.h"
#include "tensorflow/core/protobuf/tensorflow_server.pb.h"
namespace tensorflow {
namespace eager {
namespace {
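// Test harness: a service subclass exposing context/handle lookups, plus fake
// eager clients and worker caches that short-circuit RPCs straight into the
// local service implementation.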
class TestEagerServiceImpl : public EagerServiceImpl {
public:
explicit TestEagerServiceImpl(WorkerEnv* env) : EagerServiceImpl(env) {}
Status GetEagerContext(const uint64 context_id, EagerContext** ctx) {
ServerContext* context = nullptr;
TF_RETURN_IF_ERROR(GetServerContext(context_id, &context));
core::ScopedUnref context_unref(context);
*ctx = context->Context();
return absl::OkStatus();
}
Status GetTensorHandle(const uint64 context_id,
const RemoteTensorHandleInternal& remote_handle,
tensorflow::TensorHandle** handle) {
ServerContext* context = nullptr;
TF_RETURN_IF_ERROR(GetServerContext(context_id, &context));
core::ScopedUnref context_unref(context);
return context->Context()->RemoteMgr()->GetTensorHandle(remote_handle,
handle);
}
};
class FakeEagerClient : public EagerClient {
public:
FakeEagerClient() {}
~FakeEagerClient() override {}
void SetServiceImpl(TestEagerServiceImpl* impl) { impl_ = impl; }
#define CLIENT_METHOD(method) \
void method##Async(const method##Request* request, \
method##Response* response, StatusCallback done) \
override { \
done(impl_->method(request, response)); \
}
CLIENT_METHOD(CreateContext);
CLIENT_METHOD(UpdateContext);
CLIENT_METHOD(WaitQueueDone);
CLIENT_METHOD(KeepAlive);
CLIENT_METHOD(CloseContext);
#undef CLIENT_METHOD
#define CLIENT_METHOD_WITH_TIMEOUT_AND_RETRIES(method) \
void method##Async(const method##Request* request, \
method##Response* response, StatusCallback done, \
int64_t init_timeout_in_ms, int retries) override { \
done(impl_->method(request, response)); \
}
CLIENT_METHOD_WITH_TIMEOUT_AND_RETRIES(CreateContext);
#undef CLIENT_METHOD_WITH_TIMEOUT_AND_RETRIES
void EnqueueAsync(CallOptions* call_opts, const EnqueueRequest* request,
EnqueueResponse* response, StatusCallback done) override {
done(impl_->Enqueue(call_opts, request, response));
}
void RunComponentFunctionAsync(CallOptions* call_opts,
const RunComponentFunctionRequest* request,
RunComponentFunctionResponse* response,
StatusCallback done) override {
impl_->RunComponentFunction(call_opts, request, response, std::move(done));
}
void StreamingEnqueueAsync(bool enable_streaming_enqueue,
CallOptions* call_opts,
const EnqueueRequest* request,
EnqueueResponse* response,
StatusCallback done) override {
done(impl_->Enqueue(nullptr, request, response));
}
bool allow_multiple_pending_requests() const override { return false; }
private:
TestEagerServiceImpl* impl_;
};
class DummyEagerClientCache : public EagerClientCache {
public:
DummyEagerClientCache() : client_(new FakeEagerClient) {}
Status GetClient(const string& target,
core::RefCountPtr<EagerClient>* client) override {
client->reset(client_.get());
client_->Ref();
return absl::OkStatus();
}
private:
core::RefCountPtr<EagerClient> client_;
};
class FakeCache : public TestWorkerCache {
Status GetEagerClientCache(
std::unique_ptr<eager::EagerClientCache>* eager_client_cache) override {
*eager_client_cache = std::make_unique<DummyEagerClientCache>();
return absl::OkStatus();
}
void ListWorkers(std::vector<string>* workers) const override {
workers->push_back("/job:localhost/replica:0/task:0");
}
};
class EagerServiceImplTest : public ::testing::Test {
public:
EagerServiceImplTest()
: rendezvous_mgr_(&worker_env_),
session_mgr_(new SessionMgr(
&worker_env_, "/job:localhost/replica:0/task:0/device:CPU:0",
std::unique_ptr<WorkerCacheInterface>(new FakeCache),
[](const ServerDef& server_def,
WorkerCacheInterface** worker_cache) {
*worker_cache = new FakeCache;
return absl::OkStatus();
},
nullptr)) {
worker_env_.env = Env::Default();
worker_env_.rendezvous_mgr = &rendezvous_mgr_;
worker_env_.session_mgr = session_mgr_.get();
device_mgr_ = std::make_unique<StaticDeviceMgr>(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
worker_env_.device_mgr = device_mgr_.get();
}
protected:
WorkerEnv worker_env_;
tensorflow::RpcRendezvousMgr rendezvous_mgr_;
std::unique_ptr<SessionMgr> session_mgr_;
std::unique_ptr<DynamicDeviceMgr> device_mgr_;
};
void SetTensorProto(TensorProto* tensor_proto) {
int64_t dims[] = {2, 2};
float data[] = {1.0f, 2.0f, 3.0f, 4.0f};
TF_Tensor* t = TF_AllocateTensor(
TF_FLOAT, &dims[0], sizeof(dims) / sizeof(int64_t), sizeof(data));
memcpy(TF_TensorData(t), &data[0], TF_TensorByteSize(t));
tensorflow::Tensor tensor;
TF_ASSERT_OK(tensorflow::TF_TensorToTensor(t, &tensor));
tensor.AsProtoTensorContent(tensor_proto);
TF_DeleteTensor(t);
}
void BuildOperation(
Operation* operation, int64_t id, const string& name,
const std::vector<std::variant<TensorProto, std::pair<int64_t, int32>>>&
inputs,
const std::unordered_map<string, AttrValue>& attrs, const string& device) {
operation->set_id(id);
operation->set_name(name);
operation->set_device(device);
for (const auto& input : inputs) {
if (input.index() == 0) {
*operation->add_op_inputs()->mutable_tensor() =
std::get<TensorProto>(input);
} else {
const auto& tensor_handle_pair =
std::get<std::pair<int64_t, int32>>(input);
auto* input = operation->add_op_inputs()->mutable_remote_handle();
input->set_op_id(tensor_handle_pair.first);
input->set_output_num(tensor_handle_pair.second);
input->set_op_device(device);
input->set_device(device);
}
}
for (const auto& attr_entry : attrs) {
(*operation->mutable_attrs())[attr_entry.first] = attr_entry.second;
}
}
void AddOperationToEnqueueRequest(
int64_t id, const string& name,
const std::vector<std::variant<TensorProto, std::pair<int64_t, int32>>>&
inputs,
const std::unordered_map<string, AttrValue>& attrs, const string& device,
EnqueueRequest* request) {
auto* operation = request->add_queue()->mutable_operation();
BuildOperation(operation, id, name, inputs, attrs, device);
}
void AddOperationToRunComponentFunctionRequest(
int64_t id, const string& name,
const std::vector<std::variant<TensorProto, std::pair<int64_t, int32>>>&
inputs,
const std::unordered_map<string, AttrValue>& attrs, const string& device,
const int output_num, RunComponentFunctionRequest* request) {
auto* operation = request->mutable_operation();
operation->set_is_function(true);
operation->set_is_component_function(true);
request->add_output_num(output_num);
BuildOperation(operation, id, name, inputs, attrs, device);
}
tensorflow::NodeDef MatMulFunctionNodeDef() {
tensorflow::NodeDef def;
CHECK(tensorflow::protobuf::TextFormat::ParseFromString(
" name: 'matmul_func'"
" op: 'MatMulFunction'"
" input: 'a'"
" input: 'a'"
" attr {"
" key: 'T'"
" value {"
" type: DT_FLOAT"
" }"
" }",
&def));
return def;
}
tensorflow::FunctionDef MatMulFunction() {
tensorflow::FunctionDef def;
CHECK(tensorflow::protobuf::TextFormat::ParseFromString(
" signature {"
" name: 'MatMulFunction'"
" input_arg {"
" name: 'a'"
" type: DT_FLOAT"
" }"
" output_arg {"
" name: 'm'"
" type: DT_FLOAT"
" }"
" }"
" node_def {"
" name: 'matmul'"
" op: 'MatMul'"
" input: 'a'"
" input: 'a'"
" attr {"
" key: 'T'"
" value {"
" type: DT_FLOAT"
" }"
" }"
" attr {"
" key: 'transpose_a'"
" value {"
" b: false"
" }"
" }"
" }"
" ret {"
" key: 'm'"
" value: 'matmul:product'"
" }",
&def));
return def;
}
tensorflow::FunctionDef MatMulTransposeFunction() {
tensorflow::FunctionDef def;
CHECK(tensorflow::protobuf::TextFormat::ParseFromString(
" signature {"
" name: 'MatMulFunction'"
" input_arg {"
" name: 'a'"
" type: DT_FLOAT"
" }"
" output_arg {"
" name: 'm'"
" type: DT_FLOAT"
" }"
" }"
" node_def {"
" name: 'matmul'"
" op: 'MatMul'"
" input: 'a'"
" input: 'a'"
" attr {"
" key: 'T'"
" value {"
" type: DT_FLOAT"
" }"
" }"
" attr {"
" key: 'transpose_a'"
" value {"
" b: true"
" }"
" }"
" }"
" ret {"
" key: 'm'"
" value: 'matmul:product'"
" }",
&def));
return def;
}
tensorflow::FunctionDef MatMulNestedFunction() {
tensorflow::FunctionDef def;
CHECK(tensorflow::protobuf::TextFormat::ParseFromString(
" signature {"
" name: 'MatMulNestedFunction'"
" input_arg {"
" name: 'a'"
" type: DT_FLOAT"
" }"
" output_arg {"
" name: 'matmul_nested'"
" type: DT_FLOAT"
" }"
" }"
" node_def {"
" name: 'matmul_nested'"
" op: 'MatMulFunction'"
" input: 'a'"
" attr {"
" key: 'T'"
" value {"
" type: DT_FLOAT"
" }"
" }"
" }"
" ret {"
" key: 'matmul_nested'"
" value: 'matmul_nested:m:0'"
" }",
&def));
return def;
}
tensorflow::FunctionDef SingleRecvNodeFunction() {
tensorflow::FunctionDef def;
CHECK(tensorflow::protobuf::TextFormat::ParseFromString(
" signature {"
" name: 'SingleRecvNodeFunction'"
" input_arg {"
" name: 'a'"
" type: DT_FLOAT"
" }"
" output_arg {"
" name: 'recv_tensor'"
" type: DT_FLOAT"
" }"
" }"
" node_def {"
" name: 'recv_node'"
" op: '_Recv'"
" device: '/job:localhost/replica:0/task:0/device:CPU:0'"
" attr {"
" key: 'client_terminated'"
" value {"
" b: true"
" }"
" }"
" attr {"
" key: 'recv_device'"
" value {"
" s: '/job:localhost/replica:0/task:0/device:CPU:0'"
" }"
" }"
" attr {"
" key: 'send_device'"
" value {"
" s: '/job:localhost/replica:0/task:0/device:CPU:0'"
" }"
" }"
" attr {"
" key: 'send_device_incarnation'"
" value {"
" i: 1"
" }"
" }"
" attr {"
" key: 'tensor_name'"
" value {"
" s: 't0'"
" }"
" }"
" attr {"
" key: 'tensor_type'"
" value {"
" type: DT_FLOAT"
" }"
" }"
" }"
" ret {"
" key: 'recv_tensor'"
" value: 'recv_node:tensor:0'"
" }",
&def));
return def;
}
TEST_F(EagerServiceImplTest, BasicTest) {
TestEagerServiceImpl eager_service_impl(&worker_env_);
uint64 context_id = random::New64();
CreateContextRequest request;
request.mutable_server_def()->set_job_name("localhost");
request.mutable_server_def()->set_task_index(0);
request.set_context_id(context_id);
CreateContextResponse response;
TF_ASSERT_OK(eager_service_impl.CreateContext(&request, &response));
EnqueueRequest remote_enqueue_request;
remote_enqueue_request.set_context_id(context_id);
EnqueueResponse remote_enqueue_response;
std::unordered_map<string, AttrValue> const_attrs;
AttrValue val;
val.set_type(tensorflow::DataType::DT_FLOAT);
const_attrs.insert({"dtype", val});
val.Clear();
SetTensorProto(val.mutable_tensor());
const_attrs.insert({"value", val});
AddOperationToEnqueueRequest(1, "Const", {}, const_attrs,
"/job:localhost/replica:0/task:0/device:CPU:0",
&remote_enqueue_request);
std::unordered_map<string, AttrValue> attrs;
val.Clear();
val.set_type(tensorflow::DataType::DT_FLOAT);
attrs.insert({"T", val});
val.Clear();
val.set_b(false);
attrs.insert({"transpose_a", val});
attrs.insert({"transpose_b", val});
AddOperationToEnqueueRequest(
2, "MatMul", {std::make_pair(1, 0), std::make_pair(1, 0)}, attrs,
"/job:localhost/replica:0/task:0/device:CPU:0", &remote_enqueue_request);
TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &remote_enqueue_request,
&remote_enqueue_response));
auto& matmul_result_shape =
remote_enqueue_response.queue_response(1).shape(0);
EXPECT_EQ(matmul_result_shape.dim(0).size(), 2);
EXPECT_EQ(matmul_result_shape.dim(1).size(), 2);
tensorflow::TensorHandle* tensor_handle;
TF_ASSERT_OK(eager_service_impl.GetTensorHandle(
context_id, RemoteTensorHandleInternal(2, 0), &tensor_handle));
const tensorflow::Tensor* t = nullptr;
TF_ASSERT_OK(tensor_handle->Tensor(&t));
auto actual = t->flat<float>();
EXPECT_EQ(4, actual.size());
EXPECT_EQ(7, actual(0));
EXPECT_EQ(10, actual(1));
EXPECT_EQ(15, actual(2));
EXPECT_EQ(22, actual(3));
CloseContextRequest close_context_request;
close_context_request.set_context_id(context_id);
close_context_request.set_context_view_id(0);
CloseContextResponse close_context_response;
TF_ASSERT_OK(eager_service_impl.CloseContext(&close_context_request,
&close_context_response));
}
class EagerServiceImplFunctionTest : public EagerServiceImplTest {
public:
EagerServiceImplFunctionTest() : EagerServiceImplTest() {}
void TestFunction(const RegisterFunctionOp& register_op,
const string& function_name,
const bool local_inputs = false,
const bool test_cancel = false) {
TestEagerServiceImpl eager_service_impl(&worker_env_);
uint64 context_id = random::New64();
CreateContextRequest request;
request.mutable_server_def()->set_job_name("localhost");
request.mutable_server_def()->set_task_index(0);
request.set_context_id(context_id);
CreateContextResponse response;
TF_ASSERT_OK(eager_service_impl.CreateContext(&request, &response));
EnqueueRequest enqueue_request;
enqueue_request.set_context_id(context_id);
*enqueue_request.add_queue()->mutable_register_function() = register_op;
EnqueueResponse enqueue_response;
TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &enqueue_request,
&enqueue_response));
EnqueueRequest remote_enqueue_request;
remote_enqueue_request.set_context_id(context_id);
EnqueueResponse remote_enqueue_response;
if (local_inputs) {
TensorProto tensor_proto;
SetTensorProto(&tensor_proto);
AddOperationToEnqueueRequest(
2, function_name, {tensor_proto},
std::unordered_map<string, AttrValue>(),
"/job:localhost/replica:0/task:0/device:CPU:0",
&remote_enqueue_request);
} else {
std::unordered_map<string, AttrValue> const_attrs;
AttrValue val;
val.set_type(tensorflow::DataType::DT_FLOAT);
const_attrs.insert({"dtype", val});
val.Clear();
SetTensorProto(val.mutable_tensor());
const_attrs.insert({"value", val});
AddOperationToEnqueueRequest(
1, "Const", {}, const_attrs,
"/job:localhost/replica:0/task:0/device:CPU:0",
&remote_enqueue_request);
AddOperationToEnqueueRequest(
2, function_name, {std::make_pair(1, 0)},
std::unordered_map<string, AttrValue>(),
"/job:localhost/replica:0/task:0/device:CPU:0",
&remote_enqueue_request);
}
CallOptions call_opts;
Status status;
Notification n;
Env::Default()->SchedClosure([&] {
status = eager_service_impl.Enqueue(&call_opts, &remote_enqueue_request,
&remote_enqueue_response);
n.Notify();
});
if (test_cancel) {
Env::Default()->SleepForMicroseconds(500000);
call_opts.StartCancel();
n.WaitForNotification();
EXPECT_TRUE(absl::IsCancelled(status)) << status.message();
} else {
n.WaitForNotification();
TF_ASSERT_OK(status);
const tensorflow::Tensor* t = nullptr;
tensorflow::TensorHandle* tensor_handle;
TF_ASSERT_OK(eager_service_impl.GetTensorHandle(
context_id, RemoteTensorHandleInternal(2, 0), &tensor_handle));
TF_ASSERT_OK(tensor_handle->Tensor(&t));
auto actual = t->flat<float>();
EXPECT_EQ(4, actual.size());
EXPECT_EQ(7, actual(0));
EXPECT_EQ(10, actual(1));
EXPECT_EQ(15, actual(2));
EXPECT_EQ(22, actual(3));
}
CloseContextRequest close_context_request;
close_context_request.set_context_id(context_id);
close_context_request.set_context_view_id(0);
CloseContextResponse close_context_response;
TF_ASSERT_OK(eager_service_impl.CloseContext(&close_context_request,
&close_context_response));
}
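// Like TestFunction, but executes through the RunComponentFunction endpoint
// instead of a regular Enqueue.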
void TestComponentFunction(const RegisterFunctionOp& register_op,
const string& function_name,
const bool test_cancel) {
TestEagerServiceImpl eager_service_impl(&worker_env_);
uint64 context_id = random::New64();
CreateContextRequest request;
request.mutable_server_def()->set_job_name("localhost");
request.mutable_server_def()->set_task_index(0);
request.set_context_id(context_id);
CreateContextResponse response;
TF_ASSERT_OK(eager_service_impl.CreateContext(&request, &response));
EnqueueRequest enqueue_request;
enqueue_request.set_context_id(context_id);
*enqueue_request.add_queue()->mutable_register_function() = register_op;
EnqueueResponse enqueue_response;
TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &enqueue_request,
&enqueue_response));
EnqueueRequest remote_enqueue_request;
remote_enqueue_request.set_context_id(context_id);
EnqueueResponse remote_enqueue_response;
std::unordered_map<string, AttrValue> const_attrs;
AttrValue val;
val.set_type(tensorflow::DataType::DT_FLOAT);
const_attrs.insert({"dtype", val});
val.Clear();
SetTensorProto(val.mutable_tensor());
const_attrs.insert({"value", val});
AddOperationToEnqueueRequest(1, "Const", {}, const_attrs,
"/job:localhost/replica:0/task:0/device:CPU:0",
&remote_enqueue_request);
TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &remote_enqueue_request,
&remote_enqueue_response));
RunComponentFunctionRequest run_comp_func_request;
run_comp_func_request.set_context_id(context_id);
RunComponentFunctionResponse run_comp_func_response;
const int output_num = 5;
AddOperationToRunComponentFunctionRequest(
2, function_name, {std::make_pair(1, 0)},
std::unordered_map<string, AttrValue>(),
"/job:localhost/replica:0/task:0/device:CPU:0", output_num,
&run_comp_func_request);
CallOptions call_opts;
Notification n;
Status status;
eager_service_impl.RunComponentFunction(&call_opts, &run_comp_func_request,
&run_comp_func_response,
[&status, &n](const Status& s) {
status.Update(s);
n.Notify();
});
if (test_cancel) {
call_opts.StartCancel();
}
n.WaitForNotification();
if (test_cancel) {
EXPECT_TRUE(absl::IsCancelled(status)) << status.message();
} else {
TF_ASSERT_OK(status);
const tensorflow::Tensor* t = nullptr;
tensorflow::TensorHandle* tensor_handle;
TF_ASSERT_OK(eager_service_impl.GetTensorHandle(
context_id, RemoteTensorHandleInternal(2, output_num),
&tensor_handle));
TF_ASSERT_OK(tensor_handle->Tensor(&t));
auto actual = t->flat<float>();
EXPECT_EQ(4, actual.size());
EXPECT_EQ(7, actual(0));
EXPECT_EQ(10, actual(1));
EXPECT_EQ(15, actual(2));
EXPECT_EQ(22, actual(3));
}
CloseContextRequest close_context_request;
close_context_request.set_context_id(context_id);
close_context_request.set_context_view_id(0);
CloseContextResponse close_context_response;
TF_ASSERT_OK(eager_service_impl.CloseContext(&close_context_request,
&close_context_response));
}
};
TEST_F(EagerServiceImplFunctionTest, BasicFunctionTest) {
RegisterFunctionOp register_op;
*register_op.mutable_function_def() = MatMulFunction();
TestFunction(register_op, "MatMulFunction");
}
TEST_F(EagerServiceImplFunctionTest, FunctionWithLocalInputsTest) {
RegisterFunctionOp register_op;
*register_op.mutable_function_def() = MatMulFunction();
TestFunction(register_op, "MatMulFunction", /*local_inputs=*/true);
}
TEST_F(EagerServiceImplFunctionTest, NestedFunctionTest) {
RegisterFunctionOp register_op;
*register_op.mutable_function_def() = MatMulNestedFunction();
*register_op.mutable_library()->add_function() = MatMulFunction();
TestFunction(register_op, "MatMulNestedFunction");
}
TEST_F(EagerServiceImplFunctionTest, FunctionCancellationTest) {
RegisterFunctionOp register_op;
*register_op.mutable_function_def() = SingleRecvNodeFunction();
TestFunction(register_op, "SingleRecvNodeFunction", /*local_inputs=*/false,
/*test_cancel=*/true);
}
TEST_F(EagerServiceImplFunctionTest, ComponentFunctionTest) {
RegisterFunctionOp register_op;
*register_op.mutable_function_def() = MatMulFunction();
register_op.set_is_component_function(true);
TestComponentFunction(register_op, "MatMulFunction", false);
}
TEST_F(EagerServiceImplFunctionTest, ComponentFunctionCancellationTest) {
RegisterFunctionOp register_op;
*register_op.mutable_function_def() = SingleRecvNodeFunction();
register_op.set_is_component_function(true);
TestComponentFunction(register_op, "SingleRecvNodeFunction", true);
}
TEST_F(EagerServiceImplFunctionTest, ComponentNestedFunctionTest) {
RegisterFunctionOp register_op;
*register_op.mutable_function_def() = MatMulNestedFunction();
*register_op.mutable_library()->add_function() = MatMulFunction();
register_op.set_is_component_function(true);
TestComponentFunction(register_op, "MatMulNestedFunction", false);
}
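// Registers two component functions that each carry their own library copy
// of the nested matmul; despite the name clash, each call should resolve
// against its own library, so the two runs below produce different results.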
TEST_F(EagerServiceImplFunctionTest, ComponentNestedFunctionWithNameClashTest) {
TestEagerServiceImpl eager_service_impl(&worker_env_);
uint64 context_id = random::New64();
CreateContextRequest request;
request.mutable_server_def()->set_job_name("localhost");
request.mutable_server_def()->set_task_index(0);
request.set_context_id(context_id);
CreateContextResponse response;
TF_ASSERT_OK(eager_service_impl.CreateContext(&request, &response));
{
EnqueueRequest enqueue_request;
enqueue_request.set_context_id(context_id);
RegisterFunctionOp* register_op =
enqueue_request.add_queue()->mutable_register_function();
*register_op->mutable_function_def() = MatMulNestedFunction();
*register_op->mutable_library()->add_function() = MatMulFunction();
register_op->set_is_component_function(true);
EnqueueResponse enqueue_response;
TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &enqueue_request,
&enqueue_response));
}
{
EnqueueRequest enqueue_request;
enqueue_request.set_context_id(context_id);
RegisterFunctionOp* register_op =
enqueue_request.add_queue()->mutable_register_function();
*register_op->mutable_function_def() = MatMulNestedFunction();
register_op->mutable_function_def()->mutable_signature()->set_name(
"MatMulNestedTransposeFunction");
*register_op->mutable_library()->add_function() = MatMulTransposeFunction();
register_op->set_is_component_function(true);
EnqueueResponse enqueue_response;
TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &enqueue_request,
&enqueue_response));
}
EnqueueRequest remote_enqueue_request;
remote_enqueue_request.set_context_id(context_id);
EnqueueResponse remote_enqueue_response;
std::unordered_map<string, AttrValue> const_attrs;
AttrValue val;
val.set_type(tensorflow::DataType::DT_FLOAT);
const_attrs.insert({"dtype", val});
val.Clear();
SetTensorProto(val.mutable_tensor());
const_attrs.insert({"value", val});
AddOperationToEnqueueRequest(1, "Const", {}, const_attrs,
"/job:localhost/replica:0/task:0/device:CPU:0",
&remote_enqueue_request);
TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &remote_enqueue_request,
&remote_enqueue_response));
{
RunComponentFunctionRequest run_comp_func_request;
run_comp_func_request.set_context_id(context_id);
RunComponentFunctionResponse run_comp_func_response;
const int output_num = 5;
AddOperationToRunComponentFunctionRequest(
2, "MatMulNestedFunction", {std::make_pair(1, 0)},
std::unordered_map<string, AttrValue>(),
"/job:localhost/replica:0/task:0/device:CPU:0", output_num,
&run_comp_func_request);
CallOptions call_opts;
Notification n;
Status status;
eager_service_impl.RunComponentFunction(&call_opts, &run_comp_func_request,
&run_comp_func_response,
[&status, &n](const Status& s) {
status.Update(s);
n.Notify();
});
n.WaitForNotification();
TF_ASSERT_OK(status);
const tensorflow::Tensor* t = nullptr;
tensorflow::TensorHandle* tensor_handle;
TF_ASSERT_OK(eager_service_impl.GetTensorHandle(
context_id, RemoteTensorHandleInternal(2, output_num), &tensor_handle));
TF_ASSERT_OK(tensor_handle->Tensor(&t));
auto actual = t->flat<float>();
EXPECT_EQ(4, actual.size());
EXPECT_EQ(7, actual(0));
EXPECT_EQ(10, actual(1));
EXPECT_EQ(15, actual(2));
EXPECT_EQ(22, actual(3));
}
{
RunComponentFunctionRequest run_comp_func_request;
run_comp_func_request.set_context_id(context_id);
RunComponentFunctionResponse run_comp_func_response;
const int output_num = 5;
AddOperationToRunComponentFunctionRequest(
3, "MatMulNestedTransposeFunction", {std::make_pair(1, 0)},
std::unordered_map<string, AttrValue>(),
"/job:localhost/replica:0/task:0/device:CPU:0", output_num,
&run_comp_func_request);
CallOptions call_opts;
Notification n;
Status status;
eager_service_impl.RunComponentFunction(&call_opts, &run_comp_func_request,
&run_comp_func_response,
[&status, &n](const Status& s) {
status.Update(s);
n.Notify();
});
n.WaitForNotification();
TF_ASSERT_OK(status);
const tensorflow::Tensor* t = nullptr;
tensorflow::TensorHandle* tensor_handle;
TF_ASSERT_OK(eager_service_impl.GetTensorHandle(
context_id, RemoteTensorHandleInternal(3, output_num), &tensor_handle));
TF_ASSERT_OK(tensor_handle->Tensor(&t));
auto actual = t->flat<float>();
EXPECT_EQ(4, actual.size());
EXPECT_EQ(10, actual(0));
EXPECT_EQ(14, actual(1));
EXPECT_EQ(14, actual(2));
EXPECT_EQ(20, actual(3));
}
CloseContextRequest close_context_request;
close_context_request.set_context_id(context_id);
close_context_request.set_context_view_id(0);
CloseContextResponse close_context_response;
TF_ASSERT_OK(eager_service_impl.CloseContext(&close_context_request,
&close_context_response));
}
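// Fixture that runs MatMulFunction targeted at a second (fake remote) task,
// exercising the cluster FLR and remote tensor handle plumbing.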
class FunctionWithRemoteInputsTest : public EagerServiceImplTest {
public:
FunctionWithRemoteInputsTest()
: EagerServiceImplTest(), eager_service_impl_(&worker_env_) {
remote_device_mgr_ = std::make_unique<StaticDeviceMgr>(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:1"));
context_id_ = random::New64();
}
class TestExecuteNodeArgs : public EagerKernelArgs {
public:
TestExecuteNodeArgs(
absl::InlinedVector<TensorValue, 4UL>&& tensor_args,
std::function<Status(const int, eager::RemoteTensorHandle*)>
serialize_remote_handle)
: EagerKernelArgs(std::move(tensor_args)),
serialize_remote_handle_(std::move(serialize_remote_handle)) {}
bool HasRemoteOrPackedInputs() const override { return true; }
Status GetRemoteArg(const FunctionArgIndex& index,
eager::RemoteTensorHandle* val) const override {
return serialize_remote_handle_(index.index, val);
}
private:
std::function<Status(const int, eager::RemoteTensorHandle*)>
serialize_remote_handle_;
};
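// Returns true iff the function's MatMul node carries a "transpose_a"
// attribute; the tests expect instantiation to strip this default value.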
bool MatMulHasAttrWithDefaultValue(const tensorflow::FunctionDef& fdef) {
for (const auto& node : fdef.node_def()) {
if (node.op() == "MatMul") {
return node.attr().find("transpose_a") != node.attr().end();
}
}
return false;
}
void Init() {
CreateContextRequest request;
request.mutable_server_def()->set_job_name("localhost");
request.mutable_server_def()->set_task_index(0);
request.set_context_id(context_id_);
CreateContextResponse response;
TF_ASSERT_OK(eager_service_impl_.CreateContext(&request, &response));
EagerContext* ctx = nullptr;
TF_ASSERT_OK(eager_service_impl_.GetEagerContext(context_id_, &ctx));
Device* device;
TF_ASSERT_OK(ctx->FindDeviceFromName(local_device_.c_str(), &device));
core::RefCountPtr<EagerClient> client;
TF_ASSERT_OK(ctx->GetClient(device, &client));
FakeEagerClient* fake_client = static_cast<FakeEagerClient*>(client.get());
fake_client->SetServiceImpl(&eager_service_impl_);
EnqueueRequest remote_enqueue_request;
remote_enqueue_request.set_context_id(context_id_);
EnqueueResponse remote_enqueue_response;
std::unordered_map<string, AttrValue> const_attrs;
AttrValue val;
val.set_type(tensorflow::DataType::DT_FLOAT);
const_attrs.insert({"dtype", val});
val.Clear();
SetTensorProto(val.mutable_tensor());
const_attrs.insert({"value", val});
AddOperationToEnqueueRequest(1, "Const", {}, const_attrs, local_device_,
&remote_enqueue_request);
TF_EXPECT_OK(eager_service_impl_.Enqueue(nullptr, &remote_enqueue_request,
&remote_enqueue_response));
eager_cluster_flr_ = std::make_unique<EagerClusterFunctionLibraryRuntime>(
context_id_, ctx, device_mgr_.get());
fdef_ = MatMulFunction();
TF_ASSERT_OK(func_lib_def_.AddFunctionDef(fdef_));
eager_pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>(
remote_device_mgr_.get(), Env::Default(),
nullptr, TF_GRAPH_DEF_VERSION, &func_lib_def_, OptimizerOptions(),
nullptr, eager_cluster_flr_.get(),
nullptr,
Rendezvous::Factory{[this](const int64_t step_id,
const DeviceMgr* device_mgr,
tsl::core::RefCountPtr<Rendezvous>* r) {
*r = tsl::core::RefCountPtr<Rendezvous>(
worker_env_.rendezvous_mgr->Find(step_id).release());
return absl::OkStatus();
}});
}
void CheckOutputTensorAndClose(const Tensor& tensor) {
auto actual = tensor.flat<float>();
EXPECT_EQ(4, actual.size());
EXPECT_EQ(7, actual(0));
EXPECT_EQ(10, actual(1));
EXPECT_EQ(15, actual(2));
EXPECT_EQ(22, actual(3));
CloseContextRequest close_context_request;
close_context_request.set_context_id(context_id_);
close_context_request.set_context_view_id(0);
CloseContextResponse close_context_response;
TF_ASSERT_OK(eager_service_impl_.CloseContext(&close_context_request,
&close_context_response));
}
void CheckOutputsAndClose(const std::vector<FunctionRet>& outputs,
const int64_t op_id) {
const tensorflow::Tensor* t = nullptr;
tensorflow::TensorHandle* tensor_handle;
TF_ASSERT_OK(eager_service_impl_.GetTensorHandle(
context_id_, RemoteTensorHandleInternal(op_id, 0), &tensor_handle));
TF_ASSERT_OK(tensor_handle->Tensor(&t));
EXPECT_EQ(outputs.size(), 1);
EXPECT_EQ(outputs.at(0).index(), 1);
const TensorShape& shape = std::get<TensorShape>(outputs.at(0));
EXPECT_EQ(shape, t->shape());
CheckOutputTensorAndClose(*t);
}
protected:
const string local_device_ = "/job:localhost/replica:0/task:0/device:CPU:0";
const string remote_device_ = "/job:localhost/replica:0/task:1/device:CPU:0";
TestEagerServiceImpl eager_service_impl_;
std::unique_ptr<DeviceMgr> remote_device_mgr_;
uint64 context_id_;
tensorflow::FunctionDef fdef_;
std::unique_ptr<ProcessFunctionLibraryRuntime> eager_pflr_;
std::unique_ptr<EagerClusterFunctionLibraryRuntime> eager_cluster_flr_;
FunctionLibraryDefinition func_lib_def_{OpRegistry::Global(),
FunctionDefLibrary()};
};
TEST_F(FunctionWithRemoteInputsTest, EagerPFLRTest) {
Init();
FunctionLibraryRuntime::InstantiateOptions options;
options.target = remote_device_;
options.is_multi_device_function = true;
options.input_devices.push_back(local_device_);
FunctionLibraryRuntime::Handle handle;
EXPECT_TRUE(MatMulHasAttrWithDefaultValue(fdef_));
TF_ASSERT_OK(eager_pflr_->Instantiate(
fdef_.signature().name(), AttrSlice(&fdef_.attr()), options, &handle));
EagerContext* ctx = nullptr;
TF_ASSERT_OK(eager_service_impl_.GetEagerContext(context_id_, &ctx));
for (const string& func_name : ctx->FuncLibDef()->ListFunctionNames()) {
const FunctionDef* fdef = ctx->FuncLibDef()->Find(func_name);
EXPECT_TRUE(fdef != nullptr);
if (absl::StartsWith(func_name, "MatMulFunction")) {
EXPECT_FALSE(MatMulHasAttrWithDefaultValue(*fdef));
}
}
bool is_cross_process = false;
TF_CHECK_OK(eager_pflr_->IsCrossProcess(handle, &is_cross_process));
EXPECT_TRUE(is_cross_process);
FunctionLibraryRuntime::Options opts;
const uint64 op_id = 2;
opts.op_id = op_id;
Notification done;
Status status;
RemoteTensorHandle input;
input.set_op_id(1);
input.set_output_num(0);
input.set_op_device(local_device_);
input.set_device(local_device_);
std::vector<RemoteTensorHandle> inputs = {input};
std::vector<FunctionRet> outputs;
absl::InlinedVector<TensorValue, 4UL> tensor_args = {TensorValue()};
TestExecuteNodeArgs args(
std::move(tensor_args),
[&inputs](const int i, RemoteTensorHandle* handle) -> Status {
*handle = inputs.at(i);
return absl::OkStatus();
});
eager_pflr_->Run(opts, handle, args, &outputs,
[&status, &done](const Status& s) {
status = s;
done.Notify();
});
done.WaitForNotification();
TF_ASSERT_OK(status);
CheckOutputsAndClose(outputs, op_id);
}
TEST_F(FunctionWithRemoteInputsTest,
EagerClusterFLRTestWithLocalInputAndOutput) {
Init();
FunctionLibraryRuntime::Handle handle;
EXPECT_TRUE(MatMulHasAttrWithDefaultValue(fdef_));
Status status;
Notification instantiate_done;
eager_cluster_flr_->Instantiate(
fdef_.signature().name(), func_lib_def_, AttrSlice(&fdef_.attr()),
FunctionLibraryRuntime::InstantiateOptions(), &handle,
[&status, &instantiate_done](const Status& s) {
status = s;
instantiate_done.Notify();
});
instantiate_done.WaitForNotification();
TF_ASSERT_OK(status);
EagerContext* ctx = nullptr;
TF_ASSERT_OK(eager_service_impl_.GetEagerContext(context_id_, &ctx));
for (const string& func_name : ctx->FuncLibDef()->ListFunctionNames()) {
const FunctionDef* fdef = ctx->FuncLibDef()->Find(func_name);
EXPECT_TRUE(fdef != nullptr);
if (absl::StartsWith(func_name, "MatMulFunction")) {
EXPECT_FALSE(MatMulHasAttrWithDefaultValue(*fdef));
}
}
const tensorflow::Tensor* input_tensor = nullptr;
tensorflow::TensorHandle* tensor_handle;
TF_ASSERT_OK(eager_service_impl_.GetTensorHandle(
context_id_, RemoteTensorHandleInternal(1, 0), &tensor_handle));
TF_ASSERT_OK(tensor_handle->Tensor(&input_tensor));
FunctionLibraryRuntime::Options opts;
Notification execute_done;
std::vector<Tensor> inputs = {*input_tensor};
std::vector<Tensor> outputs;
eager_cluster_flr_->Run(opts, handle, inputs, &outputs,
[&status, &execute_done](const Status& s) {
status = s;
execute_done.Notify();
});
execute_done.WaitForNotification();
TF_ASSERT_OK(status);
EXPECT_EQ(outputs.size(), 1);
CheckOutputTensorAndClose(outputs.at(0));
}
TEST_F(FunctionWithRemoteInputsTest, KernelAndDeviceFuncTest) {
Init();
Device* local_device;
TF_ASSERT_OK(device_mgr_->LookupDevice(local_device_, &local_device));
std::vector<Device*> input_dev_ptrs;
input_dev_ptrs.push_back(local_device);
FunctionLibraryRuntime* flr = eager_pflr_->GetFLR(remote_device_);
EagerContext* ctx = nullptr;
TF_ASSERT_OK(eager_service_impl_.GetEagerContext(context_id_, &ctx));
core::RefCountPtr<KernelAndDeviceFunc> kernel = nullptr;
const int64_t op_id = 2;
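// The run of bare literals below fills KernelAndDeviceFunc's option
// parameters; the interesting pieces here are the rendezvous factory and
// the op-id callback, which makes outputs addressable under op_id.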
kernel.reset(new KernelAndDeviceFunc(
flr, eager_pflr_.get(), std::move(input_dev_ptrs),
{}, {},
nullptr,
nullptr, local_device, fdef_.signature().name(),
false,
false,
false,
true,
false,
std::nullopt,
false, ctx->RendezvousFactory(),
[=]() { return op_id; }));
const NodeDef node_def = MatMulFunctionNodeDef();
TF_ASSERT_OK(kernel->InstantiateFunc({}, node_def, nullptr, std::nullopt));
absl::InlinedVector<TensorValue, 4UL> input_tensors = {TensorValue()};
RemoteTensorHandle input;
input.set_op_id(1);
input.set_output_num(0);
input.set_op_device(local_device_);
input.set_device(local_device_);
std::vector<RemoteTensorHandle> remote_handles = {input};
TestExecuteNodeArgs inputs(
std::move(input_tensors),
[&remote_handles](const int index, RemoteTensorHandle* handle) -> Status {
*handle = remote_handles.at(index);
return absl::OkStatus();
});
std::vector<FunctionRet> outputs;
TF_ASSERT_OK(kernel->Run(nullptr, inputs, &outputs,
nullptr,
std::nullopt,
std::nullopt,
nullptr));
CheckOutputsAndClose(outputs, op_id);
}
TEST_F(FunctionWithRemoteInputsTest, KernelAndDeviceFuncAsyncTest) {
Init();
Device* local_device;
TF_ASSERT_OK(device_mgr_->LookupDevice(local_device_, &local_device));
std::vector<Device*> input_dev_ptrs;
input_dev_ptrs.push_back(local_device);
FunctionLibraryRuntime* flr = eager_pflr_->GetFLR(remote_device_);
EagerContext* ctx = nullptr;
TF_ASSERT_OK(eager_service_impl_.GetEagerContext(context_id_, &ctx));
core::RefCountPtr<KernelAndDeviceFunc> kernel = nullptr;
const int64_t op_id = 2;
kernel.reset(new KernelAndDeviceFunc(
flr, eager_pflr_.get(), std::move(input_dev_ptrs),
{}, {},
nullptr,
nullptr, local_device, fdef_.signature().name(),
false,
false,
false,
true,
false,
std::nullopt,
false, ctx->RendezvousFactory(),
[=]() { return op_id; }));
const NodeDef node_def = MatMulFunctionNodeDef();
TF_ASSERT_OK(kernel->InstantiateFunc({}, node_def, nullptr, std::nullopt));
absl::InlinedVector<TensorValue, 4UL> input_tensors = {TensorValue()};
RemoteTensorHandle input;
input.set_op_id(1);
input.set_output_num(0);
input.set_op_device(local_device_);
input.set_device(local_device_);
std::vector<RemoteTensorHandle> remote_handles = {input};
TestExecuteNodeArgs inputs(
std::move(input_tensors),
[&remote_handles](const int index, RemoteTensorHandle* handle) -> Status {
*handle = remote_handles.at(index);
return absl::OkStatus();
});
std::vector<FunctionRet> outputs;
Status status;
Notification n;
kernel->RunAsync(nullptr, inputs, &outputs,
nullptr,
std::nullopt,
nullptr,
[&status, &n](const Status& s) {
status = s;
n.Notify();
});
n.WaitForNotification();
TF_ASSERT_OK(status);
CheckOutputsAndClose(outputs, op_id);
}
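// Sends a tensor with SendTensor, squares it with an enqueued MatMul, and
// checks the result stays on the local host (the handle has no device).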
TEST_F(EagerServiceImplTest, SendTensorTest) {
TestEagerServiceImpl eager_service_impl(&worker_env_);
uint64 context_id = random::New64();
CreateContextRequest request;
request.mutable_server_def()->set_job_name("localhost");
request.mutable_server_def()->set_task_index(0);
request.set_context_id(context_id);
CreateContextResponse response;
TF_ASSERT_OK(eager_service_impl.CreateContext(&request, &response));
EnqueueRequest remote_enqueue_request;
remote_enqueue_request.set_context_id(context_id);
EnqueueResponse remote_enqueue_response;
auto* send_tensor = remote_enqueue_request.add_queue()->mutable_send_tensor();
send_tensor->set_op_id(1);
SetTensorProto(send_tensor->add_tensors());
std::unordered_map<string, AttrValue> attrs;
AttrValue val;
val.Clear();
val.set_type(tensorflow::DataType::DT_FLOAT);
attrs.insert({"T", val});
val.Clear();
val.set_b(false);
attrs.insert({"transpose_a", val});
attrs.insert({"transpose_b", val});
AddOperationToEnqueueRequest(
2, "MatMul", {std::make_pair(1, 0), std::make_pair(1, 0)}, attrs,
"/job:localhost/replica:0/task:0/device:CPU:0", &remote_enqueue_request);
TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &remote_enqueue_request,
&remote_enqueue_response));
const tensorflow::Tensor* t = nullptr;
tensorflow::TensorHandle* tensor_handle;
TF_ASSERT_OK(eager_service_impl.GetTensorHandle(
context_id, RemoteTensorHandleInternal(2, 0), &tensor_handle));
TF_ASSERT_OK(tensor_handle->Tensor(&t));
EXPECT_EQ(tensor_handle->device(), nullptr);
auto actual = t->flat<float>();
EXPECT_EQ(4, actual.size());
EXPECT_EQ(7, actual(0));
EXPECT_EQ(10, actual(1));
EXPECT_EQ(15, actual(2));
EXPECT_EQ(22, actual(3));
CloseContextRequest close_context_request;
close_context_request.set_context_id(context_id);
close_context_request.set_context_view_id(0);
CloseContextResponse close_context_response;
TF_ASSERT_OK(eager_service_impl.CloseContext(&close_context_request,
&close_context_response));
}
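// Packs three components (a handle to the previously sent tensor, an inline
// local tensor, and a remote handle) into one PACKED handle on a composite
// device, then verifies each component.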
TEST_F(EagerServiceImplTest, SendPackedHandleTest) {
TestEagerServiceImpl eager_service_impl(&worker_env_);
const string device0 = "/job:localhost/replica:0/task:0/device:CPU:0";
const string device1 = "/job:localhost/replica:0/task:1/device:CPU:0";
const string device2 = "/job:localhost/replica:0/task:2/device:CPU:0";
const string composite_device =
"/job:localhost/replica:0/task:0/device:COMPOSITE:0";
uint64 context_id = random::New64();
CreateContextRequest request;
auto* server_def = request.mutable_server_def();
server_def->set_job_name("localhost");
server_def->set_task_index(0);
request.add_cluster_device_attributes()->set_name(device0);
request.add_cluster_device_attributes()->set_name(device1);
request.add_cluster_device_attributes()->set_name(device2);
request.set_context_id(context_id);
CreateContextResponse response;
TF_ASSERT_OK(eager_service_impl.CreateContext(&request, &response));
EnqueueRequest remote_enqueue_request;
remote_enqueue_request.set_context_id(context_id);
EnqueueResponse remote_enqueue_response;
auto* send_tensor = remote_enqueue_request.add_queue()->mutable_send_tensor();
send_tensor->set_op_id(1);
SetTensorProto(send_tensor->add_tensors());
auto* send_packed_handle =
remote_enqueue_request.add_queue()->mutable_send_packed_handle();
send_packed_handle->set_op_id(3);
RemoteTensorHandle* remote_handle =
send_packed_handle->add_handles()->mutable_remote_handle();
remote_handle->set_op_id(send_tensor->op_id());
remote_handle->set_output_num(0);
remote_handle->set_op_device(device0);
remote_handle->set_device(device0);
SendPackedHandleOp::LocalTensorHandle* local_handle =
send_packed_handle->add_handles()->mutable_local_handle();
SetTensorProto(local_handle->mutable_tensor());
local_handle->set_device(device1);
remote_handle = send_packed_handle->add_handles()->mutable_remote_handle();
remote_handle->set_op_id(2);
remote_handle->set_output_num(5);
remote_handle->set_op_device(device2);
remote_handle->set_device(device2);
TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &remote_enqueue_request,
&remote_enqueue_response));
tensorflow::TensorHandle* packed_handle;
TF_ASSERT_OK(eager_service_impl.GetTensorHandle(
context_id, RemoteTensorHandleInternal(3, 0), &packed_handle));
EXPECT_EQ(packed_handle->Type(), TensorHandle::PACKED);
EXPECT_EQ(packed_handle->NumPackedHandles(), 3);
EXPECT_EQ(packed_handle->device()->name(), composite_device);
TensorHandle* handle0 = nullptr;
TF_ASSERT_OK(packed_handle->ExtractPackedHandle(0, &handle0));
EXPECT_EQ(handle0->Type(), TensorHandle::LOCAL);
EXPECT_EQ(handle0->op_device()->name(), device0);
const Tensor* t0 = nullptr;
TF_ASSERT_OK(handle0->Tensor(&t0));
auto actual = t0->flat<float>();
EXPECT_EQ(4, actual.size());
EXPECT_EQ(1.0, actual(0));
EXPECT_EQ(2.0, actual(1));
EXPECT_EQ(3.0, actual(2));
EXPECT_EQ(4.0, actual(3));
TensorHandle* handle1 = nullptr;
TF_ASSERT_OK(packed_handle->ExtractPackedHandle(1, &handle1));
EXPECT_EQ(handle1->Type(), TensorHandle::LOCAL);
EXPECT_EQ(handle1->op_device()->name(), device1);
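// Reading handle0's tensor again should return the same Tensor pointer.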
const Tensor* t1 = nullptr;
TF_ASSERT_OK(handle0->Tensor(&t1));
EXPECT_EQ(t1, t0);
TensorHandle* handle2 = nullptr;
TF_ASSERT_OK(packed_handle->ExtractPackedHandle(2, &handle2));
EXPECT_EQ(handle2->Type(), TensorHandle::REMOTE);
EXPECT_EQ(handle2->op_device()->name(), device2);
int64_t op_id;
int32_t output_num;
TF_ASSERT_OK(handle2->RemoteAddress(handle2->device(),
/*wait_until_ready=*/true, &op_id,
&output_num));
EXPECT_EQ(op_id, 2);
EXPECT_EQ(output_num, 5);
CloseContextRequest close_context_request;
close_context_request.set_context_id(context_id);
close_context_request.set_context_view_id(0);
CloseContextResponse close_context_response;
TF_ASSERT_OK(eager_service_impl.CloseContext(&close_context_request,
&close_context_response));
}
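// Enqueue against an unknown context id must abort until the master context
// is registered with the service.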
TEST_F(EagerServiceImplTest, RequestsToMasterTest) {
tsl::core::RefCountPtr<tensorflow::Rendezvous> rendezvous =
tsl::core::RefCountPtr<tensorflow::Rendezvous>(
new tensorflow::IntraProcessRendezvous(device_mgr_.get()));
tensorflow::EagerContext* ctx = new tensorflow::EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
/*async=*/false, device_mgr_.get(), /*device_mgr_owned=*/false,
std::move(rendezvous), nullptr,
nullptr,
true);
const uint64 context_id = random::New64();
auto remote_mgr =
std::make_unique<tensorflow::eager::RemoteMgr>(/*is_master=*/true, ctx);
TF_ASSERT_OK(ctx->InitializeRemoteWorker(
nullptr, nullptr,
{}, context_id, 0,
nullptr,
nullptr, std::move(remote_mgr),
nullptr));
TestEagerServiceImpl eager_service_impl(&worker_env_);
EnqueueRequest remote_enqueue_request;
remote_enqueue_request.set_context_id(context_id);
EnqueueResponse remote_enqueue_response;
auto* send_tensor = remote_enqueue_request.add_queue()->mutable_send_tensor();
send_tensor->set_op_id(1);
SetTensorProto(send_tensor->add_tensors());
Status status = eager_service_impl.Enqueue(nullptr, &remote_enqueue_request,
&remote_enqueue_response);
EXPECT_EQ(error::ABORTED, status.code());
EXPECT_TRUE(absl::StrContains(
status.message(),
"Unable to find a context_id matching the specified one"));
TF_ASSERT_OK(eager_service_impl.CreateMasterContext(context_id, ctx));
TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &remote_enqueue_request,
&remote_enqueue_response));
ctx->Unref();
}
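// A context should expire after keep_alive_secs of inactivity, while a
// fresh context still accepts KeepAlive inside its window.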
TEST_F(EagerServiceImplTest, KeepAliveTest) {
TestEagerServiceImpl eager_service_impl(&worker_env_);
uint64 context_id = random::New64();
CreateContextRequest request;
request.mutable_server_def()->set_job_name("localhost");
request.mutable_server_def()->set_task_index(0);
request.set_context_id(context_id);
request.set_keep_alive_secs(3);
CreateContextResponse response;
TF_ASSERT_OK(eager_service_impl.CreateContext(&request, &response));
worker_env_.env->SleepForMicroseconds(5 *
tensorflow::EnvTime::kSecondsToMicros);
KeepAliveRequest keep_alive_request;
KeepAliveResponse keep_alive_response;
keep_alive_request.set_context_id(context_id);
Status status =
eager_service_impl.KeepAlive(&keep_alive_request, &keep_alive_response);
EXPECT_EQ(status.code(), error::ABORTED);
EXPECT_PRED_FORMAT2(::testing::IsSubstring, "Unable to find a context_id",
std::string(status.message()));
uint64 new_context_id = random::New64();
request.set_context_id(new_context_id);
TF_ASSERT_OK(eager_service_impl.CreateContext(&request, &response));
worker_env_.env->SleepForMicroseconds(1 *
tensorflow::EnvTime::kSecondsToMicros);
keep_alive_request.set_context_id(new_context_id);
TF_ASSERT_OK(
eager_service_impl.KeepAlive(&keep_alive_request, &keep_alive_response));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/eager/eager_service_impl.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/eager/eager_service_impl_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1f3c6aef-7f7d-42a7-8cfa-9656e271f9f0 | cpp | tensorflow/tensorflow | remote_mgr | tensorflow/core/distributed_runtime/eager/remote_mgr.cc | tensorflow/core/distributed_runtime/eager/remote_mgr_test.cc | #include "tensorflow/core/distributed_runtime/eager/remote_mgr.h"
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/distributed_runtime/eager/remote_tensor_handle.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
namespace {
Status WithErrorSourcePayload(Status error) {
core::platform::ErrorSourceProto error_source_proto;
error_source_proto.set_error_source(
core::platform::ErrorSourceProto::EAGER_REMOTE_MGR);
error.SetPayload(tensorflow::kErrorSource,
absl::Cord(error_source_proto.SerializeAsString()));
return error;
}
}
namespace eager {
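// Records every output handle of an operation under (operation_id, i) so
// later requests can address them as remote tensor handles.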
void RemoteMgr::AddOperationOutputs(
const absl::Span<tensorflow::TensorHandle* const> handles,
int64_t operation_id) {
mutex_lock l(remote_tensor_handle_mu_);
for (int i = 0, end = handles.size(); i < end; i++) {
remote_tensor_handle_map_.emplace(
RemoteTensorHandleInternal(operation_id, i), handles[i]);
}
}
void RemoteMgr::AddOperationOutput(tensorflow::TensorHandle* handle,
int64_t operation_id, int32_t output_num) {
mutex_lock l(remote_tensor_handle_mu_);
remote_tensor_handle_map_.emplace(
RemoteTensorHandleInternal(operation_id, output_num), handle);
}
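// Shared lookup; the caller must hold remote_tensor_handle_mu_.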
Status RemoteMgr::GetTensorHandleImpl(
const RemoteTensorHandleInternal& remote_handle,
tensorflow::TensorHandle** handle) {
auto iter = remote_tensor_handle_map_.find(remote_handle);
if (iter == remote_tensor_handle_map_.end()) {
std::string error_message = absl::StrCat(
"Unable to find the relevant tensor remote_handle: Op ID: ",
remote_handle.op_id, ", Output num: ", remote_handle.output_num,
". One possible cause is that the tensor was accessed after "
"deallocation in a distributed worker setup.");
bool result;
TF_CHECK_OK(ReadBoolFromEnvVar("TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE",
true, &result));
if (result) {
std::string error_message_ext;
absl::StrAppend(
&error_message_ext, error_message,
" Try setting "
"`os.environ['TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE']='False'` in "
"your client to disable async streaming behavior to see if it fixes "
"the problem.");
return WithErrorSourcePayload(
absl::InvalidArgumentError(error_message_ext));
}
return WithErrorSourcePayload(absl::InvalidArgumentError(error_message));
}
*handle = iter->second;
return absl::OkStatus();
}
Status RemoteMgr::GetTensorHandle(
const RemoteTensorHandleInternal& remote_handle,
tensorflow::TensorHandle** handle) {
tf_shared_lock l(remote_tensor_handle_mu_);
return GetTensorHandleImpl(remote_handle, handle);
}
Status RemoteMgr::GetMirroredResourceShape(
const RemoteTensorHandleInternal& remote_handle,
std::vector<DtypeAndPartialTensorShape>* handle) {
tf_shared_lock l(mirrored_resource_shape_mu_);
auto iter = mirrored_resource_shape_map_.find(remote_handle);
if (iter == mirrored_resource_shape_map_.end()) {
return WithErrorSourcePayload(errors::InvalidArgument(
"Unable to find the relevant tensor remote_handle: Op ID: ",
remote_handle.op_id, ", Output num: ", remote_handle.output_num,
". One possible cause is that the tensor was accessed after "
"deallocation in a distributed worker setup. Try setting "
"`os.environ['TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE']='False'` in "
"your client to disable async streaming behavior to see if it fixes "
"the problem."));
}
*handle = iter->second;
return absl::OkStatus();
}
Status RemoteMgr::GetRemoteTensorHandle(const tensorflow::TensorHandle* handle,
const bool wait_until_ready,
int64_t* op_id, int32* output_num) {
TF_RETURN_IF_ERROR(handle->RemoteAddress(handle->device(), wait_until_ready,
op_id, output_num));
tensorflow::TensorHandle* h;
TF_RETURN_IF_ERROR(
GetTensorHandleImpl(RemoteTensorHandleInternal(*op_id, *output_num), &h));
if (handle != h) {
return WithErrorSourcePayload(errors::Internal(
"Found two different tensor handles with the same op_id:", *op_id,
" and output_num:", *output_num));
}
return absl::OkStatus();
}
Status RemoteMgr::DeleteTensorHandle(
const RemoteTensorHandleInternal& remote_handle) {
{
mutex_lock l(remote_tensor_handle_mu_);
auto iter = remote_tensor_handle_map_.find(remote_handle);
if (iter != remote_tensor_handle_map_.end()) {
iter->second->Unref();
remote_tensor_handle_map_.erase(iter);
return absl::OkStatus();
}
}
{
mutex_lock l(mirrored_resource_shape_mu_);
auto iter = mirrored_resource_shape_map_.find(remote_handle);
if (iter != mirrored_resource_shape_map_.end()) {
mirrored_resource_shape_map_.erase(iter);
return absl::OkStatus();
}
}
return WithErrorSourcePayload(errors::InvalidArgument(
"Unable to find the relevant tensor remote_handle: Op ID: ",
remote_handle.op_id, ", Output num: ", remote_handle.output_num));
}
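// Writes the remote address of `in` into `out`; if the handle has no
// address on `device`, falls back to the handles this RemoteMgr owns.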
Status RemoteMgr::SerializeRemoteTensorHandle(
TensorHandle* in, const bool wait_until_ready, RemoteTensorHandle* out,
Device* device, absl::string_view device_name,
const bool serialize_resource_dtype_and_shape) {
int64_t op_id;
int32_t output_num;
auto status =
in->RemoteAddress(device, wait_until_ready, &op_id, &output_num);
if (!status.ok()) {
LOG(ERROR)
<< "Failed to get remote address for tensor handle with given device "
<< device->name() << " error " << status.message();
tf_shared_lock l(remote_tensor_handle_mu_);
TF_RETURN_IF_ERROR(
GetRemoteTensorHandle(in, wait_until_ready, &op_id, &output_num));
}
out->Clear();
out->set_op_id(op_id);
out->set_output_num(output_num);
out->set_op_device(in->op_device() ? in->op_device()->name() : "");
out->set_device(device_name.empty()
? std::string(in->DeviceOrHostCPU(*parent_)->name())
: std::string(device_name));
out->set_dtype(in->dtype);
if (serialize_resource_dtype_and_shape) {
std::vector<DtypeAndPartialTensorShape> resource_dtypes_and_shapes;
TF_RETURN_IF_ERROR(
in->GetResourceHandleDtypesAndShapes(&resource_dtypes_and_shapes));
for (const auto& dtype_and_shape : resource_dtypes_and_shapes) {
ResourceDtypeAndShape* dtype_and_shape_proto =
out->add_resource_dtypes_and_shapes();
dtype_and_shape_proto->set_dtype(dtype_and_shape.dtype);
dtype_and_shape.shape.AsProto(dtype_and_shape_proto->mutable_shape());
}
}
return absl::OkStatus();
}
Status RemoteMgr::DeserializeRemoteTensorHandle(const RemoteTensorHandle& in,
TensorHandle** out) {
Device* device;
if (parent_->local_device_mgr()->LookupDevice(in.op_device(), &device).ok() ||
parent_->local_device_mgr()->LookupDevice(in.device(), &device).ok()) {
TF_RETURN_IF_ERROR(GetTensorHandle(RemoteTensorHandleInternal(in), out));
(*out)->Ref();
} else {
const string& device_name =
in.op_device().empty() ? in.device() : in.op_device();
TF_RETURN_IF_ERROR(
parent_->FindDeviceFromName(device_name.c_str(), &device));
*out = TensorHandle::CreateLazyRemoteHandle(in.op_id(), in.output_num(),
in.dtype(), device,
true, parent_);
std::vector<DtypeAndPartialTensorShape> dtypes_and_shapes;
if (!GetMirroredResourceShape(RemoteTensorHandleInternal(in),
&dtypes_and_shapes)
.ok()) {
for (const auto& dtype_and_shape_proto :
in.resource_dtypes_and_shapes()) {
dtypes_and_shapes.push_back(DtypeAndPartialTensorShape{
dtype_and_shape_proto.dtype(),
TensorShape(dtype_and_shape_proto.shape())});
}
mutex_lock l(mirrored_resource_shape_mu_);
mirrored_resource_shape_map_.emplace(
RemoteTensorHandleInternal(in.op_id(), in.output_num()),
dtypes_and_shapes);
}
(*out)->SetResourceHandleDtypeAndShape(std::move(dtypes_and_shapes));
}
return absl::OkStatus();
}
EagerExecutor& RemoteMgr::GetOrCreateExecutorForStream(uint64 stream_id) {
mutex_lock l(executor_map_mu_);
auto it = executor_map_.find(stream_id);
if (it == executor_map_.end()) {
auto it_and_bool = executor_map_.emplace(
std::piecewise_construct, std::forward_as_tuple(stream_id),
std::forward_as_tuple(true));
DCHECK(it_and_bool.second);
it = it_and_bool.first;
}
return it->second;
}
void RemoteMgr::DeleteExecutorForStream(uint64 stream_id) {
mutex_lock l(executor_map_mu_);
auto it = executor_map_.find(stream_id);
if (it == executor_map_.end()) {
return;
}
Status s = it->second.ShutDown();
if (!s.ok()) {
LOG(ERROR) << "EagerExecutor shutdown with error " << s.message();
}
executor_map_.erase(it);
}
}
} | #include "tensorflow/core/distributed_runtime/eager/remote_mgr.h"
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/remote_tensor_handle.pb.h"
namespace tensorflow {
namespace eager {
namespace {
class TestRemoteMgr : public RemoteMgr {
public:
TestRemoteMgr(bool is_master, EagerContext* ctx)
: RemoteMgr(is_master, ctx) {}
uint64 OpId() {
tf_shared_lock l(next_id_mutex_);
return next_op_id_;
}
};
class RemoteMgrTest : public ::testing::Test {
public:
RemoteMgrTest() {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
local_device_ = devices.back().get();
devices.push_back(
DeviceFactory::NewDevice("CPU", {}, "/job:worker/replica:0/task:0"));
remote_device_ = devices.back().get();
auto device_mgr = std::make_unique<StaticDeviceMgr>(std::move(devices));
auto rendezvous = tsl::core::RefCountPtr<tensorflow::Rendezvous>(
new tensorflow::IntraProcessRendezvous(device_mgr.get()));
ctx_ = new tensorflow::EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
/*async=*/false, device_mgr.release(), /*device_mgr_owned=*/true,
std::move(rendezvous),
nullptr, nullptr, true);
}
~RemoteMgrTest() override { ctx_->Unref(); }
Device* local_device_;
Device* remote_device_;
EagerContext* ctx_;
};
TEST_F(RemoteMgrTest, SerializeLocalTensorHandleWithRemoteMirror) {
RemoteMgr remote_mgr(/*is_master=*/false, ctx_);
const TensorShape shape({0});
Tensor t(DT_FLOAT, shape);
TensorHandle* handle = TensorHandle::CreateLocalHandle(
std::move(t), local_device_, local_device_, ctx_);
const uint64 op_id = 2;
const int output_num = 3;
TF_ASSERT_OK(handle->AddUnshapedRemoteMirror(remote_device_, op_id,
output_num, /*remote_task=*/"", ctx_));
TF_ASSERT_OK(
handle->SetRemoteShape(shape, remote_device_, ctx_->GetContextViewId()));
RemoteTensorHandle remote_handle;
TF_ASSERT_OK(remote_mgr.SerializeRemoteTensorHandle(
handle, /*wait_until_ready=*/true, &remote_handle, remote_device_,
remote_device_->name()));
EXPECT_EQ(op_id, remote_handle.op_id());
EXPECT_EQ(output_num, remote_handle.output_num());
EXPECT_EQ(remote_device_->name(), remote_handle.device());
handle->Unref();
}
TEST_F(RemoteMgrTest, SerializeRemoteTensorHandle) {
RemoteMgr remote_mgr(/*is_master=*/false, ctx_);
const uint64 op_id = 3;
const int output_num = 1;
TensorHandle* handle = TensorHandle::CreateLazyRemoteHandle(
op_id, output_num, DT_FLOAT, remote_device_, true, ctx_);
RemoteTensorHandle remote_handle;
TF_ASSERT_OK(remote_mgr.SerializeRemoteTensorHandle(
handle, /*wait_until_ready=*/true, &remote_handle, remote_device_));
EXPECT_EQ(op_id, remote_handle.op_id());
EXPECT_EQ(output_num, remote_handle.output_num());
EXPECT_EQ(remote_device_->name(), remote_handle.device());
handle->Unref();
}
TEST_F(RemoteMgrTest, InvalidateRemoteMirrorWithClusterUpdate) {
RemoteMgr remote_mgr(/*is_master=*/false, ctx_);
Tensor t(DT_FLOAT, TensorShape({0}));
TensorHandle* handle = TensorHandle::CreateLocalHandle(
std::move(t), local_device_, local_device_, ctx_);
const uint64 op_id = 2;
const int output_num = 3;
TF_ASSERT_OK(handle->AddUnshapedRemoteMirror(remote_device_, op_id,
output_num, /*remote_task=*/"", ctx_));
EXPECT_TRUE(
handle->HasRemoteMirror(remote_device_, ctx_->GetContextViewId()));
ctx_->IncrementContextViewId();
EXPECT_FALSE(
handle->HasRemoteMirror(remote_device_, ctx_->GetContextViewId()));
EXPECT_FALSE(handle
->SetRemoteShape(TensorShape({0}), remote_device_,
ctx_->GetContextViewId())
.ok());
handle->Unref();
}
TEST_F(RemoteMgrTest, SetRemoteShapeWithClusterUpdate) {
RemoteMgr remote_mgr(/*is_master=*/false, ctx_);
const uint64 op_id = 3;
const int output_num = 1;
TensorHandle* handle = TensorHandle::CreateUnshapedRemoteHandle(
op_id, output_num,
/*remote_task=*/"", DT_FLOAT, remote_device_, ctx_);
TF_ASSERT_OK(handle->SetRemoteShape(TensorShape({0}), remote_device_,
ctx_->GetContextViewId()));
handle->Unref();
handle = TensorHandle::CreateUnshapedRemoteHandle(
op_id, output_num,
/*remote_task=*/"", DT_FLOAT, remote_device_, ctx_);
ctx_->IncrementContextViewId();
TF_ASSERT_OK(handle->SetRemoteShape(TensorShape({0}), remote_device_,
ctx_->GetContextViewId()));
handle->Unref();
}
TEST_F(RemoteMgrTest, ErrorSourcesShouldExist) {
RemoteMgr remote_mgr(/*is_master=*/false, ctx_);
const uint64 op_id = 3;
const int output_num = 1;
TensorHandle* handle = TensorHandle::CreateLazyRemoteHandle(
op_id, output_num, DT_FLOAT, remote_device_, true, ctx_);
RemoteTensorHandle remote_handle;
remote_mgr.AddOperationOutput(handle, op_id, output_num);
TF_ASSERT_OK(remote_mgr.SerializeRemoteTensorHandle(
handle, /*wait_until_ready=*/true, &remote_handle, remote_device_));
auto remote_handle_internal = RemoteTensorHandleInternal(remote_handle);
TF_ASSERT_OK(remote_mgr.DeleteTensorHandle(remote_handle_internal));
Status s = remote_mgr.DeleteTensorHandle(remote_handle_internal);
EXPECT_FALSE(s.ok());
EXPECT_TRUE(s.GetPayload(kErrorSource).has_value());
TensorHandle* out;
s = remote_mgr.GetTensorHandle(remote_handle_internal, &out);
EXPECT_FALSE(s.ok());
EXPECT_TRUE(s.GetPayload(kErrorSource).has_value());
s = remote_mgr.DeserializeRemoteTensorHandle(remote_handle, &out);
EXPECT_FALSE(s.ok());
EXPECT_TRUE(s.GetPayload(kErrorSource).has_value());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/eager/remote_mgr.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/eager/remote_mgr_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d7a0dd09-19cd-4e14-ba62-dc1cebe0e8e2 | cpp | tensorflow/tensorflow | schema | tensorflow/core/summary/schema.cc | tensorflow/core/summary/schema_test.cc | #include "tensorflow/core/summary/schema.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace {
Status Run(Sqlite* db, const char* sql) {
SqliteStatement stmt;
TF_RETURN_IF_ERROR(db->Prepare(sql, &stmt));
return stmt.StepAndReset();
}
}
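// Creates all tables and indexes idempotently. Statements run independently
// and `s` keeps the first error while the rest still execute.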
Status SetupTensorboardSqliteDb(Sqlite* db) {
TF_RETURN_IF_ERROR(
db->PrepareOrDie(strings::StrCat("PRAGMA application_id=",
kTensorboardSqliteApplicationId))
.StepAndReset());
db->PrepareOrDie("PRAGMA user_version=0").StepAndResetOrDie();
Status s;
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Ids (
id INTEGER PRIMARY KEY
)
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Descriptions (
id INTEGER PRIMARY KEY,
description TEXT
)
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Tensors (
rowid INTEGER PRIMARY KEY,
series INTEGER,
step INTEGER,
dtype INTEGER,
computed_time REAL,
shape TEXT,
data BLOB
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS
TensorSeriesStepIndex
ON
Tensors (series, step)
WHERE
series IS NOT NULL
AND step IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS TensorStrings (
rowid INTEGER PRIMARY KEY,
tensor_rowid INTEGER NOT NULL,
idx INTEGER NOT NULL,
data BLOB
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS TensorStringIndex
ON TensorStrings (tensor_rowid, idx)
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Tags (
rowid INTEGER PRIMARY KEY,
run_id INTEGER,
tag_id INTEGER NOT NULL,
inserted_time DOUBLE,
tag_name TEXT,
display_name TEXT,
plugin_name TEXT,
plugin_data BLOB
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS TagIdIndex
ON Tags (tag_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS
TagRunNameIndex
ON
Tags (run_id, tag_name)
WHERE
run_id IS NOT NULL
AND tag_name IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Runs (
rowid INTEGER PRIMARY KEY,
experiment_id INTEGER,
run_id INTEGER NOT NULL,
inserted_time REAL,
started_time REAL,
finished_time REAL,
run_name TEXT
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS RunIdIndex
ON Runs (run_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS RunNameIndex
ON Runs (experiment_id, run_name)
WHERE run_name IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Experiments (
rowid INTEGER PRIMARY KEY,
user_id INTEGER,
experiment_id INTEGER NOT NULL,
inserted_time REAL,
started_time REAL,
is_watching INTEGER,
experiment_name TEXT
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS ExperimentIdIndex
ON Experiments (experiment_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS ExperimentNameIndex
ON Experiments (user_id, experiment_name)
WHERE experiment_name IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Users (
rowid INTEGER PRIMARY KEY,
user_id INTEGER NOT NULL,
inserted_time REAL,
user_name TEXT,
email TEXT
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS UserIdIndex
ON Users (user_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS UserNameIndex
ON Users (user_name)
WHERE user_name IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS UserEmailIndex
ON Users (email)
WHERE email IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Graphs (
rowid INTEGER PRIMARY KEY,
run_id INTEGER,
graph_id INTEGER NOT NULL,
inserted_time REAL,
graph_def BLOB
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS GraphIdIndex
ON Graphs (graph_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS GraphRunIndex
ON Graphs (run_id)
WHERE run_id IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Nodes (
rowid INTEGER PRIMARY KEY,
graph_id INTEGER NOT NULL,
node_id INTEGER NOT NULL,
node_name TEXT,
op TEXT,
device TEXT,
node_def BLOB
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS NodeIdIndex
ON Nodes (graph_id, node_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS NodeNameIndex
ON Nodes (graph_id, node_name)
WHERE node_name IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS NodeInputs (
rowid INTEGER PRIMARY KEY,
graph_id INTEGER NOT NULL,
node_id INTEGER NOT NULL,
idx INTEGER NOT NULL,
input_node_id INTEGER NOT NULL,
input_node_idx INTEGER,
is_control INTEGER
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS NodeInputsIndex
ON NodeInputs (graph_id, node_id, idx)
)sql"));
return s;
}
} | #include "tensorflow/core/summary/schema.h"
#include <memory>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(SchemaTest, SmokeTestTensorboardSchema) {
Sqlite* db;
TF_ASSERT_OK(Sqlite::Open(":memory:", SQLITE_OPEN_READWRITE, &db));
core::ScopedUnref unref_db(db);
TF_ASSERT_OK(SetupTensorboardSqliteDb(db));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/summary/schema.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/summary/schema_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
64afe01c-f225-4810-a1ae-f62d116283b3 | cpp | tensorflow/tensorflow | summary_db_writer | tensorflow/core/summary/summary_db_writer.cc | tensorflow/core/summary/summary_db_writer_test.cc | #include "tensorflow/core/summary/summary_db_writer.h"
#include <deque>
#include "tensorflow/core/summary/summary_converter.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/db/sqlite.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/util/event.pb.h"
#define CALL_SUPPORTED_TYPES(m) \
TF_CALL_tstring(m) \
TF_CALL_half(m) \
TF_CALL_float(m) \
TF_CALL_double(m) \
TF_CALL_complex64(m) \
TF_CALL_complex128(m) \
TF_CALL_int8(m) \
TF_CALL_int16(m) \
TF_CALL_int32(m) \
TF_CALL_int64(m) \
TF_CALL_uint8(m) \
TF_CALL_uint16(m) \
TF_CALL_uint32(m) \
TF_CALL_uint64(m)
namespace tensorflow {
namespace {
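// Random ID space is widened through these tiers (23, 31, then 47 usable
// bits) as insert collisions are observed.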
const uint64 kIdTiers[] = {
0x7fffffULL,
0x7fffffffULL,
0x7fffffffffffULL,
};
const int kMaxIdTier = sizeof(kIdTiers) / sizeof(uint64) - 1;
const int kIdCollisionDelayMicros = 10;
const int kMaxIdCollisions = 21;
const int64_t kAbsent = 0LL;
const char* kScalarPluginName = "scalars";
const char* kImagePluginName = "images";
const char* kAudioPluginName = "audio";
const char* kHistogramPluginName = "histograms";
const int64_t kReserveMinBytes = 32;
const double kReserveMultiplier = 1.5;
const int64_t kPreallocateRows = 1000;
const uint64 kFlushBytes = 1024 * 1024;
double DoubleTime(uint64 micros) {
return static_cast<double>(micros) / 1.0e6;
}
string StringifyShape(const TensorShape& shape) {
string result;
bool first = true;
for (const auto& dim : shape) {
if (first) {
first = false;
} else {
strings::StrAppend(&result, ",");
}
strings::StrAppend(&result, dim.size);
}
return result;
}
Status CheckSupportedType(const Tensor& t) {
#define CASE(T) \
case DataTypeToEnum<T>::value: \
break;
switch (t.dtype()) {
CALL_SUPPORTED_TYPES(CASE)
default:
return errors::Unimplemented(DataTypeString(t.dtype()),
" tensors unsupported on platform");
}
return absl::OkStatus();
#undef CASE
}
Tensor AsScalar(const Tensor& t) {
Tensor t2{t.dtype(), {}};
#define CASE(T) \
case DataTypeToEnum<T>::value: \
t2.scalar<T>()() = t.flat<T>()(0); \
break;
switch (t.dtype()) {
CALL_SUPPORTED_TYPES(CASE)
default:
t2 = {DT_FLOAT, {}};
t2.scalar<float>()() = NAN;
break;
}
return t2;
#undef CASE
}
void PatchPluginName(SummaryMetadata* metadata, const char* name) {
if (metadata->plugin_data().plugin_name().empty()) {
metadata->mutable_plugin_data()->set_plugin_name(name);
}
}
Status SetDescription(Sqlite* db, int64_t id, const StringPiece& markdown) {
const char* sql = R"sql(
INSERT OR REPLACE INTO Descriptions (id, description) VALUES (?, ?)
)sql";
SqliteStatement insert_desc;
TF_RETURN_IF_ERROR(db->Prepare(sql, &insert_desc));
insert_desc.BindInt(1, id);
insert_desc.BindText(2, markdown);
return insert_desc.StepAndReset();
}
class IdAllocator {
public:
IdAllocator(Env* env, Sqlite* db) : env_{env}, db_{db} {
DCHECK(env_ != nullptr);
DCHECK(db_ != nullptr);
}
Status CreateNewId(int64_t* id) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
Status s;
SqliteStatement stmt;
TF_RETURN_IF_ERROR(db_->Prepare("INSERT INTO Ids (id) VALUES (?)", &stmt));
for (int i = 0; i < kMaxIdCollisions; ++i) {
int64_t tid = MakeRandomId();
stmt.BindInt(1, tid);
s = stmt.StepAndReset();
if (s.ok()) {
*id = tid;
break;
}
if (s.code() != error::INVALID_ARGUMENT) break;
if (tier_ < kMaxIdTier) {
LOG(INFO) << "IdAllocator collision at tier " << tier_ << " (of "
<< kMaxIdTier << ") so auto-adjusting to a higher tier";
++tier_;
} else {
LOG(WARNING) << "IdAllocator (attempt #" << i << ") "
<< "resulted in a collision at the highest tier; this "
"is problematic if it happens often; you can try "
"pruning the Ids table; you can also file a bug "
"asking for the ID space to be increased; otherwise "
"writes will gradually slow down over time until they "
"become impossible";
}
env_->SleepForMicroseconds((1 << i) * kIdCollisionDelayMicros);
}
return s;
}
private:
int64_t MakeRandomId() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
int64_t id = static_cast<int64_t>(random::New64() & kIdTiers[tier_]);
if (id == kAbsent) ++id;
return id;
}
mutex mu_;
Env* const env_;
Sqlite* const db_;
int tier_ TF_GUARDED_BY(mu_) = 0;
IdAllocator(const IdAllocator&) = delete;
void operator=(const IdAllocator&) = delete;
};
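// Writes a GraphDef into the Graphs, Nodes, and NodeInputs tables,
// committing the transaction in chunks so it never grows unbounded.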
class GraphWriter {
public:
static Status Save(Sqlite* db, SqliteTransaction* txn, IdAllocator* ids,
GraphDef* graph, uint64 now, int64_t run_id,
int64_t* graph_id)
SQLITE_EXCLUSIVE_TRANSACTIONS_REQUIRED(*db) {
TF_RETURN_IF_ERROR(ids->CreateNewId(graph_id));
GraphWriter saver{db, txn, graph, now, *graph_id};
saver.MapNameToNodeId();
TF_RETURN_WITH_CONTEXT_IF_ERROR(saver.SaveNodeInputs(), "SaveNodeInputs");
TF_RETURN_WITH_CONTEXT_IF_ERROR(saver.SaveNodes(), "SaveNodes");
TF_RETURN_WITH_CONTEXT_IF_ERROR(saver.SaveGraph(run_id), "SaveGraph");
return absl::OkStatus();
}
private:
GraphWriter(Sqlite* db, SqliteTransaction* txn, GraphDef* graph, uint64 now,
int64_t graph_id)
: db_(db), txn_(txn), graph_(graph), now_(now), graph_id_(graph_id) {}
void MapNameToNodeId() {
const size_t node_count = static_cast<size_t>(graph_->node_size());
name_copies_.reserve(node_count);
name_to_node_id_.reserve(node_count);
for (int node_id = 0; node_id < graph_->node_size(); ++node_id) {
name_copies_.emplace_back(graph_->node(node_id).name());
name_to_node_id_.emplace(name_copies_.back(), node_id);
}
}
Status SaveNodeInputs() {
const char* sql = R"sql(
INSERT INTO NodeInputs (
graph_id,
node_id,
idx,
input_node_id,
input_node_idx,
is_control
) VALUES (?, ?, ?, ?, ?, ?)
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db_->Prepare(sql, &insert));
for (int node_id = 0; node_id < graph_->node_size(); ++node_id) {
const NodeDef& node = graph_->node(node_id);
for (int idx = 0; idx < node.input_size(); ++idx) {
StringPiece name = node.input(idx);
int64_t input_node_id;
int64_t input_node_idx = 0;
int64_t is_control = 0;
size_t i = name.rfind(':');
if (i != StringPiece::npos) {
if (!strings::safe_strto64(name.substr(i + 1, name.size() - i - 1),
&input_node_idx)) {
return errors::DataLoss("Bad NodeDef.input: ", name);
}
name.remove_suffix(name.size() - i);
}
if (!name.empty() && name[0] == '^') {
name.remove_prefix(1);
is_control = 1;
}
auto e = name_to_node_id_.find(name);
if (e == name_to_node_id_.end()) {
return errors::DataLoss("Could not find node: ", name);
}
input_node_id = e->second;
insert.BindInt(1, graph_id_);
insert.BindInt(2, node_id);
insert.BindInt(3, idx);
insert.BindInt(4, input_node_id);
insert.BindInt(5, input_node_idx);
insert.BindInt(6, is_control);
unflushed_bytes_ += insert.size();
TF_RETURN_WITH_CONTEXT_IF_ERROR(insert.StepAndReset(), node.name(),
" -> ", name);
TF_RETURN_IF_ERROR(MaybeFlush());
}
}
return absl::OkStatus();
}
Status SaveNodes() {
const char* sql = R"sql(
INSERT INTO Nodes (
graph_id,
node_id,
node_name,
op,
device,
node_def)
VALUES (?, ?, ?, ?, ?, ?)
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db_->Prepare(sql, &insert));
for (int node_id = 0; node_id < graph_->node_size(); ++node_id) {
NodeDef* node = graph_->mutable_node(node_id);
insert.BindInt(1, graph_id_);
insert.BindInt(2, node_id);
insert.BindText(3, node->name());
insert.BindText(4, node->op());
insert.BindText(5, node->device());
node->clear_name();
node->clear_op();
node->clear_device();
node->clear_input();
string node_def;
if (node->SerializeToString(&node_def)) {
insert.BindBlobUnsafe(6, node_def);
}
unflushed_bytes_ += insert.size();
TF_RETURN_WITH_CONTEXT_IF_ERROR(insert.StepAndReset(), node->name());
TF_RETURN_IF_ERROR(MaybeFlush());
}
return absl::OkStatus();
}
Status SaveGraph(int64_t run_id) {
const char* sql = R"sql(
INSERT OR REPLACE INTO Graphs (
run_id,
graph_id,
inserted_time,
graph_def
) VALUES (?, ?, ?, ?)
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db_->Prepare(sql, &insert));
if (run_id != kAbsent) insert.BindInt(1, run_id);
insert.BindInt(2, graph_id_);
insert.BindDouble(3, DoubleTime(now_));
graph_->clear_node();
string graph_def;
if (graph_->SerializeToString(&graph_def)) {
insert.BindBlobUnsafe(4, graph_def);
}
return insert.StepAndReset();
}
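// Commits the enclosing transaction once kFlushBytes of inserts accumulate,
// bounding the size of the pending transaction.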
Status MaybeFlush() {
if (unflushed_bytes_ >= kFlushBytes) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(txn_->Commit(), "flushing ",
unflushed_bytes_, " bytes");
unflushed_bytes_ = 0;
}
return absl::OkStatus();
}
Sqlite* const db_;
SqliteTransaction* const txn_;
uint64 unflushed_bytes_ = 0;
GraphDef* const graph_;
const uint64 now_;
const int64_t graph_id_;
std::vector<string> name_copies_;
std::unordered_map<StringPiece, int64_t, StringPieceHasher> name_to_node_id_;
GraphWriter(const GraphWriter&) = delete;
void operator=(const GraphWriter&) = delete;
};
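// Lazily creates and caches the Users, Experiments, Runs, and Tags rows that
// anchor everything written during a session. All state is guarded by mu_.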
class RunMetadata {
public:
RunMetadata(IdAllocator* ids, const string& experiment_name,
const string& run_name, const string& user_name)
: ids_{ids},
experiment_name_{experiment_name},
run_name_{run_name},
user_name_{user_name} {
DCHECK(ids_ != nullptr);
}
const string& experiment_name() { return experiment_name_; }
const string& run_name() { return run_name_; }
const string& user_name() { return user_name_; }
int64_t run_id() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
return run_id_;
}
Status SetGraph(Sqlite* db, uint64 now, double computed_time,
std::unique_ptr<GraphDef> g) SQLITE_TRANSACTIONS_EXCLUDED(*db)
TF_LOCKS_EXCLUDED(mu_) {
int64_t run_id;
{
mutex_lock lock(mu_);
TF_RETURN_IF_ERROR(InitializeRun(db, now, computed_time));
run_id = run_id_;
}
int64_t graph_id;
SqliteTransaction txn(*db);
TF_RETURN_IF_ERROR(
GraphWriter::Save(db, &txn, ids_, g.get(), now, run_id, &graph_id));
return txn.Commit();
}
Status GetTagId(Sqlite* db, uint64 now, double computed_time,
const string& tag_name, int64_t* tag_id,
const SummaryMetadata& metadata) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
TF_RETURN_IF_ERROR(InitializeRun(db, now, computed_time));
auto e = tag_ids_.find(tag_name);
if (e != tag_ids_.end()) {
*tag_id = e->second;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(ids_->CreateNewId(tag_id));
tag_ids_[tag_name] = *tag_id;
TF_RETURN_IF_ERROR(
SetDescription(db, *tag_id, metadata.summary_description()));
const char* sql = R"sql(
INSERT INTO Tags (
run_id,
tag_id,
tag_name,
inserted_time,
display_name,
plugin_name,
plugin_data
) VALUES (
:run_id,
:tag_id,
:tag_name,
:inserted_time,
:display_name,
:plugin_name,
:plugin_data
)
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db->Prepare(sql, &insert));
if (run_id_ != kAbsent) insert.BindInt(":run_id", run_id_);
insert.BindInt(":tag_id", *tag_id);
insert.BindTextUnsafe(":tag_name", tag_name);
insert.BindDouble(":inserted_time", DoubleTime(now));
insert.BindTextUnsafe(":display_name", metadata.display_name());
insert.BindTextUnsafe(":plugin_name", metadata.plugin_data().plugin_name());
insert.BindBlobUnsafe(":plugin_data", metadata.plugin_data().content());
return insert.StepAndReset();
}
private:
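// Finds or creates the Users row for user_name_, caching the id in user_id_.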
Status InitializeUser(Sqlite* db, uint64 now)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (user_id_ != kAbsent || user_name_.empty()) return absl::OkStatus();
const char* get_sql = R"sql(
SELECT user_id FROM Users WHERE user_name = ?
)sql";
SqliteStatement get;
TF_RETURN_IF_ERROR(db->Prepare(get_sql, &get));
get.BindText(1, user_name_);
bool is_done;
TF_RETURN_IF_ERROR(get.Step(&is_done));
if (!is_done) {
user_id_ = get.ColumnInt(0);
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(ids_->CreateNewId(&user_id_));
const char* insert_sql = R"sql(
INSERT INTO Users (
user_id,
user_name,
inserted_time
) VALUES (?, ?, ?)
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db->Prepare(insert_sql, &insert));
insert.BindInt(1, user_id_);
insert.BindText(2, user_name_);
insert.BindDouble(3, DoubleTime(now));
TF_RETURN_IF_ERROR(insert.StepAndReset());
return absl::OkStatus();
}
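// Finds or creates the Experiments row, then backdates its started_time if
// this write's computed_time is earlier than the recorded start.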
Status InitializeExperiment(Sqlite* db, uint64 now, double computed_time)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (experiment_name_.empty()) return absl::OkStatus();
if (experiment_id_ == kAbsent) {
TF_RETURN_IF_ERROR(InitializeUser(db, now));
const char* get_sql = R"sql(
SELECT
experiment_id,
started_time
FROM
Experiments
WHERE
user_id IS ?
AND experiment_name = ?
)sql";
SqliteStatement get;
TF_RETURN_IF_ERROR(db->Prepare(get_sql, &get));
if (user_id_ != kAbsent) get.BindInt(1, user_id_);
get.BindText(2, experiment_name_);
bool is_done;
TF_RETURN_IF_ERROR(get.Step(&is_done));
if (!is_done) {
experiment_id_ = get.ColumnInt(0);
experiment_started_time_ = get.ColumnInt(1);
} else {
TF_RETURN_IF_ERROR(ids_->CreateNewId(&experiment_id_));
experiment_started_time_ = computed_time;
const char* insert_sql = R"sql(
INSERT INTO Experiments (
user_id,
experiment_id,
experiment_name,
inserted_time,
started_time,
is_watching
) VALUES (?, ?, ?, ?, ?, ?)
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db->Prepare(insert_sql, &insert));
if (user_id_ != kAbsent) insert.BindInt(1, user_id_);
insert.BindInt(2, experiment_id_);
insert.BindText(3, experiment_name_);
insert.BindDouble(4, DoubleTime(now));
insert.BindDouble(5, computed_time);
insert.BindInt(6, 0);
TF_RETURN_IF_ERROR(insert.StepAndReset());
}
}
if (computed_time < experiment_started_time_) {
experiment_started_time_ = computed_time;
const char* update_sql = R"sql(
UPDATE
Experiments
SET
started_time = ?
WHERE
experiment_id = ?
)sql";
SqliteStatement update;
TF_RETURN_IF_ERROR(db->Prepare(update_sql, &update));
update.BindDouble(1, computed_time);
update.BindInt(2, experiment_id_);
TF_RETURN_IF_ERROR(update.StepAndReset());
}
return absl::OkStatus();
}
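// Creates the Runs row on first use and backdates started_time when needed.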
Status InitializeRun(Sqlite* db, uint64 now, double computed_time)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (run_name_.empty()) return absl::OkStatus();
TF_RETURN_IF_ERROR(InitializeExperiment(db, now, computed_time));
if (run_id_ == kAbsent) {
TF_RETURN_IF_ERROR(ids_->CreateNewId(&run_id_));
run_started_time_ = computed_time;
const char* insert_sql = R"sql(
INSERT OR REPLACE INTO Runs (
experiment_id,
run_id,
run_name,
inserted_time,
started_time
) VALUES (?, ?, ?, ?, ?)
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db->Prepare(insert_sql, &insert));
if (experiment_id_ != kAbsent) insert.BindInt(1, experiment_id_);
insert.BindInt(2, run_id_);
insert.BindText(3, run_name_);
insert.BindDouble(4, DoubleTime(now));
insert.BindDouble(5, computed_time);
TF_RETURN_IF_ERROR(insert.StepAndReset());
}
if (computed_time < run_started_time_) {
run_started_time_ = computed_time;
const char* update_sql = R"sql(
UPDATE
Runs
SET
started_time = ?
WHERE
run_id = ?
)sql";
SqliteStatement update;
TF_RETURN_IF_ERROR(db->Prepare(update_sql, &update));
update.BindDouble(1, computed_time);
update.BindInt(2, run_id_);
TF_RETURN_IF_ERROR(update.StepAndReset());
}
return absl::OkStatus();
}
mutex mu_;
IdAllocator* const ids_;
const string experiment_name_;
const string run_name_;
const string user_name_;
int64_t experiment_id_ TF_GUARDED_BY(mu_) = kAbsent;
int64_t run_id_ TF_GUARDED_BY(mu_) = kAbsent;
int64_t user_id_ TF_GUARDED_BY(mu_) = kAbsent;
double experiment_started_time_ TF_GUARDED_BY(mu_) = 0.0;
double run_started_time_ TF_GUARDED_BY(mu_) = 0.0;
std::unordered_map<string, int64_t> tag_ids_ TF_GUARDED_BY(mu_);
RunMetadata(const RunMetadata&) = delete;
void operator=(const RunMetadata&) = delete;
};
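// Writes the Tensors rows for one time series (tag). Rows are preallocated
// as zero-filled blobs and then updated in place on each Append.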
class SeriesWriter {
public:
SeriesWriter(int64_t series, RunMetadata* meta)
: series_{series}, meta_{meta} {
DCHECK(series_ > 0);
}
Status Append(Sqlite* db, int64_t step, uint64 now, double computed_time,
const Tensor& t) SQLITE_TRANSACTIONS_EXCLUDED(*db)
TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
if (rowids_.empty()) {
Status s = Reserve(db, t);
if (!s.ok()) {
rowids_.clear();
return s;
}
}
int64_t rowid = rowids_.front();
Status s = Write(db, rowid, step, computed_time, t);
if (s.ok()) {
++count_;
}
rowids_.pop_front();
return s;
}
Status Finish(Sqlite* db) SQLITE_TRANSACTIONS_EXCLUDED(*db)
TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
if (!rowids_.empty()) {
SqliteTransaction txn(*db);
const char* sql = R"sql(
DELETE FROM Tensors WHERE rowid = ?
)sql";
SqliteStatement deleter;
TF_RETURN_IF_ERROR(db->Prepare(sql, &deleter));
// Delete every remaining preallocated row; all entries still in rowids_
// are unused. (Indexing against the shrinking deque would stop halfway.)
while (!rowids_.empty()) {
deleter.BindInt(1, rowids_.front());
TF_RETURN_IF_ERROR(deleter.StepAndReset());
rowids_.pop_front();
}
TF_RETURN_IF_ERROR(txn.Commit());
rowids_.clear();
}
return absl::OkStatus();
}
private:
Status Write(Sqlite* db, int64_t rowid, int64_t step, double computed_time,
const Tensor& t) SQLITE_TRANSACTIONS_EXCLUDED(*db) {
if (t.dtype() == DT_STRING) {
if (t.dims() == 0) {
return Update(db, step, computed_time, t, t.scalar<tstring>()(), rowid);
} else {
SqliteTransaction txn(*db);
TF_RETURN_IF_ERROR(
Update(db, step, computed_time, t, StringPiece(), rowid));
TF_RETURN_IF_ERROR(UpdateNdString(db, t, rowid));
return txn.Commit();
}
} else {
return Update(db, step, computed_time, t, t.tensor_data(), rowid);
}
}
Status Update(Sqlite* db, int64_t step, double computed_time, const Tensor& t,
const StringPiece& data, int64_t rowid) {
const char* sql = R"sql(
UPDATE OR REPLACE
Tensors
SET
step = ?,
computed_time = ?,
dtype = ?,
shape = ?,
data = ?
WHERE
rowid = ?
)sql";
SqliteStatement stmt;
TF_RETURN_IF_ERROR(db->Prepare(sql, &stmt));
stmt.BindInt(1, step);
stmt.BindDouble(2, computed_time);
stmt.BindInt(3, t.dtype());
stmt.BindText(4, StringifyShape(t.shape()));
stmt.BindBlobUnsafe(5, data);
stmt.BindInt(6, rowid);
TF_RETURN_IF_ERROR(stmt.StepAndReset());
return absl::OkStatus();
}
Status UpdateNdString(Sqlite* db, const Tensor& t, int64_t tensor_rowid)
SQLITE_EXCLUSIVE_TRANSACTIONS_REQUIRED(*db) {
DCHECK_EQ(t.dtype(), DT_STRING);
DCHECK_GT(t.dims(), 0);
const char* deleter_sql = R"sql(
DELETE FROM TensorStrings WHERE tensor_rowid = ?
)sql";
SqliteStatement deleter;
TF_RETURN_IF_ERROR(db->Prepare(deleter_sql, &deleter));
deleter.BindInt(1, tensor_rowid);
TF_RETURN_WITH_CONTEXT_IF_ERROR(deleter.StepAndReset(), tensor_rowid);
const char* inserter_sql = R"sql(
INSERT INTO TensorStrings (
tensor_rowid,
idx,
data
) VALUES (?, ?, ?)
)sql";
SqliteStatement inserter;
TF_RETURN_IF_ERROR(db->Prepare(inserter_sql, &inserter));
auto flat = t.flat<tstring>();
for (int64_t i = 0; i < flat.size(); ++i) {
inserter.BindInt(1, tensor_rowid);
inserter.BindInt(2, i);
inserter.BindBlobUnsafe(3, flat(i));
TF_RETURN_WITH_CONTEXT_IF_ERROR(inserter.StepAndReset(), "i=", i);
}
return absl::OkStatus();
}
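// Preallocates kPreallocateRows Tensors rows sized to hold data like t, so
// subsequent Appends become in-place updates rather than inserts.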
Status Reserve(Sqlite* db, const Tensor& t) SQLITE_TRANSACTIONS_EXCLUDED(*db)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
SqliteTransaction txn(*db);
unflushed_bytes_ = 0;
if (t.dtype() == DT_STRING) {
if (t.dims() == 0) {
TF_RETURN_IF_ERROR(ReserveData(db, &txn, t.scalar<tstring>()().size()));
} else {
TF_RETURN_IF_ERROR(ReserveTensors(db, &txn, kReserveMinBytes));
}
} else {
TF_RETURN_IF_ERROR(ReserveData(db, &txn, t.tensor_data().size()));
}
return txn.Commit();
}
Status ReserveData(Sqlite* db, SqliteTransaction* txn, size_t size)
SQLITE_EXCLUSIVE_TRANSACTIONS_REQUIRED(*db)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
int64_t space =
static_cast<int64_t>(static_cast<double>(size) * kReserveMultiplier);
if (space < kReserveMinBytes) space = kReserveMinBytes;
return ReserveTensors(db, txn, space);
}
Status ReserveTensors(Sqlite* db, SqliteTransaction* txn,
int64_t reserved_bytes)
SQLITE_EXCLUSIVE_TRANSACTIONS_REQUIRED(*db)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
const char* sql = R"sql(
INSERT INTO Tensors (
series,
data
) VALUES (?, ZEROBLOB(?))
)sql";
SqliteStatement insert;
TF_RETURN_IF_ERROR(db->Prepare(sql, &insert));
for (int64_t i = 0; i < kPreallocateRows; ++i) {
insert.BindInt(1, series_);
insert.BindInt(2, reserved_bytes);
TF_RETURN_WITH_CONTEXT_IF_ERROR(insert.StepAndReset(), "i=", i);
rowids_.push_back(db->last_insert_rowid());
unflushed_bytes_ += reserved_bytes;
TF_RETURN_IF_ERROR(MaybeFlush(db, txn));
}
return absl::OkStatus();
}
Status MaybeFlush(Sqlite* db, SqliteTransaction* txn)
SQLITE_EXCLUSIVE_TRANSACTIONS_REQUIRED(*db)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (unflushed_bytes_ >= kFlushBytes) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(txn->Commit(), "flushing ",
unflushed_bytes_, " bytes");
unflushed_bytes_ = 0;
}
return absl::OkStatus();
}
mutex mu_;
const int64_t series_;
RunMetadata* const meta_;
uint64 count_ TF_GUARDED_BY(mu_) = 0;
std::deque<int64_t> rowids_ TF_GUARDED_BY(mu_);
uint64 unflushed_bytes_ TF_GUARDED_BY(mu_) = 0;
SeriesWriter(const SeriesWriter&) = delete;
void operator=(const SeriesWriter&) = delete;
};
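// Routes appends to a per-tag SeriesWriter, creating writers on demand.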
class RunWriter {
public:
explicit RunWriter(RunMetadata* meta) : meta_{meta} {}
Status Append(Sqlite* db, int64_t tag_id, int64_t step, uint64 now,
double computed_time, const Tensor& t)
SQLITE_TRANSACTIONS_EXCLUDED(*db) TF_LOCKS_EXCLUDED(mu_) {
SeriesWriter* writer = GetSeriesWriter(tag_id);
return writer->Append(db, step, now, computed_time, t);
}
Status Finish(Sqlite* db) SQLITE_TRANSACTIONS_EXCLUDED(*db)
TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
if (series_writers_.empty()) return absl::OkStatus();
for (auto i = series_writers_.begin(); i != series_writers_.end(); ++i) {
if (!i->second) continue;
TF_RETURN_WITH_CONTEXT_IF_ERROR(i->second->Finish(db),
"finish tag_id=", i->first);
i->second.reset();
}
return absl::OkStatus();
}
private:
SeriesWriter* GetSeriesWriter(int64_t tag_id) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock sl(mu_);
auto spot = series_writers_.find(tag_id);
if (spot == series_writers_.end()) {
SeriesWriter* writer = new SeriesWriter(tag_id, meta_);
series_writers_[tag_id].reset(writer);
return writer;
} else {
return spot->second.get();
}
}
mutex mu_;
RunMetadata* const meta_;
std::unordered_map<int64_t, std::unique_ptr<SeriesWriter>> series_writers_
TF_GUARDED_BY(mu_);
RunWriter(const RunWriter&) = delete;
void operator=(const RunWriter&) = delete;
};
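// SummaryWriterInterface implementation that records summaries in a
// TensorBoard-compatible SQLite database rather than an events file.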
class SummaryDbWriter : public SummaryWriterInterface {
public:
SummaryDbWriter(Env* env, Sqlite* db, const string& experiment_name,
const string& run_name, const string& user_name)
: SummaryWriterInterface(),
env_{env},
db_{db},
ids_{env_, db_},
meta_{&ids_, experiment_name, run_name, user_name},
run_{&meta_} {
DCHECK(env_ != nullptr);
db_->Ref();
}
~SummaryDbWriter() override {
core::ScopedUnref unref(db_);
Status s = run_.Finish(db_);
if (!s.ok()) {
LOG(ERROR) << s;
}
int64_t run_id = meta_.run_id();
if (run_id == kAbsent) return;
const char* sql = R"sql(
UPDATE Runs SET finished_time = ? WHERE run_id = ?
)sql";
SqliteStatement update;
s = db_->Prepare(sql, &update);
if (s.ok()) {
update.BindDouble(1, DoubleTime(env_->NowMicros()));
update.BindInt(2, run_id);
s = update.StepAndReset();
}
if (!s.ok()) {
LOG(ERROR) << "Failed to set Runs[" << run_id << "].finish_time: " << s;
}
}
Status Flush() override { return absl::OkStatus(); }
Status WriteTensor(int64_t global_step, Tensor t, const string& tag,
const string& serialized_metadata) override {
TF_RETURN_IF_ERROR(CheckSupportedType(t));
SummaryMetadata metadata;
if (!metadata.ParseFromString(serialized_metadata)) {
return errors::InvalidArgument("Bad serialized_metadata");
}
return Write(global_step, t, tag, metadata);
}
Status WriteScalar(int64_t global_step, Tensor t,
const string& tag) override {
TF_RETURN_IF_ERROR(CheckSupportedType(t));
SummaryMetadata metadata;
PatchPluginName(&metadata, kScalarPluginName);
return Write(global_step, AsScalar(t), tag, metadata);
}
Status WriteGraph(int64_t global_step, std::unique_ptr<GraphDef> g) override {
uint64 now = env_->NowMicros();
return meta_.SetGraph(db_, now, DoubleTime(now), std::move(g));
}
Status WriteEvent(std::unique_ptr<Event> e) override {
return MigrateEvent(std::move(e));
}
Status WriteHistogram(int64_t global_step, Tensor t,
const string& tag) override {
uint64 now = env_->NowMicros();
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(DoubleTime(now));
TF_RETURN_IF_ERROR(
AddTensorAsHistogramToSummary(t, tag, e->mutable_summary()));
return MigrateEvent(std::move(e));
}
Status WriteImage(int64_t global_step, Tensor t, const string& tag,
int max_images, Tensor bad_color) override {
uint64 now = env_->NowMicros();
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(DoubleTime(now));
TF_RETURN_IF_ERROR(AddTensorAsImageToSummary(t, tag, max_images, bad_color,
e->mutable_summary()));
return MigrateEvent(std::move(e));
}
Status WriteAudio(int64_t global_step, Tensor t, const string& tag,
int max_outputs, float sample_rate) override {
uint64 now = env_->NowMicros();
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(DoubleTime(now));
TF_RETURN_IF_ERROR(AddTensorAsAudioToSummary(
t, tag, max_outputs, sample_rate, e->mutable_summary()));
return MigrateEvent(std::move(e));
}
string DebugString() const override { return "SummaryDbWriter"; }
private:
Status Write(int64_t step, const Tensor& t, const string& tag,
const SummaryMetadata& metadata) {
uint64 now = env_->NowMicros();
double computed_time = DoubleTime(now);
int64_t tag_id;
TF_RETURN_IF_ERROR(
meta_.GetTagId(db_, now, computed_time, tag, &tag_id, metadata));
TF_RETURN_WITH_CONTEXT_IF_ERROR(
run_.Append(db_, tag_id, step, now, computed_time, t),
meta_.user_name(), "/", meta_.experiment_name(), "/", meta_.run_name(),
"/", tag, "@", step);
return absl::OkStatus();
}
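// Translates a legacy Event proto into rows, dispatching on whether it
// carries Summary values or a serialized GraphDef.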
Status MigrateEvent(std::unique_ptr<Event> e) {
switch (e->what_case()) {
case Event::WhatCase::kSummary: {
uint64 now = env_->NowMicros();
auto summaries = e->mutable_summary();
for (int i = 0; i < summaries->value_size(); ++i) {
Summary::Value* value = summaries->mutable_value(i);
TF_RETURN_WITH_CONTEXT_IF_ERROR(
MigrateSummary(e.get(), value, now), meta_.user_name(), "/",
meta_.experiment_name(), "/", meta_.run_name(), "/", value->tag(),
"@", e->step());
}
break;
}
case Event::WhatCase::kGraphDef:
TF_RETURN_WITH_CONTEXT_IF_ERROR(
MigrateGraph(e.get(), e->graph_def()), meta_.user_name(), "/",
meta_.experiment_name(), "/", meta_.run_name(), "/__graph__@",
e->step());
break;
default:
break;
}
return absl::OkStatus();
}
Status MigrateGraph(const Event* e, const string& graph_def) {
uint64 now = env_->NowMicros();
std::unique_ptr<GraphDef> graph{new GraphDef};
if (!ParseProtoUnlimited(graph.get(), graph_def)) {
return errors::InvalidArgument("bad proto");
}
return meta_.SetGraph(db_, now, e->wall_time(), std::move(graph));
}
Status MigrateSummary(const Event* e, Summary::Value* s, uint64 now) {
switch (s->value_case()) {
case Summary::Value::ValueCase::kTensor:
TF_RETURN_WITH_CONTEXT_IF_ERROR(MigrateTensor(e, s, now), "tensor");
break;
case Summary::Value::ValueCase::kSimpleValue:
TF_RETURN_WITH_CONTEXT_IF_ERROR(MigrateScalar(e, s, now), "scalar");
break;
case Summary::Value::ValueCase::kHisto:
TF_RETURN_WITH_CONTEXT_IF_ERROR(MigrateHistogram(e, s, now), "histo");
break;
case Summary::Value::ValueCase::kImage:
TF_RETURN_WITH_CONTEXT_IF_ERROR(MigrateImage(e, s, now), "image");
break;
case Summary::Value::ValueCase::kAudio:
TF_RETURN_WITH_CONTEXT_IF_ERROR(MigrateAudio(e, s, now), "audio");
break;
default:
break;
}
return absl::OkStatus();
}
Status MigrateTensor(const Event* e, Summary::Value* s, uint64 now) {
Tensor t;
if (!t.FromProto(s->tensor())) return errors::InvalidArgument("bad proto");
TF_RETURN_IF_ERROR(CheckSupportedType(t));
int64_t tag_id;
TF_RETURN_IF_ERROR(meta_.GetTagId(db_, now, e->wall_time(), s->tag(),
&tag_id, s->metadata()));
return run_.Append(db_, tag_id, e->step(), now, e->wall_time(), t);
}
Status MigrateScalar(const Event* e, Summary::Value* s, uint64 now) {
Tensor t{DT_FLOAT, {}};
t.scalar<float>()() = s->simple_value();
int64_t tag_id;
PatchPluginName(s->mutable_metadata(), kScalarPluginName);
TF_RETURN_IF_ERROR(meta_.GetTagId(db_, now, e->wall_time(), s->tag(),
&tag_id, s->metadata()));
return run_.Append(db_, tag_id, e->step(), now, e->wall_time(), t);
}
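// Encodes the histogram as a (k, 3) tensor of [left_edge, right_edge, count]
// rows, one per bucket.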
Status MigrateHistogram(const Event* e, Summary::Value* s, uint64 now) {
const HistogramProto& histo = s->histo();
int k = histo.bucket_size();
if (k != histo.bucket_limit_size()) {
return errors::InvalidArgument("size mismatch");
}
Tensor t{DT_DOUBLE, {k, 3}};
auto data = t.flat<double>();
for (int i = 0, j = 0; i < k; ++i) {
double left_edge = (i == 0) ? std::numeric_limits<double>::min()
: histo.bucket_limit(i - 1);
data(j++) = left_edge;
data(j++) = histo.bucket_limit(i);
data(j++) = histo.bucket(i);
}
int64_t tag_id;
PatchPluginName(s->mutable_metadata(), kHistogramPluginName);
TF_RETURN_IF_ERROR(meta_.GetTagId(db_, now, e->wall_time(), s->tag(),
&tag_id, s->metadata()));
return run_.Append(db_, tag_id, e->step(), now, e->wall_time(), t);
}
Status MigrateImage(const Event* e, Summary::Value* s, uint64 now) {
Tensor t{DT_STRING, {3}};
auto img = s->mutable_image();
t.flat<tstring>()(0) = strings::StrCat(img->width());
t.flat<tstring>()(1) = strings::StrCat(img->height());
t.flat<tstring>()(2) = std::move(*img->mutable_encoded_image_string());
int64_t tag_id;
PatchPluginName(s->mutable_metadata(), kImagePluginName);
TF_RETURN_IF_ERROR(meta_.GetTagId(db_, now, e->wall_time(), s->tag(),
&tag_id, s->metadata()));
return run_.Append(db_, tag_id, e->step(), now, e->wall_time(), t);
}
Status MigrateAudio(const Event* e, Summary::Value* s, uint64 now) {
Tensor t{DT_STRING, {1, 2}};
auto wav = s->mutable_audio();
t.flat<tstring>()(0) = std::move(*wav->mutable_encoded_audio_string());
t.flat<tstring>()(1) = "";
int64_t tag_id;
PatchPluginName(s->mutable_metadata(), kAudioPluginName);
TF_RETURN_IF_ERROR(meta_.GetTagId(db_, now, e->wall_time(), s->tag(),
&tag_id, s->metadata()));
return run_.Append(db_, tag_id, e->step(), now, e->wall_time(), t);
}
Env* const env_;
Sqlite* const db_;
IdAllocator ids_;
RunMetadata meta_;
RunWriter run_;
};
}
Status CreateSummaryDbWriter(Sqlite* db, const string& experiment_name,
const string& run_name, const string& user_name,
Env* env, SummaryWriterInterface** result) {
*result = new SummaryDbWriter(env, db, experiment_name, run_name, user_name);
return absl::OkStatus();
}
} | #include "tensorflow/core/summary/summary_db_writer.h"
#include "tensorflow/core/summary/schema.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/db/sqlite.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/event.pb.h"
namespace tensorflow {
namespace {
Tensor MakeScalarInt64(int64_t x) {
Tensor t(DT_INT64, TensorShape({}));
t.scalar<int64_t>()() = x;
return t;
}
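// Env wrapper with a manually advanced clock so tests can assert exact
// inserted_time and computed_time values.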
class FakeClockEnv : public EnvWrapper {
public:
FakeClockEnv() : EnvWrapper(Env::Default()), current_millis_(0) {}
void AdvanceByMillis(const uint64 millis) { current_millis_ += millis; }
uint64 NowMicros() const override { return current_millis_ * 1000; }
uint64 NowSeconds() const override { return current_millis_ / 1000; }
private:
uint64 current_millis_;
};
class SummaryDbWriterTest : public ::testing::Test {
protected:
void SetUp() override {
TF_ASSERT_OK(Sqlite::Open(":memory:", SQLITE_OPEN_READWRITE, &db_));
TF_ASSERT_OK(SetupTensorboardSqliteDb(db_));
}
void TearDown() override {
if (writer_ != nullptr) {
writer_->Unref();
writer_ = nullptr;
}
db_->Unref();
db_ = nullptr;
}
int64_t QueryInt(const string& sql) {
SqliteStatement stmt = db_->PrepareOrDie(sql);
bool is_done;
Status s = stmt.Step(&is_done);
if (!s.ok() || is_done) {
LOG(ERROR) << s << " due to " << sql;
return -1;
}
return stmt.ColumnInt(0);
}
double QueryDouble(const string& sql) {
SqliteStatement stmt = db_->PrepareOrDie(sql);
bool is_done;
Status s = stmt.Step(&is_done);
if (!s.ok() || is_done) {
LOG(ERROR) << s << " due to " << sql;
return -1;
}
return stmt.ColumnDouble(0);
}
string QueryString(const string& sql) {
SqliteStatement stmt = db_->PrepareOrDie(sql);
bool is_done;
Status s = stmt.Step(&is_done);
if (!s.ok() || is_done) {
LOG(ERROR) << s << " due to " << sql;
return "MISSINGNO";
}
return stmt.ColumnString(0);
}
FakeClockEnv env_;
Sqlite* db_ = nullptr;
SummaryWriterInterface* writer_ = nullptr;
};
TEST_F(SummaryDbWriterTest, WriteHistogram_VerifyTensorValues) {
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "histtest", "test1", "user1", &env_,
&writer_));
int step = 0;
std::unique_ptr<Event> e{new Event};
e->set_step(step);
e->set_wall_time(123);
Summary::Value* s = e->mutable_summary()->add_value();
s->set_tag("normal/myhisto");
double dummy_value = 10.123;
HistogramProto* proto = s->mutable_histo();
proto->Clear();
proto->set_min(dummy_value);
proto->set_max(dummy_value);
proto->set_num(dummy_value);
proto->set_sum(dummy_value);
proto->set_sum_squares(dummy_value);
int size = 3;
double bucket_limits[] = {-30.5, -10.5, -5.5};
double bucket[] = {-10, 10, 20};
for (int i = 0; i < size; i++) {
proto->add_bucket_limit(bucket_limits[i]);
proto->add_bucket(bucket[i]);
}
TF_ASSERT_OK(writer_->WriteEvent(std::move(e)));
TF_ASSERT_OK(writer_->Flush());
writer_->Unref();
writer_ = nullptr;
string result = QueryString("SELECT data FROM Tensors");
const double* val = reinterpret_cast<const double*>(result.data());
double histarray[] = {std::numeric_limits<double>::min(),
-30.5,
-10,
-30.5,
-10.5,
10,
-10.5,
-5.5,
20};
int histarray_size = 9;
for (int i = 0; i < histarray_size; i++) {
EXPECT_EQ(histarray[i], val[i]);
}
}
TEST_F(SummaryDbWriterTest, NothingWritten_NoRowsCreated) {
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "mad-science", "train", "jart", &env_,
&writer_));
TF_ASSERT_OK(writer_->Flush());
writer_->Unref();
writer_ = nullptr;
EXPECT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Ids"));
EXPECT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Users"));
EXPECT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Experiments"));
EXPECT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Runs"));
EXPECT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Tags"));
EXPECT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Tensors"));
}
TEST_F(SummaryDbWriterTest, TensorsWritten_RowsGetInitialized) {
SummaryMetadata metadata;
metadata.set_display_name("display_name");
metadata.set_summary_description("description");
metadata.mutable_plugin_data()->set_plugin_name("plugin_name");
metadata.mutable_plugin_data()->set_content("plugin_data");
SummaryMetadata metadata_nope;
metadata_nope.set_display_name("nope");
metadata_nope.set_summary_description("nope");
metadata_nope.mutable_plugin_data()->set_plugin_name("nope");
metadata_nope.mutable_plugin_data()->set_content("nope");
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "mad-science", "train", "jart", &env_,
&writer_));
env_.AdvanceByMillis(23);
TF_ASSERT_OK(writer_->WriteTensor(1, MakeScalarInt64(123LL), "taggy",
metadata.SerializeAsString()));
env_.AdvanceByMillis(23);
TF_ASSERT_OK(writer_->WriteTensor(2, MakeScalarInt64(314LL), "taggy",
metadata_nope.SerializeAsString()));
TF_ASSERT_OK(writer_->Flush());
ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Users"));
ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Experiments"));
ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Runs"));
ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Tags"));
ASSERT_EQ(1000LL, QueryInt("SELECT COUNT(*) FROM Tensors"));
int64_t user_id = QueryInt("SELECT user_id FROM Users");
int64_t experiment_id = QueryInt("SELECT experiment_id FROM Experiments");
int64_t run_id = QueryInt("SELECT run_id FROM Runs");
int64_t tag_id = QueryInt("SELECT tag_id FROM Tags");
EXPECT_LT(0LL, user_id);
EXPECT_LT(0LL, experiment_id);
EXPECT_LT(0LL, run_id);
EXPECT_LT(0LL, tag_id);
EXPECT_EQ("jart", QueryString("SELECT user_name FROM Users"));
EXPECT_EQ(0.023, QueryDouble("SELECT inserted_time FROM Users"));
EXPECT_EQ(user_id, QueryInt("SELECT user_id FROM Experiments"));
EXPECT_EQ("mad-science",
QueryString("SELECT experiment_name FROM Experiments"));
EXPECT_EQ(0.023, QueryDouble("SELECT inserted_time FROM Experiments"));
EXPECT_EQ(experiment_id, QueryInt("SELECT experiment_id FROM Runs"));
EXPECT_EQ("train", QueryString("SELECT run_name FROM Runs"));
EXPECT_EQ(0.023, QueryDouble("SELECT inserted_time FROM Runs"));
EXPECT_EQ(run_id, QueryInt("SELECT run_id FROM Tags"));
EXPECT_EQ("taggy", QueryString("SELECT tag_name FROM Tags"));
EXPECT_EQ(0.023, QueryDouble("SELECT inserted_time FROM Tags"));
EXPECT_EQ("display_name", QueryString("SELECT display_name FROM Tags"));
EXPECT_EQ("plugin_name", QueryString("SELECT plugin_name FROM Tags"));
EXPECT_EQ("plugin_data", QueryString("SELECT plugin_data FROM Tags"));
EXPECT_EQ("description", QueryString("SELECT description FROM Descriptions"));
EXPECT_EQ(tag_id, QueryInt("SELECT series FROM Tensors WHERE step = 1"));
EXPECT_EQ(0.023,
QueryDouble("SELECT computed_time FROM Tensors WHERE step = 1"));
EXPECT_EQ(tag_id, QueryInt("SELECT series FROM Tensors WHERE step = 2"));
EXPECT_EQ(0.046,
QueryDouble("SELECT computed_time FROM Tensors WHERE step = 2"));
}
TEST_F(SummaryDbWriterTest, EmptyParentNames_NoParentsCreated) {
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "", "", "", &env_, &writer_));
TF_ASSERT_OK(writer_->WriteTensor(1, MakeScalarInt64(123LL), "taggy", ""));
TF_ASSERT_OK(writer_->Flush());
ASSERT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Users"));
ASSERT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Experiments"));
ASSERT_EQ(0LL, QueryInt("SELECT COUNT(*) FROM Runs"));
ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Tags"));
ASSERT_EQ(1000LL, QueryInt("SELECT COUNT(*) FROM Tensors"));
}
TEST_F(SummaryDbWriterTest, WriteEvent_Scalar) {
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "", "", "", &env_, &writer_));
std::unique_ptr<Event> e{new Event};
e->set_step(7);
e->set_wall_time(123.456);
Summary::Value* s = e->mutable_summary()->add_value();
s->set_tag("π");
s->set_simple_value(3.14f);
s = e->mutable_summary()->add_value();
s->set_tag("φ");
s->set_simple_value(1.61f);
TF_ASSERT_OK(writer_->WriteEvent(std::move(e)));
TF_ASSERT_OK(writer_->Flush());
ASSERT_EQ(2LL, QueryInt("SELECT COUNT(*) FROM Tags"));
ASSERT_EQ(2000LL, QueryInt("SELECT COUNT(*) FROM Tensors"));
int64_t tag1_id = QueryInt("SELECT tag_id FROM Tags WHERE tag_name = 'π'");
int64_t tag2_id = QueryInt("SELECT tag_id FROM Tags WHERE tag_name = 'φ'");
EXPECT_GT(tag1_id, 0LL);
EXPECT_GT(tag2_id, 0LL);
EXPECT_EQ(123.456, QueryDouble(strings::StrCat(
"SELECT computed_time FROM Tensors WHERE series = ",
tag1_id, " AND step = 7")));
EXPECT_EQ(123.456, QueryDouble(strings::StrCat(
"SELECT computed_time FROM Tensors WHERE series = ",
tag2_id, " AND step = 7")));
}
TEST_F(SummaryDbWriterTest, WriteGraph) {
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "", "R", "", &env_, &writer_));
env_.AdvanceByMillis(23);
GraphDef graph;
graph.mutable_library()->add_gradient()->set_function_name("funk");
NodeDef* node = graph.add_node();
node->set_name("x");
node->set_op("Placeholder");
node = graph.add_node();
node->set_name("y");
node->set_op("Placeholder");
node = graph.add_node();
node->set_name("z");
node->set_op("Love");
node = graph.add_node();
node->set_name("+");
node->set_op("Add");
node->add_input("x");
node->add_input("y");
node->add_input("^z");
node->set_device("tpu/lol");
std::unique_ptr<Event> e{new Event};
graph.SerializeToString(e->mutable_graph_def());
TF_ASSERT_OK(writer_->WriteEvent(std::move(e)));
TF_ASSERT_OK(writer_->Flush());
ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Runs"));
ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Graphs"));
ASSERT_EQ(4LL, QueryInt("SELECT COUNT(*) FROM Nodes"));
ASSERT_EQ(3LL, QueryInt("SELECT COUNT(*) FROM NodeInputs"));
ASSERT_EQ(QueryInt("SELECT run_id FROM Runs"),
QueryInt("SELECT run_id FROM Graphs"));
int64_t graph_id = QueryInt("SELECT graph_id FROM Graphs");
EXPECT_GT(graph_id, 0LL);
EXPECT_EQ(0.023, QueryDouble("SELECT inserted_time FROM Graphs"));
GraphDef graph2;
graph2.ParseFromString(QueryString("SELECT graph_def FROM Graphs"));
EXPECT_EQ(0, graph2.node_size());
EXPECT_EQ("funk", graph2.library().gradient(0).function_name());
EXPECT_EQ("x", QueryString("SELECT node_name FROM Nodes WHERE node_id = 0"));
EXPECT_EQ("y", QueryString("SELECT node_name FROM Nodes WHERE node_id = 1"));
EXPECT_EQ("z", QueryString("SELECT node_name FROM Nodes WHERE node_id = 2"));
EXPECT_EQ("+", QueryString("SELECT node_name FROM Nodes WHERE node_id = 3"));
EXPECT_EQ("Placeholder",
QueryString("SELECT op FROM Nodes WHERE node_id = 0"));
EXPECT_EQ("Placeholder",
QueryString("SELECT op FROM Nodes WHERE node_id = 1"));
EXPECT_EQ("Love", QueryString("SELECT op FROM Nodes WHERE node_id = 2"));
EXPECT_EQ("Add", QueryString("SELECT op FROM Nodes WHERE node_id = 3"));
EXPECT_EQ("", QueryString("SELECT device FROM Nodes WHERE node_id = 0"));
EXPECT_EQ("", QueryString("SELECT device FROM Nodes WHERE node_id = 1"));
EXPECT_EQ("", QueryString("SELECT device FROM Nodes WHERE node_id = 2"));
EXPECT_EQ("tpu/lol",
QueryString("SELECT device FROM Nodes WHERE node_id = 3"));
EXPECT_EQ(graph_id,
QueryInt("SELECT graph_id FROM NodeInputs WHERE idx = 0"));
EXPECT_EQ(graph_id,
QueryInt("SELECT graph_id FROM NodeInputs WHERE idx = 1"));
EXPECT_EQ(graph_id,
QueryInt("SELECT graph_id FROM NodeInputs WHERE idx = 2"));
EXPECT_EQ(3LL, QueryInt("SELECT node_id FROM NodeInputs WHERE idx = 0"));
EXPECT_EQ(3LL, QueryInt("SELECT node_id FROM NodeInputs WHERE idx = 1"));
EXPECT_EQ(3LL, QueryInt("SELECT node_id FROM NodeInputs WHERE idx = 2"));
EXPECT_EQ(0LL,
QueryInt("SELECT input_node_id FROM NodeInputs WHERE idx = 0"));
EXPECT_EQ(1LL,
QueryInt("SELECT input_node_id FROM NodeInputs WHERE idx = 1"));
EXPECT_EQ(2LL,
QueryInt("SELECT input_node_id FROM NodeInputs WHERE idx = 2"));
EXPECT_EQ(0LL, QueryInt("SELECT is_control FROM NodeInputs WHERE idx = 0"));
EXPECT_EQ(0LL, QueryInt("SELECT is_control FROM NodeInputs WHERE idx = 1"));
EXPECT_EQ(1LL, QueryInt("SELECT is_control FROM NodeInputs WHERE idx = 2"));
}
TEST_F(SummaryDbWriterTest, UsesIdsTable) {
SummaryMetadata metadata;
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "mad-science", "train", "jart", &env_,
&writer_));
env_.AdvanceByMillis(23);
TF_ASSERT_OK(writer_->WriteTensor(1, MakeScalarInt64(123LL), "taggy",
metadata.SerializeAsString()));
TF_ASSERT_OK(writer_->Flush());
ASSERT_EQ(4LL, QueryInt("SELECT COUNT(*) FROM Ids"));
EXPECT_EQ(4LL, QueryInt(strings::StrCat(
"SELECT COUNT(*) FROM Ids WHERE id IN (",
QueryInt("SELECT user_id FROM Users"), ", ",
QueryInt("SELECT experiment_id FROM Experiments"), ", ",
QueryInt("SELECT run_id FROM Runs"), ", ",
QueryInt("SELECT tag_id FROM Tags"), ")")));
}
TEST_F(SummaryDbWriterTest, SetsRunFinishedTime) {
SummaryMetadata metadata;
TF_ASSERT_OK(CreateSummaryDbWriter(db_, "mad-science", "train", "jart", &env_,
&writer_));
env_.AdvanceByMillis(23);
TF_ASSERT_OK(writer_->WriteTensor(1, MakeScalarInt64(123LL), "taggy",
metadata.SerializeAsString()));
TF_ASSERT_OK(writer_->Flush());
ASSERT_EQ(0.023, QueryDouble("SELECT started_time FROM Runs"));
ASSERT_EQ(0.0, QueryDouble("SELECT finished_time FROM Runs"));
env_.AdvanceByMillis(23);
writer_->Unref();
writer_ = nullptr;
ASSERT_EQ(0.023, QueryDouble("SELECT started_time FROM Runs"));
ASSERT_EQ(0.046, QueryDouble("SELECT finished_time FROM Runs"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/summary/summary_db_writer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/summary/summary_db_writer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b8f1a831-4754-45ce-a70d-ceae8ef287fa | cpp | tensorflow/tensorflow | summary_file_writer | tensorflow/core/summary/summary_file_writer.cc | tensorflow/core/summary/summary_file_writer_test.cc | #include "tensorflow/core/summary/summary_file_writer.h"
#include <atomic>
#include <memory>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/summary/summary_converter.h"
#include "tensorflow/core/util/events_writer.h"
namespace tensorflow {
namespace {
class SummaryFileWriter : public SummaryWriterInterface {
public:
SummaryFileWriter(int max_queue, int flush_millis, Env* env)
: SummaryWriterInterface(),
is_initialized_(false),
max_queue_(max_queue),
flush_millis_(flush_millis),
env_(env) {}
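// Creates the events file under logdir, uniquifying the filename suffix with
// the process id and a per-process counter to avoid collisions when several
// writers target the same directory.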
Status Initialize(const string& logdir, const string& filename_suffix) {
const Status is_dir = env_->IsDirectory(logdir);
if (!is_dir.ok()) {
if (is_dir.code() != tensorflow::error::NOT_FOUND) {
return is_dir;
}
TF_RETURN_IF_ERROR(env_->RecursivelyCreateDir(logdir));
}
int32_t pid = env_->GetProcessId();
static std::atomic<int64_t> file_id_counter(0);
string sep = absl::StartsWith(filename_suffix, ".") ? "" : ".";
const string uniquified_filename_suffix = absl::StrCat(
".", pid, ".", file_id_counter.fetch_add(1), sep, filename_suffix);
mutex_lock ml(mu_);
events_writer_ =
std::make_unique<EventsWriter>(io::JoinPath(logdir, "events"));
TF_RETURN_WITH_CONTEXT_IF_ERROR(
events_writer_->InitWithSuffix(uniquified_filename_suffix),
"Could not initialize events writer.");
last_flush_ = env_->NowMicros();
is_initialized_ = true;
return absl::OkStatus();
}
Status Flush() override {
mutex_lock ml(mu_);
if (!is_initialized_) {
return errors::FailedPrecondition("Class was not properly initialized.");
}
return InternalFlush();
}
~SummaryFileWriter() override {
(void)Flush();
}
Status WriteTensor(int64_t global_step, Tensor t, const string& tag,
const string& serialized_metadata) override {
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(GetWallTime());
Summary::Value* v = e->mutable_summary()->add_value();
if (t.dtype() == DT_STRING) {
t.AsProtoField(v->mutable_tensor());
} else {
t.AsProtoTensorContent(v->mutable_tensor());
}
v->set_tag(tag);
if (!serialized_metadata.empty()) {
v->mutable_metadata()->ParseFromString(serialized_metadata);
}
return WriteEvent(std::move(e));
}
Status WriteScalar(int64_t global_step, Tensor t,
const string& tag) override {
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(GetWallTime());
TF_RETURN_IF_ERROR(
AddTensorAsScalarToSummary(t, tag, e->mutable_summary()));
return WriteEvent(std::move(e));
}
Status WriteHistogram(int64_t global_step, Tensor t,
const string& tag) override {
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(GetWallTime());
TF_RETURN_IF_ERROR(
AddTensorAsHistogramToSummary(t, tag, e->mutable_summary()));
return WriteEvent(std::move(e));
}
Status WriteImage(int64_t global_step, Tensor t, const string& tag,
int max_images, Tensor bad_color) override {
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(GetWallTime());
TF_RETURN_IF_ERROR(AddTensorAsImageToSummary(t, tag, max_images, bad_color,
e->mutable_summary()));
return WriteEvent(std::move(e));
}
Status WriteAudio(int64_t global_step, Tensor t, const string& tag,
int max_outputs, float sample_rate) override {
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(GetWallTime());
TF_RETURN_IF_ERROR(AddTensorAsAudioToSummary(
t, tag, max_outputs, sample_rate, e->mutable_summary()));
return WriteEvent(std::move(e));
}
Status WriteGraph(int64_t global_step,
std::unique_ptr<GraphDef> graph) override {
std::unique_ptr<Event> e{new Event};
e->set_step(global_step);
e->set_wall_time(GetWallTime());
graph->SerializeToString(e->mutable_graph_def());
return WriteEvent(std::move(e));
}
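// Queues the event and flushes once the queue exceeds max_queue_ entries or
// flush_millis_ milliseconds have passed since the last flush.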
Status WriteEvent(std::unique_ptr<Event> event) override {
mutex_lock ml(mu_);
queue_.emplace_back(std::move(event));
if (queue_.size() > max_queue_ ||
env_->NowMicros() - last_flush_ > 1000 * flush_millis_) {
return InternalFlush();
}
return absl::OkStatus();
}
string DebugString() const override { return "SummaryFileWriter"; }
private:
double GetWallTime() {
return static_cast<double>(env_->NowMicros()) / 1.0e6;
}
Status InternalFlush() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
for (const std::unique_ptr<Event>& e : queue_) {
events_writer_->WriteEvent(*e);
}
queue_.clear();
TF_RETURN_WITH_CONTEXT_IF_ERROR(events_writer_->Flush(),
"Could not flush events file.");
last_flush_ = env_->NowMicros();
return absl::OkStatus();
}
bool is_initialized_;
const int max_queue_;
const int flush_millis_;
uint64 last_flush_;
Env* env_;
mutex mu_;
std::vector<std::unique_ptr<Event>> queue_ TF_GUARDED_BY(mu_);
std::unique_ptr<EventsWriter> events_writer_ TF_GUARDED_BY(mu_);
std::vector<std::pair<string, SummaryMetadata>> registered_summaries_
TF_GUARDED_BY(mu_);
};
}
Status CreateSummaryFileWriter(int max_queue, int flush_millis,
const string& logdir,
const string& filename_suffix, Env* env,
SummaryWriterInterface** result) {
SummaryFileWriter* w = new SummaryFileWriter(max_queue, flush_millis, env);
const Status s = w->Initialize(logdir, filename_suffix);
if (!s.ok()) {
w->Unref();
*result = nullptr;
return s;
}
*result = w;
return absl::OkStatus();
}
} | #include "tensorflow/core/summary/summary_file_writer.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/event.pb.h"
namespace tensorflow {
namespace {
class FakeClockEnv : public EnvWrapper {
public:
FakeClockEnv() : EnvWrapper(Env::Default()), current_millis_(0) {}
void AdvanceByMillis(const uint64 millis) { current_millis_ += millis; }
uint64 NowMicros() const override { return current_millis_ * 1000; }
uint64 NowSeconds() const override { return current_millis_ / 1000; }
private:
uint64 current_millis_;
};
class SummaryFileWriterTest : public ::testing::Test {
protected:
Status SummaryTestHelper(
const string& test_name,
const std::function<Status(SummaryWriterInterface*)>& writer_fn,
const std::function<void(const Event&)>& test_fn) {
static std::set<string>* tests = new std::set<string>();
CHECK(tests->insert(test_name).second) << ": " << test_name;
SummaryWriterInterface* writer;
TF_CHECK_OK(CreateSummaryFileWriter(1, 1, testing::TmpDir(), test_name,
&env_, &writer));
core::ScopedUnref deleter(writer);
TF_CHECK_OK(writer_fn(writer));
TF_CHECK_OK(writer->Flush());
std::vector<string> files;
TF_CHECK_OK(env_.GetChildren(testing::TmpDir(), &files));
bool found = false;
for (const string& f : files) {
if (absl::StrContains(f, test_name)) {
if (found) {
return errors::Unknown("Found more than one file for ", test_name);
}
found = true;
std::unique_ptr<RandomAccessFile> read_file;
TF_CHECK_OK(env_.NewRandomAccessFile(io::JoinPath(testing::TmpDir(), f),
&read_file));
io::RecordReader reader(read_file.get(), io::RecordReaderOptions());
tstring record;
uint64 offset = 0;
TF_CHECK_OK(
reader.ReadRecord(&offset,
&record));
TF_CHECK_OK(reader.ReadRecord(&offset, &record));
Event e;
e.ParseFromString(record);
test_fn(e);
}
}
if (!found) {
return errors::Unknown("Found no file for ", test_name);
}
return absl::OkStatus();
}
FakeClockEnv env_;
};
TEST_F(SummaryFileWriterTest, WriteTensor) {
TF_CHECK_OK(SummaryTestHelper("tensor_test",
[](SummaryWriterInterface* writer) {
Tensor one(DT_FLOAT, TensorShape({}));
one.scalar<float>()() = 1.0;
TF_RETURN_IF_ERROR(writer->WriteTensor(
2, one, "name",
SummaryMetadata().SerializeAsString()));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
},
[](const Event& e) {
EXPECT_EQ(e.step(), 2);
CHECK_EQ(e.summary().value_size(), 1);
EXPECT_EQ(e.summary().value(0).tag(), "name");
}));
TF_CHECK_OK(SummaryTestHelper(
"string_tensor_test",
[](SummaryWriterInterface* writer) {
Tensor hello(DT_STRING, TensorShape({}));
hello.scalar<tstring>()() = "hello";
TF_RETURN_IF_ERROR(writer->WriteTensor(
2, hello, "name", SummaryMetadata().SerializeAsString()));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
},
[](const Event& e) {
EXPECT_EQ(e.step(), 2);
CHECK_EQ(e.summary().value_size(), 1);
EXPECT_EQ(e.summary().value(0).tag(), "name");
EXPECT_EQ(e.summary().value(0).tensor().dtype(), DT_STRING);
EXPECT_EQ(e.summary().value(0).tensor().string_val()[0], "hello");
}));
}
TEST_F(SummaryFileWriterTest, WriteScalar) {
TF_CHECK_OK(SummaryTestHelper(
"scalar_test",
[](SummaryWriterInterface* writer) {
Tensor one(DT_FLOAT, TensorShape({}));
one.scalar<float>()() = 1.0;
TF_RETURN_IF_ERROR(writer->WriteScalar(2, one, "name"));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
},
[](const Event& e) {
EXPECT_EQ(e.step(), 2);
CHECK_EQ(e.summary().value_size(), 1);
EXPECT_EQ(e.summary().value(0).tag(), "name");
EXPECT_EQ(e.summary().value(0).simple_value(), 1.0);
}));
}
TEST_F(SummaryFileWriterTest, WriteHistogram) {
TF_CHECK_OK(SummaryTestHelper("hist_test",
[](SummaryWriterInterface* writer) {
Tensor one(DT_FLOAT, TensorShape({}));
one.scalar<float>()() = 1.0;
TF_RETURN_IF_ERROR(
writer->WriteHistogram(2, one, "name"));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
},
[](const Event& e) {
EXPECT_EQ(e.step(), 2);
CHECK_EQ(e.summary().value_size(), 1);
EXPECT_EQ(e.summary().value(0).tag(), "name");
EXPECT_TRUE(e.summary().value(0).has_histo());
}));
}
namespace {
template <typename T>
static Status CreateImage(SummaryWriterInterface* writer) {
Tensor bad_color(DT_UINT8, TensorShape({1}));
bad_color.scalar<uint8>()() = 0;
Tensor one(DataTypeToEnum<T>::v(), TensorShape({1, 1, 1, 1}));
one.scalar<T>()() = T(1);
TF_RETURN_IF_ERROR(writer->WriteImage(2, one, "name", 1, bad_color));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
}
static void CheckImage(const Event& e) {
EXPECT_EQ(e.step(), 2);
CHECK_EQ(e.summary().value_size(), 1);
EXPECT_EQ(e.summary().value(0).tag(), "name/image");
CHECK(e.summary().value(0).has_image());
EXPECT_EQ(e.summary().value(0).image().height(), 1);
EXPECT_EQ(e.summary().value(0).image().width(), 1);
EXPECT_EQ(e.summary().value(0).image().colorspace(), 1);
}
}
TEST_F(SummaryFileWriterTest, WriteImageUInt8) {
TF_CHECK_OK(
SummaryTestHelper("image_test_uint8", CreateImage<uint8>, CheckImage));
}
TEST_F(SummaryFileWriterTest, WriteImageFloat) {
TF_CHECK_OK(
SummaryTestHelper("image_test_float", CreateImage<float>, CheckImage));
}
TEST_F(SummaryFileWriterTest, WriteImageHalf) {
TF_CHECK_OK(SummaryTestHelper("image_test_half", CreateImage<Eigen::half>,
CheckImage));
}
TEST_F(SummaryFileWriterTest, WriteImageDouble) {
TF_CHECK_OK(
SummaryTestHelper("image_test_double", CreateImage<double>, CheckImage));
}
TEST_F(SummaryFileWriterTest, WriteAudio) {
TF_CHECK_OK(SummaryTestHelper(
"audio_test",
[](SummaryWriterInterface* writer) {
Tensor one(DT_FLOAT, TensorShape({1, 1}));
one.scalar<float>()() = 1.0;
TF_RETURN_IF_ERROR(writer->WriteAudio(2, one, "name", 1, 1));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
},
[](const Event& e) {
EXPECT_EQ(e.step(), 2);
CHECK_EQ(e.summary().value_size(), 1);
EXPECT_EQ(e.summary().value(0).tag(), "name/audio");
CHECK(e.summary().value(0).has_audio());
}));
}
TEST_F(SummaryFileWriterTest, WriteEvent) {
TF_CHECK_OK(
SummaryTestHelper("event_test",
[](SummaryWriterInterface* writer) {
std::unique_ptr<Event> e{new Event};
e->set_step(7);
e->mutable_summary()->add_value()->set_tag("hi");
TF_RETURN_IF_ERROR(writer->WriteEvent(std::move(e)));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
},
[](const Event& e) {
EXPECT_EQ(e.step(), 7);
CHECK_EQ(e.summary().value_size(), 1);
EXPECT_EQ(e.summary().value(0).tag(), "hi");
}));
}
TEST_F(SummaryFileWriterTest, WallTime) {
env_.AdvanceByMillis(7023);
TF_CHECK_OK(SummaryTestHelper(
"wall_time_test",
[](SummaryWriterInterface* writer) {
Tensor one(DT_FLOAT, TensorShape({}));
one.scalar<float>()() = 1.0;
TF_RETURN_IF_ERROR(writer->WriteScalar(2, one, "name"));
TF_RETURN_IF_ERROR(writer->Flush());
return absl::OkStatus();
},
[](const Event& e) { EXPECT_EQ(e.wall_time(), 7.023); }));
}
TEST_F(SummaryFileWriterTest, AvoidFilenameCollision) {
string test_name = "avoid_filename_collision_test";
int num_files = 10;
for (int i = 0; i < num_files; i++) {
SummaryWriterInterface* writer;
TF_CHECK_OK(CreateSummaryFileWriter(1, 1, testing::TmpDir(), test_name,
&env_, &writer));
core::ScopedUnref deleter(writer);
}
std::vector<string> files;
TF_CHECK_OK(env_.GetChildren(testing::TmpDir(), &files));
files.erase(std::remove_if(files.begin(), files.end(),
[test_name](string f) {
return !absl::StrContains(f, test_name);
}),
files.end());
EXPECT_EQ(num_files, files.size())
<< "files = [" << absl::StrJoin(files, ", ") << "]";
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/summary/summary_file_writer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/summary/summary_file_writer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f91e14b2-7735-43ce-8b22-6f38cce4d87a | cpp | tensorflow/tensorflow | debug_graph_utils | tensorflow/core/debug/debug_graph_utils.cc | tensorflow/core/debug/debug_graph_utils_test.cc | #include "tensorflow/core/debug/debug_graph_utils.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/common_runtime/memory_types.h"
#include "tensorflow/core/framework/kernel_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/protobuf/debug.pb.h"
namespace tensorflow {
namespace {
Status ParseBoolString(const string& bool_str, bool* bool_val) {
const string lower_bool_str = absl::AsciiStrToLower(bool_str);
if (lower_bool_str == "false" || lower_bool_str == "f" ||
lower_bool_str == "0") {
*bool_val = false;
} else if (lower_bool_str == "true" || lower_bool_str == "t" ||
lower_bool_str == "1") {
*bool_val = true;
} else {
return absl::InvalidArgumentError(
absl::StrCat("Invalid string for bool value: ", bool_str));
}
return absl::OkStatus();
}
}
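// Rewrites the graph so every watched output slot flows through a Copy (or
// CopyHost) node, which in turn feeds one debug node per requested debug op.
// Non-reference downstream edges are rerouted through the copy node.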
Status DebugNodeInserter::InsertNodes(
const protobuf::RepeatedPtrField<DebugTensorWatch>& watches, Graph* graph,
Device* device) {
if (watches.empty()) {
return absl::OkStatus();
}
std::vector<string> default_debug_ops;
std::vector<string> default_debug_urls;
std::unordered_map<string, std::vector<string>> tensor_watches;
std::unordered_map<string, std::vector<string>> tensor_watch_urls;
std::unordered_map<string, bool> tensor_tolerate_failures;
for (const DebugTensorWatch& watch : watches) {
if (watch.debug_ops().empty()) {
continue;
}
if (watch.debug_urls().empty()) {
continue;
}
if (watch.node_name() == "*") {
if (watch.output_slot() == -1) {
default_debug_ops.insert(default_debug_ops.end(),
watch.debug_ops().begin(),
watch.debug_ops().end());
default_debug_urls.insert(default_debug_urls.end(),
watch.debug_urls().begin(),
watch.debug_urls().end());
} else {
return Status(absl::StatusCode::kFailedPrecondition,
strings::StrCat(
"output_slot is expected to be -1 for wildcard ",
"node name (\"*\"), but got ", watch.output_slot()));
}
continue;
} else {
if (watch.output_slot() < 0) {
return Status(
absl::StatusCode::kFailedPrecondition,
strings::StrCat("A negative output_slot in DebugTensorWatch is ",
"valid only for the wildcard node name (\"*\"), ",
"but got node name ", watch.node_name()));
}
}
string tensor_name =
strings::StrCat(watch.node_name(), ":", watch.output_slot());
std::vector<string> debug_ops;
for (const string& debug_op : watch.debug_ops()) {
debug_ops.push_back(debug_op);
}
tensor_watches[tensor_name] = debug_ops;
tensor_tolerate_failures[tensor_name] =
watch.tolerate_debug_op_creation_failures();
std::vector<string> urls;
for (const string& url : watch.debug_urls()) {
urls.push_back(url);
}
tensor_watch_urls[tensor_name] = urls;
}
if (tensor_watches.empty()) {
return absl::OkStatus();
}
DeviceType device_type = DeviceType{device->device_type()};
std::vector<const Edge*> edges_to_remove;
for (Node* src_node : graph->nodes()) {
std::unordered_map<int, std::vector<const Edge*>> output_slot_to_edges;
for (const Edge* edge : src_node->out_edges()) {
const int src_output = edge->src_output();
if (output_slot_to_edges.find(src_output) == output_slot_to_edges.end()) {
output_slot_to_edges[src_output] = {edge};
} else {
output_slot_to_edges[src_output].push_back(edge);
}
}
for (int src_output_slot = 0; src_output_slot < src_node->num_outputs();
++src_output_slot) {
const string tensor_name =
strings::StrCat(src_node->name(), ":", src_output_slot);
const bool explicit_tensor_match =
tensor_watches.find(tensor_name) != tensor_watches.end();
if (!explicit_tensor_match && default_debug_ops.empty()) {
continue;
}
const DataType src_dt = src_node->output_type(src_output_slot);
MemoryType memory_type;
TF_RETURN_IF_ERROR(MemoryTypeForOutput(device_type, graph, src_node,
src_output_slot, &memory_type));
const std::vector<string> debug_ops = explicit_tensor_match
? tensor_watches[tensor_name]
: default_debug_ops;
const std::vector<string> debug_urls =
explicit_tensor_match ? tensor_watch_urls[tensor_name]
: default_debug_urls;
Node* copy_node;
Status copy_s =
CreateCopyNode(graph, device_type, memory_type == HOST_MEMORY,
src_node->name(), src_output_slot, src_dt, tensor_name,
debug_ops, debug_urls, ©_node);
if (!copy_s.ok()) {
return Status(
absl::StatusCode::kFailedPrecondition,
strings::StrCat("Failed to create Copy/CopyHost node for tensor ",
tensor_name, ", due to: ", copy_s.message()));
}
graph->AddEdge(src_node, src_output_slot, copy_node, 0);
std::vector<Node*> debug_nodes;
for (size_t i = 0; i < debug_ops.size(); ++i) {
const string& debug_op_name = debug_ops[i];
Node* debug_node;
Status debug_s = CreateDebugNode(graph, *device, copy_node->name(),
src_dt, tensor_name, debug_urls, i,
debug_op_name, &debug_node);
if (debug_s.ok()) {
graph->AddEdge(copy_node, 0, debug_node, 0);
debug_nodes.push_back(debug_node);
} else {
if (tensor_tolerate_failures[tensor_name]) {
LOG(INFO) << "Tolerating failure to create debug node: "
<< "tensor name = " << tensor_name << "; "
<< "debug op name = " << debug_op_name;
} else {
return Status(
absl::StatusCode::kFailedPrecondition,
strings::StrCat("Failed to create debug node ", debug_op_name,
" for tensor ", tensor_name,
", due to: ", debug_s.message()));
}
}
}
const bool is_ref = IsRefType(src_node->output_type(src_output_slot));
for (const Edge* edge : output_slot_to_edges[src_output_slot]) {
if (!is_ref) {
edges_to_remove.push_back(edge);
graph->AddEdge(copy_node, 0, edge->dst(), edge->dst_input());
}
for (Node* debug_node : debug_nodes) {
if (!src_node->IsEnter() && !src_node->IsNextIteration()) {
graph->AddEdge(debug_node, Graph::kControlSlot, edge->dst(),
Graph::kControlSlot);
}
}
}
}
}
for (const Edge* edge : edges_to_remove) {
graph->RemoveEdge(edge);
}
return absl::OkStatus();
}
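// Clamps parallel_iterations to 1 on Enter/RefEnter nodes so while-loop
// iterations execute sequentially while debugging.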
void DebugNodeInserter::DeparallelizeWhileLoops(Graph* graph, Device* device) {
bool deparallelized_a_loop = false;
for (Node* node : graph->nodes()) {
if (node->IsEnter()) {
const AttrValue* parallel_iterations =
node->attrs().Find("parallel_iterations");
if (parallel_iterations && parallel_iterations->i() > 1) {
deparallelized_a_loop = true;
VLOG(1) << "Changing the parallel_iterations attribute of the "
<< "Enter/RefEnter node \"" << node->name() << "\" on device \""
<< device->name() << "\" from " << parallel_iterations->i()
<< " to 1.";
node->AddAttr<int64_t>("parallel_iterations", 1);
}
}
}
if (deparallelized_a_loop) {
LOG(INFO) << "For debugging, tfdbg has set the parallel_iterations "
<< "attribute of all scheduled Enter/RefEnter nodes to 1. (This "
<< "does not affect subsequent non-debug runs.)";
}
}
const string DebugNodeInserter::GetCopyNodeName(const string& node_name,
const int output_slot) {
return strings::StrCat("__copy_", node_name, "_", output_slot);
}
const string DebugNodeInserter::GetDebugNodeName(const string& tensor_name,
const int debug_op_num,
const string& debug_op_name) {
return strings::StrCat("__dbg_", tensor_name, "_", debug_op_num, "_",
debug_op_name);
}
Status DebugNodeInserter::CreateCopyNode(
Graph* graph, const DeviceType device_type, const bool is_host_memory,
const string& src_node_name, const int src_output, const DataType src_dt,
const string& tensor_name, const std::vector<string>& debug_ops,
const std::vector<string>& debug_urls, Node** copy_node) {
const string kGatedGrpcAttributeKey = "gated_grpc";
NodeDef node_def;
const KernelDef* kdef;
const string copy_op_name = is_host_memory ? "CopyHost" : "Copy";
const string copy_node_name = GetCopyNodeName(src_node_name, src_output);
std::vector<string> debug_ops_spec;
for (const string& debug_op : debug_ops) {
for (const string& debug_url : debug_urls) {
string debug_op_name_proper;
std::unordered_map<string, string> custom_attributes;
TF_RETURN_IF_ERROR(ParseDebugOpName(debug_op, &debug_op_name_proper,
&custom_attributes));
bool gated_grpc_value = false;
if (custom_attributes.find(kGatedGrpcAttributeKey) !=
custom_attributes.end()) {
TF_RETURN_IF_ERROR(ParseBoolString(
custom_attributes[kGatedGrpcAttributeKey], &gated_grpc_value));
}
debug_ops_spec.push_back(strings::StrCat(debug_op_name_proper, ";",
debug_url, ";",
gated_grpc_value ? "1" : "0"));
}
}
auto builder = NodeDefBuilder(copy_node_name, copy_op_name)
.Input(src_node_name, src_output, src_dt)
.Attr("debug_ops_spec", debug_ops_spec);
if (!builder.Finalize(&node_def).ok()) {
return Status(
absl::StatusCode::kFailedPrecondition,
strings::StrCat("Failed to create node definition ", "for copy op ",
copy_node_name, " on watched tensor ", tensor_name));
}
Status s = FindKernelDef(device_type, node_def, &kdef, nullptr);
if (!s.ok()) {
return Status(
absl::StatusCode::kFailedPrecondition,
strings::StrCat("Failed to find kernel definition ", "for copy op ",
copy_node_name, " on watched tensor ", tensor_name));
}
if (!NodeBuilder(builder).Finalize(graph, copy_node).ok()) {
return Status(absl::StatusCode::kFailedPrecondition,
strings::StrCat("Failed to create copy node ", copy_node_name,
" on watched tensor ", tensor_name));
}
return absl::OkStatus();
}
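// Parses a debug op name that may carry custom attributes, e.g.
//   "DebugIdentity"
//   "DebugNumericSummary(mute_if_healthy=true; threshold=300.0)"
// Attributes are ';'-separated "key=value" pairs inside the parentheses;
// malformed pairs and duplicate keys yield an InvalidArgument error.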
Status DebugNodeInserter::ParseDebugOpName(
const string& debug_op_name, string* debug_op_name_proper,
std::unordered_map<string, string>* attributes) {
const size_t l_index = debug_op_name.find('(');
const size_t r_index = debug_op_name.find(')');
if (l_index == string::npos && r_index == string::npos) {
*debug_op_name_proper = debug_op_name;
} else {
if (l_index == string::npos || l_index == 0 ||
r_index != debug_op_name.size() - 1) {
return absl::InvalidArgumentError(
absl::StrCat("Malformed debug op name \"", debug_op_name, "\""));
}
*debug_op_name_proper = debug_op_name.substr(0, l_index);
string arguments = debug_op_name.substr(l_index + 1, r_index - l_index - 1);
std::vector<string> attribute_segs = str_util::Split(arguments, ";");
for (const string& attribute_seg : attribute_segs) {
StringPiece seg(attribute_seg);
str_util::RemoveWhitespaceContext(&seg);
if (seg.empty()) {
continue;
}
const size_t eq_index = seg.find('=');
if (eq_index == string::npos) {
return absl::InvalidArgumentError(absl::StrCat(
"Malformed attributes in debug op name \"", debug_op_name, "\""));
}
const string key(seg.substr(0, eq_index));
      const string value(seg.substr(eq_index + 1));
if (key.empty() || value.empty()) {
return absl::InvalidArgumentError(absl::StrCat(
"Malformed attributes in debug op name \"", debug_op_name, "\""));
}
if (attributes->find(key) == attributes->end()) {
(*attributes)[key] = value;
} else {
return absl::InvalidArgumentError(
absl::StrCat("Duplicate attribute name \"", key,
"\" found in the debug op: \"", debug_op_name, "\""));
}
}
}
return absl::OkStatus();
}
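// Applies the parsed custom attributes to a debug node, converting each
// value string to the declared type (string, float, int, or bool) of the
// matching attribute in the op definition. Keys that match no attribute
// result in an InvalidArgument error.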
Status DebugNodeInserter::SetDebugNodeAttributes(
Node* debug_node, const std::unordered_map<string, string>& attributes) {
std::unordered_set<string> unfulfilled_keys;
for (const auto& item : attributes) {
unfulfilled_keys.insert(item.first);
}
for (const auto& attr : debug_node->op_def().attr()) {
if (attributes.find(attr.name()) != attributes.end()) {
const string& attr_value = attributes.at(attr.name());
if (attr.type() == "string") {
debug_node->AddAttr<string>(attr.name(), attr_value);
} else if (attr.type() == "float") {
float float_value = 0.0;
if (!::tensorflow::strings::safe_strtof(attr_value, &float_value)) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid value string for float-type attribute ", attr.name(),
"of debug node ", debug_node->name(), ": \"", attr_value, "\""));
}
debug_node->AddAttr<float>(attr.name(), float_value);
} else if (attr.type() == "int") {
int64_t int_value = 0;
if (!::tensorflow::strings::safe_strto64(attr_value, &int_value)) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid value string for int-type attribute ", attr.name(),
"of debug node ", debug_node->name(), ": \"", attr_value, "\""));
}
debug_node->AddAttr<int>(attr.name(), int_value);
} else if (attr.type() == "bool") {
bool bool_value;
if (!ParseBoolString(attr_value, &bool_value).ok()) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid value string for bool-type attribute ", attr.name(),
"of debug node ", debug_node->name(), ": \"", attr_value, "\""));
}
debug_node->AddAttr<bool>(attr.name(), bool_value);
} else {
return absl::InvalidArgumentError(
absl::StrCat("Unsupported type of custom attribute for debug ops: ",
attr.type()));
}
unfulfilled_keys.erase(attr.name());
}
}
if (unfulfilled_keys.empty()) {
return absl::OkStatus();
} else {
return absl::InvalidArgumentError(absl::StrCat(
unfulfilled_keys.size(),
" attribute key(s) were not valid for debug node ", debug_node->name(),
": ", absl::StrJoin(unfulfilled_keys, ", ")));
}
}
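// Instantiates a single debug op node that reads from output 0 of the copy
// node and publishes the tensor to the given debug URLs.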
Status DebugNodeInserter::CreateDebugNode(
Graph* graph, const Device& device, const string& src_copy_node_name,
const DataType src_dt, const string& tensor_name,
const std::vector<string>& debug_urls, const int debug_op_num,
const string& debug_op_name, Node** debug_node) {
NodeDef node_def;
const KernelDef* kdef;
string debug_op_name_proper;
std::unordered_map<string, string> custom_attributes;
TF_RETURN_IF_ERROR(ParseDebugOpName(debug_op_name, &debug_op_name_proper,
&custom_attributes));
const string debug_node_name =
GetDebugNodeName(tensor_name, debug_op_num, debug_op_name_proper);
auto builder = NodeDefBuilder(debug_node_name, debug_op_name_proper)
.Input(src_copy_node_name, 0, src_dt)
.Attr("device_name", device.name())
.Attr("tensor_name", tensor_name)
.Attr("debug_urls", debug_urls);
if (!builder.Finalize(&node_def).ok()) {
return absl::FailedPreconditionError(
absl::StrCat("Failed to create node definition for debug op ",
debug_op_name_proper, " on watched tensor ", tensor_name));
}
if (!FindKernelDef(DeviceType(device.device_type()), node_def, &kdef, nullptr)
.ok()) {
return absl::FailedPreconditionError(
absl::StrCat("Failed to find kernel definition for debug op ",
debug_op_name_proper, " on watched tensor ", tensor_name));
}
if (!NodeBuilder(builder).Finalize(graph, debug_node).ok()) {
return absl::FailedPreconditionError(
absl::StrCat("Failed to create debug node ", debug_op_name_proper,
" on watched tensor ", tensor_name));
}
if (!custom_attributes.empty()) {
TF_RETURN_IF_ERROR(SetDebugNodeAttributes(*debug_node, custom_attributes));
}
return absl::OkStatus();
}
} | #include "tensorflow/core/debug/debug_graph_utils.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
class DebugGraphUtilsTest : public ::testing::Test {
protected:
Status ParseDebugOpName(const string& debug_op_name,
string* debug_op_name_proper,
std::unordered_map<string, string>* attributes) {
return DebugNodeInserter::ParseDebugOpName(
debug_op_name, debug_op_name_proper, attributes);
}
};
TEST_F(DebugGraphUtilsTest, TestParseNoAttributeDebugOpName) {
string debug_op_name_proper;
std::unordered_map<string, string> attributes;
TF_ASSERT_OK(
ParseDebugOpName("DebugIdentity", &debug_op_name_proper, &attributes));
ASSERT_EQ("DebugIdentity", debug_op_name_proper);
ASSERT_EQ(0, attributes.size());
}
TEST_F(DebugGraphUtilsTest, TestMalformedDebugOpName) {
string debug_op_name_proper;
std::unordered_map<string, string> attributes;
Status s = ParseDebugOpName("(mute_if_healthy=true)", &debug_op_name_proper,
&attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
s = ParseDebugOpName("DebugNumericSummary(", &debug_op_name_proper,
&attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
s = ParseDebugOpName("DebugNumericSummary)", &debug_op_name_proper,
&attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
}
TEST_F(DebugGraphUtilsTest, TestDebugOpNameWithMalformedAttributes) {
string debug_op_name_proper;
std::unordered_map<string, string> attributes;
Status s = ParseDebugOpName("DebugNumericSummary(=)", &debug_op_name_proper,
&attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
s = ParseDebugOpName("DebugNumericSummary(mute_if_healthy=)",
&debug_op_name_proper, &attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
s = ParseDebugOpName("DebugNumericSummary(=true)", &debug_op_name_proper,
&attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
s = ParseDebugOpName("DebugNumericSummary(mute_if_healthy:true)",
&debug_op_name_proper, &attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
s = ParseDebugOpName("DebugNumericSummary(mute_if_healthy=true;threshold=)",
&debug_op_name_proper, &attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
s = ParseDebugOpName(
"DebugNumericSummary(mute_if_healthy=true;threshold:300.0)",
&debug_op_name_proper, &attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
}
TEST_F(DebugGraphUtilsTest, TestValidDebugOpNameWithSingleAttribute) {
string debug_op_name_proper;
std::unordered_map<string, string> attributes;
TF_ASSERT_OK(ParseDebugOpName("DebugNumericSummary()", &debug_op_name_proper,
&attributes));
ASSERT_EQ("DebugNumericSummary", debug_op_name_proper);
ASSERT_EQ(0, attributes.size());
attributes.clear();
TF_ASSERT_OK(ParseDebugOpName("DebugNumericSummary(mute_if_healthy=true)",
&debug_op_name_proper, &attributes));
ASSERT_EQ("DebugNumericSummary", debug_op_name_proper);
ASSERT_EQ(1, attributes.size());
ASSERT_EQ("true", attributes["mute_if_healthy"]);
}
TEST_F(DebugGraphUtilsTest, TestValidDebugOpNameWithMultipleAttributes) {
string debug_op_name_proper;
std::unordered_map<string, string> attributes;
TF_ASSERT_OK(ParseDebugOpName(
"DebugNumericSummary(mute_if_healthy=true; threshold=300.0)",
&debug_op_name_proper, &attributes));
ASSERT_EQ("DebugNumericSummary", debug_op_name_proper);
ASSERT_EQ(2, attributes.size());
ASSERT_EQ("true", attributes["mute_if_healthy"]);
ASSERT_EQ("300.0", attributes["threshold"]);
attributes.clear();
TF_ASSERT_OK(ParseDebugOpName(
"DebugNumericSummary(mute_if_healthy=true;threshold=300.0;first_n=100)",
&debug_op_name_proper, &attributes));
ASSERT_EQ("DebugNumericSummary", debug_op_name_proper);
ASSERT_EQ(3, attributes.size());
ASSERT_EQ("true", attributes["mute_if_healthy"]);
ASSERT_EQ("300.0", attributes["threshold"]);
ASSERT_EQ("100", attributes["first_n"]);
}
TEST_F(DebugGraphUtilsTest, TestDebugOpNameWithDuplicateAttributes) {
string debug_op_name_proper;
std::unordered_map<string, string> attributes;
Status s = ParseDebugOpName(
"DebugNumericSummary(mute_if_healthy=true; lower_bound=3; "
"mute_if_healthy=false;)",
&debug_op_name_proper, &attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
}
TEST_F(DebugGraphUtilsTest, TestValidDebugOpNameWithWhitespaceInAttributes) {
string debug_op_name_proper;
std::unordered_map<string, string> attributes;
TF_ASSERT_OK(ParseDebugOpName(
"DebugNumericSummary( mute_if_healthy=true; threshold=300.0 )",
&debug_op_name_proper, &attributes));
ASSERT_EQ("DebugNumericSummary", debug_op_name_proper);
ASSERT_EQ(2, attributes.size());
ASSERT_EQ("true", attributes["mute_if_healthy"]);
ASSERT_EQ("300.0", attributes["threshold"]);
attributes.clear();
TF_ASSERT_OK(ParseDebugOpName(
"DebugNumericSummary(;;mute_if_healthy=true; threshold=300.0;;)",
&debug_op_name_proper, &attributes));
ASSERT_EQ("DebugNumericSummary", debug_op_name_proper);
ASSERT_EQ(2, attributes.size());
ASSERT_EQ("true", attributes["mute_if_healthy"]);
ASSERT_EQ("300.0", attributes["threshold"]);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/debug/debug_graph_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/debug/debug_graph_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |