Mirror of https://github.com/zebrajr/tensorflow.git (synced 2025-12-06 00:19:58 +01:00)

Automated Code Change

PiperOrigin-RevId: 825902854

Parent: a36834c399
Commit: 6f1d4574bd
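Judging from the hunks below, the automated change replaces TensorFlow's legacy type aliases (unqualified string and the width aliases uint64, uint32, uint16, uint8, int32, int64) with their standard spellings, std::string and the <cstdint> typedefs (uint64_t, int32_t, and so on), plus a few related cleanups such as using std::numeric_limits<int64_t>::max() in place of kint64max. The short program below is a minimal, hypothetical sketch of the "after" style only; BucketFor and its FNV-1a hash are illustrative stand-ins, not TensorFlow APIs (the real kernels use hashes such as Fingerprint64 and Hash64).

#include <cstdint>
#include <iostream>
#include <string>

// Hypothetical example only: written with the modernized spellings this
// change migrates to (std::string, std::uint64_t) instead of the legacy
// aliases (string, uint64). FNV-1a stands in for TF's Fingerprint64.
std::uint64_t BucketFor(const std::string& s, std::uint64_t num_buckets) {
  std::uint64_t hash = 14695981039346656037ULL;  // FNV-1a 64-bit offset basis
  for (unsigned char c : s) {
    hash ^= c;
    hash *= 1099511628211ULL;  // FNV-1a 64-bit prime
  }
  // num_buckets is assumed to be positive, so the modulo stays in range.
  return hash % num_buckets;
}

int main() {
  std::cout << BucketFor("abc", 100) << "\n";  // prints a bucket id in [0, 100)
  return 0;
}

The hunks that follow are the actual diff of the commit (file names were not preserved by the page extraction).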
@@ -43,7 +43,7 @@ class StringStripOp : public OpKernel {
 for (int64_t i = 0; i < input.size(); ++i) {
 absl::string_view entry(input(i));
 str_util::RemoveWhitespaceContext(&entry);
-output(i) = string(entry);
+output(i) = std::string(entry);
 }
 }
 };

@@ -26,7 +26,7 @@ limitations under the License.
 
 namespace tensorflow {
 
-template <uint64 hash(absl::string_view)>
+template <uint64_t hash(absl::string_view)>
 class StringToHashBucketOp : public OpKernel {
 public:
 explicit StringToHashBucketOp(OpKernelConstruction* ctx) : OpKernel(ctx) {

@@ -46,8 +46,8 @@ class StringToHashBucketOp : public OpKernel {
 
 typedef decltype(input_flat.size()) Index;
 for (Index i = 0; i < input_flat.size(); ++i) {
-const uint64 input_hash = hash(input_flat(i));
-const uint64 bucket_id = input_hash % num_buckets_;
+const uint64_t input_hash = hash(input_flat(i));
+const uint64_t bucket_id = input_hash % num_buckets_;
 // The number of buckets is always in the positive range of int64 so is
 // the resulting bucket_id. Casting the bucket_id from uint64 to int64 is
 // safe.

@@ -42,8 +42,8 @@ class LegacyStringToHashBucketOp : public OpKernel {
 
 typedef decltype(input_flat.size()) Index;
 for (Index i = 0; i < input_flat.size(); ++i) {
-const uint64 input_hash = Hash64(input_flat(i));
-const uint64 bucket_id = input_hash % num_buckets_;
+const uint64_t input_hash = Hash64(input_flat(i));
+const uint64_t bucket_id = input_hash % num_buckets_;
 // The number of buckets is always in the positive range of int64 so is
 // the resulting bucket_id. Casting the bucket_id from uint64 to int64 is
 // safe.

@@ -26,7 +26,7 @@ limitations under the License.
 
 namespace tensorflow {
 
-template <uint64 hash(const uint64 (&)[2], const string&)>
+template <uint64_t hash(const uint64_t (&)[2], const std::string&)>
 class StringToKeyedHashBucketOp : public OpKernel {
 public:
 explicit StringToKeyedHashBucketOp(OpKernelConstruction* ctx)

@@ -53,8 +53,8 @@ class StringToKeyedHashBucketOp : public OpKernel {
 
 typedef decltype(input_flat.size()) Index;
 for (Index i = 0; i < input_flat.size(); ++i) {
-const uint64 input_hash = hash(key_, input_flat(i));
-const uint64 bucket_id = input_hash % num_buckets_;
+const uint64_t input_hash = hash(key_, input_flat(i));
+const uint64_t bucket_id = input_hash % num_buckets_;
 // The number of buckets is always in the positive range of int64 so is
 // the resulting bucket_id. Casting the bucket_id from uint64 to int64 is
 // safe.

@@ -64,7 +64,7 @@ class StringToKeyedHashBucketOp : public OpKernel {
 
 private:
 int64_t num_buckets_;
-uint64 key_[2];
+uint64_t key_[2];
 
 StringToKeyedHashBucketOp(const StringToKeyedHashBucketOp&) = delete;
 void operator=(const StringToKeyedHashBucketOp&) = delete;

@@ -68,7 +68,7 @@ class StringToNumberOp : public OpKernel {
 StringToNumberOp<type>)
 REGISTER(float);
 REGISTER(double);
-REGISTER(int32);
+REGISTER(int32_t);
 REGISTER(int64_t);
 REGISTER(uint32_t);
 REGISTER(uint64_t);

@@ -63,7 +63,7 @@ class StringUpperOp : public OpKernel {
 }
 
 private:
-string encoding_;
+std::string encoding_;
 };
 
 REGISTER_KERNEL_BUILDER(Name("StringUpper").Device(DEVICE_CPU), StringUpperOp);

@@ -19,7 +19,7 @@ limitations under the License.
 namespace tensorflow {
 
 // Sets unit value based on str.
-absl::Status ParseUnicodeEncoding(const string& str,
+absl::Status ParseUnicodeEncoding(const std::string& str,
 UnicodeEncoding* encoding) {
 if (str == "UTF-8") {
 *encoding = UnicodeEncoding::UTF8;

@@ -36,7 +36,7 @@ absl::Status ParseUnicodeEncoding(const string& str,
 }
 
 // Sets unit value based on str.
-absl::Status ParseCharUnit(const string& str, CharUnit* unit) {
+absl::Status ParseCharUnit(const std::string& str, CharUnit* unit) {
 if (str == "BYTE") {
 *unit = CharUnit::BYTE;
 } else if (str == "UTF8_CHAR") {

@@ -50,7 +50,7 @@ absl::Status ParseCharUnit(const string& str, CharUnit* unit) {
 
 // Return the number of Unicode characters in a UTF-8 string.
 // Result may be incorrect if the input string is not valid UTF-8.
-int32 UTF8StrLen(const string& str) {
+int32_t UTF8StrLen(const std::string& str) {
 const int32_t byte_size = str.size();
 const char* const end = str.data() + byte_size;
 const char* ptr = str.data();

@@ -33,14 +33,15 @@ enum class CharUnit { BYTE, UTF8_CHAR };
 inline bool IsTrailByte(char x) { return static_cast<signed char>(x) < -0x40; }
 
 // Sets `encoding` based on `str`.
-absl::Status ParseUnicodeEncoding(const string& str, UnicodeEncoding* encoding);
+absl::Status ParseUnicodeEncoding(const std::string& str,
+UnicodeEncoding* encoding);
 
 // Sets `unit` value based on `str`.
-absl::Status ParseCharUnit(const string& str, CharUnit* unit);
+absl::Status ParseCharUnit(const std::string& str, CharUnit* unit);
 
 // Returns the number of Unicode characters in a UTF-8 string.
 // Result may be incorrect if the input string is not valid UTF-8.
-int32 UTF8StrLen(const string& str);
+int32_t UTF8StrLen(const std::string& str);
 
 // Get the next UTF8 character position starting at the given position and
 // skipping the given number of characters. Position is a byte offset, and

@@ -39,7 +39,7 @@ template <typename T>
 class SubstrOp : public OpKernel {
 public:
 explicit SubstrOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
-string unit;
+std::string unit;
 OP_REQUIRES_OK(ctx, ctx->GetAttr("unit", &unit));
 OP_REQUIRES_OK(ctx, ParseCharUnit(unit, &unit_));
 }

@@ -342,6 +342,6 @@ class SubstrOp : public OpKernel {
 REGISTER_KERNEL_BUILDER( \
 Name("Substr").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
 SubstrOp<type>);
-REGISTER_SUBSTR(int32);
+REGISTER_SUBSTR(int32_t);
 REGISTER_SUBSTR(int64_t);
 } // namespace tensorflow

@@ -136,9 +136,9 @@ Graph* SetupSubstrGraph(const Tensor& input, const int32_t pos,
 const int32_t len, const char* const unit) {
 Graph* g = new Graph(OpRegistry::Global());
 Tensor position(DT_INT32, TensorShape({}));
-position.flat<int32>().setConstant(pos);
+position.flat<int32_t>().setConstant(pos);
 Tensor length(DT_INT32, TensorShape({}));
-length.flat<int32>().setConstant(len);
+length.flat<int32_t>().setConstant(len);
 
 TF_CHECK_OK(NodeBuilder("substr_op", "Substr")
 .Input(test::graph::Constant(g, input))

@@ -44,7 +44,7 @@ class SummaryAudioOp : public OpKernel {
 OP_REQUIRES(c, tensor.dims() >= 2 && tensor.dims() <= 3,
 errors::InvalidArgument("Tensor must be 3-D or 2-D, got: ",
 tensor.shape().DebugString()));
-const string& base_tag = tag.scalar<tstring>()();
+const std::string& base_tag = tag.scalar<tstring>()();
 
 float sample_rate = sample_rate_attr_;
 if (!has_sample_rate_attr_) {

@@ -37,7 +37,7 @@ namespace tensorflow {
 namespace {
 
 static void EXPECT_SummaryMatches(const Summary& actual,
-const string& expected_str) {
+const std::string& expected_str) {
 Summary expected;
 CHECK(protobuf::TextFormat::ParseFromString(expected_str, &expected));
 EXPECT_EQ(expected.DebugString(), actual.DebugString());

@@ -28,14 +28,14 @@ namespace tensorflow {
 
 class SummaryImageOp : public OpKernel {
 public:
-typedef Eigen::Tensor<uint8, 2, Eigen::RowMajor> Uint8Image;
+typedef Eigen::Tensor<uint8_t, 2, Eigen::RowMajor> Uint8Image;
 
 explicit SummaryImageOp(OpKernelConstruction* context) : OpKernel(context) {
 int64_t max_images_tmp;
 OP_REQUIRES_OK(context, context->GetAttr("max_images", &max_images_tmp));
 OP_REQUIRES(context, max_images_tmp < (1LL << 31),
 errors::InvalidArgument("max_images must be < 2^31"));
-max_images_ = static_cast<int32>(max_images_tmp);
+max_images_ = static_cast<int32_t>(max_images_tmp);
 const TensorProto* proto;
 OP_REQUIRES_OK(context, context->GetAttr("bad_color", &proto));
 OP_REQUIRES_OK(context, context->device()->MakeTensorFromProto(

@@ -61,7 +61,7 @@ class SummaryImageOp : public OpKernel {
 errors::InvalidArgument(
 "Tensor must be 4-D with last dim 1, 3, or 4, not ",
 tensor.shape().DebugString()));
-const string& base_tag = tags.scalar<tstring>()();
+const std::string& base_tag = tags.scalar<tstring>()();
 
 OP_REQUIRES(c,
 tensor.dim_size(0) < (1LL << 31) &&

@@ -87,8 +87,8 @@ class SummaryImageOp : public OpKernel {
 if (tensor.dtype() == DT_UINT8) {
 // For uint8 input, no normalization is necessary
 auto ith_image = [&tensor, batch_size, hw, depth](int i) {
-auto values = tensor.shaped<uint8, 3>({batch_size, hw, depth});
-return typename TTypes<uint8>::ConstMatrix(
+auto values = tensor.shaped<uint8_t, 3>({batch_size, hw, depth});
+return typename TTypes<uint8_t>::ConstMatrix(
 &values(i, 0, 0), Eigen::DSizes<Eigen::DenseIndex, 2>(hw, depth));
 };
 OP_REQUIRES_OK(

@@ -112,14 +112,14 @@ class SummaryImageOp : public OpKernel {
 template <class T>
 void NormalizeAndAddImages(OpKernelContext* c, const Tensor& tensor, int h,
 int w, int hw, int depth, int batch_size,
-const string& base_tag, Summary* s) {
+const std::string& base_tag, Summary* s) {
 // For float and half images, nans and infs are replaced with bad_color.
 OP_REQUIRES(c, bad_color_.dim_size(0) >= depth,
 errors::InvalidArgument(
 "expected depth <= bad_color.size, got depth = ", depth,
 ", bad_color.size = ", bad_color_.dim_size(0)));
-auto bad_color_full = bad_color_.vec<uint8>();
-typename TTypes<uint8>::ConstVec bad_color(bad_color_full.data(), depth);
+auto bad_color_full = bad_color_.vec<uint8_t>();
+typename TTypes<uint8_t>::ConstVec bad_color(bad_color_full.data(), depth);
 
 // Float images must be scaled and translated.
 Uint8Image image(hw, depth);

@@ -142,7 +142,7 @@ class SummaryImageOp : public OpKernel {
 // differently in the float and uint8 cases: the float case needs a temporary
 // buffer which can be shared across calls to ith_image, but the uint8 case
 // does not.
-absl::Status AddImages(const string& tag, int batch_size, int w, int h,
+absl::Status AddImages(const std::string& tag, int batch_size, int w, int h,
 int depth,
 const std::function<Uint8Image(int)>& ith_image,
 Summary* s) {

@@ -180,7 +180,7 @@ class SummaryImageOp : public OpKernel {
 template <class T>
 static void NormalizeFloatImage(int hw, int depth,
 typename TTypes<T>::ConstMatrix values,
-typename TTypes<uint8>::ConstVec bad_color,
+typename TTypes<uint8_t>::ConstVec bad_color,
 Uint8Image* image) {
 if (!image->size()) return; // Nothing to do for empty images
 

@@ -241,7 +241,7 @@ class SummaryImageOp : public OpKernel {
 }
 if (finite) {
 image->chip<0>(i) = (values.template chip<0>(i) * scale + offset)
-.template cast<uint8>();
+.template cast<uint8_t>();
 } else {
 image->chip<0>(i) = bad_color;
 }

@@ -249,7 +249,7 @@ class SummaryImageOp : public OpKernel {
 }
 
 private:
-int32 max_images_;
+int32_t max_images_;
 Tensor bad_color_;
 };
 

@@ -37,7 +37,7 @@ namespace tensorflow {
 namespace {
 
 static void EXPECT_SummaryMatches(const Summary& actual,
-const string& expected_str) {
+const std::string& expected_str) {
 Summary expected;
 CHECK(protobuf::TextFormat::ParseFromString(expected_str, &expected));
 EXPECT_EQ(expected.DebugString(), actual.DebugString());

@@ -36,21 +36,21 @@ class SummaryWriterInterface : public ResourceBase {
 
 // These are called in the OpKernel::Compute methods for the summary ops.
 virtual absl::Status WriteTensor(int64_t global_step, Tensor t,
-const string& tag,
-const string& serialized_metadata) = 0;
+const std::string& tag,
+const std::string& serialized_metadata) = 0;
 
 virtual absl::Status WriteScalar(int64_t global_step, Tensor t,
-const string& tag) = 0;
+const std::string& tag) = 0;
 
 virtual absl::Status WriteHistogram(int64_t global_step, Tensor t,
-const string& tag) = 0;
+const std::string& tag) = 0;
 
 virtual absl::Status WriteImage(int64_t global_step, Tensor t,
-const string& tag, int max_images,
+const std::string& tag, int max_images,
 Tensor bad_color) = 0;
 
 virtual absl::Status WriteAudio(int64_t global_step, Tensor t,
-const string& tag, int max_outputs_,
+const std::string& tag, int max_outputs_,
 float sample_rate) = 0;
 
 virtual absl::Status WriteGraph(int64_t global_step,

@@ -40,19 +40,19 @@ class CreateSummaryFileWriterOp : public OpKernel {
 OP_REQUIRES_OK(ctx, ctx->input("logdir", &tmp));
 OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(tmp->shape()),
 errors::InvalidArgument("logdir must be a scalar"));
-const string logdir = tmp->scalar<tstring>()();
+const std::string logdir = tmp->scalar<tstring>()();
 OP_REQUIRES_OK(ctx, ctx->input("max_queue", &tmp));
 OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(tmp->shape()),
 errors::InvalidArgument("max_queue must be a scalar"));
-const int32_t max_queue = tmp->scalar<int32>()();
+const int32_t max_queue = tmp->scalar<int32_t>()();
 OP_REQUIRES_OK(ctx, ctx->input("flush_millis", &tmp));
 OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(tmp->shape()),
 errors::InvalidArgument("flush_millis must be a scalar"));
-const int32_t flush_millis = tmp->scalar<int32>()();
+const int32_t flush_millis = tmp->scalar<int32_t>()();
 OP_REQUIRES_OK(ctx, ctx->input("filename_suffix", &tmp));
 OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(tmp->shape()),
 errors::InvalidArgument("filename_suffix must be a scalar"));
-const string filename_suffix = tmp->scalar<tstring>()();
+const std::string filename_suffix = tmp->scalar<tstring>()();
 
 core::RefCountPtr<SummaryWriterInterface> s;
 OP_REQUIRES_OK(ctx, LookupOrCreateResource<SummaryWriterInterface>(

@@ -75,13 +75,13 @@ class CreateSummaryDbWriterOp : public OpKernel {
 void Compute(OpKernelContext* ctx) override {
 const Tensor* tmp;
 OP_REQUIRES_OK(ctx, ctx->input("db_uri", &tmp));
-const string db_uri = tmp->scalar<tstring>()();
+const std::string db_uri = tmp->scalar<tstring>()();
 OP_REQUIRES_OK(ctx, ctx->input("experiment_name", &tmp));
-const string experiment_name = tmp->scalar<tstring>()();
+const std::string experiment_name = tmp->scalar<tstring>()();
 OP_REQUIRES_OK(ctx, ctx->input("run_name", &tmp));
-const string run_name = tmp->scalar<tstring>()();
+const std::string run_name = tmp->scalar<tstring>()();
 OP_REQUIRES_OK(ctx, ctx->input("user_name", &tmp));
-const string user_name = tmp->scalar<tstring>()();
+const std::string user_name = tmp->scalar<tstring>()();
 
 core::RefCountPtr<SummaryWriterInterface> s;
 OP_REQUIRES_OK(

@@ -140,9 +140,9 @@ class WriteSummaryOp : public OpKernel {
 OP_REQUIRES_OK(ctx, ctx->input("step", &tmp));
 const int64_t step = tmp->scalar<int64_t>()();
 OP_REQUIRES_OK(ctx, ctx->input("tag", &tmp));
-const string& tag = tmp->scalar<tstring>()();
+const std::string& tag = tmp->scalar<tstring>()();
 OP_REQUIRES_OK(ctx, ctx->input("summary_metadata", &tmp));
-const string& serialized_metadata = tmp->scalar<tstring>()();
+const std::string& serialized_metadata = tmp->scalar<tstring>()();
 
 const Tensor* t;
 OP_REQUIRES_OK(ctx, ctx->input("tensor", &t));

@@ -220,7 +220,7 @@ class WriteScalarSummaryOp : public OpKernel {
 OP_REQUIRES_OK(ctx, ctx->input("step", &tmp));
 const int64_t step = tmp->scalar<int64_t>()();
 OP_REQUIRES_OK(ctx, ctx->input("tag", &tmp));
-const string& tag = tmp->scalar<tstring>()();
+const std::string& tag = tmp->scalar<tstring>()();
 
 const Tensor* t;
 OP_REQUIRES_OK(ctx, ctx->input("value", &t));

@@ -242,7 +242,7 @@ class WriteHistogramSummaryOp : public OpKernel {
 OP_REQUIRES_OK(ctx, ctx->input("step", &tmp));
 const int64_t step = tmp->scalar<int64_t>()();
 OP_REQUIRES_OK(ctx, ctx->input("tag", &tmp));
-const string& tag = tmp->scalar<tstring>()();
+const std::string& tag = tmp->scalar<tstring>()();
 
 const Tensor* t;
 OP_REQUIRES_OK(ctx, ctx->input("values", &t));

@@ -260,7 +260,7 @@ class WriteImageSummaryOp : public OpKernel {
 OP_REQUIRES_OK(ctx, ctx->GetAttr("max_images", &max_images_tmp));
 OP_REQUIRES(ctx, max_images_tmp < (1LL << 31),
 errors::InvalidArgument("max_images must be < 2^31"));
-max_images_ = static_cast<int32>(max_images_tmp);
+max_images_ = static_cast<int32_t>(max_images_tmp);
 }
 
 void Compute(OpKernelContext* ctx) override {

@@ -270,7 +270,7 @@ class WriteImageSummaryOp : public OpKernel {
 OP_REQUIRES_OK(ctx, ctx->input("step", &tmp));
 const int64_t step = tmp->scalar<int64_t>()();
 OP_REQUIRES_OK(ctx, ctx->input("tag", &tmp));
-const string& tag = tmp->scalar<tstring>()();
+const std::string& tag = tmp->scalar<tstring>()();
 const Tensor* bad_color;
 OP_REQUIRES_OK(ctx, ctx->input("bad_color", &bad_color));
 OP_REQUIRES(

@@ -285,7 +285,7 @@ class WriteImageSummaryOp : public OpKernel {
 }
 
 private:
-int32 max_images_;
+int32_t max_images_;
 };
 REGISTER_KERNEL_BUILDER(Name("WriteImageSummary").Device(DEVICE_CPU),
 WriteImageSummaryOp);

@@ -305,7 +305,7 @@ class WriteAudioSummaryOp : public OpKernel {
 OP_REQUIRES_OK(ctx, ctx->input("step", &tmp));
 const int64_t step = tmp->scalar<int64_t>()();
 OP_REQUIRES_OK(ctx, ctx->input("tag", &tmp));
-const string& tag = tmp->scalar<tstring>()();
+const std::string& tag = tmp->scalar<tstring>()();
 OP_REQUIRES_OK(ctx, ctx->input("sample_rate", &tmp));
 const float sample_rate = tmp->scalar<float>()();
 

@@ -39,7 +39,7 @@ namespace tensorflow {
 namespace {
 
 static void EXPECT_SummaryMatches(const Summary& actual,
-const string& expected_str) {
+const std::string& expected_str) {
 Summary expected;
 CHECK(protobuf::TextFormat::ParseFromString(expected_str, &expected));
 EXPECT_EQ(expected.DebugString(), actual.DebugString());

@@ -43,7 +43,7 @@ class SummaryTensorOpV2 : public OpKernel {
 
 Summary s;
 Summary::Value* v = s.add_value();
-v->set_tag(string(tag.scalar<tstring>()())); // NOLINT
+v->set_tag(std::string(tag.scalar<tstring>()())); // NOLINT
 
 if (tensor.dtype() == DT_STRING) {
 // tensor_util.makeNdarray doesn't work for strings in tensor_content

@@ -37,7 +37,7 @@ namespace tensorflow {
 namespace {
 
 static void EXPECT_SummaryMatches(const Summary& actual,
-const string& expected_str) {
+const std::string& expected_str) {
 Summary expected;
 CHECK(protobuf::TextFormat::ParseFromString(expected_str, &expected));
 EXPECT_EQ(expected.DebugString(), actual.DebugString());

@@ -136,8 +136,9 @@ class TensorArray : public ResourceBase {
 // 'N' elements. While the underlying storage is a std::vector and
 // can hold more than MAX_INT entries, in practice we do not expect
 // users to construct this many Tensors for storage in a TensorArray.
-TensorArray(const string& key, const DataType& dtype, const Tensor& handle,
-int32_t N, const PartialTensorShape& element_shape,
+TensorArray(const std::string& key, const DataType& dtype,
+const Tensor& handle, int32_t N,
+const PartialTensorShape& element_shape,
 bool identical_element_shapes, bool dynamic_size,
 bool multiple_writes_aggregate, bool is_grad, int32_t marked_size,
 bool clear_after_read)

@@ -193,7 +194,7 @@ class TensorArray : public ResourceBase {
 
 template <typename Device, typename T>
 absl::Status WriteOrAggregateMany(OpKernelContext* ctx,
-const std::vector<int32>& indices,
+const std::vector<int32_t>& indices,
 std::vector<Tensor>* values) {
 mutex_lock l(mu_);
 int32_t i = 0;

@@ -228,7 +229,8 @@ class TensorArray : public ResourceBase {
 }
 
 template <typename Device, typename T>
-absl::Status ReadMany(OpKernelContext* ctx, const std::vector<int32>& indices,
+absl::Status ReadMany(OpKernelContext* ctx,
+const std::vector<int32_t>& indices,
 std::vector<Tensor>* values) {
 mutex_lock l(mu_);
 values->clear();

@@ -260,7 +262,7 @@ class TensorArray : public ResourceBase {
 return absl::OkStatus();
 }
 
-string DebugString() const override {
+std::string DebugString() const override {
 mutex_lock l(mu_);
 CHECK(!closed_);
 return absl::StrCat("TensorArray[", tensors_.size(), "]");

@@ -272,7 +274,7 @@ class TensorArray : public ResourceBase {
 }
 
 // Return the size of the TensorArray.
-absl::Status Size(int32* size) {
+absl::Status Size(int32_t* size) {
 mutex_lock l(mu_);
 TF_RETURN_IF_ERROR(LockedReturnIfClosed());
 *size = tensors_.size();

@@ -290,7 +292,7 @@ class TensorArray : public ResourceBase {
 }
 
 // Return the marked size of the TensorArray.
-absl::Status MarkedSize(int32* size) {
+absl::Status MarkedSize(int32_t* size) {
 mutex_lock l(mu_);
 TF_RETURN_IF_ERROR(LockedReturnIfClosed());
 *size = marked_size_;

@@ -298,7 +300,7 @@ class TensorArray : public ResourceBase {
 }
 
 // Return the size that should be used by pack or concat op.
-absl::Status PackOrConcatSize(int32* size) {
+absl::Status PackOrConcatSize(int32_t* size) {
 mutex_lock l(mu_);
 TF_RETURN_IF_ERROR(LockedReturnIfClosed());
 *size = is_grad_ ? marked_size_ : tensors_.size();

@@ -372,7 +374,7 @@ class TensorArray : public ResourceBase {
 return absl::OkStatus();
 }
 
-const string key_;
+const std::string key_;
 
 const DataType dtype_;
 Tensor handle_;

@@ -401,7 +403,7 @@ class TensorArray : public ResourceBase {
 
 // The size of the TensorArray after a (legacy) unpack or split is performed.
 // -1 if there has been no unpack or split performed on the TensorArray.
-int32 marked_size_;
+int32_t marked_size_;
 
 // The shape of each element in the TensorArray, may be partially known or not
 // known at all.
@@ -57,8 +57,8 @@ typedef Eigen::GpuDevice GPUDevice;
 
 namespace tensorflow {
 
-absl::Status GetHandle(OpKernelContext* ctx, string* container,
-string* ta_handle) {
+absl::Status GetHandle(OpKernelContext* ctx, std::string* container,
+std::string* ta_handle) {
 {
 Tensor tensor;
 // Assuming that handle is the input at index 0.

@@ -80,8 +80,8 @@ absl::Status GetHandle(OpKernelContext* ctx, string* container,
 }
 
 absl::Status GetTensorArray(OpKernelContext* ctx, TensorArray** tensor_array) {
-string container;
-string ta_handle;
+std::string container;
+std::string ta_handle;
 if (ctx->input_dtype(0) != DT_RESOURCE) {
 TF_RETURN_IF_ERROR(GetHandle(ctx, &container, &ta_handle));
 ResourceMgr* rm = ctx->resource_manager();

@@ -197,13 +197,13 @@ class TensorArrayOp : public TensorArrayCreationOp {
 "TensorArray size must be scalar, but had shape: ",
 tensor_size->shape().DebugString());
 }
-const int32_t size = tensor_size->scalar<int32>()();
+const int32_t size = tensor_size->scalar<int32_t>()();
 if (size < 0) {
 return errors::InvalidArgument("Size should be >= 0.");
 }
 
 auto handle = tensor_array_output_handle->flat<tstring>();
-string unique_tensor_array_name =
+std::string unique_tensor_array_name =
 absl::StrCat(tensor_array_name_, "_",
 TensorArray::tensor_array_counter.fetch_add(1));
 handle(0) = "_tensor_arrays";

@@ -230,7 +230,7 @@ class TensorArrayOp : public TensorArrayCreationOp {
 bool identical_element_shapes_;
 bool dynamic_size_;
 bool clear_after_read_;
-string tensor_array_name_; // The name used to create the TensorArray.
+std::string tensor_array_name_; // The name used to create the TensorArray.
 
 TensorArrayOp(const TensorArrayOp&) = delete;
 void operator=(const TensorArrayOp&) = delete;

@@ -314,8 +314,8 @@ class TensorArrayGradOp : public TensorArrayCreationOp {
 absl::Status CreateTensorArray(OpKernelContext* ctx, ResourceMgr* rm,
 Tensor* tensor_array_output_handle,
 TensorArray** output_tensor_array) override {
-string container;
-string tensor_array_name;
+std::string container;
+std::string tensor_array_name;
 if (ctx->input_dtype(0) != DT_RESOURCE) {
 TF_RETURN_IF_ERROR(GetHandle(ctx, &container, &tensor_array_name));
 if (container != "_tensor_arrays") {

@@ -331,8 +331,8 @@ class TensorArrayGradOp : public TensorArrayCreationOp {
 return errors::InvalidArgument("Wrong input container. ",
 resource.name());
 }
-tensor_array_name =
-string(absl::string_view(resource.name()).substr(container.size()));
+tensor_array_name = std::string(
+absl::string_view(resource.name()).substr(container.size()));
 }
 
 auto output_handle = tensor_array_output_handle->flat<tstring>();

@@ -407,7 +407,7 @@ class TensorArrayGradOp : public TensorArrayCreationOp {
 // The gradient source for creating the given
 // gradient TensorArray. This should be unique to each gradients
 // call. Typical values look like "gradients", "gradients_1", ...
-string source_;
+std::string source_;
 
 TensorArrayGradOp(const TensorArrayGradOp&) = delete;
 void operator=(const TensorArrayGradOp&) = delete;

@@ -490,7 +490,7 @@ class TensorArrayWriteOp : public OpKernel {
 TensorArray* tensor_array = nullptr;
 OP_REQUIRES_OK(ctx, GetTensorArray(ctx, &tensor_array));
 core::ScopedUnref unref(tensor_array);
-const int32_t index = tensor_index->scalar<int32>()();
+const int32_t index = tensor_index->scalar<int32_t>()();
 OP_REQUIRES(
 ctx, tensor_value->dtype() == tensor_array->ElemType(),
 errors::InvalidArgument("TensorArray dtype is ",

@@ -571,7 +571,7 @@ class TensorArrayReadOp : public OpKernel {
 OP_REQUIRES_OK(ctx, GetTensorArray(ctx, &tensor_array));
 core::ScopedUnref unref(tensor_array);
 
-const int32_t index = tensor_index->scalar<int32>()();
+const int32_t index = tensor_index->scalar<int32_t>()();
 OP_REQUIRES(
 ctx, dtype_ == tensor_array->ElemType(),
 errors::InvalidArgument(

@@ -669,7 +669,7 @@ class TensorArrayPackOrGatherOp : public OpKernel {
 
 int32_t num_indices;
 std::vector<Tensor> values;
-std::vector<int32> indices;
+std::vector<int32_t> indices;
 if (LEGACY_PACK) {
 OP_REQUIRES_OK(ctx, tensor_array->PackOrConcatSize(&num_indices));
 indices.resize(num_indices);

@@ -681,7 +681,7 @@ class TensorArrayPackOrGatherOp : public OpKernel {
 errors::InvalidArgument(
 "Expected indices to be a vector, but received shape: ",
 tensor_indices->shape().DebugString()));
-const auto indices_t = tensor_indices->vec<int32>();
+const auto indices_t = tensor_indices->vec<int32_t>();
 num_indices = tensor_indices->NumElements();
 indices.resize(num_indices);
 std::copy(indices_t.data(), indices_t.data() + num_indices,

@@ -911,7 +911,7 @@ class TensorArrayConcatOp : public OpKernel {
 
 // Read all the Tensors into a vector to keep track of their memory.
 std::vector<Tensor> values;
-std::vector<int32> indices(array_size);
+std::vector<int32_t> indices(array_size);
 std::iota(indices.begin(), indices.end(), 0);
 absl::Status s = tensor_array->ReadMany<Device, T>(ctx, indices, &values);
 OP_REQUIRES_OK(ctx, s);

@@ -1110,7 +1110,7 @@ class TensorArrayUnpackOrScatterOp : public OpKernel {
 
 OP_REQUIRES(ctx,
 FastBoundsCheck(element_shape.dim_size(0),
-std::numeric_limits<int32>::max()),
+std::numeric_limits<int32_t>::max()),
 errors::InvalidArgument("tensor dim0 too large to unpack"));
 
 OP_REQUIRES(

@@ -1128,7 +1128,7 @@ class TensorArrayUnpackOrScatterOp : public OpKernel {
 
 int32_t max_index;
 int32_t num_values;
-std::vector<int32> write_indices;
+std::vector<int32_t> write_indices;
 if (LEGACY_UNPACK) {
 num_values = element_shape.dim_size(0);
 max_index = num_values - 1;

@@ -1147,7 +1147,7 @@ class TensorArrayUnpackOrScatterOp : public OpKernel {
 "Expected len(indices) == values.shape[0], but saw: ",
 tensor_indices->NumElements(), " vs. ",
 element_shape.dim_size(0)));
-const auto indices_t = tensor_indices->vec<int32>();
+const auto indices_t = tensor_indices->vec<int32_t>();
 num_values = tensor_indices->NumElements();
 max_index = (num_values == 0)
 ? -1

@@ -1163,7 +1163,7 @@ class TensorArrayUnpackOrScatterOp : public OpKernel {
 
 // If dynamic size, we may have to resize the TensorArray to fit.
 if (dynamic_size && array_size < max_index + 1) {
-array_size = static_cast<int32>(max_index + 1);
+array_size = static_cast<int32_t>(max_index + 1);
 }
 
 if (LEGACY_UNPACK) {

@@ -1310,11 +1310,11 @@ class TensorArraySplitOp : public OpKernel {
 tensor_lengths->shape().DebugString()));
 OP_REQUIRES(ctx,
 FastBoundsCheck(tensor_lengths->NumElements(),
-std::numeric_limits<int32>::max()),
+std::numeric_limits<int32_t>::max()),
 errors::InvalidArgument(
 "Expected lengths to have < max int32 entries"));
 
-int32_t num_tensors = static_cast<int32>(tensor_lengths->NumElements());
+int32_t num_tensors = static_cast<int32_t>(tensor_lengths->NumElements());
 auto tensor_lengths_t = tensor_lengths->vec<int64_t>();
 std::vector<int64_t> cumulative_lengths;
 cumulative_lengths.reserve(num_tensors);

@@ -1402,7 +1402,7 @@ class TensorArraySplitOp : public OpKernel {
 // Record the concat size of the TensorArray.
 OP_REQUIRES_OK(ctx, tensor_array->SetMarkedSize(array_size));
 
-std::vector<int32> indices(array_size);
+std::vector<int32_t> indices(array_size);
 std::iota(indices.begin(), indices.end(), 0);
 
 absl::Status s = tensor_array->WriteOrAggregateMany<Device, T>(

@@ -1467,7 +1467,7 @@ class TensorArraySizeOp : public OpKernel {
 core::ScopedUnref unref(tensor_array);
 Tensor* output = nullptr;
 OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &output));
-OP_REQUIRES_OK(ctx, tensor_array->Size(&(output->scalar<int32>()())));
+OP_REQUIRES_OK(ctx, tensor_array->Size(&(output->scalar<int32_t>()())));
 }
 };
 

@@ -40,7 +40,7 @@ void TensorCord::Encode(VariantTensorData* data) const {
 }
 
 bool TensorCord::Decode(VariantTensorData data) {
-auto* str = new string(std::move(data.metadata_string()));
+auto* str = new std::string(std::move(data.metadata_string()));
 Cleanup();
 chunks_.push_back(new CordRep(absl::string_view(*str), &StringReleaser, str));
 return true;

@@ -57,7 +57,7 @@ void TensorCord::TensorBufReleaser(void* tensor_buffer) {
 }
 
 void TensorCord::StringReleaser(void* str_ptr) {
-delete static_cast<string*>(str_ptr);
+delete static_cast<std::string*>(str_ptr);
 }
 
 namespace {

@@ -85,14 +85,15 @@ struct ResizeUninitializedTraits<
 };
 
 // Resize string `s` to `new_size`, leaving the data uninitialized.
-static inline void STLStringResizeUninitialized(string* s, size_t new_size) {
-ResizeUninitializedTraits<string>::Resize(s, new_size);
+static inline void STLStringResizeUninitialized(std::string* s,
+size_t new_size) {
+ResizeUninitializedTraits<std::string>::Resize(s, new_size);
 }
 
 } // namespace
 
-TensorCord::operator string() const {
-string out;
+TensorCord::operator std::string() const {
+std::string out;
 STLStringResizeUninitialized(&out, size());
 char* data = const_cast<char*>(out.data());
 for (auto* rep : chunks_) {

@@ -114,7 +114,7 @@ class TensorCord {
 bool empty() const { return size() == 0; }
 
 // NOTE: This performs an expensive copy of the underlying data.
-explicit operator string() const;
+explicit operator std::string() const;
 
 class ChunkIterator {
 public:

@@ -188,9 +188,9 @@ class TensorCord {
 return ChunkIterator(this, chunks_.size());
 }
 
-static string TypeName() { return kTypeName; }
+static std::string TypeName() { return kTypeName; }
 
-string DebugString() const {
+std::string DebugString() const {
 return absl::StrCat("<TensorCord size=", size(), ">");
 }
 

@@ -217,7 +217,7 @@ class TensorCord {
 if (is_inline_) {
 return absl::string_view(
 rep_.internal.data() + 1,
-*reinterpret_cast<const uint8*>(rep_.internal.data()));
+*reinterpret_cast<const uint8_t*>(rep_.internal.data()));
 } else {
 return rep_.external.view;
 }

@@ -256,7 +256,7 @@ class TensorCord {
 
 explicit _rep_union(absl::string_view view) {
 DCHECK_LT(view.size(), kMaxInlineSize);
-*reinterpret_cast<uint8*>(internal.data()) = view.size();
+*reinterpret_cast<uint8_t*>(internal.data()) = view.size();
 std::memcpy(static_cast<char*>(internal.data() + 1), view.data(),
 view.size());
 }

@@ -80,7 +80,7 @@ TEST(TensorCordTest, Copy) {
 auto cleaner = [&cleaned]() { ++cleaned; };
 auto thunk = CreateThunkFor(cleaner);
 TensorCord tc_copy;
-string a = "abc";
+std::string a = "abc";
 {
 TensorCord tc(a, thunk, &cleaner);
 tc_copy = tc;

@@ -104,7 +104,7 @@ TEST(TensorCordTest, AppendCord) {
 TensorCord tc_0("abc", thunk_0, &cleaner_0);
 TensorCord tc_1("cba", thunk_1, &cleaner_1);
 tc_0.Append(tc_1);
-EXPECT_EQ(string(tc_0), "abccba");
+EXPECT_EQ(std::string(tc_0), "abccba");
 auto it = tc_0.chunk_begin();
 EXPECT_EQ(*it, "abc");
 ++it;

@@ -128,7 +128,7 @@ TEST(TensorCordTest, AppendView) {
 auto thunk_1 = CreateThunkFor(cleaner_1);
 TensorCord tc_0("abc", thunk_0, &cleaner_0);
 tc_0.Append("cba", thunk_1, &cleaner_1);
-EXPECT_EQ(string(tc_0), "abccba");
+EXPECT_EQ(std::string(tc_0), "abccba");
 auto it = tc_0.chunk_begin();
 EXPECT_EQ(*it, "abc");
 ++it;

@@ -147,7 +147,7 @@ TEST(TensorCordTest, Move) {
 auto cleaner = [&cleaned]() { ++cleaned; };
 auto thunk = CreateThunkFor(cleaner);
 TensorCord tc_copy;
-string a = "abc";
+std::string a = "abc";
 {
 TensorCord tc(a, thunk, &cleaner);
 tc_copy = std::move(tc);

@@ -167,7 +167,7 @@ TEST(TensorCordTest, CopyConstructor) {
 int cleaned = 0;
 auto cleaner = [&cleaned]() { ++cleaned; };
 auto thunk = CreateThunkFor(cleaner);
-string a = "abc";
+std::string a = "abc";
 TensorCord tc(a, thunk, &cleaner);
 TensorCord tc_copy(tc);
 EXPECT_EQ(tc.size(), 3);

@@ -187,7 +187,7 @@ TEST(TensorCordTest, MoveConstructor) {
 int cleaned = 0;
 auto cleaner = [&cleaned]() { ++cleaned; };
 auto thunk = CreateThunkFor(cleaner);
-string a = "abc";
+std::string a = "abc";
 TensorCord tc(a, thunk, &cleaner);
 TensorCord tc_copy(std::move(tc));
 EXPECT_EQ(tc_copy.size(), 3);

@@ -236,7 +236,7 @@ void TensorCordFromAbslCordBenchmark(benchmark::State& state, int num_elem,
 int string_size) {
 std::vector<absl::Cord> cords(num_elem);
 for (int i = 0; i < num_elem; ++i) {
-string s(string_size, 'a');
+std::string s(string_size, 'a');
 cords[i] = s;
 }
 

@@ -35,16 +35,16 @@ void TensorList::Encode(VariantTensorData* data) const {
 invalid_indices.push_back(i);
 }
 }
-string metadata;
+std::string metadata;
 // TODO(b/118838800): Add a proto for storing the metadata.
 // Metadata format:
 // <num_invalid_tensors><invalid_indices><element_dtype><element_shape_proto>
-core::PutVarint64(&metadata, static_cast<uint64>(invalid_indices.size()));
+core::PutVarint64(&metadata, static_cast<uint64_t>(invalid_indices.size()));
 for (size_t i : invalid_indices) {
-core::PutVarint64(&metadata, static_cast<uint64>(i));
+core::PutVarint64(&metadata, static_cast<uint64_t>(i));
 }
-core::PutVarint64(&metadata, static_cast<uint64>(element_dtype));
-core::PutVarint64(&metadata, static_cast<uint64>(max_num_elements));
+core::PutVarint64(&metadata, static_cast<uint64_t>(element_dtype));
+core::PutVarint64(&metadata, static_cast<uint64_t>(max_num_elements));
 TensorShapeProto element_shape_proto;
 element_shape.AsProto(&element_shape_proto);
 element_shape_proto.AppendToString(&metadata);

@@ -55,9 +55,9 @@ bool TensorList::Decode(const VariantTensorData& data) {
 // TODO(srbs): Change the signature to Decode(VariantTensorData data) so
 // that we do not have to copy each tensor individually below. This would
 // require changing VariantTensorData::tensors() as well.
-string metadata;
+std::string metadata;
 data.get_metadata(&metadata);
-uint64 scratch;
+uint64_t scratch;
 absl::string_view iter(metadata);
 std::vector<size_t> invalid_indices;
 core::GetVarint64(&iter, &scratch);

@@ -91,7 +91,7 @@ bool TensorList::Decode(const VariantTensorData& data) {
 core::GetVarint64(&iter, &scratch);
 max_num_elements = static_cast<int>(scratch);
 TensorShapeProto element_shape_proto;
-element_shape_proto.ParseFromString(string(iter.data(), iter.size()));
+element_shape_proto.ParseFromString(iter);
 element_shape = PartialTensorShape(element_shape_proto);
 return true;
 }

@@ -105,14 +105,14 @@ class TensorList {
 
 static const char kTypeName[];
 
-string TypeName() const { return kTypeName; }
+std::string TypeName() const { return kTypeName; }
 
 void Encode(VariantTensorData* data) const;
 
 bool Decode(const VariantTensorData& data);
 
 // TODO(apassos) fill this out
-string DebugString() const { return "TensorList"; }
+std::string DebugString() const { return "TensorList"; }
 
 PartialTensorShape element_shape;
 

@@ -93,14 +93,14 @@ class TensorMap {
 
 static const char kTypeName[];
 
-string TypeName() const { return kTypeName; }
+std::string TypeName() const { return kTypeName; }
 
 void Encode(VariantTensorData* data) const;
 
 bool Decode(const VariantTensorData& data);
 
 // TODO(apassos) fill this out
-string DebugString() const { return "TensorMap"; }
+std::string DebugString() const { return "TensorMap"; }
 
 // Access to the underlying tensor container.
 absl::flat_hash_map<TensorKey, Tensor>& tensors() {

@@ -54,7 +54,7 @@ TEST(TensorMapTest, Insert) {
 absl::flat_hash_map<TensorKey, Tensor>::iterator map_it =
 tm.tensors().begin();
 EXPECT_EQ(map_it->first, k);
-test::ExpectTensorEqual<int32>(map_it->second, v);
+test::ExpectTensorEqual<int32_t>(map_it->second, v);
 map_it++;
 EXPECT_EQ(map_it, tm.tensors().end());
 }

@@ -68,7 +68,7 @@ TEST(TensorMapTest, Lookup) {
 Tensor f = map_it->second;
 
 EXPECT_EQ(map_it->first, k);
-test::ExpectTensorEqual<int32>(f, v);
+test::ExpectTensorEqual<int32_t>(f, v);
 }
 
 TEST(TensorMapTest, Erase) {

@@ -91,7 +91,7 @@ TEST(TensorMapTest, SameKeyInsert) {
 EXPECT_EQ(b2, false);
 absl::flat_hash_map<TensorKey, Tensor>::iterator map_it = tm.find(k);
 EXPECT_EQ(map_it->first, k);
-test::ExpectTensorEqual<int32>(map_it->second, v1);
+test::ExpectTensorEqual<int32_t>(map_it->second, v1);
 }
 
 TEST(TensorMapTest, Replace) {

@@ -102,7 +102,7 @@ TEST(TensorMapTest, Replace) {
 tm[k] = v2;
 absl::flat_hash_map<TensorKey, Tensor>::iterator map_it = tm.find(k);
 EXPECT_EQ(map_it->first, k);
-test::ExpectTensorEqual<int32>(map_it->second, v2);
+test::ExpectTensorEqual<int32_t>(map_it->second, v2);
 }
 
 TEST(TensorMapTest, ListKeys) {

@@ -153,7 +153,7 @@ TEST(TensorMapTest, Copy) {
 EXPECT_NE(tm.find(k), tm.tensors().end());
 EXPECT_NE(tmc.find(k), tmc.tensors().end());
 EXPECT_EQ(tm.find(k)->first, tmc.find(k)->first);
-test::ExpectTensorEqual<int32>(tm.find(k)->second, tmc.find(k)->second);
+test::ExpectTensorEqual<int32_t>(tm.find(k)->second, tmc.find(k)->second);
 }
 
 TEST(TensorMapTest, EncodeDecode) {

@@ -169,7 +169,7 @@ TEST(TensorMapTest, EncodeDecode) {
 EXPECT_NE(tm.find(k), tm.tensors().end());
 EXPECT_NE(tmc.find(k), tmc.tensors().end());
 EXPECT_EQ(tm.find(k)->first, tmc.find(k)->first);
-test::ExpectTensorEqual<int32>(tm.find(k)->second, tmc.find(k)->second);
+test::ExpectTensorEqual<int32_t>(tm.find(k)->second, tmc.find(k)->second);
 }
 
 } // namespace
@ -36,7 +36,7 @@ template <typename Device, typename T>
|
|||
struct LaunchTensorToHashBucket {
|
||||
void operator()(OpKernelContext* c, const int64_t num_buckets, const T* input,
|
||||
const int num_elems, int64_t* output) {
|
||||
string format = "%";
|
||||
std::string format = "%";
|
||||
switch (DataTypeToEnum<T>::value) {
|
||||
case DT_INT8:
|
||||
case DT_INT16:
|
||||
|
|
@ -55,9 +55,9 @@ struct LaunchTensorToHashBucket {
|
|||
}
|
||||
|
||||
for (int i = 0; i < num_elems; ++i) {
|
||||
string input_str = strings::Printf(format.c_str(), input[i]);
|
||||
const uint64 input_hash = Fingerprint64(input_str);
|
||||
const uint64 bucket_id = input_hash % num_buckets;
|
||||
std::string input_str = strings::Printf(format.c_str(), input[i]);
|
||||
const uint64_t input_hash = Fingerprint64(input_str);
|
||||
const uint64_t bucket_id = input_hash % num_buckets;
|
||||
// The number of buckets is always in the positive range of int64 so is
|
||||
// the resulting bucket_id. Casting the bucket_id from uint64 to int64 is
|
||||
// safe.
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ namespace tensorflow {
|
|||
|
||||
class TextLineReader : public ReaderBase {
|
||||
public:
|
||||
TextLineReader(const string& node_name, int skip_header_lines, Env* env)
|
||||
TextLineReader(const std::string& node_name, int skip_header_lines, Env* env)
|
||||
: ReaderBase(absl::StrCat("TextLineReader '", node_name, "'")),
|
||||
skip_header_lines_(skip_header_lines),
|
||||
env_(env),
|
||||
|
|
@ -41,7 +41,7 @@ class TextLineReader : public ReaderBase {
|
|||
|
||||
input_buffer_.reset(new io::InputBuffer(file_.get(), kBufferSize));
|
||||
for (; line_number_ < skip_header_lines_; ++line_number_) {
|
||||
string line_contents;
|
||||
std::string line_contents;
|
||||
absl::Status status = input_buffer_->ReadLine(&line_contents);
|
||||
if (absl::IsOutOfRange(status)) {
|
||||
// We ignore an end of file error when skipping header lines.
|
||||
|
|
|
|||
|
|
@ -29,8 +29,8 @@ namespace tensorflow {
|
|||
|
||||
class TFRecordReader : public ReaderBase {
|
||||
public:
|
||||
TFRecordReader(const string& node_name, const string& compression_type,
|
||||
Env* env)
|
||||
TFRecordReader(const std::string& node_name,
|
||||
const std::string& compression_type, Env* env)
|
||||
: ReaderBase(absl::StrCat("TFRecordReader '", node_name, "'")),
|
||||
env_(env),
|
||||
offset_(0),
|
||||
|
|
@ -76,10 +76,10 @@ class TFRecordReader : public ReaderBase {
|
|||
|
||||
private:
|
||||
Env* const env_;
|
||||
uint64 offset_;
|
||||
uint64_t offset_;
|
||||
std::unique_ptr<RandomAccessFile> file_;
|
||||
std::unique_ptr<io::RecordReader> reader_;
|
||||
string compression_type_ = "";
|
||||
std::string compression_type_ = "";
|
||||
};
|
||||
|
||||
class TFRecordReaderOp : public ReaderOpKernel {
|
||||
|
|
@ -88,7 +88,7 @@ class TFRecordReaderOp : public ReaderOpKernel {
|
|||
: ReaderOpKernel(context) {
|
||||
Env* env = context->env();
|
||||
|
||||
string compression_type;
|
||||
std::string compression_type;
|
||||
OP_REQUIRES_OK(context,
|
||||
context->GetAttr("compression_type", &compression_type));
|
||||
|
||||
|
|
|
|||
|
|
@ -585,23 +585,23 @@ TF_CALL_complex128(HANDLE_TYPE_NAME_GPU);
|
|||
REGISTER_KERNEL_BUILDER(Name("Tile")
|
||||
.Device(DEVICE_CPU)
|
||||
.HostMemory("multiples")
|
||||
.TypeConstraint<int32>("Tmultiples"),
|
||||
TileOp<CPUDevice, int32>);
|
||||
.TypeConstraint<int32_t>("Tmultiples"),
|
||||
TileOp<CPUDevice, int32_t>);
|
||||
REGISTER_KERNEL_BUILDER(Name("Tile")
|
||||
.Device(DEVICE_CPU)
|
||||
.HostMemory("multiples")
|
||||
.TypeConstraint<int64_t>("Tmultiples"),
|
||||
TileOp<CPUDevice, int64>);
|
||||
TileOp<CPUDevice, int64_t>);
|
||||
REGISTER_KERNEL_BUILDER(Name("TileGrad")
|
||||
.Device(DEVICE_CPU)
|
||||
.HostMemory("multiples")
|
||||
.TypeConstraint<int32>("Tmultiples"),
|
||||
TileGradientOp<CPUDevice, int32>);
|
||||
.TypeConstraint<int32_t>("Tmultiples"),
|
||||
TileGradientOp<CPUDevice, int32_t>);
|
||||
REGISTER_KERNEL_BUILDER(Name("TileGrad")
|
||||
.Device(DEVICE_CPU)
|
||||
.HostMemory("multiples")
|
||||
.TypeConstraint<int64_t>("Tmultiples"),
|
||||
TileGradientOp<CPUDevice, int64>);
|
||||
TileGradientOp<CPUDevice, int64_t>);
|
||||
|
||||
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
|
||||
#define REGISTER_GPU_TILE(type) \
|
||||
|
|
|
|||
|
|
@ -244,9 +244,10 @@ struct TopKFunctor<CPUDevice, T, Tidx> {
|
|||
const double sort_cost = (k == num_cols) ? base_cost : 4 * base_cost;
|
||||
const double copy_cost = 2 * k * Eigen::TensorOpCost::AddCost<T>();
|
||||
const double total_cost = sort_cost + copy_cost;
|
||||
const int64_t final_cost = (total_cost >= static_cast<double>(kint64max))
|
||||
? kint64max
|
||||
: static_cast<int64_t>(total_cost);
|
||||
const int64_t final_cost =
|
||||
(total_cost >= static_cast<double>(std::numeric_limits<int64_t>::max()))
|
||||
? std::numeric_limits<int64_t>::max()
|
||||
: static_cast<int64_t>(total_cost);
|
||||
auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads());
|
||||
Shard(worker_threads.num_threads, worker_threads.workers, num_rows,
|
||||
final_cost, SortIndices);
|
||||
|
|
|
|||
|
|
@ -1566,9 +1566,9 @@ class SparseApplyProximalGradientDescentOp : public OpKernel {
|
|||
.TypeConstraint<Tindices>("Tindices"), \
|
||||
SparseApplyProximalGradientDescentOp<T, Tindices>);
|
||||
|
||||
REGISTER_KERNELS(float, int32);
|
||||
REGISTER_KERNELS(float, int32_t);
|
||||
REGISTER_KERNELS(float, int64_t);
|
||||
REGISTER_KERNELS(double, int32);
|
||||
REGISTER_KERNELS(double, int32_t);
|
||||
REGISTER_KERNELS(double, int64_t);
|
||||
#undef REGISTER_KERNELS
|
||||
|
||||
|
|
@ -2252,9 +2252,9 @@ class SparseApplyProximalAdagradOp : public OpKernel {
|
|||
.TypeConstraint<Tindices>("Tindices"), \
|
||||
SparseApplyProximalAdagradOp<D##Device, T, Tindices>);
|
||||
|
||||
REGISTER_KERNELS(CPU, float, int32);
|
||||
REGISTER_KERNELS(CPU, float, int32_t);
|
||||
REGISTER_KERNELS(CPU, float, int64_t);
|
||||
REGISTER_KERNELS(CPU, double, int32);
|
||||
REGISTER_KERNELS(CPU, double, int32_t);
|
||||
REGISTER_KERNELS(CPU, double, int64_t);
|
||||
|
||||
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
|
||||
|
|
@ -2582,9 +2582,9 @@ class SparseApplyAdagradDAOp : public OpKernel {
|
|||
.TypeConstraint<Tindices>("Tindices"), \
|
||||
SparseApplyAdagradDAOp<T, Tindices>);
|
||||
|
||||
REGISTER_KERNELS(float, int32);
|
||||
REGISTER_KERNELS(float, int32_t);
|
||||
REGISTER_KERNELS(float, int64_t);
|
||||
REGISTER_KERNELS(double, int32);
|
||||
REGISTER_KERNELS(double, int32_t);
|
||||
REGISTER_KERNELS(double, int64_t);
|
||||
#undef REGISTER_KERNELS
|
||||
|
||||
|
|
@ -4465,15 +4465,15 @@ class SparseApplyCenteredRMSPropOp : public OpKernel {
|
|||
.TypeConstraint<Tindices>("Tindices"), \
|
||||
SparseApplyCenteredRMSPropOp<T, Tindices>);
|
||||
|
||||
REGISTER_KERNELS(Eigen::half, int32);
|
||||
REGISTER_KERNELS(Eigen::half, int32_t);
|
||||
REGISTER_KERNELS(Eigen::half, int64_t);
|
||||
REGISTER_KERNELS(float, int32);
|
||||
REGISTER_KERNELS(float, int32_t);
|
||||
REGISTER_KERNELS(float, int64_t);
|
||||
REGISTER_KERNELS(double, int32);
|
||||
REGISTER_KERNELS(double, int32_t);
|
||||
REGISTER_KERNELS(double, int64_t);
|
||||
REGISTER_KERNELS(complex64, int32);
|
||||
REGISTER_KERNELS(complex64, int32_t);
|
||||
REGISTER_KERNELS(complex64, int64_t);
|
||||
REGISTER_KERNELS(complex128, int32);
|
||||
REGISTER_KERNELS(complex128, int32_t);
|
||||
REGISTER_KERNELS(complex128, int64_t);
|
||||
|
||||
#undef REGISTER_KERNELS
|
||||
|
|
|
|||
|
|
@ -75,7 +75,7 @@ static Node* Random(Graph* g, int m, int n) {
|
|||
|
||||
static Node* Iota(Graph* g, int n) {
|
||||
Tensor data(DT_INT32, TensorShape({n}));
|
||||
int32* base = data.flat<int32>().data();
|
||||
int32_t* base = data.flat<int32_t>().data();
|
||||
for (int i = 0; i < n; ++i) base[i] = i;
|
||||
return test::graph::Constant(g, data);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -34,7 +34,7 @@ namespace tensorflow {
|
|||
// REQUIRES: in.dim_size(perm[i]) == out->dim_size(i)
|
||||
template <typename Device>
|
||||
absl::Status DoTranspose(const Device& device, const Tensor& in,
|
||||
const absl::Span<const int32> perm, Tensor* out);
|
||||
const absl::Span<const int32_t> perm, Tensor* out);
|
||||
|
||||
// Conjugate and transpose tensor 'in' into tensor 'out' according to dimension
|
||||
// permutation 'perm'.
|
||||
|
|
@ -45,7 +45,7 @@ absl::Status DoTranspose(const Device& device, const Tensor& in,
|
|||
// REQUIRES: in.dim_size(perm[i]) == out->dim_size(i)
|
||||
template <typename Device>
|
||||
absl::Status DoConjugateTranspose(const Device& device, const Tensor& in,
|
||||
const absl::Span<const int32> perm,
|
||||
const absl::Span<const int32_t> perm,
|
||||
Tensor* out);
|
||||
|
||||
// Convenience versions of DoTranspose that only swap the last (inner) two
|
||||
|
|
@ -64,14 +64,14 @@ absl::Status DoConjugateMatrixTranspose(const Device& device, const Tensor& in,
|
|||
template <typename Device, typename T, bool conjugate = false>
|
||||
struct Transpose {
|
||||
static void run(const Device& d, const Tensor& in,
|
||||
const absl::Span<const int32> perm, Tensor* out);
|
||||
const absl::Span<const int32_t> perm, Tensor* out);
|
||||
};
|
||||
|
||||
// Implementation details.
|
||||
namespace internal {
|
||||
|
||||
typedef absl::InlinedVector<int64_t, 8UL> TransposeDimsVec;
|
||||
typedef absl::InlinedVector<int32, 8UL> TransposePermsVec;
|
||||
typedef absl::InlinedVector<int32_t, 8UL> TransposePermsVec;
|
||||
|
||||
// Helper function that takes a tensor shape, a permutation, combines the
|
||||
// neighboring shapes if their indices in the permutation are consecutive.
|
||||
|
|
@ -79,7 +79,7 @@ typedef absl::InlinedVector<int32, 8UL> TransposePermsVec;
|
|||
// Example: Tensor shape {2, 3, 4, 5, 120} and permutation {0, 4, 1, 2, 3} will
|
||||
// produce new shape {2, 60, 120} and new permutation {0, 2, 1}.
|
||||
inline void ReduceTransposeDimensions(const TensorShape& shape,
|
||||
absl::Span<const int32> perm,
|
||||
absl::Span<const int32_t> perm,
|
||||
TransposePermsVec* new_perm,
|
||||
TransposeDimsVec* new_dims) {
|
||||
CHECK_EQ(shape.dims(), perm.size());
|
||||
|
|
@ -130,8 +130,8 @@ inline void ReduceTransposeDimensions(const TensorShape& shape,
|
|||
// That is, for all i, 0 <= perm[i] < input_shape.dims().
|
||||
// In practice, this is checked in TransposeOp::Compute prior to calling this
|
||||
// function, and the function sits here to facilitate unit testing.
|
||||
inline bool NonSingletonDimensionsAlign(const TensorShape& input_shape,
|
||||
const std::vector<int32>& permutation) {
|
||||
inline bool NonSingletonDimensionsAlign(
|
||||
const TensorShape& input_shape, const std::vector<int32_t>& permutation) {
|
||||
int last_nonsingleton_perm_dim = -1;
|
||||
for (int perm_dim : permutation) {
|
||||
if (input_shape.dim_size(perm_dim) == 1) {
|
||||
|
|
@ -148,7 +148,7 @@ inline bool NonSingletonDimensionsAlign(const TensorShape& input_shape,
// Uses Eigen to transpose.
template <typename Device, typename T, int NDIMS>
void TransposeUsingEigen(const Device& d, const Tensor& in,
                         const absl::Span<const int32> perm, bool conjugate,
                         const absl::Span<const int32_t> perm, bool conjugate,
                         Tensor* out) {
  Eigen::array<int, NDIMS> p;
  for (int i = 0; i < NDIMS; ++i) p[i] = perm[i];

@ -167,8 +167,8 @@ void TransposeUsingEigen(const Device& d, const Tensor& in,

template <typename Device>
absl::Status DoTransposeImpl(const Device& d, const Tensor& in,
                             const absl::Span<const int32> perm, bool conjugate,
                             Tensor* out) {
                             const absl::Span<const int32_t> perm,
                             bool conjugate, Tensor* out) {
  // log a msg
  CHECK_EQ(in.dims(), out->dims());
  CHECK_EQ(in.dims(), perm.size());

@ -181,7 +181,7 @@ absl::Status DoTransposeImpl(const Device& d, const Tensor& in,
    case DT_UINT8:
    case DT_FLOAT8_E5M2:
    case DT_FLOAT8_E4M3FN:
      Transpose<Device, uint8>::run(d, in, perm, out);
      Transpose<Device, uint8_t>::run(d, in, perm, out);
      break;

    case DT_BFLOAT16:

@ -190,20 +190,20 @@ absl::Status DoTransposeImpl(const Device& d, const Tensor& in,
    case DT_QINT16:
    case DT_QUINT16:
    case DT_UINT16:
      Transpose<Device, uint16>::run(d, in, perm, out);
      Transpose<Device, uint16_t>::run(d, in, perm, out);
      break;

    case DT_FLOAT:
    case DT_INT32:
    case DT_QINT32:
    case DT_UINT32:
      Transpose<Device, uint32>::run(d, in, perm, out);
      Transpose<Device, uint32_t>::run(d, in, perm, out);
      break;

    case DT_DOUBLE:
    case DT_INT64:
    case DT_UINT64:
      Transpose<Device, uint64>::run(d, in, perm, out);
      Transpose<Device, uint64_t>::run(d, in, perm, out);
      break;

    case DT_COMPLEX64:

@ -217,7 +217,7 @@ absl::Status DoTransposeImpl(const Device& d, const Tensor& in,
        Transpose<Device, complex64, /*conjugate=*/true>::run(d, in, perm, out);
#endif
      } else {
        Transpose<Device, uint64>::run(d, in, perm, out);
        Transpose<Device, uint64_t>::run(d, in, perm, out);
      }
      break;
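
The DoTransposeImpl switch above dispatches dtypes by element width to unsigned-integer instantiations (uint8_t, uint16_t, uint32_t, uint64_t): a transpose only relocates whole elements, so any 4-byte type can be shuffled as uint32_t. A minimal standalone illustration of that idea, not TensorFlow code:

#include <cassert>
#include <cstdint>
#include <cstring>

// Illustrative only: transpose a 2x3 row-major float matrix by reinterpreting
// each 4-byte element as uint32_t. Element bits are moved, never interpreted.
int main() {
  const float src[2][3] = {{1.f, 2.f, 3.f}, {4.f, 5.f, 6.f}};
  uint32_t src_bits[6], dst_bits[6];
  std::memcpy(src_bits, src, sizeof(src));
  for (int r = 0; r < 2; ++r)
    for (int c = 0; c < 3; ++c)
      dst_bits[c * 2 + r] = src_bits[r * 3 + c];  // dst[c][r] = src[r][c]
  float dst[3][2];
  std::memcpy(dst, dst_bits, sizeof(dst));
  assert(dst[2][1] == 6.f && dst[0][1] == 4.f);
}
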
@ -33,7 +33,7 @@ namespace {

template <typename T, bool conjugate>
void TransposeSimple(const CPUDevice& device, const Tensor& in,
                     const absl::Span<const int32> perm, Tensor* out) {
                     const absl::Span<const int32_t> perm, Tensor* out) {
  const int ndims = in.dims();
  absl::InlinedVector<int64_t, 8UL> in_strides =
      ComputeStride<int64_t>(in.shape());

@ -73,7 +73,7 @@ void TransposeSimple(const CPUDevice& device, const Tensor& in,
template <typename T, bool conjugate>
struct Transpose<CPUDevice, T, conjugate> {
  static void run(const CPUDevice& d, const Tensor& in,
                  const absl::Span<const int32> perm, Tensor* out) {
                  const absl::Span<const int32_t> perm, Tensor* out) {
    switch (in.dims()) {
      case 2:
        internal::TransposeUsingEigen<CPUDevice, T, 2>(d, in, perm, conjugate,
@ -50,10 +50,11 @@ class InvertPermutationOp : public OpKernel {
        context, TensorShapeUtils::IsVector(input.shape()),
        errors::InvalidArgument("invert_permutation expects a 1D vector."));
    auto Tin = input.vec<T>();
    OP_REQUIRES(context,
                FastBoundsCheck(Tin.size(), std::numeric_limits<int32>::max()),
                errors::InvalidArgument("permutation of nonnegative int32s "
                                        "must have <= int32 max elements"));
    OP_REQUIRES(
        context,
        FastBoundsCheck(Tin.size(), std::numeric_limits<int32_t>::max()),
        errors::InvalidArgument("permutation of nonnegative int32s "
                                "must have <= int32 max elements"));
    const T N = static_cast<T>(Tin.size());  // Safe: bounds-checked above.
    Tensor* output = nullptr;
    OP_REQUIRES_OK(context,
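
The hunk above only touches the kernel's validation; for context, here is a minimal sketch of what invert_permutation computes, namely output[input[i]] = i, assuming the input has already been validated (the real kernel reports InvalidArgument for out-of-range or repeated entries):

#include <cstdint>
#include <vector>

// Illustrative sketch, not the kernel itself: invert a permutation vector.
// Assumes `in` is a valid permutation of 0..n-1.
std::vector<int32_t> InvertPermutation(const std::vector<int32_t>& in) {
  std::vector<int32_t> out(in.size());
  for (int32_t i = 0; i < static_cast<int32_t>(in.size()); ++i) {
    out[in[i]] = i;
  }
  return out;
}
// Example: {3, 4, 0, 2, 1} -> {2, 4, 3, 0, 1}.
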
@ -72,18 +73,18 @@ class InvertPermutationOp : public OpKernel {
};

REGISTER_KERNEL_BUILDER(
    Name("InvertPermutation").Device(DEVICE_CPU).TypeConstraint<int32>("T"),
    InvertPermutationOp<int32>);
    Name("InvertPermutation").Device(DEVICE_CPU).TypeConstraint<int32_t>("T"),
    InvertPermutationOp<int32_t>);
REGISTER_KERNEL_BUILDER(
    Name("InvertPermutation").Device(DEVICE_CPU).TypeConstraint<int64_t>("T"),
    InvertPermutationOp<int64_t>);

REGISTER_KERNEL_BUILDER(Name("InvertPermutation")
                            .Device(DEVICE_DEFAULT)
                            .TypeConstraint<int32>("T")
                            .TypeConstraint<int32_t>("T")
                            .HostMemory("x")
                            .HostMemory("y"),
                        InvertPermutationOp<int32>);
                        InvertPermutationOp<int32_t>);
REGISTER_KERNEL_BUILDER(Name("InvertPermutation")
                            .Device(DEVICE_DEFAULT)
                            .TypeConstraint<int64_t>("T")

@ -94,7 +95,7 @@ REGISTER_KERNEL_BUILDER(Name("InvertPermutation")
namespace {
template <typename Tperm>
absl::Status PermutationHelper(const Tensor& perm, const int dims,
                               std::vector<int32>* permutation) {
                               std::vector<int32_t>* permutation) {
  auto Vperm = perm.vec<Tperm>();
  if (dims != Vperm.size()) {
    return errors::InvalidArgument("transpose expects a vector of size ", dims,

@ -105,7 +106,7 @@ absl::Status PermutationHelper(const Tensor& perm, const int dims,
  // asynchrony boundary is permutation.
  const volatile Tperm* perm_begin =
      reinterpret_cast<const volatile Tperm*>(Vperm.data());
  *permutation = std::vector<int32>(perm_begin, perm_begin + dims);
  *permutation = std::vector<int32_t>(perm_begin, perm_begin + dims);

  return absl::OkStatus();
}
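
PermutationHelper above checks the length of the permutation vector and copies it into std::vector<int32_t> (the surrounding comments note the copy crosses an asynchrony boundary and that narrowing from int64 is safe because the values only index dimensions). A simplified, standalone sketch of that validate-and-narrow step, without TF's Status type or the volatile read:

#include <cstdint>
#include <optional>
#include <vector>

// Illustrative sketch of a PermutationHelper-style conversion: validate the
// length, then copy (and narrow) the permutation into int32_t. The real helper
// returns absl::Status and reads through a volatile pointer; both are
// simplified away here.
template <typename Tperm>
std::optional<std::vector<int32_t>> ToPermutation(const std::vector<Tperm>& v,
                                                  int dims) {
  if (static_cast<int>(v.size()) != dims) return std::nullopt;  // size mismatch
  std::vector<int32_t> out(v.begin(), v.end());  // narrowing is safe: values < dims
  return out;
}
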
@ -136,10 +137,10 @@ void TransposeOp::Compute(OpKernelContext* ctx) {

  // Although Tperm may be an int64 type, an int32 is sufficient to hold
  // dimension range values, so the narrowing here should be safe.
  std::vector<int32> permutation;
  std::vector<int32_t> permutation;
  const int dims = input.dims();
  if (perm.dtype() == DT_INT32) {
    OP_REQUIRES_OK(ctx, PermutationHelper<int32>(perm, dims, &permutation));
    OP_REQUIRES_OK(ctx, PermutationHelper<int32_t>(perm, dims, &permutation));
  } else {
    OP_REQUIRES_OK(ctx, PermutationHelper<int64_t>(perm, dims, &permutation));
  }

@ -191,17 +192,16 @@ void TransposeOp::Compute(OpKernelContext* ctx) {
}

absl::Status TransposeCpuOp::DoTranspose(OpKernelContext* ctx, const Tensor& in,
                                         absl::Span<const int32> perm,
                                         absl::Span<const int32_t> perm,
                                         Tensor* out) {
  typedef Eigen::ThreadPoolDevice CPUDevice;
  return ::tensorflow::DoTranspose(ctx->eigen_device<CPUDevice>(), in, perm,
                                   out);
}

absl::Status ConjugateTransposeCpuOp::DoTranspose(OpKernelContext* ctx,
                                                  const Tensor& in,
                                                  absl::Span<const int32> perm,
                                                  Tensor* out) {
absl::Status ConjugateTransposeCpuOp::DoTranspose(
    OpKernelContext* ctx, const Tensor& in, absl::Span<const int32_t> perm,
    Tensor* out) {
  typedef Eigen::ThreadPoolDevice CPUDevice;
  return ::tensorflow::DoConjugateTranspose(ctx->eigen_device<CPUDevice>(), in,
                                            perm, out);
@ -29,7 +29,7 @@ class TransposeOp : public OpKernel {

 protected:
  virtual absl::Status DoTranspose(OpKernelContext* ctx, const Tensor& in,
                                   absl::Span<const int32> perm,
                                   absl::Span<const int32_t> perm,
                                   Tensor* out) = 0;
  virtual bool IsConjugate() const { return false; }
};

@ -40,7 +40,8 @@ class TransposeCpuOp : public TransposeOp {

 protected:
  absl::Status DoTranspose(OpKernelContext* ctx, const Tensor& in,
                           absl::Span<const int32> perm, Tensor* out) override;
                           absl::Span<const int32_t> perm,
                           Tensor* out) override;
};

#if defined(INTEL_MKL)

@ -60,7 +61,8 @@ class TransposeGpuOp : public TransposeOp {

 protected:
  absl::Status DoTranspose(OpKernelContext* ctx, const Tensor& in,
                           absl::Span<const int32> perm, Tensor* out) override;
                           absl::Span<const int32_t> perm,
                           Tensor* out) override;
};

@ -72,7 +74,8 @@ class ConjugateTransposeCpuOp : public TransposeOp {

 protected:
  absl::Status DoTranspose(OpKernelContext* ctx, const Tensor& in,
                           absl::Span<const int32> perm, Tensor* out) override;
                           absl::Span<const int32_t> perm,
                           Tensor* out) override;
  bool IsConjugate() const override { return true; }
};

@ -96,7 +99,8 @@ class ConjugateTransposeGpuOp : public TransposeOp {

 protected:
  absl::Status DoTranspose(OpKernelContext* ctx, const Tensor& in,
                           absl::Span<const int32> perm, Tensor* out) override;
                           absl::Span<const int32_t> perm,
                           Tensor* out) override;
  bool IsConjugate() const override { return true; }
};
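
transpose_op.h above only declares the virtual DoTranspose hook that each backend overrides; the earlier transpose_op.cc hunks show TransposeCpuOp forwarding it to ::tensorflow::DoTranspose. A hedged, dependency-free sketch of that override pattern; the types and class names here are placeholders, not TensorFlow's:

#include <cstdint>
#include <vector>

// Placeholder types standing in for Tensor/OpKernelContext.
struct FakeTensor {
  std::vector<float> data;
};

class TransposeBase {
 public:
  virtual ~TransposeBase() = default;
  void Compute(const FakeTensor& in, const std::vector<int32_t>& perm,
               FakeTensor* out) {
    // Shared validation of `perm` would live here, then defer to the backend.
    DoTranspose(in, perm, out);
  }

 protected:
  virtual void DoTranspose(const FakeTensor& in,
                           const std::vector<int32_t>& perm,
                           FakeTensor* out) = 0;
  virtual bool IsConjugate() const { return false; }
};

class CpuTranspose : public TransposeBase {
 protected:
  void DoTranspose(const FakeTensor& in, const std::vector<int32_t>& perm,
                   FakeTensor* out) override {
    *out = in;  // Stand-in for the device-specific element shuffle.
  }
};
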
@ -23,14 +23,14 @@ namespace tensorflow {
class TransposeUtilTest : public ::testing::Test {
 protected:
  void TestDimensionReduction(const TensorShape& shape,
                              const absl::Span<const int32> perm,
                              const absl::Span<const int32> expected_perm,
                              const absl::Span<const int32_t> perm,
                              const absl::Span<const int32_t> expected_perm,
                              const absl::Span<const int64_t> expected_dims) {
    internal::TransposePermsVec new_perm;
    internal::TransposeDimsVec new_dims;
    internal::ReduceTransposeDimensions(shape, perm, &new_perm, &new_dims);

    absl::Span<const int32> computed_perm(new_perm);
    absl::Span<const int32_t> computed_perm(new_perm);
    absl::Span<const int64_t> computed_dims(new_dims);
    EXPECT_EQ(computed_perm, expected_perm);
    EXPECT_EQ(computed_dims, expected_dims);
@ -35,8 +35,8 @@ class TypedConditionalAccumulatorBase : public ConditionalAccumulatorBase {
 public:
  TypedConditionalAccumulatorBase(const DataType& dtype,
                                  const PartialTensorShape& shape,
                                  const string& name,
                                  const string& reduction_type)
                                  const std::string& name,
                                  const std::string& reduction_type)
      : ConditionalAccumulatorBase(dtype, shape, name, reduction_type) {}

  /**
@ -34,7 +34,7 @@ class TypedQueue : public QueueBase {
 public:
  TypedQueue(const int32_t capacity, const DataTypeVector& component_dtypes,
             const std::vector<TensorShape>& component_shapes,
             const string& name);
             const std::string& name);

  virtual absl::Status Initialize();  // Must be called before any other method.

@ -47,7 +47,7 @@ class TypedQueue : public QueueBase {
template <typename SubQueue>
TypedQueue<SubQueue>::TypedQueue(
    int32_t capacity, const DataTypeVector& component_dtypes,
    const std::vector<TensorShape>& component_shapes, const string& name)
    const std::vector<TensorShape>& component_shapes, const std::string& name)
    : QueueBase(capacity, component_dtypes, component_shapes, name) {}

template <typename SubQueue>
@ -43,12 +43,13 @@ struct UnaryOpsCompositionBase {
    int cost;
  };

  bool HasComputeFn(const string& name) {
  bool HasComputeFn(const std::string& name) {
    return compute_fns.find(name) != compute_fns.end();
  }

 protected:
  void RegisterComputeFn(const string& name, ComputeFn compute_fn, int cost) {
  void RegisterComputeFn(const std::string& name, ComputeFn compute_fn,
                         int cost) {
    VLOG(5) << "Register compute fn: name=" << name << " cost=" << cost;
    compute_fns[name] = {compute_fn, cost};
  }

@ -56,9 +57,9 @@ struct UnaryOpsCompositionBase {
 private:
  friend class UnaryOpsComposition<T>;

  absl::Status ExportComputeFns(const std::vector<string>& op_names,
  absl::Status ExportComputeFns(const std::vector<std::string>& op_names,
                                std::vector<ComputeFn>* fns, int* cost) {
    for (const string& op_name : op_names) {
    for (const std::string& op_name : op_names) {
      auto it = compute_fns.find(op_name);
      if (it == compute_fns.end())
        return errors::InvalidArgument(

@ -72,7 +73,7 @@ struct UnaryOpsCompositionBase {
    return absl::OkStatus();
  }

  std::unordered_map<string, ComputeFnRegistration> compute_fns;
  std::unordered_map<std::string, ComputeFnRegistration> compute_fns;
};

template <typename T>

@ -151,7 +152,7 @@ class UnaryOpsComposition : public OpKernel {

  Support support_;

  std::vector<string> op_names_;
  std::vector<std::string> op_names_;
  std::vector<ComputeFn> fns_;
  int cost_ = 0;
};
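
UnaryOpsCompositionBase above keeps a name-keyed registry of compute functions with per-op costs (RegisterComputeFn, HasComputeFn, ExportComputeFns). A minimal standalone sketch of that registry shape, with simplified types and boolean error handling in place of absl::Status:

#include <functional>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

// Illustrative sketch of a name-keyed compute-function registry with costs,
// mirroring the RegisterComputeFn / ExportComputeFns shape above. The real
// class is templated on the element type and returns absl::Status.
struct Registry {
  using ComputeFn = std::function<float(float)>;
  struct Entry {
    ComputeFn fn;
    int cost;
  };

  void Register(const std::string& name, ComputeFn fn, int cost) {
    fns[name] = {std::move(fn), cost};
  }
  bool Has(const std::string& name) const { return fns.count(name) != 0; }

  // Resolve a pipeline of op names into callables plus a total cost estimate.
  bool Export(const std::vector<std::string>& names,
              std::vector<ComputeFn>* out, int* cost) const {
    for (const std::string& n : names) {
      auto it = fns.find(n);
      if (it == fns.end()) return false;  // real code: InvalidArgument
      out->push_back(it->second.fn);
      *cost += it->second.cost;
    }
    return true;
  }

  std::unordered_map<std::string, Entry> fns;
};
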
@ -33,7 +33,8 @@ namespace {
class UnaryOpsCompositionTest : public OpsTestBase {
 protected:
  template <typename T>
  void RunComposedOp(const std::vector<string> op_names, T input, T expected) {
  void RunComposedOp(const std::vector<std::string> op_names, T input,
                     T expected) {
    TF_ASSERT_OK(NodeDefBuilder("unary_op_composition", "_UnaryOpsComposition")
                     .Input(FakeInput(DataTypeToEnum<T>::v()))
                     .Attr("T", DataTypeToEnum<T>::v())

@ -82,8 +83,9 @@ TEST_F(UnaryOpsCompositionTest, Compose_Tanh_Relu6_F) {

// Performance benchmarks below.

string Function(int i) {
  std::vector<string> ops = {"Tanh", "Relu", "Sigmoid", "Sqrt", "Log", "Exp"};
std::string Function(int i) {
  std::vector<std::string> ops = {"Tanh", "Relu", "Sigmoid",
                                  "Sqrt", "Log", "Exp"};
  return ops[i % ops.size()];
}

@ -127,7 +129,7 @@ static Graph* UnaryOpsCompo(int tensor_size, int repeat_graph,
  Tensor t(DT_FLOAT, TensorShape({tensor_size}));
  t.flat<float>() = t.flat<float>().setRandom();

  std::vector<string> functions;
  std::vector<std::string> functions;
  for (int j = 0; j < num_functions; ++j) {
    functions.push_back(Function(j));
  }
@ -117,7 +117,7 @@ void unicode_error_callback(const void* context, UConverterToUnicodeArgs* args,
// encoding position.
// callback: function(UChar32 codepoint, int num_bytes_consumed_from_source_str,
// bool fatal_format_error)
void IterateUnicodeString(const string& str, UConverter* converter,
void IterateUnicodeString(const std::string& str, UConverter* converter,
                          std::function<void(UChar32, int, bool)> callback) {
  const char* source = str.data();
  const char* limit = str.data() + str.length();

@ -165,7 +165,7 @@ class WrappedConverter {
    }
  }

  void init(const string& name) {
  void init(const std::string& name) {
    if (converter_ && name == name_) {
      // Note: this reset is not typically needed, but if not done, then in some
      // cases the cached converter will maintain state of input endianness
@ -193,7 +193,7 @@ class WrappedConverter {
  }

  UConverter* converter_ = nullptr;
  string name_;
  std::string name_;
};

struct ErrorOptions {

@ -206,7 +206,7 @@ struct ErrorOptions {
absl::Status GetErrorOptions(OpKernelConstruction* ctx, ErrorOptions* out) {
  *out = ErrorOptions();

  string error_policy;
  std::string error_policy;
  TF_RETURN_IF_ERROR(ctx->GetAttr("errors", &error_policy));

  if (error_policy == "replace") {

@ -251,7 +251,7 @@ class UnicodeTranscodeOp : public OpKernel {
  explicit UnicodeTranscodeOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
    OP_REQUIRES_OK(ctx, GetErrorOptions(ctx, &error_options_));

    string output_encoding;
    std::string output_encoding;
    OP_REQUIRES_OK(ctx, ctx->GetAttr("output_encoding", &output_encoding));
    OP_REQUIRES_OK(ctx,
                   ParseUnicodeEncoding(output_encoding, &output_encoding_));

@ -338,7 +338,7 @@ class UnicodeTranscodeOp : public OpKernel {
    Encode(output_encoding_, source, s);
  }

  string input_encoding_;
  std::string input_encoding_;
  ErrorOptions error_options_;
  UnicodeEncoding output_encoding_ = UnicodeEncoding::UTF8;
};

@ -420,7 +420,7 @@ class UnicodeDecodeBaseOp : public OpKernel {
    int row_split_index = 0;
    SPLITS_TYPE next_row_split = 0;
    for (int i = 0; i < input_vec.size(); ++i) {
      const string& input = input_vec(i);
      const std::string& input = input_vec(i);
      // Convert input strings into unicode values. Output to a list of
      // char_values, record row splits and char_to_byte_starts, which are all
      // the fields needed to construct a RaggedTensor.

@ -441,7 +441,7 @@ class UnicodeDecodeBaseOp : public OpKernel {
        ctx, ctx->allocate_output(
                 "char_values", {static_cast<SPLITS_TYPE>(char_values.size())},
                 &output_char_values));
    auto out_char_values = output_char_values->vec<int32>();
    auto out_char_values = output_char_values->vec<int32_t>();
    if (generate_offsets_) {
      DCHECK(offset_values.size() == char_values.size());
      Tensor* output_offset_values;

@ -453,18 +453,18 @@ class UnicodeDecodeBaseOp : public OpKernel {

      // Load output tensors from intermediate value arrays.
      for (int i = 0; i < char_values.size(); ++i) {
        out_char_values(i) = static_cast<int32>(char_values[i]);
        out_char_values(i) = static_cast<int32_t>(char_values[i]);
        out_offset_values(i) = offset_values[i];
      }
    } else {
      for (int i = 0; i < char_values.size(); ++i) {
        out_char_values(i) = static_cast<int32>(char_values[i]);
        out_char_values(i) = static_cast<int32_t>(char_values[i]);
      }
    }
  }

 private:
  string input_encoding_;
  std::string input_encoding_;
  ErrorOptions error_options_;
  bool generate_offsets_ = false;
};
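
UnicodeDecodeBaseOp above accumulates char_values plus row splits (and, when offsets are requested, char_to_byte_starts), which are the components of a ragged result. As a hedged illustration of how those outputs line up, here is the layout such an op would produce for the made-up batch {"ab", "", "xyz"} in UTF-8; the values are written out by hand for the example, not generated by the op:

#include <cstdint>
#include <vector>

// Illustrative only: row i of the ragged result is
// char_values[row_splits[i] .. row_splits[i+1]).
struct RaggedDecode {
  std::vector<int32_t> char_values;           // one Unicode codepoint per char
  std::vector<int64_t> row_splits;            // length = number of strings + 1
  std::vector<int64_t> char_to_byte_starts;   // offsets variant only
};

RaggedDecode ExampleLayout() {
  return RaggedDecode{
      /*char_values=*/{97, 98, 120, 121, 122},  // 'a','b','x','y','z'
      /*row_splits=*/{0, 2, 2, 5},              // "" contributes no values
      /*char_to_byte_starts=*/{0, 1, 0, 1, 2},  // byte offset within each string
  };
}
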
@ -491,18 +491,18 @@ REGISTER_KERNEL_BUILDER(Name("UnicodeDecodeWithOffsets")
                            .TypeConstraint<int64_t>("Tsplits"),
                        UnicodeDecodeWithOffsetsOp<int64_t>);
REGISTER_KERNEL_BUILDER(
    Name("UnicodeDecode").Device(DEVICE_CPU).TypeConstraint<int32>("Tsplits"),
    UnicodeDecodeOp<int32>);
    Name("UnicodeDecode").Device(DEVICE_CPU).TypeConstraint<int32_t>("Tsplits"),
    UnicodeDecodeOp<int32_t>);
REGISTER_KERNEL_BUILDER(Name("UnicodeDecodeWithOffsets")
                            .Device(DEVICE_CPU)
                            .TypeConstraint<int32>("Tsplits"),
                        UnicodeDecodeWithOffsetsOp<int32>);
                            .TypeConstraint<int32_t>("Tsplits"),
                        UnicodeDecodeWithOffsetsOp<int32_t>);

template <typename SPLITS_TYPE>
class UnicodeEncodeOp : public OpKernel {
 public:
  explicit UnicodeEncodeOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
    string encoding_tmp;
    std::string encoding_tmp;
    OP_REQUIRES_OK(ctx, ctx->GetAttr("output_encoding", &encoding_tmp));
    OP_REQUIRES_OK(ctx, ParseUnicodeEncoding(encoding_tmp, &encoding_));
    OP_REQUIRES_OK(ctx, GetErrorOptions(ctx, &error_options_));

@ -521,7 +521,7 @@ class UnicodeEncodeOp : public OpKernel {
  void Compute(OpKernelContext* context) override {
    // Get inputs
    const Tensor& input_tensor = context->input(0);
    const auto input_tensor_flat = input_tensor.flat<int32>();
    const auto input_tensor_flat = input_tensor.flat<int32_t>();
    const Tensor& input_splits = context->input(1);
    const auto input_splits_flat = input_splits.flat<SPLITS_TYPE>();

@ -602,7 +602,7 @@ REGISTER_KERNEL_BUILDER(
    Name("UnicodeEncode").Device(DEVICE_CPU).TypeConstraint<int64_t>("Tsplits"),
    UnicodeEncodeOp<int64_t>);
REGISTER_KERNEL_BUILDER(
    Name("UnicodeEncode").Device(DEVICE_CPU).TypeConstraint<int32>("Tsplits"),
    UnicodeEncodeOp<int32>);
    Name("UnicodeEncode").Device(DEVICE_CPU).TypeConstraint<int32_t>("Tsplits"),
    UnicodeEncodeOp<int32_t>);

}  // namespace tensorflow
@ -26,13 +26,13 @@ class UnicodeScriptOp : public OpKernel {
  void Compute(OpKernelContext* context) override {
    const Tensor* input_tensor;
    OP_REQUIRES_OK(context, context->input("input", &input_tensor));
    const auto& input_flat = input_tensor->flat<int32>();
    const auto& input_flat = input_tensor->flat<int32_t>();

    Tensor* output_tensor = nullptr;
    OP_REQUIRES_OK(context,
                   context->allocate_output("output", input_tensor->shape(),
                                            &output_tensor));
    auto output_flat = output_tensor->flat<int32>();
    auto output_flat = output_tensor->flat<int32_t>();

    icu::ErrorCode status;
    for (int i = 0; i < input_flat.size(); i++) {