Go: Update generated wrapper functions for TensorFlow ops.

PiperOrigin-RevId: 173966068
Authored by A. Unique TensorFlower on 2017-10-30 16:14:34 -07:00; committed by TensorFlower Gardener
parent f9a673cb71
commit 558f146e1d


@@ -7996,146 +7996,6 @@ func Cholesky(scope *Scope, input tf.Output) (output tf.Output) {
return op.Output(0)
}
// FusedBatchNormGradAttr is an optional argument to FusedBatchNormGrad.
type FusedBatchNormGradAttr func(optionalAttr)
// FusedBatchNormGradEpsilon sets the optional epsilon attribute to value.
//
// value: A small float number added to the variance of x.
// If not specified, defaults to 0.0001
func FusedBatchNormGradEpsilon(value float32) FusedBatchNormGradAttr {
return func(m optionalAttr) {
m["epsilon"] = value
}
}
// FusedBatchNormGradDataFormat sets the optional data_format attribute to value.
//
// value: The data format for y_backprop, x, x_backprop.
// Either "NHWC" (default) or "NCHW".
// If not specified, defaults to "NHWC"
func FusedBatchNormGradDataFormat(value string) FusedBatchNormGradAttr {
return func(m optionalAttr) {
m["data_format"] = value
}
}
// FusedBatchNormGradIsTraining sets the optional is_training attribute to value.
//
// value: A bool value to indicate the operation is for training (default)
// or inference.
// If not specified, defaults to true
func FusedBatchNormGradIsTraining(value bool) FusedBatchNormGradAttr {
return func(m optionalAttr) {
m["is_training"] = value
}
}
// Gradient for batch normalization.
//
// Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
// The size of 1D Tensors matches the dimension C of the 4D Tensors.
//
// Arguments:
// y_backprop: A 4D Tensor for the gradient with respect to y.
// x: A 4D Tensor for input data.
// scale: A 1D Tensor for scaling factor, to scale the normalized x.
// reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
// mean to be reused in gradient computation. When is_training is
// False, a 1D Tensor for the population mean to be reused in both
// 1st and 2nd order gradient computation.
// reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
// variance (inverted variance in the cuDNN case) to be reused in
// gradient computation. When is_training is False, a 1D Tensor
// for the population variance to be reused in both 1st and 2nd
// order gradient computation.
//
// Returns A 4D Tensor for the gradient with respect to x. A 1D Tensor for the
// gradient with respect to scale. A 1D Tensor for the gradient with respect to
// offset. Unused placeholder to match the mean input in FusedBatchNorm. Unused
// placeholder to match the variance input in FusedBatchNorm.
func FusedBatchNormGrad(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, optional ...FusedBatchNormGradAttr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_3 tf.Output, reserve_space_4 tf.Output) {
if scope.Err() != nil {
return
}
attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
Type: "FusedBatchNormGrad",
Input: []tf.Input{
y_backprop, x, scale, reserve_space_1, reserve_space_2,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
}
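Not part of the commit itself, but as an illustration of how this wrapper composes with the rest of the generated package, here is a minimal graph-construction sketch that pairs FusedBatchNormGrad with a forward FusedBatchNorm so the reserve-space outputs line up. It assumes the FusedBatchNorm and OnesLike wrappers from this same generated file, and that empty mean/variance inputs are acceptable in training mode; shapes and values are illustrative only.

package main

import (
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// A 1x2x2x2 NHWC input (C = 2); values are arbitrary.
	x := op.Const(s, [][][][]float32{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}})
	scale := op.Const(s, []float32{1, 1})
	offset := op.Const(s, []float32{0, 0})
	// Assumption: in training mode the mean/variance inputs are unused and may be empty.
	empty := op.Const(s, []float32{})
	y, _, _, res1, res2 := op.FusedBatchNorm(s, x, scale, offset, empty, empty)
	// Use dL/dy = 1 everywhere as a stand-in upstream gradient.
	dy := op.OnesLike(s, y)
	op.FusedBatchNormGrad(s, dy, x, scale, res1, res2,
		op.FusedBatchNormGradEpsilon(0.001))
	if _, err := s.Finalize(); err != nil {
		panic(err)
	}
}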
// ShapeAttr is an optional argument to Shape.
type ShapeAttr func(optionalAttr)
// ShapeOutType sets the optional out_type attribute to value.
// If not specified, defaults to DT_INT32
func ShapeOutType(value tf.DataType) ShapeAttr {
return func(m optionalAttr) {
m["out_type"] = value
}
}
// Returns the shape of a tensor.
//
// This operation returns a 1-D integer tensor representing the shape of `input`.
//
// For example:
//
// ```
// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
// shape(t) ==> [2, 2, 3]
// ```
func Shape(scope *Scope, input tf.Output, optional ...ShapeAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
Type: "Shape",
Input: []tf.Input{
input,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
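As a usage illustration (not part of the diff), a minimal program that reproduces the example from the doc comment above and requests an int64 result via the optional attribute:

package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// 't' has shape [2, 2, 3], matching the example in the doc comment.
	t := op.Const(s, [][][]int32{{{1, 1, 1}, {2, 2, 2}}, {{3, 3, 3}, {4, 4, 4}}})
	shape := op.Shape(s, t, op.ShapeOutType(tf.Int64))
	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	out, err := sess.Run(nil, []tf.Output{shape}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [2 2 3]
}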
// Computes softmax cross entropy cost and gradients to backpropagate.
//
// Inputs are the logits, not probabilities.
//
// Arguments:
// features: batch_size x num_classes matrix
// labels: batch_size x num_classes matrix
// The caller must ensure that each batch of labels represents a valid
// probability distribution.
//
// Returns Per-example loss (batch_size vector). Backpropagated gradients
// (batch_size x num_classes matrix).
func SoftmaxCrossEntropyWithLogits(scope *Scope, features tf.Output, labels tf.Output) (loss tf.Output, backprop tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
Type: "SoftmaxCrossEntropyWithLogits",
Input: []tf.Input{
features, labels,
},
}
op := scope.AddOperation(opspec)
return op.Output(0), op.Output(1)
}
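An illustrative end-to-end sketch (not part of the diff); each labels row is a valid probability distribution, as the doc comment requires:

package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// Two examples, three classes.
	logits := op.Const(s, [][]float32{{2, 1, 0}, {0, 1, 2}})
	labels := op.Const(s, [][]float32{{1, 0, 0}, {0, 0, 1}})
	loss, backprop := op.SoftmaxCrossEntropyWithLogits(s, logits, labels)
	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	out, err := sess.Run(nil, []tf.Output{loss, backprop}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // length-2 per-example loss vector
	fmt.Println(out[1].Value()) // 2x3 gradient matrix
}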
// MaxPool3DGradGradAttr is an optional argument to MaxPool3DGradGrad.
type MaxPool3DGradGradAttr func(optionalAttr)
@@ -10312,7 +10172,7 @@ func DecodeJSONExample(scope *Scope, json_examples tf.Output) (binary_examples t
// Requires `updates.shape = indices.shape + ref.shape[1:]`.
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png" alt>
// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
// </div>
//
// Arguments:
@@ -13575,45 +13435,6 @@ func Conv3D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, pa
return op.Output(0)
}
// L2 Loss.
//
// Computes half the L2 norm of a tensor without the `sqrt`:
//
// output = sum(t ** 2) / 2
//
// Arguments:
// t: Typically 2-D, but may have any dimensions.
//
// Returns 0-D.
func L2Loss(scope *Scope, t tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
Type: "L2Loss",
Input: []tf.Input{
t,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
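A small worked example (illustrative, not part of the diff): for t = [3, 4], output = (3*3 + 4*4) / 2 = 12.5.

package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	loss := op.L2Loss(s, op.Const(s, []float32{3, 4}))
	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	out, err := sess.Run(nil, []tf.Output{loss}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // 12.5
}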
// Computes rectified linear: `max(features, 0)`.
func Relu(scope *Scope, features tf.Output) (activations tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
Type: "Relu",
Input: []tf.Input{
features,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
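For completeness, a minimal construction-only sketch (illustrative, not part of the diff):

package main

import (
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// Elementwise max(features, 0): [-2 0 3] -> [0 0 3].
	op.Relu(s, op.Const(s, []float32{-2, 0, 3}))
	if _, err := s.Finalize(); err != nil {
		panic(err)
	}
}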
// Returns the truth value of (x >= y) element-wise.
//
// *NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting
@@ -14684,6 +14505,185 @@ func SerializeTensor(scope *Scope, tensor tf.Output) (serialized tf.Output) {
return op.Output(0)
}
// FusedBatchNormGradAttr is an optional argument to FusedBatchNormGrad.
type FusedBatchNormGradAttr func(optionalAttr)
// FusedBatchNormGradEpsilon sets the optional epsilon attribute to value.
//
// value: A small float number added to the variance of x.
// If not specified, defaults to 0.0001
func FusedBatchNormGradEpsilon(value float32) FusedBatchNormGradAttr {
return func(m optionalAttr) {
m["epsilon"] = value
}
}
// FusedBatchNormGradDataFormat sets the optional data_format attribute to value.
//
// value: The data format for y_backprop, x, x_backprop.
// Either "NHWC" (default) or "NCHW".
// If not specified, defaults to "NHWC"
func FusedBatchNormGradDataFormat(value string) FusedBatchNormGradAttr {
return func(m optionalAttr) {
m["data_format"] = value
}
}
// FusedBatchNormGradIsTraining sets the optional is_training attribute to value.
//
// value: A bool value to indicate the operation is for training (default)
// or inference.
// If not specified, defaults to true
func FusedBatchNormGradIsTraining(value bool) FusedBatchNormGradAttr {
return func(m optionalAttr) {
m["is_training"] = value
}
}
// Gradient for batch normalization.
//
// Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
// The size of 1D Tensors matches the dimension C of the 4D Tensors.
//
// Arguments:
// y_backprop: A 4D Tensor for the gradient with respect to y.
// x: A 4D Tensor for input data.
// scale: A 1D Tensor for scaling factor, to scale the normalized x.
// reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
// mean to be reused in gradient computation. When is_training is
// False, a 1D Tensor for the population mean to be reused in both
// 1st and 2nd order gradient computation.
// reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
// variance (inverted variance in the cuDNN case) to be reused in
// gradient computation. When is_training is False, a 1D Tensor
// for the population variance to be reused in both 1st and 2nd
// order gradient computation.
//
// Returns A 4D Tensor for the gradient with respect to x. A 1D Tensor for the
// gradient with respect to scale. A 1D Tensor for the gradient with respect to
// offset. Unused placeholder to match the mean input in FusedBatchNorm. Unused
// placeholder to match the variance input in FusedBatchNorm.
func FusedBatchNormGrad(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, optional ...FusedBatchNormGradAttr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_3 tf.Output, reserve_space_4 tf.Output) {
if scope.Err() != nil {
return
}
attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
Type: "FusedBatchNormGrad",
Input: []tf.Input{
y_backprop, x, scale, reserve_space_1, reserve_space_2,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
}
// Computes rectified linear: `max(features, 0)`.
func Relu(scope *Scope, features tf.Output) (activations tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
Type: "Relu",
Input: []tf.Input{
features,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
// L2 Loss.
//
// Computes half the L2 norm of a tensor without the `sqrt`:
//
// output = sum(t ** 2) / 2
//
// Arguments:
// t: Typically 2-D, but may have any dimensions.
//
// Returns 0-D.
func L2Loss(scope *Scope, t tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
Type: "L2Loss",
Input: []tf.Input{
t,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
// ShapeAttr is an optional argument to Shape.
type ShapeAttr func(optionalAttr)
// ShapeOutType sets the optional out_type attribute to value.
// If not specified, defaults to DT_INT32
func ShapeOutType(value tf.DataType) ShapeAttr {
return func(m optionalAttr) {
m["out_type"] = value
}
}
// Returns the shape of a tensor.
//
// This operation returns a 1-D integer tensor representing the shape of `input`.
//
// For example:
//
// ```
// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
// shape(t) ==> [2, 2, 3]
// ```
func Shape(scope *Scope, input tf.Output, optional ...ShapeAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
Type: "Shape",
Input: []tf.Input{
input,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
// Computes softmax cross entropy cost and gradients to backpropagate.
//
// Inputs are the logits, not probabilities.
//
// Arguments:
// features: batch_size x num_classes matrix
// labels: batch_size x num_classes matrix
// The caller must ensure that each batch of labels represents a valid
// probability distribution.
//
// Returns Per-example loss (batch_size vector). Backpropagated gradients
// (batch_size x num_classes matrix).
func SoftmaxCrossEntropyWithLogits(scope *Scope, features tf.Output, labels tf.Output) (loss tf.Output, backprop tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
Type: "SoftmaxCrossEntropyWithLogits",
Input: []tf.Input{
features, labels,
},
}
op := scope.AddOperation(opspec)
return op.Output(0), op.Output(1)
}
// Get the value of the tensor specified by its handle.
//
// Arguments:
@@ -18413,6 +18413,195 @@ func AsString(scope *Scope, input tf.Output, optional ...AsStringAttr) (output t
return op.Output(0)
}
// Assigns sparse updates to the variable referenced by `resource`.
//
// This operation computes
//
// # Scalar indices
// ref[indices, ...] = updates[...]
//
// # Vector indices (for each i)
// ref[indices[i], ...] = updates[i, ...]
//
// # High rank indices (for each i, ..., j)
// ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
//
// Arguments:
// resource: Should be from a `Variable` node.
// indices: A tensor of indices into the first dimension of `ref`.
// updates: A tensor of updated values to add to `ref`.
//
// Returns the created operation.
func ResourceScatterUpdate(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
Type: "ResourceScatterUpdate",
Input: []tf.Input{
resource, indices, updates,
},
}
return scope.AddOperation(opspec)
}
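An illustrative end-to-end sketch (not part of the diff). It assumes the VarHandleOp, AssignVariableOp, and ReadVariableOp wrappers from this same generated file; since ResourceScatterUpdate returns a *tf.Operation rather than an output, it is run as a target:

package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// A length-3 float variable.
	v := op.VarHandleOp(s, tf.Float, tf.MakeShape(3))
	initOp := op.AssignVariableOp(s, v, op.Const(s, []float32{0, 0, 0}))
	update := op.ResourceScatterUpdate(s, v,
		op.Const(s, []int32{0, 2}), op.Const(s, []float32{10, 30}))
	read := op.ReadVariableOp(s, v, tf.Float)
	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	// Initialize, apply the scatter, then read the variable back.
	if _, err := sess.Run(nil, nil, []*tf.Operation{initOp}); err != nil {
		panic(err)
	}
	if _, err := sess.Run(nil, nil, []*tf.Operation{update}); err != nil {
		panic(err)
	}
	out, err := sess.Run(nil, []tf.Output{read}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [10 0 30]
}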
// Given a path to new and old vocabulary files, returns a remapping Tensor of
//
// length `num_new_vocab`, where `remapping[i]` contains the row number in the old
// vocabulary that corresponds to row `i` in the new vocabulary (starting at line
// `new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`
// in the new vocabulary is not in the old vocabulary. `new_vocab_offset` enables
// use in the partitioned variable case, and should generally be set through
// examining partitioning info. Each vocabulary file should be a text file, with
// each line containing a single entity within the vocabulary.
//
// For example, with `new_vocab_file` a text file containing each of the following
// elements on a single line: `[f0, f1, f2, f3]`, `old_vocab_file` containing
// `[f1, f0, f3]`, and `num_new_vocab = 3, new_vocab_offset = 1`, the returned
// remapping would be `[0, -1, 2]`.
//
// The op also returns a count of how many entries in the new vocabulary
// were present in the old vocabulary, which is used to calculate the number of
// values to initialize in a weight matrix remapping.
//
// This functionality can be used to remap both row vocabularies (typically,
// features) and column vocabularies (typically, classes) from TensorFlow
// checkpoints. Note that the partitioning logic relies on contiguous vocabularies
// corresponding to div-partitioned variables. Moreover, the underlying remapping
// uses an IndexTable (as opposed to an inexact CuckooTable), so client code should
// use the corresponding index_table_from_file() as the FeatureColumn framework
// does (as opposed to tf.feature_to_id(), which uses a CuckooTable).
//
// Arguments:
// new_vocab_file: Path to the new vocab file.
// old_vocab_file: Path to the old vocab file.
// new_vocab_offset: How many entries into the new vocab file to start reading.
// num_new_vocab: Number of entries in the new vocab file to remap.
//
// Returns A Tensor of length num_new_vocab where the element at index i
// is equal to the old ID that maps to the new ID i. This element is -1 for any
// new ID that is not found in the old vocabulary. Number of new vocab entries
// found in the old vocab.
func GenerateVocabRemapping(scope *Scope, new_vocab_file tf.Output, old_vocab_file tf.Output, new_vocab_offset int64, num_new_vocab int64) (remapping tf.Output, num_present tf.Output) {
if scope.Err() != nil {
return
}
attrs := map[string]interface{}{"new_vocab_offset": new_vocab_offset, "num_new_vocab": num_new_vocab}
opspec := tf.OpSpec{
Type: "GenerateVocabRemapping",
Input: []tf.Input{
new_vocab_file, old_vocab_file,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0), op.Output(1)
}
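A construction-only sketch (illustrative, not part of the diff). The file paths are hypothetical; each file would hold one vocabulary entry per line:

package main

import (
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// Hypothetical paths, fed as string scalars.
	newVocab := op.Const(s, "new_vocab.txt")
	oldVocab := op.Const(s, "old_vocab.txt")
	// Remap 3 entries, starting at line 1 of the new vocab file.
	remapping, numPresent := op.GenerateVocabRemapping(s, newVocab, oldVocab, 1, 3)
	_, _ = remapping, numPresent
	if _, err := s.Finalize(); err != nil {
		panic(err)
	}
}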
// Computes softsign: `features / (abs(features) + 1)`.
func Softsign(scope *Scope, features tf.Output) (activations tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
Type: "Softsign",
Input: []tf.Input{
features,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
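A quick worked check (illustrative, not part of the diff): softsign(3) = 3/4 = 0.75 and softsign(-1) = -1/2 = -0.5.

package main

import (
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// features / (abs(features) + 1): [3 -1] -> [0.75 -0.5].
	op.Softsign(s, op.Const(s, []float32{3, -1}))
	if _, err := s.Finalize(); err != nil {
		panic(err)
	}
}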
// ResizeBilinearAttr is an optional argument to ResizeBilinear.
type ResizeBilinearAttr func(optionalAttr)
// ResizeBilinearAlignCorners sets the optional align_corners attribute to value.
//
// value: If true, rescale input by (new_height - 1) / (height - 1), which
// exactly aligns the 4 corners of images and resized images. If false, rescale
// by new_height / height. Treat similarly the width dimension.
// If not specified, defaults to false
func ResizeBilinearAlignCorners(value bool) ResizeBilinearAttr {
return func(m optionalAttr) {
m["align_corners"] = value
}
}
// Resize `images` to `size` using bilinear interpolation.
//
// Input images can be of different types but output images are always float.
//
// Arguments:
// images: 4-D with shape `[batch, height, width, channels]`.
// size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
// new size for the images.
//
// Returns 4-D with shape
// `[batch, new_height, new_width, channels]`.
func ResizeBilinear(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeBilinearAttr) (resized_images tf.Output) {
if scope.Err() != nil {
return
}
attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
Type: "ResizeBilinear",
Input: []tf.Input{
images, size,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
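A construction-only sketch (illustrative, not part of the diff); with align_corners set, the four corner pixels of the 2x2 input map exactly onto the corners of the 4x4 output:

package main

import (
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// A 1x2x2x1 image, upsampled to 4x4.
	img := op.Const(s, [][][][]float32{{{{0}, {1}}, {{2}, {3}}}})
	size := op.Const(s, []int32{4, 4})
	op.ResizeBilinear(s, img, size, op.ResizeBilinearAlignCorners(true))
	if _, err := s.Finalize(); err != nil {
		panic(err)
	}
}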
// ProdAttr is an optional argument to Prod.
type ProdAttr func(optionalAttr)
// ProdKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func ProdKeepDims(value bool) ProdAttr {
return func(m optionalAttr) {
m["keep_dims"] = value
}
}
// Computes the product of elements across dimensions of a tensor.
//
// Reduces `input` along the dimensions given in `reduction_indices`. Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
// retained with length 1.
//
// Arguments:
// input: The tensor to reduce.
// reduction_indices: The dimensions to reduce. Must be in the range
// `[-rank(input), rank(input))`.
//
// Returns The reduced tensor.
func Prod(scope *Scope, input tf.Output, reduction_indices tf.Output, optional ...ProdAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
Type: "Prod",
Input: []tf.Input{
input, reduction_indices,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
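An illustrative end-to-end sketch (not part of the diff): reducing [[1, 2], [3, 4]] over dimension 0 multiplies down each column, giving [3, 8]; with ProdKeepDims(true) the result would instead be [[3, 8]].

package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	x := op.Const(s, [][]int32{{1, 2}, {3, 4}})
	axis := op.Const(s, []int32{0})
	prod := op.Prod(s, x, axis)
	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	out, err := sess.Run(nil, []tf.Output{prod}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [3 8]
}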
// StringSplitAttr is an optional argument to StringSplit.
type StringSplitAttr func(optionalAttr)
@@ -21437,163 +21626,6 @@ func SoftplusGrad(scope *Scope, gradients tf.Output, features tf.Output) (backpr
return op.Output(0)
}
// Given a path to new and old vocabulary files, returns a remapping Tensor of
//
// length `num_new_vocab`, where `remapping[i]` contains the row number in the old
// vocabulary that corresponds to row `i` in the new vocabulary (starting at line
// `new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`
// in the new vocabulary is not in the old vocabulary. `new_vocab_offset` enables
// use in the partitioned variable case, and should generally be set through
// examining partitioning info. Each vocabulary file should be a text file, with
// each line containing a single entity within the vocabulary.
//
// For example, with `new_vocab_file` a text file containing each of the following
// elements on a single line: `[f0, f1, f2, f3]`, `old_vocab_file` containing
// `[f1, f0, f3]`, and `num_new_vocab = 3, new_vocab_offset = 1`, the returned
// remapping would be `[0, -1, 2]`.
//
// The op also returns a count of how many entries in the new vocabulary
// were present in the old vocabulary, which is used to calculate the number of
// values to initialize in a weight matrix remapping.
//
// This functionality can be used to remap both row vocabularies (typically,
// features) and column vocabularies (typically, classes) from TensorFlow
// checkpoints. Note that the partitioning logic relies on contiguous vocabularies
// corresponding to div-partitioned variables. Moreover, the underlying remapping
// uses an IndexTable (as opposed to an inexact CuckooTable), so client code should
// use the corresponding index_table_from_file() as the FeatureColumn framework
// does (as opposed to tf.feature_to_id(), which uses a CuckooTable).
//
// Arguments:
// new_vocab_file: Path to the new vocab file.
// old_vocab_file: Path to the old vocab file.
// new_vocab_offset: How many entries into the new vocab file to start reading.
// num_new_vocab: Number of entries in the new vocab file to remap.
//
// Returns A Tensor of length num_new_vocab where the element at index i
// is equal to the old ID that maps to the new ID i. This element is -1 for any
// new ID that is not found in the old vocabulary. Number of new vocab entries
// found in the old vocab.
func GenerateVocabRemapping(scope *Scope, new_vocab_file tf.Output, old_vocab_file tf.Output, new_vocab_offset int64, num_new_vocab int64) (remapping tf.Output, num_present tf.Output) {
if scope.Err() != nil {
return
}
attrs := map[string]interface{}{"new_vocab_offset": new_vocab_offset, "num_new_vocab": num_new_vocab}
opspec := tf.OpSpec{
Type: "GenerateVocabRemapping",
Input: []tf.Input{
new_vocab_file, old_vocab_file,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0), op.Output(1)
}
// Computes softsign: `features / (abs(features) + 1)`.
func Softsign(scope *Scope, features tf.Output) (activations tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
Type: "Softsign",
Input: []tf.Input{
features,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
// ResizeBilinearAttr is an optional argument to ResizeBilinear.
type ResizeBilinearAttr func(optionalAttr)
// ResizeBilinearAlignCorners sets the optional align_corners attribute to value.
//
// value: If true, rescale input by (new_height - 1) / (height - 1), which
// exactly aligns the 4 corners of images and resized images. If false, rescale
// by new_height / height. Treat similarly the width dimension.
// If not specified, defaults to false
func ResizeBilinearAlignCorners(value bool) ResizeBilinearAttr {
return func(m optionalAttr) {
m["align_corners"] = value
}
}
// Resize `images` to `size` using bilinear interpolation.
//
// Input images can be of different types but output images are always float.
//
// Arguments:
// images: 4-D with shape `[batch, height, width, channels]`.
// size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
// new size for the images.
//
// Returns 4-D with shape
// `[batch, new_height, new_width, channels]`.
func ResizeBilinear(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeBilinearAttr) (resized_images tf.Output) {
if scope.Err() != nil {
return
}
attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
Type: "ResizeBilinear",
Input: []tf.Input{
images, size,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
// ProdAttr is an optional argument to Prod.
type ProdAttr func(optionalAttr)
// ProdKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func ProdKeepDims(value bool) ProdAttr {
return func(m optionalAttr) {
m["keep_dims"] = value
}
}
// Computes the product of elements across dimensions of a tensor.
//
// Reduces `input` along the dimensions given in `reduction_indices`. Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `reduction_indices`. If `keep_dims` is true, the reduced dimensions are
// retained with length 1.
//
// Arguments:
// input: The tensor to reduce.
// reduction_indices: The dimensions to reduce. Must be in the range
// `[-rank(input), rank(input))`.
//
// Returns The reduced tensor.
func Prod(scope *Scope, input tf.Output, reduction_indices tf.Output, optional ...ProdAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
Type: "Prod",
Input: []tf.Input{
input, reduction_indices,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
// Computes softsign gradients for a softsign operation.
//
// Arguments:
@@ -22842,6 +22874,76 @@ func Sqrt(scope *Scope, x tf.Output) (y tf.Output) {
return op.Output(0)
}
// MatrixInverseAttr is an optional argument to MatrixInverse.
type MatrixInverseAttr func(optionalAttr)
// MatrixInverseAdjoint sets the optional adjoint attribute to value.
// If not specified, defaults to false
func MatrixInverseAdjoint(value bool) MatrixInverseAttr {
return func(m optionalAttr) {
m["adjoint"] = value
}
}
// Computes the inverse of one or more square invertible matrices or their
//
// adjoints (conjugate transposes).
//
// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
// form square matrices. The output is a tensor of the same shape as the input
// containing the inverse for all input submatrices `[..., :, :]`.
//
// The op uses LU decomposition with partial pivoting to compute the inverses.
//
// If a matrix is not invertible there is no guarantee what the op does. It
// may detect the condition and raise an exception or it may simply return a
// garbage result.
//
// Arguments:
// input: Shape is `[..., M, M]`.
//
// Returns Shape is `[..., M, M]`.
//
// @compatibility(numpy)
// Equivalent to np.linalg.inv
// @end_compatibility
func MatrixInverse(scope *Scope, input tf.Output, optional ...MatrixInverseAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
Type: "MatrixInverse",
Input: []tf.Input{
input,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
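An illustrative end-to-end sketch (not part of the diff): for [[4, 7], [2, 6]] the determinant is 10, so the inverse is [[0.6, -0.7], [-0.2, 0.4]].

package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	m := op.Const(s, [][]float32{{4, 7}, {2, 6}})
	inv := op.MatrixInverse(s, m)
	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	out, err := sess.Run(nil, []tf.Output{inv}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [[0.6 -0.7] [-0.2 0.4]]
}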
// Computes the gradient for the sqrt of `x` wrt its input.
//
// Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
// is the corresponding input gradient.
func SqrtGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
Type: "SqrtGrad",
Input: []tf.Input{
y, dy,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
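A quick worked check (illustrative, not part of the diff): with y = sqrt(4) = 2 and dy = 1, grad = 1 * 0.5 / 2 = 0.25.

package main

import (
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// Note the op takes y (the forward result), not x: z = dy * 0.5 / y.
	op.SqrtGrad(s, op.Const(s, []float32{2}), op.Const(s, []float32{1}))
	if _, err := s.Finalize(); err != nil {
		panic(err)
	}
}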
// Inserts a dimension of 1 into a tensor's shape.
//
// Given a tensor `input`, this operation inserts a dimension of 1 at the
@@ -27003,73 +27105,3 @@ func AudioSummaryV2(scope *Scope, tag tf.Output, tensor tf.Output, sample_rate t
op := scope.AddOperation(opspec)
return op.Output(0)
}
// Computes the gradient for the sqrt of `x` wrt its input.
//
// Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
// is the corresponding input gradient.
func SqrtGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
Type: "SqrtGrad",
Input: []tf.Input{
y, dy,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
// MatrixInverseAttr is an optional argument to MatrixInverse.
type MatrixInverseAttr func(optionalAttr)
// MatrixInverseAdjoint sets the optional adjoint attribute to value.
// If not specified, defaults to false
func MatrixInverseAdjoint(value bool) MatrixInverseAttr {
return func(m optionalAttr) {
m["adjoint"] = value
}
}
// Computes the inverse of one or more square invertible matrices or their
//
// adjoints (conjugate transposes).
//
// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
// form square matrices. The output is a tensor of the same shape as the input
// containing the inverse for all input submatrices `[..., :, :]`.
//
// The op uses LU decomposition with partial pivoting to compute the inverses.
//
// If a matrix is not invertible there is no guarantee what the op does. It
// may detect the condition and raise an exception or it may simply return a
// garbage result.
//
// Arguments:
// input: Shape is `[..., M, M]`.
//
// Returns Shape is `[..., M, M]`.
//
// @compatibility(numpy)
// Equivalent to np.linalg.inv
// @end_compatibility
func MatrixInverse(scope *Scope, input tf.Output, optional ...MatrixInverseAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
Type: "MatrixInverse",
Input: []tf.Input{
input,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}