Go: Update generated wrapper functions for TensorFlow ops.

PiperOrigin-RevId: 381374492
Change-Id: Ie94eb36485d8835b7400759db4d15f0e7fa442d6
This commit is contained in:
A. Unique TensorFlower 2021-06-24 17:48:40 -07:00 committed by TensorFlower Gardener
parent 4cdece0345
commit e8253e1138

View File

@ -23504,29 +23504,6 @@ func ExperimentalRandomDataset(scope *Scope, seed tf.Output, seed2 tf.Output, ou
return op.Output(0)
}
// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
//
// Arguments:
//
//	num_threads: Identifies the number of threads to use for the private threadpool.
func PrivateThreadPoolDataset(scope *Scope, input_dataset tf.Output, num_threads tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	// Required attributes of the underlying op.
	attrs := map[string]interface{}{
		"output_types":  output_types,
		"output_shapes": output_shapes,
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "PrivateThreadPoolDataset",
		Input: []tf.Input{input_dataset, num_threads},
		Attrs: attrs,
	})
	return op.Output(0)
}
// Returns a batched matrix tensor with new batched diagonal values.
//
// Given `input` and `diagonal`, this operation returns a tensor with the
@ -28312,6 +28289,200 @@ func SparseCrossV2(scope *Scope, indices []tf.Output, values []tf.Output, shapes
return op.Output(0), op.Output(1), op.Output(2)
}
// BlockLSTMV2Attr is an optional argument to BlockLSTMV2.
type BlockLSTMV2Attr func(optionalAttr)

// BlockLSTMV2CellClip sets the optional cell_clip attribute to value.
//
// value: Value to clip the 'cs' value to.
// If not specified, defaults to 0
func BlockLSTMV2CellClip(value float32) BlockLSTMV2Attr {
	return func(attrs optionalAttr) {
		attrs["cell_clip"] = value
	}
}

// BlockLSTMV2UsePeephole sets the optional use_peephole attribute to value.
//
// value: Whether to use peephole weights.
// If not specified, defaults to false
func BlockLSTMV2UsePeephole(value bool) BlockLSTMV2Attr {
	return func(attrs optionalAttr) {
		attrs["use_peephole"] = value
	}
}

// Computes the LSTM cell forward propagation for all the time steps.
//
// This is equivalent to applying LSTMBlockCell in a loop, like so:
//
// ```python
// for x1 in unpack(x):
//   i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(
//     x1, cs_prev, h_prev, w, wci, wcf, wco, b)
//   cs_prev = cs1
//   h_prev = h1
//   i.append(i1)
//   cs.append(cs1)
//   f.append(f1)
//   o.append(o1)
//   ci.append(ci1)
//   co.append(co1)
//   h.append(h1)
// return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(co), pack(h)
// ```
//
// Note that unlike LSTMBlockCell (and BlockLSTM) which uses ICFO gate layout,
// this op uses IFCO. So in order for the above snippet to be equivalent
// all gate-related outputs should be reordered.
//
// Arguments:
//	seq_len_max: Maximum time length actually used by this input. Outputs are padded
// with zeros beyond this length.
//	x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
//	cs_prev: Value of the initial cell state.
//	h_prev: Initial output of cell (to be used for peephole).
//	w: The weight matrix.
//	wci: The weight matrix for input gate peephole connection.
//	wcf: The weight matrix for forget gate peephole connection.
//	wco: The weight matrix for output gate peephole connection.
//	b: The bias vector.
//
// Returns:
//	i: The input gate over the whole time sequence.
//	cs: The cell state before the tanh over the whole time sequence.
//	f: The forget gate over the whole time sequence.
//	o: The output gate over the whole time sequence.
//	ci: The cell input over the whole time sequence.
//	co: The cell after the tanh over the whole time sequence.
//	h: The output h vector over the whole time sequence.
func BlockLSTMV2(scope *Scope, seq_len_max tf.Output, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, optional ...BlockLSTMV2Attr) (i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, h tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, fn := range optional {
		fn(attrs)
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "BlockLSTMV2",
		Input: []tf.Input{seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b},
		Attrs: attrs,
	})
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
}
// Extract `patches` from `images` and put them in the "depth" output dimension.
//
// Arguments:
//	images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
//	ksizes: The size of the sliding window for each dimension of `images`.
//	strides: How far the centers of two consecutive patches are in
// the images. Must be: `[1, stride_rows, stride_cols, 1]`.
//	rates: Must be: `[1, rate_rows, rate_cols, 1]`. This is the
// input stride, specifying how far two consecutive patch samples are in the
// input. Equivalent to extracting patches with
// `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by
// subsampling them spatially by a factor of `rates`. This is equivalent to
// `rate` in dilated (a.k.a. Atrous) convolutions.
//	padding: The type of padding algorithm to use.
//
// Returns 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *
// ksize_cols * depth]` containing image patches with size
// `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note
// `out_rows` and `out_cols` are the dimensions of the output patches.
func ExtractImagePatches(scope *Scope, images tf.Output, ksizes []int64, strides []int64, rates []int64, padding string) (patches tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{
		"ksizes":  ksizes,
		"strides": strides,
		"rates":   rates,
		"padding": padding,
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "ExtractImagePatches",
		Input: []tf.Input{images},
		Attrs: attrs,
	})
	return op.Output(0)
}
// Forwards the value of an available tensor from `inputs` to `output`.
//
// `Merge` waits for at least one of the tensors in `inputs` to become available.
// It is usually combined with `Switch` to implement branching.
//
// `Merge` forwards the first tensor to become available to `output`, and sets
// `value_index` to its index in `inputs`.
//
// Arguments:
//	inputs: The input tensors, exactly one of which will become available.
//
// Returns:
//	output: Will be set to the available input tensor.
//	value_index: The index of the chosen input tensor in `inputs`.
func Merge(scope *Scope, inputs []tf.Output) (output tf.Output, value_index tf.Output) {
	if scope.Err() != nil {
		return
	}
	// The op takes a single variadic input list; no attributes.
	op := scope.AddOperation(tf.OpSpec{
		Type:  "Merge",
		Input: []tf.Input{tf.OutputList(inputs)},
	})
	return op.Output(0), op.Output(1)
}
// PaddedBatchDatasetV2Attr is an optional argument to PaddedBatchDatasetV2.
type PaddedBatchDatasetV2Attr func(optionalAttr)

// PaddedBatchDatasetV2ParallelCopy sets the optional parallel_copy attribute to value.
// If not specified, defaults to false
func PaddedBatchDatasetV2ParallelCopy(value bool) PaddedBatchDatasetV2Attr {
	return func(attrs optionalAttr) {
		attrs["parallel_copy"] = value
	}
}

// Creates a dataset that batches and pads `batch_size` elements from the input.
//
// Arguments:
//
//	batch_size: A scalar representing the number of elements to accumulate in a
// batch.
//	padded_shapes: A list of int64 tensors representing the desired padded shapes
// of the corresponding output components. These shapes may be partially
// specified, using `-1` to indicate that a particular dimension should be
// padded to the maximum size of all batch elements.
//	padding_values: A list of scalars containing the padding value to use for
// each of the outputs.
//	drop_remainder: A scalar representing whether the last batch should be dropped
// in case its size is smaller than desired.
func PaddedBatchDatasetV2(scope *Scope, input_dataset tf.Output, batch_size tf.Output, padded_shapes []tf.Output, padding_values []tf.Output, drop_remainder tf.Output, output_shapes []tf.Shape, optional ...PaddedBatchDatasetV2Attr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	// Start from the required attribute, then layer on any optional ones.
	attrs := map[string]interface{}{"output_shapes": output_shapes}
	for _, fn := range optional {
		fn(attrs)
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "PaddedBatchDatasetV2",
		Input: []tf.Input{input_dataset, batch_size, tf.OutputList(padded_shapes), tf.OutputList(padding_values), drop_remainder},
		Attrs: attrs,
	})
	return op.Output(0)
}
// CudnnRNNAttr is an optional argument to CudnnRNN. Like the other *Attr
// types in this package, it is a functional option that writes one optional
// attribute into the op's attribute map.
type CudnnRNNAttr func(optionalAttr)
@ -33442,6 +33613,182 @@ func TensorArraySplitV2(scope *Scope, handle tf.Output, value tf.Output, lengths
return op.Output(0)
}
// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
//
// Arguments:
//
//	num_threads: Identifies the number of threads to use for the private threadpool.
func PrivateThreadPoolDataset(scope *Scope, input_dataset tf.Output, num_threads tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	// Required attributes of the underlying op.
	attrs := map[string]interface{}{
		"output_types":  output_types,
		"output_shapes": output_shapes,
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "PrivateThreadPoolDataset",
		Input: []tf.Input{input_dataset, num_threads},
		Attrs: attrs,
	})
	return op.Output(0)
}
// Returns the diagonal part of the tensor.
//
// This operation returns a tensor with the `diagonal` part
// of the `input`. The `diagonal` part is computed as follows:
//
// Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
// tensor of rank `k` with dimensions `[D1,..., Dk]` where:
//
// `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.
//
// For example:
//
// ```
// # 'input' is [[1, 0, 0, 0]
// #             [0, 2, 0, 0]
// #             [0, 0, 3, 0]
// #             [0, 0, 0, 4]]
//
// tf.diag_part(input) ==> [1, 2, 3, 4]
// ```
//
// Arguments:
//	input: Rank k tensor where k is even and not zero.
//
// Returns The extracted diagonal.
func DiagPart(scope *Scope, input tf.Output) (diagonal tf.Output) {
	if scope.Err() != nil {
		return
	}
	// No attributes; single input, single output.
	op := scope.AddOperation(tf.OpSpec{
		Type:  "DiagPart",
		Input: []tf.Input{input},
	})
	return op.Output(0)
}
// CudnnRNNParamsToCanonicalAttr is an optional argument to CudnnRNNParamsToCanonical.
type CudnnRNNParamsToCanonicalAttr func(optionalAttr)

// CudnnRNNParamsToCanonicalRnnMode sets the optional rnn_mode attribute to value.
// If not specified, defaults to "lstm"
func CudnnRNNParamsToCanonicalRnnMode(value string) CudnnRNNParamsToCanonicalAttr {
	return func(attrs optionalAttr) {
		attrs["rnn_mode"] = value
	}
}

// CudnnRNNParamsToCanonicalInputMode sets the optional input_mode attribute to value.
// If not specified, defaults to "linear_input"
func CudnnRNNParamsToCanonicalInputMode(value string) CudnnRNNParamsToCanonicalAttr {
	return func(attrs optionalAttr) {
		attrs["input_mode"] = value
	}
}

// CudnnRNNParamsToCanonicalDirection sets the optional direction attribute to value.
// If not specified, defaults to "unidirectional"
func CudnnRNNParamsToCanonicalDirection(value string) CudnnRNNParamsToCanonicalAttr {
	return func(attrs optionalAttr) {
		attrs["direction"] = value
	}
}

// CudnnRNNParamsToCanonicalDropout sets the optional dropout attribute to value.
// If not specified, defaults to 0
func CudnnRNNParamsToCanonicalDropout(value float32) CudnnRNNParamsToCanonicalAttr {
	return func(attrs optionalAttr) {
		attrs["dropout"] = value
	}
}

// CudnnRNNParamsToCanonicalSeed sets the optional seed attribute to value.
// If not specified, defaults to 0
func CudnnRNNParamsToCanonicalSeed(value int64) CudnnRNNParamsToCanonicalAttr {
	return func(attrs optionalAttr) {
		attrs["seed"] = value
	}
}

// CudnnRNNParamsToCanonicalSeed2 sets the optional seed2 attribute to value.
// If not specified, defaults to 0
func CudnnRNNParamsToCanonicalSeed2(value int64) CudnnRNNParamsToCanonicalAttr {
	return func(attrs optionalAttr) {
		attrs["seed2"] = value
	}
}

// Retrieves CudnnRNN params in canonical form.
//
// Retrieves a set of weights from the opaque params buffer that can be saved and
// restored in a way compatible with future runs.
//
// Note that the params buffer may not be compatible across different GPUs. So any
// save and restoration should be converted to and from the canonical weights and
// biases.
//
// num_layers: Specifies the number of layers in the RNN model.
// num_units: Specifies the size of the hidden state.
// input_size: Specifies the size of the input state.
// num_params: number of parameter sets for all layers.
// Each layer may contain multiple parameter sets, with each set consisting of
// a weight matrix and a bias vector.
// weights: the canonical form of weights that can be used for saving
// and restoration. They are more likely to be compatible across different
// generations.
// biases: the canonical form of biases that can be used for saving
// and restoration. They are more likely to be compatible across different
// generations.
// rnn_mode: Indicates the type of the RNN model.
// input_mode: Indicate whether there is a linear projection between the input and
// The actual computation before the first layer. 'skip_input' is only allowed
// when input_size == num_units; 'auto_select' implies 'skip_input' when
// input_size == num_units; otherwise, it implies 'linear_input'.
// direction: Indicates whether a bidirectional model will be used.
// dir = (direction == bidirectional) ? 2 : 1
// dropout: dropout probability. When set to 0., dropout is disabled.
// seed: the 1st part of a seed to initialize dropout.
// seed2: the 2nd part of a seed to initialize dropout.
func CudnnRNNParamsToCanonical(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, params tf.Output, num_params int64, optional ...CudnnRNNParamsToCanonicalAttr) (weights []tf.Output, biases []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_params": num_params}
	for _, fn := range optional {
		fn(attrs)
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "CudnnRNNParamsToCanonical",
		Input: []tf.Input{num_layers, num_units, input_size, params},
		Attrs: attrs,
	})
	if scope.Err() != nil {
		return
	}
	// The op yields two variadic output lists; unpack them in declaration order.
	var idx int
	var err error
	if weights, idx, err = makeOutputList(op, idx, "weights"); err != nil {
		scope.UpdateErr("CudnnRNNParamsToCanonical", err)
		return
	}
	if biases, idx, err = makeOutputList(op, idx, "biases"); err != nil {
		scope.UpdateErr("CudnnRNNParamsToCanonical", err)
		return
	}
	return weights, biases
}
// Computes the number of elements in the given queue.
//
// Arguments:
@ -35257,159 +35604,6 @@ func TensorArraySizeV3(scope *Scope, handle tf.Output, flow_in tf.Output) (size
return op.Output(0)
}
// Returns the diagonal part of the tensor.
//
// This operation returns a tensor with the `diagonal` part
// of the `input`. The `diagonal` part is computed as follows:
//
// Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
// tensor of rank `k` with dimensions `[D1,..., Dk]` where:
//
// `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.
//
// For example:
//
// ```
// # 'input' is [[1, 0, 0, 0]
// #             [0, 2, 0, 0]
// #             [0, 0, 3, 0]
// #             [0, 0, 0, 4]]
//
// tf.diag_part(input) ==> [1, 2, 3, 4]
// ```
//
// Arguments:
//	input: Rank k tensor where k is even and not zero.
//
// Returns The extracted diagonal.
func DiagPart(scope *Scope, input tf.Output) (diagonal tf.Output) {
	if scope.Err() != nil {
		return
	}
	// No attributes; single input, single output.
	op := scope.AddOperation(tf.OpSpec{
		Type:  "DiagPart",
		Input: []tf.Input{input},
	})
	return op.Output(0)
}
// CudnnRNNParamsToCanonicalAttr is an optional argument to CudnnRNNParamsToCanonical.
type CudnnRNNParamsToCanonicalAttr func(optionalAttr)

// CudnnRNNParamsToCanonicalRnnMode sets the optional rnn_mode attribute to value.
// If not specified, defaults to "lstm"
func CudnnRNNParamsToCanonicalRnnMode(value string) CudnnRNNParamsToCanonicalAttr {
	return func(attrs optionalAttr) {
		attrs["rnn_mode"] = value
	}
}

// CudnnRNNParamsToCanonicalInputMode sets the optional input_mode attribute to value.
// If not specified, defaults to "linear_input"
func CudnnRNNParamsToCanonicalInputMode(value string) CudnnRNNParamsToCanonicalAttr {
	return func(attrs optionalAttr) {
		attrs["input_mode"] = value
	}
}

// CudnnRNNParamsToCanonicalDirection sets the optional direction attribute to value.
// If not specified, defaults to "unidirectional"
func CudnnRNNParamsToCanonicalDirection(value string) CudnnRNNParamsToCanonicalAttr {
	return func(attrs optionalAttr) {
		attrs["direction"] = value
	}
}

// CudnnRNNParamsToCanonicalDropout sets the optional dropout attribute to value.
// If not specified, defaults to 0
func CudnnRNNParamsToCanonicalDropout(value float32) CudnnRNNParamsToCanonicalAttr {
	return func(attrs optionalAttr) {
		attrs["dropout"] = value
	}
}

// CudnnRNNParamsToCanonicalSeed sets the optional seed attribute to value.
// If not specified, defaults to 0
func CudnnRNNParamsToCanonicalSeed(value int64) CudnnRNNParamsToCanonicalAttr {
	return func(attrs optionalAttr) {
		attrs["seed"] = value
	}
}

// CudnnRNNParamsToCanonicalSeed2 sets the optional seed2 attribute to value.
// If not specified, defaults to 0
func CudnnRNNParamsToCanonicalSeed2(value int64) CudnnRNNParamsToCanonicalAttr {
	return func(attrs optionalAttr) {
		attrs["seed2"] = value
	}
}

// Retrieves CudnnRNN params in canonical form.
//
// Retrieves a set of weights from the opaque params buffer that can be saved and
// restored in a way compatible with future runs.
//
// Note that the params buffer may not be compatible across different GPUs. So any
// save and restoration should be converted to and from the canonical weights and
// biases.
//
// num_layers: Specifies the number of layers in the RNN model.
// num_units: Specifies the size of the hidden state.
// input_size: Specifies the size of the input state.
// num_params: number of parameter sets for all layers.
// Each layer may contain multiple parameter sets, with each set consisting of
// a weight matrix and a bias vector.
// weights: the canonical form of weights that can be used for saving
// and restoration. They are more likely to be compatible across different
// generations.
// biases: the canonical form of biases that can be used for saving
// and restoration. They are more likely to be compatible across different
// generations.
// rnn_mode: Indicates the type of the RNN model.
// input_mode: Indicate whether there is a linear projection between the input and
// The actual computation before the first layer. 'skip_input' is only allowed
// when input_size == num_units; 'auto_select' implies 'skip_input' when
// input_size == num_units; otherwise, it implies 'linear_input'.
// direction: Indicates whether a bidirectional model will be used.
// dir = (direction == bidirectional) ? 2 : 1
// dropout: dropout probability. When set to 0., dropout is disabled.
// seed: the 1st part of a seed to initialize dropout.
// seed2: the 2nd part of a seed to initialize dropout.
func CudnnRNNParamsToCanonical(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, params tf.Output, num_params int64, optional ...CudnnRNNParamsToCanonicalAttr) (weights []tf.Output, biases []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_params": num_params}
	for _, fn := range optional {
		fn(attrs)
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "CudnnRNNParamsToCanonical",
		Input: []tf.Input{num_layers, num_units, input_size, params},
		Attrs: attrs,
	})
	if scope.Err() != nil {
		return
	}
	// The op yields two variadic output lists; unpack them in declaration order.
	var idx int
	var err error
	if weights, idx, err = makeOutputList(op, idx, "weights"); err != nil {
		scope.UpdateErr("CudnnRNNParamsToCanonical", err)
		return
	}
	if biases, idx, err = makeOutputList(op, idx, "biases"); err != nil {
		scope.UpdateErr("CudnnRNNParamsToCanonical", err)
		return
	}
	return weights, biases
}
// Creates a sequence of numbers.
//
// This operation creates a sequence of numbers that begins at `start` and
@ -36438,200 +36632,6 @@ func PriorityQueueV2(scope *Scope, shapes []tf.Shape, optional ...PriorityQueueV
return op.Output(0)
}
// BlockLSTMV2Attr is an optional argument to BlockLSTMV2.
type BlockLSTMV2Attr func(optionalAttr)

// BlockLSTMV2CellClip sets the optional cell_clip attribute to value.
//
// value: Value to clip the 'cs' value to.
// If not specified, defaults to 0
func BlockLSTMV2CellClip(value float32) BlockLSTMV2Attr {
	return func(attrs optionalAttr) {
		attrs["cell_clip"] = value
	}
}

// BlockLSTMV2UsePeephole sets the optional use_peephole attribute to value.
//
// value: Whether to use peephole weights.
// If not specified, defaults to false
func BlockLSTMV2UsePeephole(value bool) BlockLSTMV2Attr {
	return func(attrs optionalAttr) {
		attrs["use_peephole"] = value
	}
}

// Computes the LSTM cell forward propagation for all the time steps.
//
// This is equivalent to applying LSTMBlockCell in a loop, like so:
//
// ```python
// for x1 in unpack(x):
//   i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(
//     x1, cs_prev, h_prev, w, wci, wcf, wco, b)
//   cs_prev = cs1
//   h_prev = h1
//   i.append(i1)
//   cs.append(cs1)
//   f.append(f1)
//   o.append(o1)
//   ci.append(ci1)
//   co.append(co1)
//   h.append(h1)
// return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(co), pack(h)
// ```
//
// Note that unlike LSTMBlockCell (and BlockLSTM) which uses ICFO gate layout,
// this op uses IFCO. So in order for the above snippet to be equivalent
// all gate-related outputs should be reordered.
//
// Arguments:
//	seq_len_max: Maximum time length actually used by this input. Outputs are padded
// with zeros beyond this length.
//	x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
//	cs_prev: Value of the initial cell state.
//	h_prev: Initial output of cell (to be used for peephole).
//	w: The weight matrix.
//	wci: The weight matrix for input gate peephole connection.
//	wcf: The weight matrix for forget gate peephole connection.
//	wco: The weight matrix for output gate peephole connection.
//	b: The bias vector.
//
// Returns:
//	i: The input gate over the whole time sequence.
//	cs: The cell state before the tanh over the whole time sequence.
//	f: The forget gate over the whole time sequence.
//	o: The output gate over the whole time sequence.
//	ci: The cell input over the whole time sequence.
//	co: The cell after the tanh over the whole time sequence.
//	h: The output h vector over the whole time sequence.
func BlockLSTMV2(scope *Scope, seq_len_max tf.Output, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, optional ...BlockLSTMV2Attr) (i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, h tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, fn := range optional {
		fn(attrs)
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "BlockLSTMV2",
		Input: []tf.Input{seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b},
		Attrs: attrs,
	})
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
}
// PaddedBatchDatasetV2Attr is an optional argument to PaddedBatchDatasetV2.
type PaddedBatchDatasetV2Attr func(optionalAttr)

// PaddedBatchDatasetV2ParallelCopy sets the optional parallel_copy attribute to value.
// If not specified, defaults to false
func PaddedBatchDatasetV2ParallelCopy(value bool) PaddedBatchDatasetV2Attr {
	return func(attrs optionalAttr) {
		attrs["parallel_copy"] = value
	}
}

// Creates a dataset that batches and pads `batch_size` elements from the input.
//
// Arguments:
//
//	batch_size: A scalar representing the number of elements to accumulate in a
// batch.
//	padded_shapes: A list of int64 tensors representing the desired padded shapes
// of the corresponding output components. These shapes may be partially
// specified, using `-1` to indicate that a particular dimension should be
// padded to the maximum size of all batch elements.
//	padding_values: A list of scalars containing the padding value to use for
// each of the outputs.
//	drop_remainder: A scalar representing whether the last batch should be dropped
// in case its size is smaller than desired.
func PaddedBatchDatasetV2(scope *Scope, input_dataset tf.Output, batch_size tf.Output, padded_shapes []tf.Output, padding_values []tf.Output, drop_remainder tf.Output, output_shapes []tf.Shape, optional ...PaddedBatchDatasetV2Attr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	// Start from the required attribute, then layer on any optional ones.
	attrs := map[string]interface{}{"output_shapes": output_shapes}
	for _, fn := range optional {
		fn(attrs)
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "PaddedBatchDatasetV2",
		Input: []tf.Input{input_dataset, batch_size, tf.OutputList(padded_shapes), tf.OutputList(padding_values), drop_remainder},
		Attrs: attrs,
	})
	return op.Output(0)
}
// Extract `patches` from `images` and put them in the "depth" output dimension.
//
// Arguments:
//	images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
//	ksizes: The size of the sliding window for each dimension of `images`.
//	strides: How far the centers of two consecutive patches are in
// the images. Must be: `[1, stride_rows, stride_cols, 1]`.
//	rates: Must be: `[1, rate_rows, rate_cols, 1]`. This is the
// input stride, specifying how far two consecutive patch samples are in the
// input. Equivalent to extracting patches with
// `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by
// subsampling them spatially by a factor of `rates`. This is equivalent to
// `rate` in dilated (a.k.a. Atrous) convolutions.
//	padding: The type of padding algorithm to use.
//
// Returns 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *
// ksize_cols * depth]` containing image patches with size
// `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note
// `out_rows` and `out_cols` are the dimensions of the output patches.
func ExtractImagePatches(scope *Scope, images tf.Output, ksizes []int64, strides []int64, rates []int64, padding string) (patches tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{
		"ksizes":  ksizes,
		"strides": strides,
		"rates":   rates,
		"padding": padding,
	}
	op := scope.AddOperation(tf.OpSpec{
		Type:  "ExtractImagePatches",
		Input: []tf.Input{images},
		Attrs: attrs,
	})
	return op.Output(0)
}
// Forwards the value of an available tensor from `inputs` to `output`.
//
// `Merge` waits for at least one of the tensors in `inputs` to become available.
// It is usually combined with `Switch` to implement branching.
//
// `Merge` forwards the first tensor to become available to `output`, and sets
// `value_index` to its index in `inputs`.
//
// Arguments:
//	inputs: The input tensors, exactly one of which will become available.
//
// Returns:
//	output: Will be set to the available input tensor.
//	value_index: The index of the chosen input tensor in `inputs`.
func Merge(scope *Scope, inputs []tf.Output) (output tf.Output, value_index tf.Output) {
	if scope.Err() != nil {
		return
	}
	// The op takes a single variadic input list; no attributes.
	op := scope.AddOperation(tf.OpSpec{
		Type:  "Merge",
		Input: []tf.Input{tf.OutputList(inputs)},
	})
	return op.Output(0), op.Output(1)
}
// Strip leading and trailing whitespaces from the Tensor.
//
// Examples: