Document torch.quantile interpolation kwarg (#70637)

Summary:
Clone of https://github.com/pytorch/pytorch/pull/59397.

This PR documents the interpolation kwarg added in https://github.com/pytorch/pytorch/issues/49267. Now that the forward compatibility period is over, we can expose this parameter.
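As a quick illustration of the keyword being documented (a minimal sketch; the expected outputs are taken from the examples this commit adds to the `torch.quantile` docstring):

```python
import torch

# Four evenly spaced values; the 0.6 quantile index is 0.6 * (4 - 1) = 1.8,
# so it falls between a = 1. (index 1) and b = 2. (index 2).
a = torch.arange(4.)

torch.quantile(a, 0.6)                              # tensor(1.8000), default 'linear'
torch.quantile(a, 0.6, interpolation='lower')       # tensor(1.)
torch.quantile(a, 0.6, interpolation='higher')      # tensor(2.)
torch.quantile(a, 0.6, interpolation='midpoint')    # tensor(1.5000)
torch.quantile(a, 0.6, interpolation='nearest')     # tensor(2.)
torch.nanquantile(a, 0.6, interpolation='nearest')  # same as quantile here, since `a` has no NaNs
```

`torch.nanquantile` and the `Tensor.quantile` / `Tensor.nanquantile` methods accept the same keyword, as shown in the docstring changes below.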

Pull Request resolved: https://github.com/pytorch/pytorch/pull/70637

Reviewed By: jbschlosser

Differential Revision: D33411707

Pulled By: anjali411

fbshipit-source-id: f5f2d0a6739b3a855bbdf58fc671ac2f0342ce69
Author: Heitor Schueroff, 2022-01-05 10:58:23 -08:00 (committed by Facebook GitHub Bot)
parent 616afcf981
commit 34c49d3d3b
7 changed files with 54 additions and 142 deletions

View File

@@ -521,14 +521,10 @@ TORCH_LIBRARY_IMPL(aten, AutocastCPU, m) {
KERNEL_CPU(ADD_NS(prod), "prod", Tensor(const Tensor &, c10::optional<at::ScalarType>), fp32)
KERNEL_CPU(ADD_NS(prod), "prod.dim_int", Tensor(const Tensor &, int64_t, bool, c10::optional<at::ScalarType>), fp32)
KERNEL_CPU(ADD_NS(prod), "prod.dim_Dimname", Tensor(const Tensor &, at::Dimname, bool, c10::optional<at::ScalarType>), fp32)
KERNEL_CPU(ADD_NS(quantile), "quantile", Tensor(const Tensor &, const Tensor &, c10::optional<int64_t>, bool), fp32)
KERNEL_CPU(ADD_NS(quantile), "quantile.scalar", Tensor(const Tensor &, double, c10::optional<int64_t>, bool), fp32)
KERNEL_CPU(ADD_NS(quantile), "quantile.new", Tensor(const Tensor &, const Tensor &, c10::optional<int64_t>, bool, c10::string_view), fp32)
KERNEL_CPU(ADD_NS(quantile), "quantile.new_scalar", Tensor(const Tensor &, double, c10::optional<int64_t>, bool, c10::string_view), fp32)
KERNEL_CPU(ADD_NS(nanquantile), "nanquantile", Tensor(const Tensor &, const Tensor &, c10::optional<int64_t>, bool), fp32)
KERNEL_CPU(ADD_NS(nanquantile), "nanquantile.scalar", Tensor(const Tensor &, double, c10::optional<int64_t>, bool), fp32)
KERNEL_CPU(ADD_NS(nanquantile), "nanquantile.new", Tensor(const Tensor &, const Tensor &, c10::optional<int64_t>, bool, c10::string_view), fp32)
KERNEL_CPU(ADD_NS(nanquantile), "nanquantile.new_scalar", Tensor(const Tensor &, double, c10::optional<int64_t>, bool, c10::string_view), fp32)
KERNEL_CPU(ADD_NS(quantile), "quantile", Tensor(const Tensor &, const Tensor &, c10::optional<int64_t>, bool, c10::string_view), fp32)
KERNEL_CPU(ADD_NS(quantile), "quantile.scalar", Tensor(const Tensor &, double, c10::optional<int64_t>, bool, c10::string_view), fp32)
KERNEL_CPU(ADD_NS(nanquantile), "nanquantile", Tensor(const Tensor &, const Tensor &, c10::optional<int64_t>, bool, c10::string_view), fp32)
KERNEL_CPU(ADD_NS(nanquantile), "nanquantile.scalar", Tensor(const Tensor &, double, c10::optional<int64_t>, bool, c10::string_view), fp32)
KERNEL_CPU(ADD_NS(stft), "stft", Tensor(const Tensor &, int64_t, c10::optional<int64_t>, c10::optional<int64_t>, const c10::optional<Tensor> &, bool, c10::optional<bool>, c10::optional<bool>), fp32)
KERNEL_CPU(ADD_NS(cdist), "cdist", Tensor(const Tensor &, const Tensor &, double, c10::optional<int64_t>), fp32)
KERNEL_CPU(ADD_NS(cross), "cross", Tensor(const Tensor &, const Tensor &, c10::optional<int64_t>), fp32)

View File

@@ -515,7 +515,6 @@ Tensor median_impl(const Tensor& self, bool ignore_nan) {
} // namespace
Tensor& quantile_out(
const Tensor& self,
const Tensor& q,
@@ -527,8 +526,7 @@ Tensor& quantile_out(
out,
self,
q,
// NOLINTNEXTLINE(performance-move-const-arg)
std::move(dim),
dim,
keepdim,
get_quantile_interpolation_mode(interpolation),
/*ignore_nan=*/false);
@@ -547,8 +545,7 @@ Tensor& quantile_out(
return at::native::quantile_out(
self,
at::scalar_tensor(q, self.options()),
// NOLINTNEXTLINE(performance-move-const-arg)
std::move(dim),
dim,
keepdim,
interpolation,
out);
@@ -565,8 +562,7 @@ Tensor quantile(
out,
self,
q,
// NOLINTNEXTLINE(performance-move-const-arg)
std::move(dim),
dim,
keepdim,
get_quantile_interpolation_mode(interpolation),
/*ignore_nan=*/false);
@@ -582,8 +578,7 @@ Tensor quantile(
TORCH_CHECK(
q >= 0 && q <= 1, "quantile() q must be in the range [0, 1] but got ", q);
return at::native::quantile(
// NOLINTNEXTLINE(performance-move-const-arg)
self, at::scalar_tensor(q, self.options()), std::move(dim), keepdim, interpolation);
self, at::scalar_tensor(q, self.options()), dim, keepdim, interpolation);
}
Tensor& nanquantile_out(
@@ -597,8 +592,7 @@ Tensor& nanquantile_out(
out,
self,
q,
// NOLINTNEXTLINE(performance-move-const-arg)
std::move(dim),
dim,
keepdim,
get_quantile_interpolation_mode(interpolation),
/*ignore_nan=*/true);
@@ -617,8 +611,7 @@ Tensor& nanquantile_out(
return at::native::nanquantile_out(
self,
at::scalar_tensor(q, self.options()),
// NOLINTNEXTLINE(performance-move-const-arg)
std::move(dim),
dim,
keepdim,
interpolation,
out);
@@ -635,8 +628,7 @@ Tensor nanquantile(
out,
self,
q,
// NOLINTNEXTLINE(performance-move-const-arg)
std::move(dim),
dim,
keepdim,
get_quantile_interpolation_mode(interpolation),
/*ignore_nan=*/true);
@@ -652,84 +644,7 @@ Tensor nanquantile(
TORCH_CHECK(
q >= 0 && q <= 1, "quantile() q must be in the range [0, 1] but got ", q);
return at::native::nanquantile(
// NOLINTNEXTLINE(performance-move-const-arg)
self, at::scalar_tensor(q, self.options()), std::move(dim), keepdim, interpolation);
}
Tensor& quantile_out(
const Tensor& self,
const Tensor& q,
optional<int64_t> dim,
bool keepdim,
Tensor& out) {
// NOLINTNEXTLINE(performance-move-const-arg)
return at::native::quantile_out(self, q, std::move(dim), keepdim, "linear", out);
}
Tensor& quantile_out(
const Tensor& self,
double q,
optional<int64_t> dim,
bool keepdim,
Tensor& out) {
// NOLINTNEXTLINE(performance-move-const-arg)
return at::native::quantile_out(self, q, std::move(dim), keepdim, "linear", out);
}
Tensor quantile(
const Tensor& self,
const Tensor& q,
optional<int64_t> dim,
bool keepdim) {
// NOLINTNEXTLINE(performance-move-const-arg)
return at::native::quantile(self, q, std::move(dim), keepdim, "linear");
}
Tensor quantile(
const Tensor& self,
double q,
optional<int64_t> dim,
bool keepdim) {
// NOLINTNEXTLINE(performance-move-const-arg)
return at::native::quantile(self, q, std::move(dim), keepdim, "linear");
}
Tensor& nanquantile_out(
const Tensor& self,
const Tensor& q,
optional<int64_t> dim,
bool keepdim,
Tensor& out) {
// NOLINTNEXTLINE(performance-move-const-arg)
return at::native::nanquantile_out(self, q, std::move(dim), keepdim, "linear", out);
}
Tensor& nanquantile_out(
const Tensor& self,
double q,
optional<int64_t> dim,
bool keepdim,
Tensor& out) {
// NOLINTNEXTLINE(performance-move-const-arg)
return at::native::nanquantile_out(self, q, std::move(dim), keepdim, "linear", out);
}
Tensor nanquantile(
const Tensor& self,
const Tensor& q,
optional<int64_t> dim,
bool keepdim) {
// NOLINTNEXTLINE(performance-move-const-arg)
return at::native::nanquantile(self, q, std::move(dim), keepdim, "linear");
}
Tensor nanquantile(
const Tensor& self,
double q,
optional<int64_t> dim,
bool keepdim) {
// NOLINTNEXTLINE(performance-move-const-arg)
return at::native::nanquantile(self, q, std::move(dim), keepdim, "linear");
self, at::scalar_tensor(q, self.options()), dim, keepdim, interpolation);
}
std::tuple<Tensor&, Tensor&> kthvalue_out_cpu(

View File

@@ -7564,48 +7564,25 @@
device_check: NoCheck # TensorIterator
variants: method, function
# The following quantile signatures are DEPRECATED in favor of the new ones with the interpolation kwarg.
- func: quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- func: quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False) -> Tensor
- func: quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
variants: method, function
- func: quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- func: quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
- func: quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False) -> Tensor
- func: quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
variants: method, function
- func: nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- func: quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
- func: nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False) -> Tensor
- func: nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
variants: method, function
- func: nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
- func: nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
- func: nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False) -> Tensor
- func: nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor
variants: method, function
# To keep backward and forward compatibility, and to avoid ambiguity with the original signatures, dim, keepdim and interpolation
# parameters are required for now. Once the deprecated signatures are removed they will be made optional.
- func: quantile.new_scalar_out(Tensor self, float q, int? dim, bool keepdim, *, str interpolation, Tensor(a!) out) -> Tensor(a!)
- func: quantile.new_scalar(Tensor self, float q, int? dim, bool keepdim, *, str interpolation) -> Tensor
variants: method, function
- func: quantile.new_out(Tensor self, Tensor q, int? dim, bool keepdim, *, str interpolation, Tensor(a!) out) -> Tensor(a!)
- func: quantile.new(Tensor self, Tensor q, int? dim, bool keepdim, *, str interpolation) -> Tensor
variants: method, function
- func: nanquantile.new_scalar_out(Tensor self, float q, int? dim, bool keepdim, *, str interpolation, Tensor(a!) out) -> Tensor(a!)
- func: nanquantile.new_scalar(Tensor self, float q, int? dim, bool keepdim, *, str interpolation) -> Tensor
variants: method, function
- func: nanquantile.new_out(Tensor self, Tensor q, int? dim, bool keepdim, *, str interpolation, Tensor(a!) out) -> Tensor(a!)
- func: nanquantile.new(Tensor self, Tensor q, int? dim, bool keepdim, *, str interpolation) -> Tensor
variants: method, function
- func: nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
- func: sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
device_check: NoCheck # TensorIterator

View File

@@ -109,6 +109,8 @@ ALLOW_LIST = [
("aten::_inverse_helper", datetime.date(2021, 12, 31)),
("aten::softplus_backward", datetime.date(2022, 1, 31)),
("aten::softplus_backward.grad_input", datetime.date(2022, 1, 31)),
("aten::quantile", datetime.date(2022, 9, 30)),
("aten::nanquantile", datetime.date(2022, 9, 30)),
]
ALLOW_LIST_COMPILED = [

View File

@@ -2879,13 +2879,13 @@ Returns the quantization scheme of a given QTensor.
""")
add_docstr_all('quantile', r"""
quantile(q, dim=None, keepdim=False) -> Tensor
quantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor
See :func:`torch.quantile`
""")
add_docstr_all('nanquantile', r"""
nanquantile(q, dim=None, keepdim=False) -> Tensor
nanquantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor
See :func:`torch.nanquantile`
""")

View File

@@ -6306,16 +6306,20 @@ Example::
""".format(**single_dim_common))
add_docstr(torch.quantile, r"""
quantile(input, q, dim=None, keepdim=False, *, out=None) -> Tensor
quantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor
Computes the q-th quantiles of each row of the :attr:`input` tensor
along the dimension :attr:`dim`.
Computes the q-th quantiles of each row of the :attr:`input` tensor along the dimension :attr:`dim`.
To compute the quantile, we map q in [0, 1] to the range of indices [0, n] to find the location
of the quantile in the sorted input. If the quantile lies between two data points ``a < b`` with
indices ``i`` and ``j`` in the sorted order, result is computed using linear interpolation as follows:
indices ``i`` and ``j`` in the sorted order, result is computed according to the given
:attr:`interpolation` method as follows:
``a + (b - a) * fraction``, where ``fraction`` is the fractional part of the computed quantile index.
- ``linear``: ``a + (b - a) * fraction``, where ``fraction`` is the fractional part of the computed quantile index.
- ``lower``: ``a``.
- ``higher``: ``b``.
- ``nearest``: ``a`` or ``b``, whichever's index is closer to the computed quantile index (rounding down for .5 fractions).
- ``midpoint``: ``(a + b) / 2``.
If :attr:`q` is a 1D tensor, the first dimension of the output represents the quantiles and has size
equal to the size of :attr:`q`, the remaining dimensions are what remains from the reduction.
@@ -6330,6 +6334,9 @@ Args:
{keepdim}
Keyword arguments:
interpolation (string): interpolation method to use when the desired quantile lies between two data points.
Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
Default is ``linear``.
{out}
Example::
@@ -6353,10 +6360,22 @@ Example::
>>> a = torch.arange(4.)
>>> a
tensor([0., 1., 2., 3.])
>>> torch.quantile(a, 0.6, interpolation='linear')
tensor(1.8000)
>>> torch.quantile(a, 0.6, interpolation='lower')
tensor(1.)
>>> torch.quantile(a, 0.6, interpolation='higher')
tensor(2.)
>>> torch.quantile(a, 0.6, interpolation='midpoint')
tensor(1.5000)
>>> torch.quantile(a, 0.6, interpolation='nearest')
tensor(2.)
>>> torch.quantile(a, 0.4, interpolation='nearest')
tensor(1.)
""".format(**single_dim_common))
add_docstr(torch.nanquantile, r"""
nanquantile(input, q, dim=None, keepdim=False, *, out=None) -> Tensor
nanquantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor
This is a variant of :func:`torch.quantile` that "ignores" ``NaN`` values,
computing the quantiles :attr:`q` as if ``NaN`` values in :attr:`input` did
@@ -6370,6 +6389,9 @@ Args:
{keepdim}
Keyword arguments:
interpolation (string): interpolation method to use when the desired quantile lies between two data points.
Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
Default is ``linear``.
{out}
Example::

View File

@@ -843,8 +843,8 @@ def get_testing_overrides() -> Dict[Callable, Callable]:
torch.q_zero_point: lambda input: -1,
torch.qr: lambda input, some=True, out=None: -1,
torch.linalg.qr: lambda input, mode='reduced', out=None: -1,
torch.quantile: lambda input, q, dim=None, keepdim=False, out=None: -1,
torch.nanquantile: lambda input, q, dim=None, keepdim=False, out=None: -1,
torch.quantile: lambda input, q, dim=None, keepdim=False, interpolation='linear', out=None: -1,
torch.nanquantile: lambda input, q, dim=None, keepdim=False, interpolation='linear', out=None: -1,
torch.quantize_per_channel: lambda input, scales, zero_points, axis, dtype: -1,
torch.quantize_per_tensor: lambda input, scale, zero_point, dtype: -1,
torch.quantize_per_tensor_dynamic: lambda input, dtype, reduce_range: -1,