Revert "[BE][7/16] fix typos in torch/ (torch/csrc/) (#156317)"

This reverts commit ee72815f11.

Reverted https://github.com/pytorch/pytorch/pull/156317 on behalf of https://github.com/atalman due to export/test_torchbind.py::TestCompileTorchbind::test_compile_error_on_input_aliasing_contents_backend_aot_eager [GH job link](https://github.com/pytorch/pytorch/actions/runs/15804799771/job/44548489912) [HUD commit link](c95f7fa874) ([comment](https://github.com/pytorch/pytorch/pull/156313#issuecomment-2994171213))
PyTorch MergeBot 2025-06-22 12:31:56 +00:00
parent 1d3bca40ed
commit 035a68d25a
34 changed files with 56 additions and 61 deletions

View File

@@ -1177,6 +1177,7 @@ exclude_patterns = [
'torch/distributed/tensor/**',
'torch/[j-o]*/**',
'torch/utils/**',
+'torch/csrc/**',
'torch/csrc/jit/**',
'torch/csrc/jit/[a-o]*/**',
'torch/csrc/[a-i]*/**',

View File

@@ -12,12 +12,8 @@ fro
froms
hsa
nd
-nin
-nout
-NowNs
optins
OT
-overrideable
ptd
rebuild
rebuilt

View File

@@ -329,7 +329,7 @@ struct PyWarningHandler {
/** Call if an exception has been thrown
- * Necessary to determine if it is safe to throw from the destructor since
+ * Necessary to determine if it is safe to throw from the desctructor since
* std::uncaught_exception is buggy on some platforms and generally
* unreliable across dynamic library calls.
*/

View File

@@ -101,7 +101,7 @@ PyObject* THPStorage_Wrap(c10::Storage storage) {
// If the StorageImpl has a PyObject that is managed by a different
// interpreter than the current one, create a new StorageImpl that points to
// the same data and then create the Python storage from that.
-// NOTE: This is only supposed to happen in MultiPy // codespell:ignore
+// NOTE: This is only supposed to happen in MultiPy
if (pyobj_slot->has_pyobj_nonhermetic() &&
!pyobj_slot->check_interpreter(getPyInterpreter())) {
return THPStorage_NewWithStorage(

View File

@@ -20,7 +20,7 @@ using size_t = std::size_t;
class TORCH_API hash_t : public c10::uint128 {
public:
-// Switch from typedef hash_t = uint128 to provide explicit casters
+// Swich from typedef hash_t = uint128 to provide explicit casters
hash_t(int8_t val) : uint128(static_cast<uint32_t>(val)) {}
hash_t(int16_t val) : uint128(static_cast<uint32_t>(val)) {}
hash_t(int32_t val) : uint128(static_cast<uint32_t>(val)) {}
@@ -69,7 +69,7 @@ hash_t Hash(const T& value) {
// breaks falling through to the templated arithmetic types above
hash_t TORCH_API Hash(const std::vector<bool>& value);
-// Specialized implementations for proprietary types
+// Specialiazed implementations for proprietary types
static inline hash_t Hash(const c10::ScalarType& value) {
return DataHash(&value, sizeof(value));
}

View File

@@ -1042,7 +1042,7 @@ std::vector<BackendDataPtr> LazyGraphExecutor::GatherTensorsData(
void LazyGraphExecutor::TensorCollectionBarrier(SyncTensorCollection* coll) {
if (coll) {
static const std::string invalid_device(
-"Unknown0"); /* Temp solution to identify unassigned devices */
+"Unknown0"); /* Temp solution to idetify unassigned devices */
if (coll->device.toString() == invalid_device || !coll->unlocker.empty()) {
return;
}

View File

@@ -232,7 +232,7 @@ TORCH_API std::string CreateMetricReport(
const std::vector<std::string>& metric_names);
// Returns the currently registered metric names. Note that the list can grow
-// since metrics are usually function initialized (they are static function
+// since metrics are usually function intialized (they are static function
// variables).
TORCH_API std::vector<std::string> GetMetricNames();
@@ -241,7 +241,7 @@ TORCH_API std::vector<std::string> GetMetricNames();
TORCH_API MetricData* GetMetric(const std::string& name);
// Returns the currently registered counter names. Note that the list can grow
-// since counters are usually function initialized (they are static function
+// since counters are usually function intialized (they are static function
// variables).
TORCH_API std::vector<std::string> GetCounterNames();

View File

@@ -60,9 +60,9 @@ class TORCH_API Shape {
// Sizes are the upper bound sizes for a tensor, used by XLA.
std::vector<int64_t> sizes_;
-// Stores which dimensions are symbolic
+// Stores which dimmensions are symbolic
// If nullopt, either it hasn't been initialized or the symbolic
-// dimensions are not calculable
+// dimmensions are not calculatable
std::optional<std::vector<bool>> is_symbolic_ = std::nullopt;
};

View File

@@ -73,7 +73,7 @@
namespace torch::lazy {
-// Copied from ATen/native/utils/ParamUtils.h, which apparently I can't include
+// Copied from ATen/native/utils/ParamUtils.h, which aparently I can't include
// from here?
static std::vector<int64_t> expand_param_if_needed(
at::IntArrayRef list_param,
@@ -281,7 +281,7 @@ std::vector<Shape> compute_shape_convolution(
TORCH_CHECK(dim > 0, "weight should have at least three dimensions");
// at::convolution performs parameter expansion before running kernels on
-// expanded parameters we must do the same. Shape formulae access different
+// expanded parameters we must do the same. Shape formulae access differnent
// dimensions of e.g. output_padding, but output_padding may be passed in as a
// scalar. Sadly, accessing output_padding[1] in this case gives incorrect
// results rather than indexing error

View File

@@ -252,7 +252,7 @@ at::Tensor LazyTensor::ToTensor(bool detached) {
tensor = *tensor_data;
if (detached) {
if (data()->ir_value || data()->handle != nullptr) {
-// If we have other authoritative sources, just drop our reference and
+// If we have other authoritive sources, just drop our reference and
// transfer it to the caller.
data()->tensor_data = std::nullopt;
} else {

View File

@@ -231,7 +231,7 @@ TORCH_API at::Tensor CreateAtenFromLtcTensor(LazyTensor&& ltc_tensor);
// lazy tensors, then you should think of that function as an "entrypoint" to
// functionalization, and use functionalize_output=true Examples include:
// - factory functions (the LTC kernel for at::empty)
-// - CPU -> Lazy device conversions (the LTC kernel for at::to_device)
+// - CPU -> Lazy device converions (the LTC kernel for at::to_device)
//
// Case 2: lazy -> lazy
// If you're implementing a function that takes in lazy tensors and returns

View File

@@ -44,7 +44,7 @@ static std::ptrdiff_t GetTensorId(const at::Tensor& tensor) {
static std::string GetTensorsDump(
const std::vector<at::Tensor>& tensors,
const std::function<std::string(c10::ArrayRef<const torch::lazy::Node*>)>&
-converter) {
+coverter) {
std::vector<const torch::lazy::Node*> nodes;
std::vector<torch::lazy::Value> values;
for (auto& tensor : tensors) {
@@ -54,7 +54,7 @@ static std::string GetTensorsDump(
values.push_back(lazy_tensor->GetIrValue());
nodes.push_back(values.back().node.get());
}
-return converter(nodes);
+return coverter(nodes);
}
static std::vector<torch::lazy::LazyTensorPtr> GetLtcTensors(
@@ -146,18 +146,18 @@ void initLazyBindings(PyObject* module) {
lazy.def(
"_get_tensors_text",
[](const std::vector<at::Tensor>& tensors) -> std::string {
-auto converter = [](c10::ArrayRef<const torch::lazy::Node*> nodes) {
+auto coverter = [](c10::ArrayRef<const torch::lazy::Node*> nodes) {
return torch::lazy::DumpUtil::ToText(nodes);
};
-return GetTensorsDump(tensors, converter);
+return GetTensorsDump(tensors, coverter);
});
lazy.def(
"_get_tensors_dot",
[](const std::vector<at::Tensor>& tensors) -> std::string {
-auto converter = [](c10::ArrayRef<const torch::lazy::Node*> nodes) {
+auto coverter = [](c10::ArrayRef<const torch::lazy::Node*> nodes) {
return torch::lazy::DumpUtil::ToDot(nodes);
};
-return GetTensorsDump(tensors, converter);
+return GetTensorsDump(tensors, coverter);
});
lazy.def(
"_get_tensors_backend",
@@ -325,11 +325,10 @@ void initLazyBindings(PyObject* module) {
#endif // !(defined(FBCODE_CAFFE2) || defined(OVRSOURCE))
});
-// GetPythonFramesFunction() has not ever worked with
-// torchdeploy/multipy possibly because // codespell:ignore multipy
-// GetPythonFrames resolves to external cpython rather than embedded cpython.
-// So far this problem has only been observed internally, so we will just
-// block it off there.
+// GetPythonFramesFunction() has not ever worked with torchdeploy/multipy
+// possibly becuase GetPythonFrames resolves to external cpython rather
+// than embedded cpython. So far this problem has only been observed
+// internally, so we will just block it off there.
#if !(defined(USE_DEPLOY))

View File

@@ -30,7 +30,7 @@ NodePtr DeviceData::Create(const std::shared_ptr<BackendData>& data) {
// ReuseOrMakeNode may return a reused node which has the same shape,
// however, we need to replace the old data_ with the new one.
// Ditching the old data_ is safe because tracing is done iteration
-// by iteration, and after we launch the async device execution for the
+// by iteration, and after we lauch the async device execution for the
// previous iteration, data_ in DeviceData nodes are not needed anymore.
DeviceData* device_data = static_cast<DeviceData*>(node.get());
device_data->SetData(data);

View File

@@ -5,8 +5,8 @@
namespace torch::lazy {
// This IR was copied from code-generated output, but the entire _to_copy
-// operator cannot be trivially code generated since it is only desirable to
-// capture IR for certain permutations of _to_copy (e.g. dtype), and for the
+// operator cannot be trivially code genereated since it is only desirable to
+// capture IR for certain permutaions of _to_copy (e.g. dtype), and for the
// others it is difficult to even invoke the aten/eager fallback necessitating
// directly implementing the right to(device) behavior
class ToCopy : public torch::lazy::TsNode {

View File

@@ -271,7 +271,7 @@ void ts_eager_fallback(
// the temporary eager output tensor that we created.
//
// Note [Eager Fallback Does Not Handle View Operators]
-// Also note that we are incapable of handling immutable aliases properly.
+// Also note that we are incapable of handling immutable alises properly.
// Why?
// Schemas with an immutable alias'd tensor outputs correspond to view
// operators. For example, the `view_as` schema from native_functions.yaml:
@@ -340,7 +340,7 @@ void ts_eager_fallback(
// We should never hit this for a view op,
// because LazyTensor should provide a lowering for the
// corresponding view_copy operator. The functionalization pass will
-// take care of calling the view_copy operator instead of the view.
+// take care of calling the view_copy operator intead of the view.
TORCH_CHECK(
false,
"The operator ",

View File

@@ -398,7 +398,7 @@ at::Tensor LazyNativeFunctions::lift_fresh(const at::Tensor& tensor) {
// All of the below ops correspond to CompositeExplicitAutograd kernels from
// core that call into view operators internally. These are all composite ops
-// that LTC can technically reuse / get for free, but we need to
+// that LTC can technically re-use / get for free, but we need to
// "functionalize" them to remove the view ops before we can use them.
at::Tensor LazyNativeFunctions::block_diag(at::TensorList tensors) {
return at::functionalization::functionalize_aten_op<ATEN_OP(
@@ -529,7 +529,7 @@ at::Tensor LazyNativeFunctions::slice_backward_symint(
std::move(step));
}
-// reuse the composite kernel from core, that way we don't need to provide a
+// re-use the composite kernel from core, that way we don't need to provide a
// backwards formula for native_group_norm
std::tuple<Tensor, Tensor, Tensor> LazyNativeFunctions::native_group_norm(
const at::Tensor& input,

View File

@@ -78,7 +78,7 @@ const OpKind tensor_list_opkind = OpKind::Get("lazy_tensors::tensor_list");
// Note: shape is undefined for TensorList. We assert in some places that
// #shapes matches #outputs and this stems from
// the fact that currently all IR nodes represent tensors (there is no
-// type system for this IR). Because of this, TensorList is a bit of a
+// type system for this IR). Becuase of this, TensorList is a bit of a
// hack.
//
// TODO(whc) once Shape() API is moved to Node base, also make it virtual, and

View File

@@ -218,7 +218,7 @@ If we don't stop the trace after `optimizer_step` it will include two or more it
Another important point is that after `mark_step()` we actually continue tracing the next iteration! And... start executing the previous one at the same time! Really, nothing stops us from tracing the next iteration ...and then the one after next until we hit `if batch_idx % log_interval == 0:` where
we actually need to wait for execution to catch up, so we can print out `loss`. Remember to avoid accessing intermediate results too often if you would like to extract the maximum benefit out of Lazy Tensor.
-Since every iteration looks exactly like the one before it, the TS backend will be reusing the same TS compilation.
+Since every iteration looks exactly like the one before it, the TS backend will be re-using the same TS compilation.
Alright, let's run it now!

View File

@@ -443,7 +443,7 @@ void initModule(PyObject* module) {
}
TORCH_CHECK(
threads.has_value() && threads->size() < 4,
-"Number of threads is undefined or has wrong dimension");
+"Number of threads is undefined or has wrong dimention");
TORCH_CHECK(
!group_size.has_value() ||
threads->size() == group_size->size());

View File

@@ -58,7 +58,7 @@ struct RawTensors {
void calculateUniqueTensorIDs(
std::vector<std::shared_ptr<Result>>& sorted_results) {
-// This task is equivalent to https://leetcode.com/problems/number-of-islands/
+// This task is equivilent to https://leetcode.com/problems/number-of-islands/
// We first cluster events with a greedy index assignment, and then merge
// groups that overlap.
std::vector<RawTensorInfo> tensors;

View File

@@ -35,7 +35,7 @@ using AllocationID = strong::type<
strong::regular,
strong::hashable>;
-// We use a Tensor's TensorImpl address and StorageImpl data start to build the
+// We use a Tensor's TensorImpl adress and StorageImpl data start to build the
// data flow graph. We do not hold an owning reference so we wrap them in strong
// types to prevent direct access.
using TensorImplAddress = strong::type<

View File

@@ -13,7 +13,7 @@ using perf_counters_t = std::vector<uint64_t>;
/* Standard list of performance events independent of hardware or backend */
constexpr std::array<const char*, 2> ProfilerPerfEvents = {
/*
-* Number of Processing Element (PE) cycles between two points of interest
+* Number of Processing Elelement (PE) cycles between two points of interest
* in time. This should correlate positively with wall-time. Measured in
* uint64_t. PE can be non cpu. TBD reporting behavior for multiple PEs
* participating (i.e. threadpool).

View File

@@ -206,7 +206,7 @@ struct TORCH_API ExecutionTraceObserver { // NOLINT
// All tensors and operators have an unique id assigned. Increment id for each
// new tensor or operator node.
-// 0 -> uninitialized
+// 0 -> unintialized
// 1 -> root ID
// 2 ... -> regular node ID
std::atomic<ID> id_{2};

View File

@@ -35,7 +35,7 @@ struct Section {
/// Memory maps a file into the address space read-only, and manages the
/// lifetime of the mapping. Here are a few use cases:
/// 1. Used in the loader to read in initial image, and to inspect
-// ELF files for dependencies before calling dlopen.
+// ELF files for dependencies before callling dlopen.
///
/// 2. Used in unity to load the elf file.
struct MemFile {

View File

@@ -9,7 +9,7 @@ namespace torch::unwind {
template <typename T>
struct RangeTable {
RangeTable() {
-// guarantee that lower_bound[-1] is always valid
+// guarentee that lower_bound[-1] is always valid
addresses_.push_back(0);
payloads_.emplace_back(std::nullopt);
}

View File

@@ -254,7 +254,7 @@ namespace torch::gdb {
// Return an human-readable representation of the given Tensor. The resulting
// string is stored into a malloc()ed buffer. The caller is responsible to
// free() it. We use malloc() instead of new[] because it's much easier to
-// call free than delete[] from within gdb.
+// call free than delete[] from withing gdb.
// Currently the code for computing the repr of a tensor is written in Python,
// so we need to wrap the Tensor into a Python object first.
char* tensor_repr(const at::Tensor& tensor) {

View File

@@ -339,7 +339,7 @@ struct type_caster<c10::complex<T>> {
bool load(handle src, bool) {
PyObject* obj = src.ptr();
-// Referred from `THPUtils_unpackComplexDouble`
+// Refered from `THPUtils_unpackComplexDouble`
Py_complex py_complex = PyComplex_AsCComplex(obj);
if (py_complex.real == -1.0 && PyErr_Occurred()) {
return false;

View File

@@ -1248,7 +1248,7 @@ auto handle_torch_function_indexing(
/*
* Check if the input obj is Tensor type, including its subclass, or overloaded
* type. If the type defines __torch_function__, it also returns true.
-* Otherwise returns false. If the class is not torch.Tensor, and it defines
+* Otherwise returns flase. If the class is not torch.Tensor, and it defines
* __torch_function__, we append obj to overloaded_args.
*
* 'obj': the input argument to be checked

View File

@@ -186,12 +186,11 @@ class PythonKernelHolder : public c10::OperatorKernel {
auto arguments = torch::jit::pop(*stack, op.schema().arguments().size());
py::gil_scoped_acquire g;
-// Jan 2024: We're slated to get rid of multipy, // codespell:ignore multipy
-// so stop forcing hermetic mode unconditionally in all situations when
-// you're using multipy. // codespell:ignore multipy
-// Eventually just delete this entirely. (Note that you may break
-// multipy anyway this way with dispatcher // codespell:ignore multipy
-// registered functions that require hermetic to be off.)
+// Jan 2024: We're slated to get rid of multipy, so stop forcing hermetic
+// mode unconditionally in all situations when you're using multipy.
+// Eventually just delete this entirely. (Note that you may break multipy
+// anyway this way with dispatcher registered functions that require
+// hermetic to be off.)
#if defined(USE_DEPLOY)
EnableHermeticPyObject g2;
#endif
@@ -300,8 +299,8 @@ void initDispatchBindings(PyObject* module) {
return;
},
"")
-// Some of these APIs are only for testing and do not work in
-// multipy environment // codespell:ignore multipy
+// Some of these APIs are only for testing and do not work in multipy
+// environment
.def(
"def_",
[](py::object self, const char* schema, const char* alias) {

View File

@@ -182,7 +182,7 @@ inline bool THPUtils_unpackNumberAsBool(PyObject* obj) {
if (value == -1 && PyErr_Occurred()) {
throw python_error();
}
-// No need to check overflow, because when overflow occurred, it should
+// No need to check overflow, because when overflow occured, it should
// return true in order to keep the same behavior of numpy.
return (bool)value;
}

View File

@@ -5,7 +5,7 @@
* https://github.com/python/cpython/blob/2.7/Objects/structseq.c
*
* The purpose of this file is to overwrite the default behavior
-* of repr of structseq to provide better printing for returned
+* of repr of structseq to provide better printting for returned
* structseq objects from operators, aka torch.return_types.*
*
* For more information on copyright of CPython, see:

View File

@@ -1786,7 +1786,7 @@ Tensor asarray(
tensor = tensor.clone();
}
} else {
-// If we are not copying, we have to check whether we have the tensor
+// If we are not copying, we have to check whther we have the tensor
// in the right device, with the right dtype.
TORCH_CHECK_VALUE(
!wrong_device,

View File

@@ -18,7 +18,7 @@ namespace torch::throughput_benchmark {
/**
* The struct is used to provide results of a benchmark to the caller
-* In the future all additional statistics should be added here.
+* In the future all additional statics should be added here.
*/
struct BenchmarkExecutionStats {
float latency_avg_ms{-1};