[2/N][Easy] fix typo for usort config in pyproject.toml (kown -> known): sort caffe2 (#127123)

The `usort` config in `pyproject.toml` has no effect due to a typo (`kown` instead of `known`). Fixing the typo lets `usort` sort more imports, which generates the changes in this PR. Except for `pyproject.toml` itself, all changes are generated by `lintrunner -a --take UFMT --all-files`.
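For context, `usort` groups imports into blocks: standard library first, then third-party, then first-party, each block sorted internally. Declaring `caffe2` as first-party therefore moves `caffe2` imports into the first-party block, which is what the file diffs below show. A minimal illustrative sketch (the module layout here is assumed for illustration, not taken from any single file in this PR):

```python
# Illustrative sketch of usort's grouping once `caffe2` is listed under
# `first_party` in the correctly spelled `[tool.usort.known]` table.

# standard-library block
import sys

# third-party block
import numpy as np

# first-party block: `caffe2` now sorts here instead of with third-party
from caffe2.python import core, workspace

workspace.GlobalInit(["caffe2"])
```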

Pull Request resolved: https://github.com/pytorch/pytorch/pull/127123
Approved by: https://github.com/Skylion007
ghstack dependencies: #127122
Xuehai Pan 2024-05-24 19:57:37 +00:00 committed by PyTorch MergeBot
parent da141b096b
commit 0dae2ba5bd
21 changed files with 39 additions and 30 deletions

View File

@@ -1,8 +1,9 @@
import numpy as np
from caffe2.python import core, workspace
from utils import NUM_LOOP_ITERS
from caffe2.python import core, workspace
workspace.GlobalInit(["caffe2"])

View File

@@ -2,6 +2,7 @@ from collections import namedtuple
import benchmark_utils
from benchmark_test_generator import _register_test
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace

View File

@@ -1,8 +1,8 @@
import benchmark_caffe2 as op_bench_c2
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
import operator_benchmark as op_bench
from caffe2.python import core
"""Microbenchmarks for element-wise Add operator. Supports both Caffe2/PyTorch."""

View File

@@ -1,8 +1,8 @@
import benchmark_caffe2 as op_bench_c2
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
import operator_benchmark as op_bench
from caffe2.python import core
"""Microbenchmarks for BatchBoxCox operator."""

View File

@@ -1,9 +1,9 @@
import benchmark_caffe2 as op_bench_c2
import numpy
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
import operator_benchmark as op_bench
from caffe2.python import core
"""Microbenchmarks for element-wise BatchGather operator."""

View File

@@ -1,8 +1,8 @@
import benchmark_caffe2 as op_bench_c2
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core, dyndep
import operator_benchmark as op_bench
from caffe2.python import core, dyndep
dyndep.InitOpsLibrary("@/caffe2/caffe2/fb/operators:clip_ranges_op")

View File

@@ -2,9 +2,9 @@ import random
import benchmark_caffe2 as op_bench_c2
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
import operator_benchmark as op_bench
from caffe2.python import core
"""Microbenchmarks for Concat operator. Supports both Caffe2/PyTorch."""

View File

@@ -1,8 +1,8 @@
import benchmark_caffe2 as op_bench_c2
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
import operator_benchmark as op_bench
from caffe2.python import core
"""Microbenchmarks for MatMul operator"""

View File

@@ -1,8 +1,8 @@
import benchmark_caffe2 as op_bench_c2
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
import operator_benchmark as op_bench
from caffe2.python import core
"""Microbenchmarks for QuantileOp operator."""

View File

@@ -1,8 +1,8 @@
import benchmark_caffe2 as op_bench_c2
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
import operator_benchmark as op_bench
from caffe2.python import core
"""Microbenchmarks for element-wise ReplaceNaN operator."""

View File

@@ -1,4 +1,5 @@
import operator_benchmark as op_bench
from caffe2.python import core

View File

@@ -34,10 +34,11 @@ include_trailing_comma = true
[tool.usort.known]
first_party = ["caffe2"]
standard_library = ["typing_extensions"]
[tool.usort.kown]
first_party = ["caffe2", "torch", "torchgen", "functorch", "tests"]
first_party = ["torch", "torchgen", "functorch", "tests"]
[tool.ruff]

View File

@@ -11,12 +11,14 @@ import tempfile
from urllib.request import urlretrieve
import boto3
import caffe2.python.onnx.backend
import caffe2.python.onnx.frontend
import caffe2.python.workspace as c2_workspace
import numpy as np
import onnx
import onnx.backend
from onnx import numpy_helper
import caffe2.python.onnx.backend
import caffe2.python.onnx.frontend
import caffe2.python.workspace as c2_workspace
from caffe2.proto import caffe2_pb2
from caffe2.python.models.download import (
@@ -24,7 +26,6 @@ from caffe2.python.models.download import (
    downloadFromURLToFile,
    getURLFromName,
)
from onnx import numpy_helper
"""A script converting Caffe2 models to ONNX, and updating ONNX model zoos.

View File

@@ -1,13 +1,13 @@
import sys
import caffe2.python.onnx.backend as c2
import onnx
import pytorch_test_common
import torch
import torch.jit
from torch.autograd import Variable
import caffe2.python.onnx.backend as c2
torch.set_default_tensor_type("torch.FloatTensor")
try:
    import torch

View File

@@ -3,6 +3,7 @@ import io
import onnx
import torch.onnx
from caffe2.python.core import BlobReference, Net
from caffe2.python.onnx.backend import Caffe2Backend

View File

@@ -3,12 +3,12 @@
import glob
import os
import caffe2.python.onnx.backend as c2
import numpy as np
import onnx.backend.test
from onnx import numpy_helper
import caffe2.python.onnx.backend as c2
def load_tensor_as_numpy_array(f):
    tensor = onnx.TensorProto()

View File

@@ -1,6 +1,5 @@
# Owner(s): ["module: onnx"]
import caffe2.python.onnx.backend as c2
import numpy as np
import onnx
import pytorch_test_common
@@ -9,6 +8,8 @@ import torch.utils.cpp_extension
from test_pytorch_onnx_caffe2 import do_export
from torch.testing._internal import common_utils
import caffe2.python.onnx.backend as c2
class TestCaffe2CustomOps(pytorch_test_common.ExportTestCase):
    def test_custom_add(self):

View File

@@ -8,13 +8,14 @@ import pytorch_test_common
import torch.nn.init as init
import torch.onnx
from caffe2.python.core import workspace
from caffe2.python.model_helper import ModelHelper
from pytorch_helper import PyTorchModule
from torch import nn
from torch.testing._internal import common_utils
from torch.testing._internal.common_utils import skipIfNoLapack
from caffe2.python.core import workspace
from caffe2.python.model_helper import ModelHelper
class TestCaffe2Backend(pytorch_test_common.ExportTestCase):
    @skipIfNoLapack

View File

@@ -6,8 +6,6 @@ import sys
import unittest
from typing import Tuple
import caffe2.python.onnx.backend as c2
import model_defs.dcgan as dcgan
import model_defs.word_language_model as word_language_model
import numpy as np
@@ -17,10 +15,6 @@ import torch.onnx
import torch.onnx.operators
import torch.utils.model_zoo as model_zoo
import verify
from caffe2.python.operator_test.torch_integration_test import (
    create_bbox_transform_inputs,
    generate_rois_rotated,
)
from debug_embed_params import run_embed_params
from model_defs.lstm_flattening_result import LstmFlatteningResult
from model_defs.mnist import MNIST
@@ -53,6 +47,12 @@ from torchvision.models.inception import inception_v3
from torchvision.models.resnet import resnet50
from torchvision.models.vgg import vgg16, vgg16_bn, vgg19, vgg19_bn
import caffe2.python.onnx.backend as c2
from caffe2.python.operator_test.torch_integration_test import (
    create_bbox_transform_inputs,
    generate_rois_rotated,
)
skip = unittest.skip

View File

@@ -2,8 +2,6 @@
import io
import caffe2.python.onnx.backend as c2
import numpy as np
import onnx
import pytorch_test_common
@@ -12,6 +10,8 @@ import torch.nn as nn
import torch.onnx
from torch.testing._internal import common_utils
import caffe2.python.onnx.backend as c2
class TestQuantizedOps(pytorch_test_common.ExportTestCase):
    def generic_test(

View File

@@ -1,12 +1,13 @@
# Owner(s): ["module: onnx"]
import caffe2.python.onnx.backend as backend
import torch
from torch.autograd import Function
from torch.nn import Module, Parameter
from torch.testing._internal import common_utils
from verify import verify
import caffe2.python.onnx.backend as backend
class TestVerify(common_utils.TestCase):
    maxDiff = None