Fix op benchmarks error in OSS environment (#19518)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/19518

The previous design required running the op benchmarks from the PyTorch root directory, which could lead to a `module not found` error in the OSS environment. This diff fixes that issue by making the benchmarks launchable from the `benchmarks` folder.

Reviewed By: ilia-cher

Differential Revision: D15020787

fbshipit-source-id: eb09814a33432a66cc857702bc86538cd17bea3b
This commit is contained in:
Mingzhe Li 2019-04-19 16:22:13 -07:00 committed by Facebook Github Bot
parent 5da7b74d48
commit 26f12af537
9 changed files with 20 additions and 13 deletions

View File

@ -0,0 +1,7 @@
# PyTorch/Caffe2 Operator Micro-benchmarks
## Run benchmarks
Go to `pytorch-src-folder/benchmarks` and run
`python -m operator_benchmark.ops.matmul_test`
This should report the execution time of the matmul operator with both PyTorch and Caffe2.

View File

@ -4,7 +4,7 @@ from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from benchmarks.operator_benchmark import benchmark_core, benchmark_utils
from operator_benchmark import benchmark_core, benchmark_utils
"""Caffe2 performance microbenchmarks.

View File

@ -8,7 +8,7 @@ import numpy as np
import timeit
import json
from benchmarks.operator_benchmark import benchmark_utils
from operator_benchmark import benchmark_utils
"""Performance microbenchmarks.

View File

@ -3,7 +3,7 @@ from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from benchmarks.operator_benchmark import benchmark_core, benchmark_utils
from operator_benchmark import benchmark_core, benchmark_utils
import torch

View File

@ -8,7 +8,7 @@ import argparse
from caffe2.python import workspace
from benchmarks.operator_benchmark import benchmark_core
from operator_benchmark import benchmark_core
"""Performance microbenchmarks's main binary.

View File

@ -4,9 +4,9 @@ from __future__ import print_function
from __future__ import unicode_literals
from benchmarks.operator_benchmark.benchmark_caffe2 import Caffe2OperatorTestCase
from benchmarks.operator_benchmark.benchmark_pytorch import PyTorchOperatorTestCase
from benchmarks.operator_benchmark.benchmark_utils import * # noqa
from operator_benchmark.benchmark_caffe2 import Caffe2OperatorTestCase
from operator_benchmark.benchmark_pytorch import PyTorchOperatorTestCase
from operator_benchmark.benchmark_utils import * # noqa
def generate_test(configs, map_config, ops, OperatorTestCase):

View File

@ -3,8 +3,8 @@ from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from benchmarks.operator_benchmark import benchmark_core, benchmark_runner
from benchmarks.operator_benchmark.benchmark_test_generator import *
from operator_benchmark import benchmark_core, benchmark_runner
from operator_benchmark.benchmark_test_generator import *
import torch

View File

@ -3,8 +3,8 @@ from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from benchmarks.operator_benchmark import benchmark_runner
from benchmarks.operator_benchmark.ops import ( # noqa
from operator_benchmark import benchmark_runner
from operator_benchmark.ops import ( # noqa
add_test, # noqa
matmul_test) # noqa

View File

@ -3,8 +3,8 @@ from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from benchmarks.operator_benchmark import benchmark_core, benchmark_runner
from benchmarks.operator_benchmark.benchmark_test_generator import *
from operator_benchmark import benchmark_core, benchmark_runner
from operator_benchmark.benchmark_test_generator import *
import torch