mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-07 12:21:27 +01:00
Summary: This PR is based on commit "977c6b3", as this version allows MKL to use all the available cores. All MKL-related files are added here after incorporating review comments. Major changes include: 1. use of Clang-format (linter) with --style=Google; 2. use of macros for checking input and filter dimensions in the MKL operators; 3. merged Max and Average pooling functions; 4. created a new folder for MKL-related Python scripts in the Python folder and moved them there; 5. there is no mkl_alexnet_test.py, as it was redundant — convnet_benchmark.py does the same thing. Closes https://github.com/caffe2/caffe2/pull/270 Differential Revision: D4905219 Pulled By: Yangqing fbshipit-source-id: e5f5b189714a835b93b9ebda24c52e09572dfca7
52 lines
1.7 KiB
Python
52 lines
1.7 KiB
Python
from __future__ import absolute_import
|
|
from __future__ import division
|
|
from __future__ import print_function
|
|
from __future__ import unicode_literals
|
|
|
|
import unittest
|
|
import hypothesis.strategies as st
|
|
from hypothesis import given, settings
|
|
import numpy as np
|
|
from caffe2.python import core, workspace
|
|
import caffe2.python.hypothesis_test_util as hu
|
|
import caffe2.python.mkl_test_util as mu
|
|
|
|
|
|
@unittest.skipIf(not workspace.C.has_mkldnn,
                 "Skipping as we do not have mkldnn.")
class MKLConvTest(hu.HypothesisTestCase):
    """Device-consistency test for the Conv operator under MKL-DNN.

    Hypothesis samples small convolution geometries; for each sample the
    Conv operator is run on every device in ``dc`` (supplied by ``mu.gcs``,
    presumably CPU vs. MKL — confirm against mkl_test_util) and the outputs
    are required to agree.
    """

    @given(stride=st.integers(1, 3),
           pad=st.integers(0, 3),
           kernel=st.integers(3, 5),
           size=st.integers(8, 8),
           input_channels=st.integers(1, 3),
           output_channels=st.integers(1, 3),
           batch_size=st.integers(1, 3),
           **mu.gcs)
    @settings(max_examples=2, timeout=100)
    def test_mkl_convolution(self, stride, pad, kernel, size,
                             input_channels, output_channels,
                             batch_size, gc, dc):
        # Operator under test: a plain Conv with the sampled geometry.
        conv_op = core.CreateOperator(
            "Conv",
            ["X", "w", "b"],
            ["Y"],
            stride=stride,
            pad=pad,
            kernel=kernel,
        )

        # Random float32 inputs shifted to be centered around zero
        # (np.random.rand draws from [0, 1)).
        input_blob = np.random.rand(
            batch_size, input_channels, size, size).astype(np.float32) - 0.5
        filter_blob = np.random.rand(
            output_channels, input_channels,
            kernel, kernel).astype(np.float32) - 0.5
        bias_blob = np.random.rand(output_channels).astype(np.float32) - 0.5

        # Check that output blob 0 matches across all devices in dc.
        self.assertDeviceChecks(
            dc, conv_op, [input_blob, filter_blob, bias_blob], [0])
|
|
|
|
|
|
if __name__ == "__main__":
    # unittest is already imported at module scope; the redundant
    # function-local re-import was removed.
    unittest.main()