pytorch/caffe2/operators/elementwise_op_gpu_test.cc
Yangqing Jia 38f3d1fc40 move flags to c10 (#12144)
Summary:
still in flux.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/12144

Reviewed By: smessmer

Differential Revision: D10140176

Pulled By: Yangqing

fbshipit-source-id: 1a313abed022039333e3925d19f8b3ef2d95306c
2018-10-04 02:09:56 -07:00

43 lines
975 B
C++

#include "caffe2/operators/elementwise_op_test.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/flags.h"
C10_DECLARE_string(caffe_test_root);
// CUDAContext specialization of CopyVector used by the shared elementwise
// tests (elementwise_op_test.h): copies N bool values from host buffer `x`
// into device buffer `y`, aborting via CUDA_CHECK on any CUDA error.
template <>
void CopyVector<caffe2::CUDAContext>(const int N, const bool* x, bool* y) {
  const size_t num_bytes = static_cast<size_t>(N) * sizeof(bool);
  CUDA_CHECK(cudaMemcpy(y, x, num_bytes, cudaMemcpyHostToDevice));
}
// CUDAContext specialization of CreateOperatorDef: returns an OperatorDef
// whose device option is set to CUDA, so the shared elementwise tests build
// their operators for the GPU.
template <>
caffe2::OperatorDef CreateOperatorDef<caffe2::CUDAContext>() {
  caffe2::OperatorDef op_def;
  auto* device_option = op_def.mutable_device_option();
  device_option->set_device_type(caffe2::PROTO_CUDA);
  return op_def;
}
// Runs the shared elementwise AND test on the GPU; a no-op (effectively
// skipped) when no CUDA device is available.
TEST(ElementwiseGPUTest, And) {
  if (caffe2::HasCudaGPU()) {
    elementwiseAnd<caffe2::CUDAContext>();
  }
}
// Runs the shared elementwise OR test on the GPU; a no-op (effectively
// skipped) when no CUDA device is available.
TEST(ElementwiseGPUTest, Or) {
  if (caffe2::HasCudaGPU()) {
    elementwiseOr<caffe2::CUDAContext>();
  }
}
// Runs the shared elementwise XOR test on the GPU; a no-op (effectively
// skipped) when no CUDA device is available.
TEST(ElementwiseGPUTest, Xor) {
  if (caffe2::HasCudaGPU()) {
    elementwiseXor<caffe2::CUDAContext>();
  }
}
// Runs the shared elementwise NOT test on the GPU; a no-op (effectively
// skipped) when no CUDA device is available.
TEST(ElementwiseGPUTest, Not) {
  if (caffe2::HasCudaGPU()) {
    elementwiseNot<caffe2::CUDAContext>();
  }
}