pytorch/caffe2/operators/conv_op_shared.cc
Yangqing Jia 38f3d1fc40 move flags to c10 (#12144)
Summary:
still in flux.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/12144

Reviewed By: smessmer

Differential Revision: D10140176

Pulled By: Yangqing

fbshipit-source-id: 1a313abed022039333e3925d19f8b3ef2d95306c
2018-10-04 02:09:56 -07:00

35 lines
1.0 KiB
C++

#include "conv_op_shared.h"
#include "caffe2/core/context.h"
#include "caffe2/core/flags.h"
#include "caffe2/core/workspace.h"
// Command-line flag (registered via caffe2/core/flags.h). When set to true,
// convolution ops unconditionally use the workspace-shared col buffer created
// below instead of allocating a private one per operator.
C10_DEFINE_bool(
caffe2_force_shared_col_buffer,
false,
"Always use the shared col buffer");
namespace caffe2 {
// Creates the workspace blobs that back the shared CPU col buffer:
// one blob holding the mutex that serializes access, and one blob for the
// buffer itself (its Tensor contents are materialized lazily by users).
// Must be called before runWithSharedBuffer<CPUContext>().
//
// @param ws  Workspace in which the shared blobs are (re)created; must be
//            non-null (dereferenced unconditionally).
template <>
void createSharedBuffer<CPUContext>(Workspace* ws) {
  auto* mutexPtr = ws->CreateBlob("__CAFFE2_SHARED_CONV_BUFFER_CPU_MUTEX__")
                       ->GetMutable<std::unique_ptr<std::mutex>>();
  // Prefer make_unique over reset(new ...): no raw `new`, exception-safe.
  *mutexPtr = std::make_unique<std::mutex>();
  ws->CreateBlob("__CAFFE2_SHARED_CONV_BUFFER_CPU__");
}
// Runs `f` on the shared CPU col buffer while holding the shared mutex, so
// that concurrent operators cannot clobber the buffer mid-use.
//
// @param ws  Workspace containing the blobs created by
//            createSharedBuffer<CPUContext>().
// @param f   Callback invoked with the (mutable) shared buffer tensor.
// @throws    Enforce failure if createSharedBuffer() was not called first.
template <>
void runWithSharedBuffer<CPUContext>(
    Workspace* ws,
    std::function<void(Tensor* buffer)> f) {
  auto* mutexBlob = ws->GetBlob("__CAFFE2_SHARED_CONV_BUFFER_CPU_MUTEX__");
  CAFFE_ENFORCE(mutexBlob, "Must call createSharedBuffer() first");
  auto* mutexPtr = mutexBlob->GetMutable<std::unique_ptr<std::mutex>>();
  // Keep the lock held for the entire duration of `f`.
  std::lock_guard<std::mutex> g(**mutexPtr);
  auto* bufferBlob = ws->GetBlob("__CAFFE2_SHARED_CONV_BUFFER_CPU__");
  // Previously unchecked: a missing buffer blob would have passed nullptr
  // into BlobGetMutableTensor. Enforce it the same way as the mutex blob.
  CAFFE_ENFORCE(bufferBlob, "Must call createSharedBuffer() first");
  auto* buffer = BlobGetMutableTensor(bufferBlob, CPU);
  f(buffer);
}
}