Re-enable experimental ops build (#12821)

Summary:
The experimental ops for the c10 dispatcher were accidentally disabled in the OSS build when the directory was renamed from `c10` to `experimental/c10`. This PR re-enables them.
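For context, the experimental operator sources are pulled into the build by a recursive glob in the operators' CMakeLists, so a stale path silently matches nothing and the ops drop out of `Caffe2_CPU_SRCS` without any error. A minimal sketch of the pattern, following the CMake diff below (the emptiness warning is illustrative and not part of this PR):

# Collect the experimental c10 operator sources into the Caffe2 CPU build.
# A stale pattern here (e.g. still c10/*.cc after the move to experimental/c10/)
# matches nothing, and the operators silently disappear from the build.
file(GLOB_RECURSE tmp experimental/c10/*.cc)
if(NOT tmp)
  message(WARNING "no experimental c10 operator sources found; check the glob path")
endif()
set(Caffe2_CPU_SRCS ${Caffe2_CPU_SRCS} ${tmp})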
Pull Request resolved: https://github.com/pytorch/pytorch/pull/12821

Differential Revision: D10446779

Pulled By: smessmer

fbshipit-source-id: ac58cd1ba1281370e62169ec26052d0962225375
Sebastian Messmer 2018-10-29 11:27:08 -07:00 committed by Facebook Github Bot
parent b818d31a3e
commit 62b27d27b7
7 changed files with 19 additions and 9 deletions


@@ -21,8 +21,7 @@ inline std::shared_ptr<void> init_state<void>() {
template <class T>
using is_output_arg = std::is_same<Tensor*, T>;
template <class ParameterDef>
-using extract_type_t =
-    c10::guts::result_of_t<decltype (&ParameterDef::parse)(ArgumentHelper)>;
+using extract_type_t = typename ParameterDef::type;
} // namespace details
/**
@@ -278,6 +277,7 @@ class C10OperatorWrapper final : public Operator<Context> {
template <class ParameterDef>
struct ParameterHelper final {
+using type = typename ParameterDef::type;
static typename ParameterDef::type parse(const ArgumentHelper& helper) {
return helper.GetSingleArgument<typename ParameterDef::type>(
ParameterDef::name(), ParameterDef::default_value());


@@ -40,7 +40,7 @@ file(GLOB tmp *.cc)
file(GLOB tmp_cudnn *_cudnn.cc)
exclude(tmp "${tmp}" ${tmp_cudnn})
set(Caffe2_CPU_SRCS ${Caffe2_CPU_SRCS} ${tmp})
-file(GLOB_RECURSE tmp c10/*.cc)
+file(GLOB_RECURSE tmp experimental/c10/*.cc)
set(Caffe2_CPU_SRCS ${Caffe2_CPU_SRCS} ${tmp})
# exclude test files and gpu files
file(GLOB tmp *_test.cc)


@@ -11,6 +11,7 @@ C10_DEFINE_OP_SCHEMA(caffe2::ops::Cast);
namespace {
struct ToParameter final {
+using type = caffe2::TensorProto_DataType;
static caffe2::TensorProto_DataType parse(
const caffe2::ArgumentHelper& helper) {
return caffe2::cast::GetCastDataType(helper, "to");


@@ -9,6 +9,7 @@ C10_DEFINE_OP_SCHEMA(caffe2::ops::ExpandDims);
namespace {
struct DimsParameter final {
+using type = std::vector<int>;
static std::vector<int> parse(const caffe2::ArgumentHelper& helper) {
return helper.GetRepeatedArgument<int>("dims");
}


@@ -15,21 +15,25 @@ C10_DEFINE_OP_SCHEMA(caffe2::ops::GivenTensorFill<int64_t>);
namespace {
struct ShapeParameter final {
+using type = std::vector<int64_t>;
static std::vector<int64_t> parse(const caffe2::ArgumentHelper& helper) {
return helper.GetRepeatedArgument<int64_t>("shape");
}
};
struct ExtraShapeParameter final {
+using type = std::vector<int>;
static std::vector<int> parse(const caffe2::ArgumentHelper& helper) {
return helper.GetRepeatedArgument<int>("extra_shape");
}
};
struct InputAsShapeParameter final {
+using type = bool;
static bool parse(const caffe2::ArgumentHelper& helper) {
return helper.GetSingleArgument<bool>("input_as_shape", false);
}
};
struct DTypeParameter final {
+using type = int;
static int parse(const caffe2::ArgumentHelper& helper) {
auto dtype = helper.GetSingleArgument<int>(
"dtype", caffe2::TensorProto_DataType_FLOAT);
@@ -50,6 +54,7 @@ struct DTypeParameter final {
}
};
struct ValueParameter final {
+using type = caffe2::ops::ConstantFill::Value;
static caffe2::ops::ConstantFill::Value parse(
const caffe2::ArgumentHelper& helper) {
caffe2::ops::ConstantFill::Value result;
@@ -68,17 +73,20 @@ struct ValueParameter final {
}
};
struct MinParameter final {
+using type = float;
static float parse(const caffe2::ArgumentHelper& helper) {
return helper.GetSingleArgument<float>("min", 0);
}
};
struct MaxParameter final {
+using type = float;
static float parse(const caffe2::ArgumentHelper& helper) {
return helper.GetSingleArgument<float>("max", 1);
}
};
template <class T>
struct ValuesParameter final {
+using type = Tensor;
static Tensor parse(const caffe2::ArgumentHelper& helper) {
if (!std::is_same<T, float>::value || !helper.HasArgument("dtype")) {
return ExtractValues<T>(helper);


@@ -200,7 +200,7 @@ namespace ska
template<typename T>
struct sherwood_v3_entry_constexpr
{
-constexpr explicit sherwood_v3_entry_constexpr(int8_t distance_from_desired_ = -1, typename std::aligned_storage<sizeof(T), alignof(T)>::type bytes_ = {})
+constexpr explicit sherwood_v3_entry_constexpr(int8_t distance_from_desired_, typename std::aligned_storage<sizeof(T), alignof(T)>::type bytes_)
: distance_from_desired(distance_from_desired_), bytes(bytes_) {}
static constexpr sherwood_v3_entry_constexpr special_end_entry()
@@ -219,10 +219,10 @@
{
static constexpr std::array<const sherwood_v3_entry_constexpr<T>, min_lookups> table
{{
-sherwood_v3_entry_constexpr<T>(),
-sherwood_v3_entry_constexpr<T>(),
-sherwood_v3_entry_constexpr<T>(),
-sherwood_v3_entry_constexpr<T>::special_end_entry()
+sherwood_v3_entry_constexpr<T>(-1, {}),
+sherwood_v3_entry_constexpr<T>(-1, {}),
+sherwood_v3_entry_constexpr<T>(-1, {}),
+sherwood_v3_entry_constexpr<T>(sherwood_v3_entry<T>::special_end_value, {})
}};
};
template<typename T>


@@ -45,7 +45,7 @@ if NOT DEFINED CMAKE_GENERATOR (
:: In default we use win64 VS 2015.
:: Main reason is that currently, cuda 9 does not support VS 2017 newest
:: version. To use cuda you will have to use 2015.
set CMAKE_GENERATOR="Visual Studio 14 2015 Win64"
set CMAKE_GENERATOR="Visual Studio 15 2017 Win64"
)
)