Default hidden visibility (#10752)

Summary:
Flipping to hidden visibility one more time. Let's see what fails.
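
For readers following along, a minimal sketch of what the flip means (illustrative only, not part of this patch): with -fvisibility=hidden, every symbol in libcaffe2 is hidden by default, and only declarations annotated with an export macro stay callable across the shared-library boundary, so a missing annotation surfaces as an undefined-symbol error downstream.

    // visibility_sketch.cpp -- hypothetical file, not part of this PR.
    // Build: g++ -shared -fPIC -fvisibility=hidden visibility_sketch.cpp
    #define EXAMPLE_API __attribute__((__visibility__("default")))

    // Hidden by default under -fvisibility=hidden: other shared objects
    // that try to call this get an undefined-symbol error at link time.
    int internal_helper(int x) { return x * 2; }

    // Explicitly exported: stays visible to users of the resulting .so.
    EXAMPLE_API int public_entry(int x) { return internal_helper(x) + 1; }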

cc mingzhe09088 pjh5 Yangqing
Pull Request resolved: https://github.com/pytorch/pytorch/pull/10752

Reviewed By: ezyang

Differential Revision: D9526343

Pulled By: orionr

fbshipit-source-id: c0e9c29270e95e1b2e21c598095f720c199e1e52
Orion Reblitz-Richardson 2018-08-28 15:15:54 -07:00 committed by Facebook Github Bot
parent 92ff070b83
commit 4cb968fb77
18 changed files with 114 additions and 98 deletions

View File

@@ -2,11 +2,13 @@
 #include <atomic>
 
+#include "ATen/core/ATenGeneral.h"
+
 namespace at {
 
 // base class for refcounted things, allows for collections of generic
 // refcounted objects that include tensors
-struct Retainable {
+struct AT_API Retainable {
   Retainable(): refcount(1), weak_refcount(1) {}
   void retain() {
     ++refcount;
View File

@@ -4,3 +4,5 @@
 // TODO: Merge the *_API macros.
 #define AT_API AT_CORE_API
+#define AT_EXPORT AT_CORE_EXPORT
+#define AT_IMPORT AT_CORE_IMPORT

View File

@@ -19,7 +19,7 @@ namespace at {
 namespace detail {
 
 // Obtains the base name from a full path.
-std::string StripBasename(const std::string& full_path);
+AT_CORE_API std::string StripBasename(const std::string& full_path);
 
 inline std::ostream& _str(std::ostream& ss) {
   return ss;
@@ -56,7 +56,7 @@ inline std::string str(const char* c_str) {
 }
 
 /// Represents a location in source code (for debugging).
-struct SourceLocation {
+struct AT_CORE_API SourceLocation {
   const char* function;
   const char* file;
   uint32_t line;

View File

@@ -12,21 +12,33 @@
 #ifdef _WIN32
 #if !defined(AT_CORE_STATIC_WINDOWS)
-// TODO: unfiy the controlling macros.
-#if defined(CAFFE2_BUILD_MAIN_LIBS) || defined(ATen_cpu_EXPORTS) || defined(caffe2_EXPORTS)
-#define AT_CORE_API __declspec(dllexport)
-#else // defined(CAFFE2_BUILD_MAIN_LIBS) || defined(ATen_cpu_EXPORTS) || defined(caffe2_EXPORTS)
-#define AT_CORE_API __declspec(dllimport)
-#endif // defined(CAFFE2_BUILD_MAIN_LIBS) || defined(ATen_cpu_EXPORTS) || defined(caffe2_EXPORTS)
+#define AT_CORE_EXPORT __declspec(dllexport)
+#define AT_CORE_IMPORT __declspec(dllimport)
 #else // !defined(AT_CORE_STATIC_WINDOWS)
-#define AT_CORE_API
+#define AT_CORE_EXPORT
+#define AT_CORE_IMPORT
 #endif // !defined(AT_CORE_STATIC_WINDOWS)
 #else // _WIN32
 #if defined(__GNUC__)
-#define AT_CORE_API __attribute__((__visibility__("default")))
+#define AT_CORE_EXPORT __attribute__((__visibility__("default")))
+#else // defined(__GNUC__)
+#define AT_CORE_EXPORT
 #endif // defined(__GNUC__)
+#define AT_CORE_IMPORT AT_CORE_EXPORT
 #endif // _WIN32
 
+// AT_CORE_API is a macro that, depending on whether you are building the
+// main library or not, resolves to either AT_CORE_EXPORT or
+// AT_CORE_IMPORT.
+//
+// TODO: unify the controlling macros.
+#if defined(CAFFE2_BUILD_MAIN_LIBS) || defined(ATen_cpu_EXPORTS) || defined(caffe2_EXPORTS)
+#define AT_CORE_API AT_CORE_EXPORT
+#else // defined(CAFFE2_BUILD_MAIN_LIBS) || defined(ATen_cpu_EXPORTS) || defined(caffe2_EXPORTS)
+#define AT_CORE_API AT_CORE_IMPORT
+#endif // defined(CAFFE2_BUILD_MAIN_LIBS) || defined(ATen_cpu_EXPORTS) || defined(caffe2_EXPORTS)
+
 // Disable the copy and assignment operator for a class. Note that this will
 // disable the usage of the class in std containers.
 #define AT_DISABLE_COPY_AND_ASSIGN(classname) \

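In other words, each annotated declaration in this patch compiles down to one of two attributes. A hedged sketch of the expansion (the include path is an assumption; the macro names are from the hunk above):

    // Illustration only. Building the main library on Linux/gcc
    // (CAFFE2_BUILD_MAIN_LIBS defined):
    //   AT_CORE_API -> AT_CORE_EXPORT -> __attribute__((__visibility__("default")))
    // Consuming the DLL on Windows (no build-side macro defined):
    //   AT_CORE_API -> AT_CORE_IMPORT -> __declspec(dllimport)
    #include <cstdint>
    #include <string>
    #include "ATen/core/Macros.h" // assumed location of the macros above

    AT_CORE_API std::string StripBasename(const std::string& full_path);

    struct AT_CORE_API SourceLocation {
      const char* function;
      const char* file;
      uint32_t line;
    };
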
View File

@@ -1,5 +1,6 @@
 #pragma once
 
+#include <ATen/core/ATenGeneral.h>
 #include <ATen/core/Error.h>
 #include <atomic>
 #include <stdexcept>
@@ -32,7 +33,7 @@ namespace c10 {
 // tells us if the object was allocated by us. If it wasn't, no
 // intrusive_ptr for you!
-class intrusive_ptr_target {
+class AT_CORE_API intrusive_ptr_target {
   // Note [Weak references for intrusive refcounting]
   // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   // Here's the scheme:
@@ -113,7 +114,7 @@ class intrusive_ptr_target {
 namespace detail {
 template <class TTarget>
-struct intrusive_target_default_null_type final {
+struct AT_CORE_EXPORT intrusive_target_default_null_type final {
   static constexpr TTarget* singleton() noexcept {
     return nullptr;
   }
@@ -126,7 +127,7 @@ class weak_intrusive_ptr;
 template <
     class TTarget,
     class NullType = detail::intrusive_target_default_null_type<TTarget>>
-class intrusive_ptr final {
+class AT_CORE_EXPORT intrusive_ptr final {
  private:
   static_assert(
       std::is_base_of<intrusive_ptr_target, TTarget>::value,
@@ -415,7 +416,7 @@ inline bool operator!=(
 template <
     typename TTarget,
     class NullType = detail::intrusive_target_default_null_type<TTarget>>
-class weak_intrusive_ptr final {
+class AT_CORE_EXPORT weak_intrusive_ptr final {
  private:
   static_assert(
       std::is_base_of<intrusive_ptr_target, TTarget>::value,
@@ -797,13 +798,13 @@ namespace std {
 // To allow intrusive_ptr and weak_intrusive_ptr inside std::unordered_map or
 // std::unordered_set, we need std::hash
 template <class TTarget, class NullType>
-struct hash<c10::intrusive_ptr<TTarget, NullType>> {
+struct AT_CORE_EXPORT hash<c10::intrusive_ptr<TTarget, NullType>> {
   size_t operator()(const c10::intrusive_ptr<TTarget, NullType>& x) const {
     return std::hash<TTarget*>()(x.get());
   }
 };
 template <class TTarget, class NullType>
-struct hash<c10::weak_intrusive_ptr<TTarget, NullType>> {
+struct AT_CORE_EXPORT hash<c10::weak_intrusive_ptr<TTarget, NullType>> {
   size_t operator()(const c10::weak_intrusive_ptr<TTarget, NullType>& x) const {
     return std::hash<TTarget*>()(x._unsafe_get_target());
   }
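
The std::hash specializations above exist so that intrusive_ptr can be used as a key in standard containers; a hedged usage sketch (the header path and the c10::make_intrusive helper are assumptions based on this header):

    #include <ATen/core/intrusive_ptr.h> // assumed path for the header above
    #include <unordered_set>

    struct MyTarget : c10::intrusive_ptr_target {
      int value;
      explicit MyTarget(int v) : value(v) {}
    };

    int main() {
      // The refcount lives inside the object itself (intrusive_ptr_target).
      auto p = c10::make_intrusive<MyTarget>(42);

      // Legal only because of the std::hash specializations in this diff:
      std::unordered_set<c10::intrusive_ptr<MyTarget>> set;
      set.insert(p);
      return (*set.begin())->value == 42 ? 0 : 1;
    }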

View File

@@ -207,6 +207,7 @@ target_link_libraries(caffe2_protos PUBLIC protobuf::libprotobuf)
 # Compile exposed libraries.
 list(APPEND Caffe2_CPU_SRCS $<TARGET_OBJECTS:c10>)
 add_library(caffe2 ${Caffe2_CPU_SRCS})
+target_compile_options(caffe2 PRIVATE "-fvisibility=hidden")
 caffe2_interface_library(caffe2_protos caffe2_protos_whole)
 target_link_libraries(caffe2 PRIVATE caffe2_protos_whole)
 if (${CAFFE2_LINK_LOCAL_PROTOBUF})
@@ -229,13 +230,6 @@ else()
   target_compile_options(caffe2 INTERFACE "$<$<COMPILE_LANGUAGE:CXX>:-std=c++11>")
 endif()
 
-# Note(jiayq): This is not complete yet, but in the end we will need to deal with
-# explicit hidden visibility.
-# This line is here so that when testing build, we can enable it to properly test
-# annotation of public symbols. When finally doing proper build with all symbols
-# annotated, we will enable this line and have it wrapped with gcc/clang checks.
-# target_compile_options(caffe2 PRIVATE "-fvisibility=hidden")
 target_compile_options(caffe2 PRIVATE "-DCAFFE2_BUILD_MAIN_LIB")
 
 if (MSVC AND NOT BUILD_SHARED_LIBS)
   # Note [Supporting both static and dynamic libraries on Windows]

View File

@@ -12,6 +12,7 @@
 #ifndef NOM_GRAPH_GRAPH_H
 #define NOM_GRAPH_GRAPH_H
 
+#include "caffe2/core/common.h"
 #include "nomnigraph/Support/Common.h"
 
 #include <algorithm>
@@ -42,7 +43,7 @@ class Node;
 // \brief Edge within a Graph.
 template <typename T, typename... U>
-class Edge : public StorageType<U...> {
+class CAFFE2_API Edge : public StorageType<U...> {
  public:
   using NodeRef = typename Graph<T, U...>::NodeRef;
   Edge(NodeRef tail, NodeRef head, U... args)
@@ -76,7 +77,7 @@ class Edge : public StorageType<U...> {
 // \brief Node within a Graph.
 template <typename T, typename... U>
-class Node : public StorageType<T>, public Notifier<Node<T, U...>> {
+class CAFFE2_API Node : public StorageType<T>, public Notifier<Node<T, U...>> {
  public:
   using NodeRef = typename Graph<T, U...>::NodeRef;
   using EdgeRef = typename Graph<T, U...>::EdgeRef;
@@ -155,7 +156,7 @@ class Node : public StorageType<T>, public Notifier<Node<T, U...>> {
 /// for example.
 ///
 template <typename T, typename... U>
-class Subgraph {
+class CAFFE2_API Subgraph {
  public:
   Subgraph() {
     DEBUG_PRINT("Creating instance of Subgraph: %p\n", this);
@@ -222,7 +223,7 @@ class Subgraph {
 /// Everything is owned by the graph to simplify storage concerns.
 ///
 template <typename T, typename... U>
-class Graph {
+class CAFFE2_API Graph {
  public:
   using SubgraphType = Subgraph<T, U...>;
   using NodeRef = Node<T, U...>*;

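Edge, Node, Subgraph, and Graph are templates, so the annotations above matter mostly where they are instantiated across the library boundary. A hedged usage sketch of the graph API (method names assumed from the nomnigraph headers):

    #include "nomnigraph/Graph/Graph.h"

    int main() {
      nom::Graph<int> g;        // nodes carry an int payload
      auto a = g.createNode(1); // assumed node-creation helper
      auto b = g.createNode(2);
      g.createEdge(a, b);       // assumed: directed edge a -> b
      return 0;
    }
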
View File

@@ -1,13 +1,14 @@
 #ifndef NOM_REPRESENTATIONS_COMPILER_H
 #define NOM_REPRESENTATIONS_COMPILER_H
 
+#include "caffe2/core/common.h"
 #include "nomnigraph/Graph/Graph.h"
 #include "nomnigraph/Support/Casting.h"
 
 namespace nom {
 namespace repr {
 
-class Value {
+class CAFFE2_API Value {
  public:
   enum class ValueKind { Value, Instruction, Data };
   Value(ValueKind K) : Kind(K) {}
@@ -21,10 +22,10 @@ class Value {
   const ValueKind Kind;
 };
 
-class Data : public Value {
+class CAFFE2_API Data : public Value {
  public:
   Data() : Value(ValueKind::Data) {}
-  static bool classof(const Value* V) {
+  CAFFE2_API static bool classof(const Value* V) {
     return V->getKind() == ValueKind::Data;
   }
   virtual ~Data() = default;
@@ -40,7 +41,7 @@ class Data : public Value {
   size_t Version = 0;
 };
 
-class Instruction : public Value {
+class CAFFE2_API Instruction : public Value {
  public:
   /// \brief All the different types of execution.
   enum class Opcode {
@@ -53,7 +54,7 @@ class Instruction : public Value {
   };
   Instruction() : Value(ValueKind::Instruction), Op(Opcode::Generic) {}
   Instruction(Opcode op) : Value(ValueKind::Instruction), Op(op) {}
-  static bool classof(const Value* V) {
+  CAFFE2_API static bool classof(const Value* V) {
     return V->getKind() == ValueKind::Instruction;
   }
   virtual ~Instruction() = default;
@@ -65,7 +66,7 @@ class Instruction : public Value {
   Opcode Op;
 };
 
-class Terminator : public Instruction {
+class CAFFE2_API Terminator : public Instruction {
  public:
   Terminator(Instruction::Opcode op) : Instruction(op) {}
@@ -79,17 +80,17 @@ class Terminator : public Instruction {
   }
 };
 
-class Branch : public Terminator {
+class CAFFE2_API Branch : public Terminator {
  public:
   Branch() : Terminator(Instruction::Opcode::Branch) {}
 };
 
-class Return : public Terminator {
+class CAFFE2_API Return : public Terminator {
  public:
   Return() : Terminator(Instruction::Opcode::Return) {}
 };
 
-class Phi : public Instruction {
+class CAFFE2_API Phi : public Instruction {
  public:
   Phi() : Instruction(Instruction::Opcode::Phi) {}
 };

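The classof hooks above implement LLVM-style RTTI: each subclass stores a kind tag, and isa<>/dyn_cast<> dispatch on it without C++ RTTI. A condensed, self-contained sketch of the pattern (simplified; not the nomnigraph implementation):

    #include <cassert>

    struct Value {
      enum class Kind { Value, Data };
      const Kind kind;
      explicit Value(Kind k) : kind(k) {}
    };

    struct Data : Value {
      Data() : Value(Kind::Data) {}
      // The hook isa<> relies on: a static predicate over the base class.
      static bool classof(const Value* v) { return v->kind == Kind::Data; }
    };

    template <typename To, typename From>
    bool isa(const From* v) { return To::classof(v); }

    int main() {
      Data d;
      const Value* v = &d;
      assert(isa<Data>(v)); // true: dispatches on the stored kind tag
      return 0;
    }
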
View File

@@ -1,6 +1,7 @@
 #ifndef NOM_REPRESENTATIONS_CONTROLFLOW_H
 #define NOM_REPRESENTATIONS_CONTROLFLOW_H
 
+#include "caffe2/core/common.h"
 #include "nomnigraph/Graph/Graph.h"
 #include "nomnigraph/Representations/Compiler.h"
 
@@ -13,7 +14,7 @@ namespace repr {
 /// of the data flow graph as well as an ordering on instruction
 /// execution. Basic blocks are used for control flow analysis.
 template <typename T, typename... U>
-class BasicBlock {
+class CAFFE2_API BasicBlock {
  public:
   using NodeRef = typename Subgraph<T, U...>::NodeRef;
   BasicBlock() {}
@@ -91,7 +92,7 @@ class BasicBlock {
 using Program = Graph<Value>;
 
 template <typename G>
-struct ControlFlowGraphImpl {
+struct CAFFE2_API ControlFlowGraphImpl {
   // Hack to help debugging in case this class is misused.
   static_assert(
       sizeof(ControlFlowGraphImpl),
@@ -101,7 +102,7 @@ struct ControlFlowGraphImpl {
 };
 
 template <typename T, typename... U>
-struct ControlFlowGraphImpl<Graph<T, U...>> {
+struct CAFFE2_API ControlFlowGraphImpl<Graph<T, U...>> {
   using type = Graph<std::unique_ptr<BasicBlock<T, U...>>, int>;
   using bbType = BasicBlock<T, U...>;
 };
@@ -111,7 +112,7 @@ struct ControlFlowGraphImpl<Graph<T, U...>> {
 ///
 /// \note G Must be of type Graph<T, U...>.
 template <typename G>
-class ControlFlowGraph : public ControlFlowGraphImpl<G>::type {
+class CAFFE2_API ControlFlowGraph : public ControlFlowGraphImpl<G>::type {
  public:
   // This is for C++11 compatibility, otherwise we could use "using"
   ControlFlowGraph() {}
@@ -130,7 +131,7 @@ using BasicBlockType = typename ControlFlowGraphImpl<G>::bbType;
 /// \brief Converts graph to SSA representation. Modifies the graph
 /// by inserting versions and phi nodes.
 template <typename Phi, typename G>
-void addSSA(G* dfg, ControlFlowGraph<G>* cfg) {
+CAFFE2_API void addSSA(G* dfg, ControlFlowGraph<G>* cfg) {
   static_assert(
       std::is_base_of<Instruction, Phi>::value,
       "Phi type must be derived from Instruction.");

View File

@@ -12,6 +12,7 @@
 #ifndef NOM_REPRESENTATIONS_NEURALNET_H
 #define NOM_REPRESENTATIONS_NEURALNET_H
 
+#include "caffe2/core/common.h"
 #include "nomnigraph/Graph/Graph.h"
 #include "nomnigraph/Representations/Compiler.h"
 #include "nomnigraph/Representations/ControlFlow.h"
@@ -40,7 +41,7 @@ class NeuralNetData;
 /// a saved void* pointer for external use. Derived classes
 /// add richer semantics to the annotation and it is encouraged
 /// to use them.
-class Annotation {
+class CAFFE2_API Annotation {
  public:
   enum class AnnotationKind { Generic, Caffe2 };
@@ -59,7 +60,7 @@ class Annotation {
   const AnnotationKind Kind;
 };
 
-class NeuralNetOperator : public Instruction {
+class CAFFE2_API NeuralNetOperator : public Instruction {
  public:
   /// Discriminator for LLVM-style RTTI (isa<>)
   enum class NNKind {
@@ -132,7 +133,7 @@ class NeuralNetOperator : public Instruction {
   std::unique_ptr<Annotation> ExtraAnnotation;
 };
 
-class NeuralNetData : public Data {
+class CAFFE2_API NeuralNetData : public Data {
  public:
   /// Discriminator for LLVM-style RTTI (isa<>)
   enum class NNDataKind { Generic, Tensor };
@@ -156,7 +157,7 @@ class NeuralNetData : public Data {
   size_t Version = 0;
 };
 
-class Tensor : public NeuralNetData {
+class CAFFE2_API Tensor : public NeuralNetData {
  public:
   enum class DataType { Generic, Float, Half, Int8 };
   enum class Layout { Generic, NCHW, NHWC };
@@ -165,7 +166,7 @@ class Tensor : public NeuralNetData {
       : NeuralNetData(NNDataKind::Tensor),
         name_(name),
         type_(DataType::Generic) {}
-  static bool classof(const NeuralNetData* D) {
+  CAFFE2_API static bool classof(const NeuralNetData* D) {
     return D->getKind() == NNDataKind::Tensor;
   }
@@ -192,10 +193,10 @@ class Tensor : public NeuralNetData {
 };
 
 #define NOMNIGRAPH_DEFINE_NN_RTTI(op)                                  \
-  static bool classof(const NeuralNetOperator* N) {                   \
+  CAFFE2_API static bool classof(const NeuralNetOperator* N) {         \
     return N->getKind() == NNKind::op;                                 \
   }                                                                    \
-  static bool classof(const Value* N) {                               \
+  CAFFE2_API static bool classof(const Value* N) {                     \
     if (isa<NeuralNetOperator>(N)) {                                   \
       return dyn_cast<NeuralNetOperator>(N)->getKind() == NNKind::op;  \
     }                                                                  \
@@ -204,21 +205,21 @@ class Tensor : public NeuralNetData {
 
 #include "nomnigraph/Generated/OpClasses.h"
 
-class While : public NeuralNetOperator {
+class CAFFE2_API While : public NeuralNetOperator {
  public:
   While() : NeuralNetOperator(NNKind::While, Opcode::Branch) {}
   NOMNIGRAPH_DEFINE_NN_RTTI(While);
   ~While() {}
 };
 
-class NNPhi : public NeuralNetOperator {
+class CAFFE2_API NNPhi : public NeuralNetOperator {
  public:
   NNPhi() : NeuralNetOperator(NNKind::NNPhi, Opcode::Phi) {}
   NOMNIGRAPH_DEFINE_NN_RTTI(NNPhi);
   ~NNPhi() {}
 };
 
-class GenericOperator : public NeuralNetOperator {
+class CAFFE2_API GenericOperator : public NeuralNetOperator {
  public:
   GenericOperator() : NeuralNetOperator(NNKind::GenericOperator) {}
   GenericOperator(std::string name)
@@ -240,7 +241,7 @@ using NNGraph = nom::Graph<std::unique_ptr<nom::repr::Value>>;
 using NNSubgraph = nom::Subgraph<std::unique_ptr<nom::repr::Value>>;
 using NNCFGraph = nom::repr::ControlFlowGraph<NNGraph>;
 
-struct NNModule {
+struct CAFFE2_API NNModule {
   NNGraph dataFlow;
   NNCFGraph controlFlow;
   std::unordered_set<NNGraph::NodeRef> inputs;
@@ -259,7 +260,7 @@ template <bool B, class T = void>
 using enable_if_t = typename std::enable_if<B, T>::type;
 
 template <typename T, typename U>
-struct inheritedFrom {
+struct CAFFE2_API inheritedFrom {
   static constexpr bool value =
       std::is_base_of<U, T>::value && !std::is_same<U, T>::value;
 };
@@ -267,14 +268,14 @@ struct inheritedFrom {
 // This is just a way to fix issues when the isa<> implementation
 // can't automatically downcast.
 template <typename T, typename N, typename = void>
-struct is_impl {
+struct CAFFE2_API is_impl {
   inline static bool impl(N n) {
     return isa<T>(n->data());
   }
 };
 
 template <typename T, typename N>
-struct is_impl<T, N, enable_if_t<inheritedFrom<T, NeuralNetOperator>::value>> {
+struct CAFFE2_API is_impl<T, N, enable_if_t<inheritedFrom<T, NeuralNetOperator>::value>> {
   inline static bool impl(N n) {
     if (!isa<NeuralNetOperator>(n->data().get())) {
       return false;
@@ -285,7 +286,7 @@ struct is_impl<T, N, enable_if_t<inheritedFrom<T, NeuralNetOperator>::value>> {
 };
 
 template <typename T, typename N>
-struct is_impl<T, N, enable_if_t<inheritedFrom<T, NeuralNetData>::value>> {
+struct CAFFE2_API is_impl<T, N, enable_if_t<inheritedFrom<T, NeuralNetData>::value>> {
   inline static bool impl(N n) {
     if (!isa<NeuralNetData>(n->data().get())) {
       return false;
@@ -303,14 +304,14 @@ inline bool is(N n) {
 // This is just a way to fix issues when the dyn_cast<> implementation
 // can't automatically downcast.
 template <typename T, typename N, typename = void>
-struct get_impl {
+struct CAFFE2_API get_impl {
   inline static T* impl(N n) {
     return dyn_cast<T>(n->data().get());
   }
 };
 
 template <typename T, typename N>
-struct get_impl<T, N, enable_if_t<inheritedFrom<T, NeuralNetOperator>::value>> {
+struct CAFFE2_API get_impl<T, N, enable_if_t<inheritedFrom<T, NeuralNetOperator>::value>> {
   inline static T* impl(N n) {
     if (!is<T>(n)) {
       assert(0 && "Cannot get type from node");
@@ -322,7 +323,7 @@ struct get_impl<T, N, enable_if_t<inheritedFrom<T, NeuralNetOperator>::value>> {
 };
 
 template <typename T, typename N>
-struct get_impl<T, N, enable_if_t<inheritedFrom<T, NeuralNetData>::value>> {
+struct CAFFE2_API get_impl<T, N, enable_if_t<inheritedFrom<T, NeuralNetData>::value>> {
   inline static T* impl(N n) {
     if (!is<T>(n)) {
       assert(0 && "Cannot get type from node");
@@ -339,7 +340,7 @@ inline T* get(N n) {
 }
 
 template <typename T, typename G>
-std::vector<typename G::NodeRef> nodeIterator(G& g) {
+CAFFE2_API std::vector<typename G::NodeRef> nodeIterator(G& g) {
   std::vector<typename G::NodeRef> out;
   for (auto node : g.getMutableNodes()) {
     if (!is<T>(node)) {
@@ -351,7 +352,7 @@ std::vector<typename G::NodeRef> nodeIterator(G& g) {
 }
 
 template <typename T, typename G>
-std::vector<std::pair<T*, typename G::NodeRef>> dataIterator(G& g) {
+CAFFE2_API std::vector<std::pair<T*, typename G::NodeRef>> dataIterator(G& g) {
   std::vector<std::pair<T*, typename G::NodeRef>> out;
   for (auto node : g.getMutableNodes()) {
     if (!is<T>(node)) {
@@ -364,7 +365,7 @@ std::vector<std::pair<T*, typename G::NodeRef>> dataIterator(G& g) {
 }
 
 template <typename T, typename... Args>
-void insertOp(
+CAFFE2_API void insertOp(
     NNGraph& g,
     NNGraph::NodeRef a,
     NNGraph::NodeRef b,
@@ -394,7 +395,7 @@ void insertOp(
 }
 
 template <typename NewT, typename OldT>
-NNGraph::NodeRef convertNode(NNGraph& g, NNGraph::NodeRef node) {
+CAFFE2_API NNGraph::NodeRef convertNode(NNGraph& g, NNGraph::NodeRef node) {
   assert(is<OldT>(node) && "Cannot get type from node.");
 
   NeuralNetOperator* nnOpPtr =
@@ -410,21 +411,21 @@ NNGraph::NodeRef convertNode(NNGraph& g, NNGraph::NodeRef node) {
 }
 
 /// NeuralNetData specific helpers.
-bool hasProducer(NNGraph::NodeRef n);
-NNGraph::NodeRef getProducer(NNGraph::NodeRef n);
-bool hasConsumer(NNGraph::NodeRef n);
-std::vector<NNGraph::NodeRef> getConsumers(NNGraph::NodeRef n);
+CAFFE2_API bool hasProducer(NNGraph::NodeRef n);
+CAFFE2_API NNGraph::NodeRef getProducer(NNGraph::NodeRef n);
+CAFFE2_API bool hasConsumer(NNGraph::NodeRef n);
+CAFFE2_API std::vector<NNGraph::NodeRef> getConsumers(NNGraph::NodeRef n);
 
-bool hasInputs(NNGraph::NodeRef n);
-std::vector<NNGraph::NodeRef> getInputs(NNGraph::NodeRef n);
-std::vector<NNGraph::NodeRef> getOutputs(NNGraph::NodeRef n);
+CAFFE2_API bool hasInputs(NNGraph::NodeRef n);
+CAFFE2_API std::vector<NNGraph::NodeRef> getInputs(NNGraph::NodeRef n);
+CAFFE2_API std::vector<NNGraph::NodeRef> getOutputs(NNGraph::NodeRef n);
 
-void coalesceInsertedDataDependencies(repr::NNModule* m);
+CAFFE2_API void coalesceInsertedDataDependencies(repr::NNModule* m);
 
 template <NNGraph* G>
-struct NodeHelper {};
+struct CAFFE2_API NodeHelper {};
 
-struct NNNodeMatchCriteria {
+struct CAFFE2_API NNNodeMatchCriteria {
   const std::function<bool(NNGraph::NodeRef)> predicate;
   const std::string debugString;
@@ -454,22 +455,22 @@ using NNMatchNode = nom::matcher::MatchNode<NNNodeMatchCriteria>;
 // Commonly used criteria.
 
 // The node has a single output and the output has a single consumer.
-NNNodeMatchCriteria criteriaSingleOutputAndConsumer();
+CAFFE2_API NNNodeMatchCriteria criteriaSingleOutputAndConsumer();
 
 // The node has a unique consumer (there may be multiple edges from output
 // to the single consumer).
-NNNodeMatchCriteria criteriaSingleConsumer();
+CAFFE2_API NNNodeMatchCriteria criteriaSingleConsumer();
 
 template <typename NodeType>
-NNNodeMatchCriteria matchOp(const std::string& debugString = "matchOp") {
+CAFFE2_API NNNodeMatchCriteria matchOp(const std::string& debugString = "matchOp") {
   return NNNodeMatchCriteria(
       [](NNGraph::NodeRef nodeRef) { return is<NodeType>(nodeRef); },
       debugString);
 }
 
-NNNodeMatchCriteria matchTensor();
+CAFFE2_API NNNodeMatchCriteria matchTensor();
 
 template <typename NodeType>
-NNNodeMatchCriteria matchOp(
+CAFFE2_API NNNodeMatchCriteria matchOp(
     const std::function<bool(const NodeType&)> predicate,
     const std::string& debugString = "matchOpWithPredicate") {
   return NNNodeMatchCriteria(
@@ -481,8 +482,8 @@ NNNodeMatchCriteria matchOp(
       debugString);
 };
 
-struct NNNodeMatch {
-  static bool isMatch(
+struct CAFFE2_API NNNodeMatch {
+  CAFFE2_API static bool isMatch(
       const NNGraph::NodeRef& node,
       const NNNodeMatchCriteria& criteria) {
     return criteria.predicate(node);
@@ -495,7 +496,7 @@ using NNSubgraphMatcher =
 // This helper method makes it easy to create matching criteria in NNGraph.
 // For example, operatorSubgraph(opMatch, ...) will refer to a tree like this:
 // ... -> opMatch -> opMatch_Output
-NNMatchGraph::NodeRef operatorSubgraph(
+CAFFE2_API NNMatchGraph::NodeRef operatorSubgraph(
     NNMatchGraph& g,
     const NNNodeMatchCriteria& root,
     const std::vector<NNMatchGraph::NodeRef>& childrenCriteria = {},

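The is_impl/get_impl machinery above is tag dispatch via SFINAE: the enable_if_t specializations are chosen only when T derives from NeuralNetOperator or NeuralNetData. A condensed, self-contained sketch of the same idea (names simplified; not the nomnigraph code):

    #include <iostream>
    #include <type_traits>

    struct Base {};
    struct Derived : Base {};
    struct Unrelated {};

    template <bool B, class T = void>
    using enable_if_t = typename std::enable_if<B, T>::type;

    // Primary template: generic fallback.
    template <typename T, typename = void>
    struct describe {
      static const char* impl() { return "generic"; }
    };

    // Chosen only when T inherits from Base; otherwise substitution
    // fails silently and the primary template is used.
    template <typename T>
    struct describe<T, enable_if_t<std::is_base_of<Base, T>::value>> {
      static const char* impl() { return "derived-from-Base"; }
    };

    int main() {
      std::cout << describe<Unrelated>::impl() << "\n"; // generic
      std::cout << describe<Derived>::impl() << "\n";   // derived-from-Base
      return 0;
    }
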
View File

@@ -26,7 +26,7 @@ using DataPtr = std::shared_ptr<void>;
 class StorageImpl;
 using Storage = std::shared_ptr<StorageImpl>;
 
-class StorageImpl {
+class CAFFE2_API StorageImpl {
  public:
   StorageImpl() = delete;
   StorageImpl(const StorageImpl&) = delete;

View File

@@ -882,7 +882,7 @@ class CAFFE2_API TensorImpl : public c10::intrusive_ptr_target {
   }
 };
 
-class UndefinedTensorImpl final : public TensorImpl {
+class CAFFE2_API UndefinedTensorImpl final : public TensorImpl {
   UndefinedTensorImpl() : TensorImpl(CPU){};
 
  public:

View File

@@ -1,5 +1,6 @@
 #pragma once
 
+#include "caffe2/core/common.h"
 #include "onnx/onnx_pb.h"
 
 #include <set>
@@ -13,7 +14,7 @@ using ::ONNX_NAMESPACE::AttributeProto;
 using ::ONNX_NAMESPACE::NodeProto;
 
 // \brief This class generates unique dummy names
-class DummyName {
+class CAFFE2_API DummyName {
  public:
   std::string NewDummyName();
@@ -70,7 +71,7 @@ inline AttributeProto MakeAttribute(
   return attr;
 }
 
-NodeProto MakeNode(
+CAFFE2_API NodeProto MakeNode(
     const std::string& type,
     const std::vector<std::string>& inputs,
     const std::vector<std::string>& outputs,

View File

@@ -46,7 +46,7 @@ public:
     return &OpDef;
   }
 
-  static bool classof(const Annotation *A) {
+  CAFFE2_API static bool classof(const Annotation *A) {
     return A->getKind() == AnnotationKind::Caffe2;
   }

View File

@@ -8,7 +8,7 @@ namespace opt {
 
 using namespace nom;
 
-void sinkMaxPool(nom::repr::NNModule* nn) {
+CAFFE2_EXPORT void sinkMaxPool(nom::repr::NNModule* nn) {
   for (auto max_pool_node :
        repr::nn::nodeIterator<repr::MaxPool>(nn->dataFlow)) {
     if (repr::nn::getInputs(max_pool_node).size() != 1) {

View File

@@ -16,7 +16,7 @@ using PredictorParameters = std::map<std::string, std::shared_ptr<Blob>>;
 /**
  * Stores parameters necessary for creating a PredictorInterface object.
  */
-struct PredictorConfig {
+struct CAFFE2_API PredictorConfig {
   // A map of parameter name to Tensor object. Predictor is supposed to
   // guarantee constness of all these Tensor objects.
   std::shared_ptr<PredictorParameters> parameters;
@@ -40,14 +40,14 @@ struct PredictorConfig {
   std::shared_ptr<Workspace> ws;
 };
 
-Workspace makeWorkspace(std::shared_ptr<PredictorParameters> parameters);
+CAFFE2_API Workspace makeWorkspace(std::shared_ptr<PredictorParameters> parameters);
 
-PredictorConfig makePredictorConfig(
+CAFFE2_API PredictorConfig makePredictorConfig(
     const MetaNetDef& net,
     Workspace* parent = nullptr,
     bool run_init = true);
 
-PredictorConfig makePredictorConfig(
+CAFFE2_API PredictorConfig makePredictorConfig(
     const NetDef& init_net,
     const NetDef& run_net,
     Workspace* parent = nullptr,

View File

@@ -253,8 +253,8 @@ struct TORCH_API Variable : public at::Tensor {
 //                            Variable::Impl
 //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-struct Variable::Impl : public at::TensorImpl {
-  TORCH_API explicit Impl(
+struct TORCH_API Variable::Impl : public at::TensorImpl {
+  explicit Impl(
       at::Tensor data,
       bool requires_grad = false,
       Edge gradient_edge = Edge());
@@ -367,7 +367,7 @@ struct Variable::Impl : public at::TensorImpl {
 /// same version_counter. The grad_fn field of the Variable may become stale
 /// due to in-place modifications of the shared data. Accesses should go
 /// through get_grad_fn(). All other fields are always valid.
-struct Variable::ViewImpl : public Variable::Impl {
+struct TORCH_API Variable::ViewImpl : public Variable::Impl {
   ViewImpl(Variable base, at::Tensor data, Edge gradient_edge);
 
 /// Gets the up-to-date grad_fn. If the shared data or base was modified, we

View File

@@ -13,7 +13,7 @@ template <typename T>
 using Shared = c10::intrusive_ptr<T>;
 
 // string
-struct ConstantString : c10::intrusive_ptr_target {
+struct TORCH_API ConstantString : c10::intrusive_ptr_target {
  private:
   const std::string str_;
 
  public:
@@ -34,7 +34,7 @@ struct ConstantString : c10::intrusive_ptr_target {
 // non-mutable list
 template<typename Elem>
-struct ConstantList : c10::intrusive_ptr_target {
+struct TORCH_API ConstantList : c10::intrusive_ptr_target {
  private:
   std::vector<Elem> elements_;
 
  public:
@@ -67,7 +67,7 @@ using DoubleList = ConstantList<double>;
 #define TORCH_FORALL_TAGS(_) \
   _(None) _(Tensor) _(Double) _(Int) _(Tuple) _(IntList) _(DoubleList) _(String) _(TensorList)
 
-struct IValue {
+struct TORCH_API IValue {
   IValue()
   : payload(0)
   , tag(Tag::None)
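
TORCH_FORALL_TAGS above is an X-macro over IValue's tag set: one list of tags expanded several times to generate the enum, the accessors, and so on. A small self-contained sketch of the technique (simplified; not the torch implementation):

    #include <iostream>

    // One list of tags, expanded twice: once to declare the enum,
    // once to stringify the names for printing.
    #define FORALL_TAGS(_) _(None) _(Tensor) _(Double) _(Int)

    enum class Tag {
    #define DEFINE_TAG(t) t,
      FORALL_TAGS(DEFINE_TAG)
    #undef DEFINE_TAG
    };

    const char* tagName(Tag tag) {
      switch (tag) {
    #define DEFINE_CASE(t) case Tag::t: return #t;
        FORALL_TAGS(DEFINE_CASE)
    #undef DEFINE_CASE
      }
      return "unknown";
    }

    int main() {
      std::cout << tagName(Tag::Double) << "\n"; // prints "Double"
      return 0;
    }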