// Mirror of https://github.com/zebrajr/pytorch.git (synced 2025-12-07 00:21:07 +01:00)
//
// Summary: Re-land https://github.com/pytorch/pytorch/pull/23030
// Pull Request resolved: https://github.com/pytorch/pytorch/pull/23209
// Differential Revision: D16440000
// Pulled By: zhangguanheng66
// fbshipit-source-id: e05683275522835a33d5a7e6d76b7e94774e4d98
#pragma once

// cuda_lazy_init() is always compiled, even for CPU-only builds.
// Thus, it does not live in the cuda/ folder.

namespace torch {
namespace utils {

// The INVARIANT is that this function MUST be called before you attempt
// to get a CUDA Type object from ATen, in any way. Here are some common
// ways that a Type object may be retrieved:
//
// - You call getNonVariableType or getNonVariableTypeOpt
// - You call toBackend() on a Type
//
// It's important to do this correctly, because if you forget to add it
// you'll get an oblique error message about "Cannot initialize CUDA without
// ATen_cuda library" if you try to use CUDA functionality from a CPU-only
// build, which is not good UX.
void cuda_lazy_init();

// NOTE(review): the name suggests this resets the lazy-init "has run" flag
// so that the next cuda_lazy_init() call re-initializes — confirm against
// the implementation in the corresponding .cpp.
void set_run_yet_variable_to_false();

} // namespace utils
} // namespace torch
|