[Environment Variable][6/N] Use thread-safe getenv functions (#140200)
Fixes #ISSUE_NUMBER
Pull Request resolved: https://github.com/pytorch/pytorch/pull/140200
Approved by: https://github.com/ezyang
This commit is contained in:
parent a2ac96cae0
commit 7d4f5f7508
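The hunks below all apply one pattern: raw std::getenv calls, which return a non-owning char* and need manual nullptr checks and strcmp, are replaced with the c10::utils environment helpers. A minimal sketch of that pattern, assuming (as the usages in this diff suggest) that c10::utils::get_env returns std::optional<std::string>, check_env returns std::optional<bool>, and has_env returns bool; the variable name MY_FLAG is purely illustrative:

#include <c10/util/env.h>

// Before: std::getenv is not required to be thread-safe with respect to
// concurrent environment modification, and needs null/strcmp handling:
//   const char* v = std::getenv("MY_FLAG");
//   bool enabled = (v != nullptr) && (strcmp(v, "1") == 0);

// After: value-semantics optional; comparing an empty optional with "1" is simply false.
bool my_flag_enabled() {
  static const auto v = c10::utils::get_env("MY_FLAG");  // std::optional<std::string> (assumed)
  return v == "1";
}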
@@ -7,6 +7,7 @@
 #include <ATen/core/grad_mode.h>
 #include <ATen/core/jit_type.h>
 #include <c10/macros/Macros.h>
+#include <c10/util/env.h>
 #include <c10/util/flat_hash_map.h>
 #include <c10/util/irange.h>
 #include <array>
@@ -45,9 +46,9 @@ static_assert(
     "getTypePtr<std::tuple<int64_t, int64_t>> not returning const ref!");
 
 TypeVerbosity type_verbosity() {
-  static const char* c_verbosity = std::getenv("PYTORCH_JIT_TYPE_VERBOSITY");
+  static const auto c_verbosity = c10::utils::get_env("PYTORCH_JIT_TYPE_VERBOSITY");
   static TypeVerbosity verbosity = c_verbosity ?
-      static_cast<TypeVerbosity>(std::stoi(c_verbosity)) : TypeVerbosity::Default;
+      static_cast<TypeVerbosity>(std::stoi(c_verbosity.value())) : TypeVerbosity::Default;
   return verbosity;
 }
 
@@ -3,6 +3,7 @@
 
 #include <c10/core/DeviceType.h>
 #include <c10/util/Exception.h>
+#include <c10/util/env.h>
 
 #if !defined(__s390x__) && !defined(__powerpc__)
 #include <cpuinfo.h>
@@ -24,20 +25,20 @@ static inline bool cpu_has_vxe()
 #endif
 
 static CPUCapability compute_cpu_capability() {
-  auto envar = std::getenv("ATEN_CPU_CAPABILITY");
-  if (envar) {
+  const auto envar = c10::utils::get_env("ATEN_CPU_CAPABILITY");
+  if (envar.has_value()) {
 #if defined(HAVE_VSX_CPU_DEFINITION)
-    if (strcmp(envar, "vsx") == 0) {
+    if (envar == "vsx") {
       return CPUCapability::VSX;
     }
 #elif defined(HAVE_ZVECTOR_CPU_DEFINITION)
-    if (strcmp(envar, "zvector") == 0) {
+    if (envar == "zvector") {
       return CPUCapability::ZVECTOR;
     }
 #elif defined(HAVE_SVE_CPU_DEFINITION)
     int sve_vl = cpuinfo_get_max_arm_sve_length(); //Returns maximum SVE VL supported by your HW.
 #ifdef HAVE_SVE256_CPU_DEFINITION
-    if (strcmp(envar, "sve256") == 0) {
+    if (envar == "sve256") {
       if (sve_vl == 256) {
         return CPUCapability::SVE256;
       }
@@ -47,20 +48,20 @@ static CPUCapability compute_cpu_capability() {
 #endif
 #else
 #ifdef HAVE_AVX512_CPU_DEFINITION
-    if (strcmp(envar, "avx512") == 0) {
+    if (envar == "avx512") {
       return CPUCapability::AVX512;
     }
 #endif
 #ifdef HAVE_AVX2_CPU_DEFINITION
-    if (strcmp(envar, "avx2") == 0) {
+    if (envar == "avx2") {
       return CPUCapability::AVX2;
     }
 #endif
 #endif
-    if (strcmp(envar, "default") == 0) {
+    if (envar == "default") {
       return CPUCapability::DEFAULT;
     }
-    TORCH_WARN("ignoring invalid value for ATEN_CPU_CAPABILITY: ", envar);
+    TORCH_WARN("ignoring invalid value for ATEN_CPU_CAPABILITY: ", envar.value());
   }
 
 #if !defined(__powerpc__) && !defined(__s390x__) && !defined(HAVE_SVE_CPU_DEFINITION)
@@ -40,16 +40,8 @@ namespace at::native {
 // Parse environment variable "TORCH_LINEAR_FLATTEN_3D"
 static inline bool parseLinearFlatten3d() {
-  // Uninitialized value
-  static int value = -1;
-  if (value == -1) {
-    const char* env_str = std::getenv("TORCH_LINEAR_FLATTEN_3D");
-    if (env_str != nullptr && strcmp(env_str, "1") == 0) {
-      value = 1;
-    } else {
-      value = 0;
-    }
-  }
-  return bool(value);
+  static auto value = c10::utils::check_env("TORCH_LINEAR_FLATTEN_3D");
+  return value.has_value() && value.value();
 }
 
 // `_flatten_nd_linear` flattens all but the last dimension of the input tensor
@@ -179,8 +179,8 @@ cuda::blas::GEMMAndBiasActivationEpilogue activation_to_gemm_and_blas_arg(Activa
 }
 
 static bool getDisableAddmmCudaLt() {
-  static const char* env_value = std::getenv("DISABLE_ADDMM_CUDA_LT");
-  if (env_value != nullptr && strcmp(env_value, "1") == 0) {
+  static const auto env_value = c10::utils::get_env("DISABLE_ADDMM_CUDA_LT");
+  if (env_value == "1") {
     return true;
   }
   return false;
@@ -1395,8 +1395,8 @@ std::string generate_reduction_code(
 // Acquires (possibly creating) the kernel cache directory
 std::optional<std::string> get_cache_dir() {
   // If the environment variable USE_TORCH_KERNEL_CACHE is set to "0" then no persistent cache is used
-  const char* uptkc = std::getenv("USE_PYTORCH_KERNEL_CACHE");
-  const bool use_kernel_cache = (uptkc == nullptr) ? true : std::strcmp(uptkc, "0");
+  const auto uptkc = c10::utils::get_env("USE_PYTORCH_KERNEL_CACHE");
+  const bool use_kernel_cache = (uptkc != "0");
 
   if (!use_kernel_cache) {
     return {};
@@ -1404,31 +1404,31 @@ std::optional<std::string> get_cache_dir() {
 
   // Cache path comes from PYTORCH_KERNEL_CACHE_PATH, then TEMP (Windows) or XDG_CACHE_HOME (Linux), then HOME environment variables
   std::string cache_dir;
-  char* ptkcp = std::getenv("PYTORCH_KERNEL_CACHE_PATH");
+  auto ptkcp = c10::utils::get_env("PYTORCH_KERNEL_CACHE_PATH");
   // Create kernel_cache_dir if needed as we do not want to create the base directory passed by the user
   std::string kernels_cache_dir = "";
-  if (ptkcp != nullptr) {
-    cache_dir = std::string(ptkcp);
+  if (ptkcp.has_value()) {
+    cache_dir = ptkcp.value();
   } else {
 #ifdef _WIN32
-    ptkcp = std::getenv("TEMP");
+    ptkcp = c10::utils::get_env("TEMP");
 #else
     // USES XDG_CACHE_HOME if it's set
-    ptkcp = std::getenv("XDG_CACHE_HOME");
+    ptkcp = c10::utils::get_env("XDG_CACHE_HOME");
 #endif
-    if (ptkcp != nullptr) {
+    if (ptkcp.has_value()) {
       kernels_cache_dir = "/torch/kernels";
-      cache_dir = std::string(ptkcp) + kernels_cache_dir;
+      cache_dir = ptkcp.value() + kernels_cache_dir;
     } else {
       // Falls back to HOME/.cache
-      ptkcp = std::getenv("HOME");
-      if (ptkcp == nullptr) {
+      ptkcp = c10::utils::get_env("HOME");
+      if (!ptkcp.has_value()) {
         TORCH_WARN_ONCE("No PYTORCH_KERNEL_CACHE_PATH or HOME environment variable set!",
                         " This disables kernel caching.");
         return {};
       } else {
         kernels_cache_dir = "/.cache/torch/kernels";
-        cache_dir = std::string(ptkcp) + kernels_cache_dir;
+        cache_dir = ptkcp.value() + kernels_cache_dir;
       }
     }
   }
@@ -1437,7 +1437,7 @@ std::optional<std::string> get_cache_dir() {
   const char* p_cache_dir = cache_dir.c_str();
   const bool cache_dir_exists = (access(p_cache_dir, F_OK) == 0);
   if (!cache_dir_exists) {
-    std::string s_ptkcp = std::string(ptkcp);
+    std::string s_ptkcp = ptkcp.value();
     if (!r_mkdir_with_base(s_ptkcp, kernels_cache_dir)) {
       TORCH_WARN_ONCE("Specified kernel cache directory could not be created! This disables kernel caching.",
                       " Specified directory is ", cache_dir, ".",
@@ -77,10 +77,10 @@ bool has_env(const char* name) noexcept {
 std::optional<bool> check_env(const char* name) {
   auto env_opt = get_env(name);
   if (env_opt.has_value()) {
-    if (*env_opt == "0") {
+    if (env_opt == "0") {
       return false;
     }
-    if (*env_opt == "1") {
+    if (env_opt == "1") {
       return true;
     }
     TORCH_WARN(
@@ -129,15 +129,15 @@ inline int getCvarInt(const std::vector<std::string>& env, int def) {
   * versions of a variable get higher priority than the latter
   * versions of the same variable */
   for (ssize_t i = static_cast<ssize_t>(env.size()) - 1; i >= 0; i--) {
-    char* val = std::getenv(env[i].c_str());
-    if (val == nullptr) {
+    const auto val = c10::utils::get_env(env[i].c_str());
+    if (!val.has_value()) {
       continue;
     } else if (i) {
       WARN_ENV_VAR_ONCE(env[i], env[0]);
     }
 
     try {
-      ret = std::stoi(val);
+      ret = std::stoi(val.value());
     } catch (std::exception&) {
       TORCH_CHECK(false, "Invalid value for environment variable: " + env[i]);
     }
@@ -3,6 +3,7 @@
 
 #include <c10/util/Exception.h>
 #include <c10/util/StringUtil.h>
+#include <c10/util/env.h>
 #include <c10/util/irange.h>
 #include <caffe2/serialize/versions.h>
 #include <torch/csrc/jit/api/function_impl.h>
@@ -47,12 +48,11 @@ bool reportSourceLocation(size_t file_size) {
   if (file_size < 512ull * 1024) {
     return true;
   }
-  const char* enable_env =
-      std::getenv("PYTORCH_JIT_ENABLE_LARGE_SOURCE_LOCATION");
+  const auto enable_env =
+      c10::utils::get_env("PYTORCH_JIT_ENABLE_LARGE_SOURCE_LOCATION");
   bool flag = true;
-  if (enable_env == nullptr || std::strcmp(enable_env, "0") == 0 ||
-      std::strcmp(enable_env, "FALSE") == 0 ||
-      std::strcmp(enable_env, "false") == 0) {
+  if (!enable_env.has_value() || enable_env == "0" || enable_env == "FALSE" ||
+      enable_env == "false") {
     flag = false;
   }
   return flag;
@@ -9,6 +9,7 @@
 #include <ATen/core/function.h>
 #include <c10/util/Exception.h>
 #include <c10/util/StringUtil.h>
+#include <c10/util/env.h>
 #include <torch/csrc/jit/api/function_impl.h>
 #include <torch/csrc/jit/frontend/error_report.h>
 #include <torch/csrc/jit/ir/ir.h>
@@ -32,8 +33,10 @@ class JitLoggingConfig {
   std::ostream* out;
 
   JitLoggingConfig() : out(&std::cerr) {
-    const char* jit_log_level = std::getenv("PYTORCH_JIT_LOG_LEVEL");
-    logging_levels.assign(jit_log_level == nullptr ? "" : jit_log_level);
+    const auto jit_log_level = c10::utils::get_env("PYTORCH_JIT_LOG_LEVEL");
+    if (jit_log_level.has_value()) {
+      logging_levels = jit_log_level.value();
+    }
 
     parse();
   }
@@ -1,13 +1,12 @@
-#include <cstdlib>
 #include <iomanip>
 #include <sstream>
 #include <string>
 #include <utility>
 #include <vector>
 
 #include <ATen/core/function.h>
 #include <c10/util/Exception.h>
 #include <c10/util/StringUtil.h>
+#include <c10/util/env.h>
 #include <torch/csrc/jit/api/function_impl.h>
 #include <torch/csrc/jit/jit_opt_limit.h>
 
@@ -49,14 +48,14 @@ static std::unordered_map<std::string, int64_t> parseJITOptLimitOption(
 }
 
 bool opt_limit(const char* pass_name) {
-  static const char* opt_limit = std::getenv("PYTORCH_JIT_OPT_LIMIT");
+  static const auto opt_limit = c10::utils::get_env("PYTORCH_JIT_OPT_LIMIT");
   // if nothing is provided, let's allow everything
-  if (!opt_limit) {
+  if (!opt_limit.has_value()) {
     return true;
   }
 
   static const std::unordered_map<std::string, int64_t> passes_to_opt_limits =
-      parseJITOptLimitOption(opt_limit);
+      parseJITOptLimitOption(opt_limit->c_str());
   std::string pass{pass_name};
   pass = c10::detail::StripBasename(pass);
   pass = c10::detail::ExcludeFileExtension(pass);
@@ -156,11 +156,11 @@ void setTensorExprFuserEnabled(bool val) {
 }
 
 bool tensorExprFuserEnabled() {
-  static const char* enable_c_str = std::getenv("PYTORCH_TENSOREXPR");
-  if (!enable_c_str) {
+  static const auto enable_opt = c10::utils::get_env("PYTORCH_TENSOREXPR");
+  if (!enable_opt.has_value()) {
     return texpr_fuser_enabled_;
   }
-  if (std::string(enable_c_str) == "0") {
+  if (enable_opt == "0") {
     return false;
   }
   return true;
@@ -1295,10 +1295,10 @@ class TensorExprFuser {
   // 'PYTORCH_TENSOREXPR_DONT_FUSE="clamp:mul:add"' disables fusion on
   // aten::clamp, aten::mul and aten::add.
   void parseTENotFuseOption() {
-    const char* option = std::getenv("PYTORCH_TENSOREXPR_DONT_FUSE");
+    const auto option = c10::utils::get_env("PYTORCH_TENSOREXPR_DONT_FUSE");
     std::stringstream in_ss;
-    if (option) {
-      in_ss << option;
+    if (option.has_value()) {
+      in_ss << option.value();
     }
 
     std::string line;
@@ -861,7 +861,7 @@ bool GraphExecutor::isOptimized() const {
 
 TORCH_API bool IsNewExecutorEnabled() {
   static const auto disable_new_executor =
-      std::getenv("TORCH_JIT_DISABLE_NEW_EXECUTOR");
+      c10::utils::has_env("TORCH_JIT_DISABLE_NEW_EXECUTOR");
   return getExecutorMode() && FLAGS_torch_jit_enable_new_executor &&
       !disable_new_executor;
 }
@@ -54,47 +54,49 @@ bool setFallbackAllowed(bool value) {
 }
 
 bool fallbackAllowed() {
-  static const char* enable_c_str = std::getenv("PYTORCH_TENSOREXPR_FALLBACK");
-  if (!enable_c_str) {
+  static const auto enable_opt =
+      c10::utils::get_env("PYTORCH_TENSOREXPR_FALLBACK");
+  if (!enable_opt.has_value()) {
     return fallback_allowed;
   }
-  if (std::string(enable_c_str) == "0") {
+  if (enable_opt == "0") {
     return false;
   }
   return true;
 }
 
 static bool fallbackEnforced() {
-  static const char* enable_c_str = std::getenv("PYTORCH_TENSOREXPR_FALLBACK");
+  static const auto enable_opt =
+      c10::utils::get_env("PYTORCH_TENSOREXPR_FALLBACK");
   if (tensorexpr::getTEGenerateBlockCode()) {
     return false;
   }
-  if (!enable_c_str) {
+  if (!enable_opt.has_value()) {
     return fallback_allowed;
   }
-  if (std::string(enable_c_str) == "2") {
+  if (enable_opt == "2") {
     return true;
   }
   return false;
 }
 
 static int64_t randomTransformsRequested() {
-  const char* enable_c_str =
-      std::getenv("PYTORCH_TENSOREXPR_RANDOM_TRANSFORM_SEED");
-  if (!enable_c_str) {
+  const auto enable_opt =
+      c10::utils::get_env("PYTORCH_TENSOREXPR_RANDOM_TRANSFORM_SEED");
+  if (!enable_opt.has_value()) {
     return 0;
   }
-  return std::stoi(std::string(enable_c_str));
+  return std::stoi(enable_opt.value());
 }
 
 #ifdef TORCH_ENABLE_LLVM
 static bool dontUseLLVMFlag() {
-  static const char* enable_c_str =
-      std::getenv("PYTORCH_TENSOREXPR_DONT_USE_LLVM");
-  if (!enable_c_str) {
+  static const auto enable_opt =
+      c10::utils::get_env("PYTORCH_TENSOREXPR_DONT_USE_LLVM");
+  if (!enable_opt) {
     return false;
   }
-  return std::string(enable_c_str) == "1";
+  return enable_opt == "1";
 }
 #endif
 
@@ -1,5 +1,6 @@
 #include <c10/macros/Macros.h>
 #include <c10/util/Exception.h>
+#include <c10/util/env.h>
 #include <torch/csrc/profiler/unwind/unwind.h>
 #include <torch/csrc/utils/cpp_stacktraces.h>
 
@@ -321,10 +322,10 @@ static std::string dladdr_lookup(void* addr) {
 
 struct Symbolizer {
   Symbolizer() {
-    auto envar = std::getenv("TORCH_ADDR2LINE_BINARY");
-    if (envar != nullptr) {
+    auto envar = c10::utils::get_env("TORCH_ADDR2LINE_BINARY");
+    if (envar.has_value()) {
       // currently we take user's input as is without checking
-      addr2line_binary_ = envar;
+      addr2line_binary_ = std::move(envar.value());
       TORCH_WARN("Use custom addr2line binary: ", addr2line_binary_);
     } else {
       addr2line_binary_ = "addr2line"; // default
@@ -379,7 +380,7 @@ struct Symbolizer {
 
  private:
   static constexpr int BLOCK = 1024;
-  const char* addr2line_binary_;
+  std::string addr2line_binary_;
   struct Entry {
     std::unique_ptr<Communicate> comm;
     std::vector<void*> queried;
@@ -394,12 +395,13 @@ struct Symbolizer {
     if (it == entries_.end()) {
       // NOLINTNEXTLINE(*-c-arrays*)
       const char* args[] = {
-          addr2line_binary_, "-C", "-f", "-e", name.c_str(), nullptr};
+          addr2line_binary_.c_str(), "-C", "-f", "-e", name.c_str(), nullptr};
       it = entries_
                .insert_or_assign(
                    name,
                    Entry{
-                       std::make_unique<Communicate>(addr2line_binary_, args),
+                       std::make_unique<Communicate>(
+                           addr2line_binary_.c_str(), args),
                        {}})
                .first;
     }
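For plain on/off flags the commit prefers c10::utils::check_env over get_env, as in the TORCH_LINEAR_FLATTEN_3D hunk above; the c10/util/env.cpp hunk shows it mapping "0" to false and "1" to true and warning on anything else. A small usage sketch under those assumptions, with MY_FEATURE a hypothetical variable name:

#include <c10/util/env.h>

// Treat an unset (or unrecognized) variable as "disabled".
static bool my_feature_enabled() {
  static const auto flag = c10::utils::check_env("MY_FEATURE");  // std::optional<bool> (assumed)
  return flag.has_value() && flag.value();
}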