Syncing nvfuser devel branch to upstream master. https://github.com/csarofeen/pytorch/

Code changes include:
- codegen improvements:
  1. remove unnecessary sync from redundant thread compute analysis
  2. symmetric API for BestEffortReplay
  3. support merge on trivial reductions
  4. Ampere async copy improvements
- bug fixes:
  1. vectorization bug fixes
  2. type inference patch: fixes upstream #81725
  3. segmenter bug fix with deterministic iteration ordering
- parser update:
  1. added leaky_relu
- scheduler:
  1. normalization scheduler cleanup
  2. simplify matmul scheduling with the new transform propagator
  3. merge all dimensions in the pointwise scheduler
  4. various gemm-related improvements
- debuggability:
  1. nsight compute support
  2. debug dump for InlinePropagator
  3. add `UnaryOpType::Print`

Squashed commits to WAR the GitHub API. Commits actually in this PR from the devel branch:

```
dfe02f3faed4c64477e5f5c678f21f33415d0195 Merge remote-tracking branch 'csarofeen/devel' into HEAD
16173732ecfafc4797e93c2449cfb778015a6c7a Add `TensorViewBuilder::shape(std::vector<Val*> shape)` (#1884)
7cfb7796bdcf055eb61d600b7b5c9df292950290 Merge pull request #1887 from csarofeen/upstream_merge_0803
3399f6de62061d30781de50ef1862bbfb1615173 Merge remote-tracking branch 'origin/viable/strict' into HEAD
01208f5bba3bc158d41ccbefa0ee2c5ceea7aedb Add `UnaryOpType::Print` which can be helpful for debugging (#1878)
0646522454aa715ef164c88a73fb8bdddc706805 Remove redundant TORCH_INTERNAL_ASSERT in lower_magic_zero.cpp (#1881)
7bc76aa219293a59e4166e258d76289fe13633ca Fix most inlined propagator for mismatched dims (#1875)
501f4aa270bf4dd47b0d2f4860bc6f23ebc32a38 Nonaffine swizzle formulation ep.2: Loop swizzle variant. (#1826)
d863d690f923047a85b5229a787118708f810741 Ampere async copy ep.2: circular buffering extension to support pipelined matmul operand load (#1827)
e0ae11a61c87cd998e88ddd79a496548171c31e0 Larger sized mma instructions to support full vectorization (#1824)
9bb4cf7a66b098f04c9d95a2d34ab2bceee151b3 fragment iteration to support fully unrolled mma ops (#1823)
a48270a18dc2d3accc2626758d14d5858ae55032 Merge all dims in pointwise scheduler (#1872)
172fb3673fb4aaf4c1e889922a4fc5c06cbd59f7 Make MostInlined and BestEffort inline propagation no longer assert replayed (#1868)
a64462a5ac2fcf57a177bf36b0f26c61a4e252a4 Allow trivial reduction to be merged (#1871)
440102bcda6eb1dcd42d5fa5aeab9d6b049956bc Symmetric API for BestEffortReplay (#1870)
d1caf330c08ea8002f7133ca655bbd5b28c4eb98 Some misc cleanups/refactor split out from #1854 (#1867)
1013eda50be38eac96c00ba781340ac199d5a136 Remove some welford specific logic. (#1864)
51589d36be5a101d06e641fe0400b39028b7cb81 Some cleanups on tests and heuristics params (#1866)
a6b3e70da5dee51dbc246347228ea21384e46ac3 Segmenter bug fix, and deterministic iteration ordering. (#1865)
1b665b9b5e562d6f0caba5e7319e83e5df64104f Add nullptr checks to IrBuilder (#1861)
1cd9451d7493f631c2837ba07c1ea93a74e83a15 Simplify matmul scheduling with the new transform propagator. (#1817)
bbc1fb9b8c454f557ab9fcf5b1c3cef9b9e136d0 Add leaky_relu operation (#1852)
e842a9bab5e9f7289b7ce33ee37a682b22373f49 Minor cleanup in pointwise scheduler (#1858)
9ee850ca2f7f51dd5269bffb1255e485f809282d Fix stringstream usage (#1857)
20a36c1e4f28c4ff9837e56784be2686d17435f3 Improve nsight compute support (#1855)
405910308301097297b55c34d560aab6a360e897 Remove debugging `true ||` from getPointwiseHeuristics (#1822)
01117bfe8fdfacdbfdcfba9a624cdf900fe044d4 Misc cleanup (#1853)
5cc64943dc381a568223140bce0f22163c01e29f Apply the magic-zero protection to each indexed domain individually for predicate indexing (#1846)
92e6f0207e3a89fe90fd5cd3ffc575dfd766ba00 Cleanup normalization scheduler (#1845)
db89c6591a2f21130599a93675e0615e55564e41 Type inference patch (#1848)
102fe93a4605ca465cda26ebaee4ba1af2026901 Add debug dump for InlinePropagator (#1847)
b7a4d93d375a6e2ddef483763c93ffddc62ec452 Redundant thread compute analysis to avoid un-necessary sync insertion (#1687)
942be5b256056d0e02877361b814ae6af32ca15f Upstream ci build fixes (#1842)
0b83645915029d67f9345aa4649b8c6f62b0061b Fix vectorization bug introduced in #1831 (#1840)
63630f1ae091180e541932a9d9dc598e0a9902dd Move MaxProducerPosUpdater into InlinePropagator::tearDown (#1825)
9135a963c01d97ba34b1a7d2f106e78a13fd6651 Fix transpose benchmark dtype (#1839)
2c9a6c02312d5bf4f83cde653b847b4f85849432 Add extra configurability to `parallelizeAllLike` (#1831)
```

RUN_TORCHBENCH: nvfuser

Differential Revision: [D38543000](https://our.internmc.facebook.com/intern/diff/D38543000)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/83067
Approved by: https://github.com/davidberard98
#include <torch/csrc/jit/codegen/cuda/utils.h>

#include <c10/util/string_view.h>

#include <cstdlib>
#include <iostream>
#include <sstream> // std::stringstream, used by debugPrint below
#include <unordered_map>
namespace torch {
namespace jit {
namespace fuser {
namespace cuda {

namespace {

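// Builds the table of debug-dump flags from the comma-separated
// PYTORCH_NVFUSER_DUMP environment variable. As a usage sketch (the script
// name is hypothetical):
//   PYTORCH_NVFUSER_DUMP=fusion_ir,cuda_kernel python my_script.py
// enables the FusionIr and CudaKernel dumps and leaves everything else off.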
auto parseDebugDumpOptions() {
  std::unordered_map<DebugDumpOption, bool> options_map = {
      {DebugDumpOption::FusionIr, false},
      {DebugDumpOption::FusionIrMath, false},
      {DebugDumpOption::KernelIr, false},
      {DebugDumpOption::ComputeAtMap, false},
      {DebugDumpOption::CudaKernel, false},
      {DebugDumpOption::CudaFull, false},
      {DebugDumpOption::CudaToFile, false},
      {DebugDumpOption::DebugInfo, false},
      {DebugDumpOption::LaunchParam, false},
      {DebugDumpOption::FusionSegments, false},
      {DebugDumpOption::FusionSegmenterLog, false},
      {DebugDumpOption::FusionArgs, false},
      {DebugDumpOption::KernelArgs, false},
      {DebugDumpOption::EffectiveBandwidth, false},
      {DebugDumpOption::FusionSegmentsDrawing, false},
      {DebugDumpOption::PrintPtxasLog, false},
      {DebugDumpOption::BufferReuseInfo, false},
      {DebugDumpOption::SchedulerDebug, false},
      {DebugDumpOption::ParallelDimensions, false},
      {DebugDumpOption::Halo, false},
      {DebugDumpOption::PerfDebugVerbose, false},
      {DebugDumpOption::TransformPropagator, false},
      {DebugDumpOption::InlinePropagator, false}};

  if (const char* dump_options = std::getenv("PYTORCH_NVFUSER_DUMP")) {
    c10::string_view options_view(dump_options);
    while (!options_view.empty()) {
      const auto end_pos = options_view.find_first_of(',');
      const auto token = options_view.substr(0, end_pos);
      if (token == "fusion_ir") {
        options_map[DebugDumpOption::FusionIr] = true;
      } else if (token == "fusion_ir_math") {
        options_map[DebugDumpOption::FusionIrMath] = true;
      } else if (token == "kernel_ir") {
        options_map[DebugDumpOption::KernelIr] = true;
      } else if (token == "ca_map") {
        options_map[DebugDumpOption::ComputeAtMap] = true;
      } else if (token == "cuda_kernel") {
        options_map[DebugDumpOption::CudaKernel] = true;
      } else if (token == "cuda_full") {
        options_map[DebugDumpOption::CudaFull] = true;
      } else if (token == "cuda_to_file") {
        options_map[DebugDumpOption::CudaToFile] = true;
      } else if (token == "debug_info") {
        options_map[DebugDumpOption::DebugInfo] = true;
      } else if (token == "launch_param") {
        options_map[DebugDumpOption::LaunchParam] = true;
      } else if (token == "segmented_fusion") {
        options_map[DebugDumpOption::FusionSegments] = true;
      } else if (token == "segmenter_logging") {
        options_map[DebugDumpOption::FusionSegmenterLog] = true;
      } else if (token == "fusion_args") {
        options_map[DebugDumpOption::FusionArgs] = true;
      } else if (token == "kernel_args") {
        options_map[DebugDumpOption::KernelArgs] = true;
      } else if (token == "dump_eff_bandwidth") {
        options_map[DebugDumpOption::EffectiveBandwidth] = true;
      } else if (token == "draw_segmented_fusion") {
        options_map[DebugDumpOption::FusionSegmentsDrawing] = true;
      } else if (token == "ptxas_verbose") {
        options_map[DebugDumpOption::PrintPtxasLog] = true;
      } else if (token == "buffer_reuse_verbose") {
        options_map[DebugDumpOption::BufferReuseInfo] = true;
      } else if (token == "scheduler_params") {
        options_map[DebugDumpOption::SchedulerDebug] = true;
      } else if (token == "parallel_dimensions") {
        options_map[DebugDumpOption::ParallelDimensions] = true;
      } else if (token == "halo") {
        options_map[DebugDumpOption::Halo] = true;
      } else if (token == "perf_debug_verbose") {
        options_map[DebugDumpOption::PerfDebugVerbose] = true;
      } else if (token == "transform_propagator") {
        options_map[DebugDumpOption::TransformPropagator] = true;
      } else if (token == "inline_propagator") {
        options_map[DebugDumpOption::InlinePropagator] = true;
      } else {
        TORCH_CHECK(
            false,
            "Invalid debug dump option: '",
            token,
            "'\nAvailable options:\n",
            "\tfusion_ir, fusion_ir_math, kernel_ir, ca_map, cuda_kernel, cuda_full,\n",
            "\tcuda_to_file, debug_info, launch_param, segmented_fusion, fusion_args,\n",
            "\tkernel_args, dump_eff_bandwidth, draw_segmented_fusion,\n",
            "\tscheduler_params, parallel_dimensions, buffer_reuse_verbose,\n",
            "\tptxas_verbose, halo, segmenter_logging, perf_debug_verbose,\n",
            "\ttransform_propagator, inline_propagator\n");
      }
      options_view = (end_pos != c10::string_view::npos)
          ? options_view.substr(end_pos + 1)
          : "";
    }
  }

  return options_map;
}

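// Same comma-separated scheme for PYTORCH_NVFUSER_DISABLE. For example,
// PYTORCH_NVFUSER_DISABLE=fma,index_hoist sets DisableOption::Fma (emitting a
// performance warning, since fmad is then disabled for nvrtc) and
// DisableOption::IndexHoist; unknown tokens abort via TORCH_CHECK.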
auto parseDisableOptions() {
  std::unordered_map<DisableOption, bool> options_map = {
      {DisableOption::ArchCheck, false},
      {DisableOption::Fallback, false},
      {DisableOption::Fma, false},
      {DisableOption::IndexHoist, false},
      {DisableOption::Nvtx, false},
      {DisableOption::PredicateElimination, false},
      {DisableOption::UnrollWithRng, false}};

  if (const char* dump_options = std::getenv("PYTORCH_NVFUSER_DISABLE")) {
    c10::string_view options_view(dump_options);
    while (!options_view.empty()) {
      const auto end_pos = options_view.find_first_of(',');
      const auto token = options_view.substr(0, end_pos);
      if (token == "arch_check") {
        options_map[DisableOption::ArchCheck] = true;
      } else if (token == "fallback") {
        options_map[DisableOption::Fallback] = true;
      } else if (token == "fma") {
        TORCH_WARN(
            "fmad is disabled for nvrtc, which could negatively affect performance. Try removing `fma` from env variable PYTORCH_NVFUSER_DISABLE for optimal performance.");
        options_map[DisableOption::Fma] = true;
      } else if (token == "index_hoist") {
        options_map[DisableOption::IndexHoist] = true;
      } else if (token == "nvtx") {
        options_map[DisableOption::Nvtx] = true;
      } else if (token == "predicate_elimination") {
        options_map[DisableOption::PredicateElimination] = true;
      } else if (token == "unroll_with_rng") {
        options_map[DisableOption::UnrollWithRng] = true;
      } else {
        TORCH_CHECK(
            false,
            "Invalid disable option: '",
            token,
            "'\nAvailable options:\n",
            "\tarch_check, fallback, fma, index_hoist, nvtx, predicate_elimination,\n",
            "\tunroll_with_rng\n");
      }
      options_view = (end_pos != c10::string_view::npos)
          ? options_view.substr(end_pos + 1)
          : "";
    }
  }

  return options_map;
}

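// And the same scheme again for PYTORCH_NVFUSER_ENABLE, which gates opt-in
// features; e.g. PYTORCH_NVFUSER_ENABLE=complex,kernel_profile sets
// EnableOption::Complex and EnableOption::KernelProfile.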
auto parseEnableOptions() {
  std::unordered_map<EnableOption, bool> options_map = {
      {EnableOption::Complex, false},
      {EnableOption::KernelProfile, false},
      {EnableOption::LinearDecomposition, false},
      {EnableOption::ConvDecomposition, false}};

  if (const char* dump_options = std::getenv("PYTORCH_NVFUSER_ENABLE")) {
    c10::string_view options_view(dump_options);
    while (!options_view.empty()) {
      const auto end_pos = options_view.find_first_of(',');
      const auto token = options_view.substr(0, end_pos);
      if (token == "complex") {
        options_map[EnableOption::Complex] = true;
      } else if (token == "kernel_profile") {
        options_map[EnableOption::KernelProfile] = true;
      } else if (token == "linear_decomposition") {
        options_map[EnableOption::LinearDecomposition] = true;
      } else if (token == "conv_decomposition") {
        options_map[EnableOption::ConvDecomposition] = true;
      } else {
        TORCH_CHECK(
            false,
            "Invalid enable option: '",
            token,
            "'\nAvailable options:\n",
            "\tcomplex, kernel_profile, linear_decomposition, conv_decomposition\n");
      }
      options_view = (end_pos != c10::string_view::npos)
          ? options_view.substr(end_pos + 1)
          : "";
    }
  }

  return options_map;
}

} // namespace

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-function"
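// Ad hoc debugging helper: prints a TensorType's symbolic sizes and stride
// properties to stdout. Dynamic sizes are shown as `s(<raw value>)`, where the
// raw value is the ShapeSymbol's underlying int64_t read via reinterpret_cast;
// unknown stride fields print as `?`.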
void debugPrint(const c10::TensorTypePtr& type) {
  std::stringstream sizes_s;
  if (auto sizes = type->symbolic_sizes().sizes()) {
    for (const auto& shape_symbol : *sizes) {
      if (shape_symbol.is_static()) {
        sizes_s << shape_symbol.static_size() << ", ";
      } else {
        sizes_s << "s(" << *reinterpret_cast<const int64_t*>(&shape_symbol)
                << "), ";
      }
    }
  } else {
    sizes_s << "no size available";
  }
  std::cout << "sizes:" << sizes_s.str() << std::endl;
  if (const auto& stride_properties = type->stride_properties().sizes()) {
    std::stringstream stride_s;
    std::stringstream index_s;
    std::stringstream contig_s;

    for (const auto& stride_property : *stride_properties) {
      if (stride_property.has_value() && stride_property->stride_.has_value()) {
        stride_s << *stride_property->stride_ << ", ";
      } else {
        stride_s << "?, ";
      }
      if (stride_property.has_value() &&
          stride_property->stride_index_.has_value()) {
        index_s << *stride_property->stride_index_ << ", ";
      } else {
        index_s << "?, ";
      }
      if (stride_property.has_value() &&
          stride_property->contiguous_.has_value()) {
        contig_s << *stride_property->contiguous_ << ", ";
      } else {
        contig_s << "?, ";
      }
    }
    std::cout << "stride: " << stride_s.str() << std::endl;
    std::cout << "stride index: " << index_s.str() << std::endl;
    std::cout << "contiguous: " << contig_s.str() << std::endl;
  } else {
    std::cout << "no stride properties available" << std::endl;
  }
}
#pragma clang diagnostic pop

bool is_zero_dim_tensor(const std::shared_ptr<c10::TensorType>& tensor_type) {
  return tensor_type && tensor_type->dim().has_value() &&
      tensor_type->dim().value() == 0;
}

bool is_zero_sized_tensor(const std::shared_ptr<c10::TensorType>& tensor_type) {
  auto opt_sizes = tensor_type->sizes().concrete_sizes();
  if (opt_sizes.has_value()) {
    auto sizes = opt_sizes.value();
    for (const auto& size : sizes) {
      if (size == 0) {
        return true;
      }
    }
  }
  return false;
}
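
// Note the distinction: a tensor of shape [0, 4] is zero-sized (an extent of
// zero makes numel() == 0) but not zero-dim, while a scalar tensor of shape []
// is zero-dim but has numel() == 1.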

bool is_cpu_scalar(const at::Tensor& tensor) {
  return tensor.device().is_cpu() && tensor.numel() == 1 && tensor.dim() == 0;
}

bool is_cpu_scalar(const c10::TensorType& tensor_type) {
  auto opt_device = tensor_type.device();
  auto opt_dim = tensor_type.dim();
  auto opt_numel = tensor_type.numel();
  return opt_device.has_value() && opt_device.value().is_cpu() &&
      opt_dim.has_value() && opt_numel.has_value() && opt_dim.value() == 0 &&
      opt_numel.value() == 1;
}

bool isDebugDumpEnabled(DebugDumpOption option) {
  const static auto dump_options = parseDebugDumpOptions();
  return dump_options.at(option);
}

bool isDisabled(DisableOption option) {
  const static auto options = parseDisableOptions();
  return options.at(option);
}

bool isEnabled(EnableOption option) {
  const static auto options = parseEnableOptions();
  return options.at(option);
}
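
// Note: each query above caches its parse in a function-local static, so the
// corresponding PYTORCH_NVFUSER_* environment variable is read once per
// process; changes made after the first query have no effect.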

bool useFallback() {
  // Keep this env var for compatibility
  const char* disable_fb_env = std::getenv("PYTORCH_NVFUSER_DISABLE_FALLBACK");
  bool fallback_disabled = disable_fb_env ? std::atoi(disable_fb_env) : false;
  fallback_disabled = fallback_disabled || isDisabled(DisableOption::Fallback);

  return !fallback_disabled;
}
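
// Two equivalent ways to turn the fallback path off: the legacy
// PYTORCH_NVFUSER_DISABLE_FALLBACK=1 or the newer
// PYTORCH_NVFUSER_DISABLE=fallback; either makes useFallback() return false.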

std::vector<int64_t> getTensorSizes(TensorTypePtr const& tensor_type) {
  TORCH_INTERNAL_ASSERT(tensor_type != nullptr, "Input must be a Tensor.");
  auto optional_sizes = tensor_type->sizes().concrete_sizes();
  TORCH_INTERNAL_ASSERT(
      optional_sizes.has_value(), "Missing size information for the tensor.");
  return optional_sizes.value();
}

} // namespace cuda
} // namespace fuser
} // namespace jit
} // namespace torch