mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-07 00:21:07 +01:00
ead4407f57
427 Commits
| Author | SHA1 | Message | Date | |
|---|---|---|---|---|
|
|
ead4407f57 |
[inductor] Fix loop split optimization (#135303)
Fixes https://github.com/pytorch/pytorch/issues/135274. Improves the check of whether the div expr matches by adding a check that `split_var` is in `original_body.iter_vars`. Pull Request resolved: https://github.com/pytorch/pytorch/pull/135303 Approved by: https://github.com/jgong5, https://github.com/leslie-fang-intel, https://github.com/jansel |
||
|
|
67c7924ea1 |
[inductor] Fix gen_transposed_tile_load_store (#135307)
The recent PR https://github.com/pytorch/pytorch/pull/131745 introduced new VLA logic in the cpp codegen, which fails to build on MSVC with `Compiler Error C2131`: https://learn.microsoft.com/en-us/cpp/error-messages/compiler-errors-1/compiler-error-c2131?view=msvc-170
Reproduce UT:
```cmd
pytest test\inductor\test_torchinductor_dynamic_shapes.py -v -k test_large_block_sizes_dynamic_shapes_cpu
```
Originally generated code:
```c++
alignas(16) float tmp1[static_cast<int64_t>(((-256LL)*(c10::div_floor_integer(static_cast<int64_t>(ks1), static_cast<int64_t>(16LL)))) + (16LL*ks1))];
```
Change: allocate a large-enough fixed-size buffer instead. Newly generated code:
```c++
alignas(16) float tmp1[16*16];
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/135307 Approved by: https://github.com/jgong5, https://github.com/jansel |
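As a side note (my own check, not from the PR): the fixed-size replacement is safe because the dynamic extent `16*ks1 - 256*(ks1 // 16)` equals `16 * (ks1 % 16)`, which never exceeds `16*16`:
```python
# Quick sanity check of the bound that justifies the fixed-size buffer.
for ks1 in range(10_000):
    dyn = 16 * ks1 - 256 * (ks1 // 16)
    assert dyn == 16 * (ks1 % 16) <= 16 * 16
```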
||
|
|
758d515d98 |
[Inductor][CPP] Select tiling factor for lower precision data types (#133830)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/133830 Approved by: https://github.com/jgong5, https://github.com/jansel |
||
|
|
52c7c89ea4 |
[Inductor][CPP] Leverage full bits for BF16/FP16 vectorization (#126502)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/126502 Approved by: https://github.com/jgong5, https://github.com/jansel |
||
|
|
05feb6e4ed |
[Inductor] support masked vectorization for the tail_loop for dynamic shapes (#131745)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/131745 Approved by: https://github.com/jgong5, https://github.com/leslie-fang-intel, https://github.com/jansel |
||
|
|
13a4a0c60d |
[Inductor] Apply loop split optimization in codegen_node (#132389)
This PR applies a loop-split optimization in codegen_node to avoid non-contiguous loads: when a vector is loaded non-contiguously because of a division in the index, we split the loop to eliminate the division.
Example:
```
import torch
import torch.nn as nn
class GNReLU(torch.nn.Module):
    def __init__(self, num_groups, num_channels):
        super(GNReLU, self).__init__()
        self.gn = nn.GroupNorm(num_groups, num_channels)
    def forward(self, x):
        return torch.nn.functional.relu(self.gn(x))
input = torch.randn(2, 960, 96, 96).to(memory_format=torch.channels_last)
m = GNReLU(32, 960).eval()
compiled_m = torch.compile(m)
with torch.no_grad():
    compiled_m(input)
```
Generated code:
- Before:
```
cpp_fused_native_group_norm_relu_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'const float*', 'float*', 'float*', 'float*'], '''
#include "/tmp/torchinductor_jiayisun/vu/cvuckxaygqfovv2zu2byqhcmiejbke7mdhf2rpgpr5mlscdev2hg.h"
extern "C" void kernel(const float* in_ptr0,
const float* in_ptr1,
const float* in_ptr2,
float* out_ptr0,
float* out_ptr1,
float* out_ptr2)
{
#pragma omp parallel num_threads(56)
{
int tid = omp_get_thread_num();
{
#pragma omp for collapse(2)
for(long x0=static_cast<long>(0L); x0<static_cast<long>(2L); x0+=static_cast<long>(1L))
{
for(long x1=static_cast<long>(0L); x1<static_cast<long>(32L); x1+=static_cast<long>(1L))
{
{
Welford<float> tmp_acc0 = Welford<float>();
Welford<at::vec::Vectorized<float>> tmp_acc0_vec = Welford<at::vec::Vectorized<float>>();
Welford<at::vec::Vectorized<float>> masked_tmp_acc0_vec = Welford<at::vec::Vectorized<float>>();
static WeightRecp<at::vec::Vectorized<float>> wrecps0(static_cast<long>(17280L));
for(long x2=static_cast<long>(0L); x2<static_cast<long>(9216L); x2+=static_cast<long>(1L))
{
for(long x3=static_cast<long>(0L); x3<static_cast<long>(16L); x3+=static_cast<long>(16L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(x3 + (30L*x1) + (960L*x2) + (8847360L*x0)), 16);
tmp_acc0_vec = welford_combine(tmp_acc0_vec, tmp0, &wrecps0);
}
for(long x3=static_cast<long>(16L); x3<static_cast<long>(30L); x3+=static_cast<long>(14L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(x3 + (30L*x1) + (960L*x2) + (8847360L*x0)), 14);
masked_tmp_acc0_vec = welford_combine(masked_tmp_acc0_vec, tmp0, 14, &wrecps0);
}
}
tmp_acc0 = welford_combine(tmp_acc0, welford_vec_reduce_all(masked_tmp_acc0_vec));
tmp_acc0 = welford_combine(tmp_acc0, welford_vec_reduce_all(tmp_acc0_vec));
out_ptr0[static_cast<long>(x1 + (32L*x0))] = static_cast<float>(tmp_acc0.mean);
out_ptr1[static_cast<long>(x1 + (32L*x0))] = static_cast<float>(tmp_acc0.m2);
}
}
}
}
{
#pragma omp for collapse(2)
for(long x0=static_cast<long>(0L); x0<static_cast<long>(2L); x0+=static_cast<long>(1L))
{
for(long x1=static_cast<long>(0L); x1<static_cast<long>(9216L); x1+=static_cast<long>(1L))
{
for(long x2=static_cast<long>(0L); x2<static_cast<long>(960L); x2+=static_cast<long>(16L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(x2 + (960L*x1) + (8847360L*x0)), 16);
auto tmp1 =
[&]
{
__at_align__ std::array<float, 16> tmpbuf;
#pragma GCC unroll 16
for (long x2_inner = 0; x2_inner < 16; x2_inner++)
{
tmpbuf[x2_inner] = out_ptr0[static_cast<long>((32L*x0) + (c10::div_floor_integer((x2 + x2_inner), 30L)))];
}
return at::vec::Vectorized<float>::loadu(tmpbuf.data(), 16);
}
()
;
auto tmp3 =
[&]
{
__at_align__ std::array<float, 16> tmpbuf;
#pragma GCC unroll 16
for (long x2_inner = 0; x2_inner < 16; x2_inner++)
{
tmpbuf[x2_inner] = out_ptr1[static_cast<long>((32L*x0) + (c10::div_floor_integer((x2 + x2_inner), 30L)))];
}
return at::vec::Vectorized<float>::loadu(tmpbuf.data(), 16);
}
()
;
auto tmp12 = at::vec::Vectorized<float>::loadu(in_ptr1 + static_cast<long>(x2), 16);
auto tmp14 = at::vec::Vectorized<float>::loadu(in_ptr2 + static_cast<long>(x2), 16);
auto tmp2 = tmp0 - tmp1;
auto tmp4 = static_cast<float>(276480.0);
auto tmp5 = at::vec::Vectorized<float>(tmp4);
auto tmp6 = tmp3 / tmp5;
auto tmp7 = static_cast<float>(1e-05);
auto tmp8 = at::vec::Vectorized<float>(tmp7);
auto tmp9 = tmp6 + tmp8;
auto tmp10 = tmp9.rsqrt();
auto tmp11 = tmp2 * tmp10;
auto tmp13 = tmp11 * tmp12;
auto tmp15 = tmp13 + tmp14;
auto tmp16 = at::vec::clamp_min(tmp15, decltype(tmp15)(0));
tmp16.store(out_ptr2 + static_cast<long>(x2 + (960L*x1) + (8847360L*x0)));
}
}
}
}
}
}
''')
async_compile.wait(globals())
del async_compile
def call(args):
arg2_1, = args
args.clear()
assert_size_stride(arg2_1, (2, 960, 96, 96), (8847360, 1, 92160, 960))
buf0 = empty_strided_cpu((2, 32, 1, 1), (32, 1, 64, 64), torch.float32)
buf1 = empty_strided_cpu((2, 32, 1, 1), (32, 1, 64, 64), torch.float32)
buf3 = empty_strided_cpu((2, 960, 96, 96), (8847360, 1, 92160, 960), torch.float32)
cpp_fused_native_group_norm_relu_0(arg2_1, _frozen_param3, _frozen_param2, buf0, buf1, buf3)
del arg2_1
return (buf3, )
```
- After:
```
cpp_fused_native_group_norm_relu_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'const float*', 'float*', 'float*', 'float*'], '''
#include "/tmp/torchinductor_jiayisun/vu/cvuckxaygqfovv2zu2byqhcmiejbke7mdhf2rpgpr5mlscdev2hg.h"
extern "C" void kernel(const float* in_ptr0,
const float* in_ptr1,
const float* in_ptr2,
float* out_ptr0,
float* out_ptr1,
float* out_ptr2)
{
#pragma omp parallel num_threads(56)
{
int tid = omp_get_thread_num();
{
#pragma omp for collapse(2)
for(long x0=static_cast<long>(0L); x0<static_cast<long>(2L); x0+=static_cast<long>(1L))
{
for(long x1=static_cast<long>(0L); x1<static_cast<long>(32L); x1+=static_cast<long>(1L))
{
{
Welford<float> tmp_acc0 = Welford<float>();
Welford<at::vec::Vectorized<float>> tmp_acc0_vec = Welford<at::vec::Vectorized<float>>();
Welford<at::vec::Vectorized<float>> masked_tmp_acc0_vec = Welford<at::vec::Vectorized<float>>();
static WeightRecp<at::vec::Vectorized<float>> wrecps0(static_cast<long>(17280L));
for(long x2=static_cast<long>(0L); x2<static_cast<long>(9216L); x2+=static_cast<long>(1L))
{
for(long x3=static_cast<long>(0L); x3<static_cast<long>(16L); x3+=static_cast<long>(16L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(x3 + (30L*x1) + (960L*x2) + (8847360L*x0)), 16);
tmp_acc0_vec = welford_combine(tmp_acc0_vec, tmp0, &wrecps0);
}
for(long x3=static_cast<long>(16L); x3<static_cast<long>(30L); x3+=static_cast<long>(14L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(x3 + (30L*x1) + (960L*x2) + (8847360L*x0)), 14);
masked_tmp_acc0_vec = welford_combine(masked_tmp_acc0_vec, tmp0, 14, &wrecps0);
}
}
tmp_acc0 = welford_combine(tmp_acc0, welford_vec_reduce_all(masked_tmp_acc0_vec));
tmp_acc0 = welford_combine(tmp_acc0, welford_vec_reduce_all(tmp_acc0_vec));
out_ptr0[static_cast<long>(x1 + (32L*x0))] = static_cast<float>(tmp_acc0.mean);
out_ptr1[static_cast<long>(x1 + (32L*x0))] = static_cast<float>(tmp_acc0.m2);
}
}
}
}
{
#pragma omp for collapse(2)
for(long x0=static_cast<long>(0L); x0<static_cast<long>(2L); x0+=static_cast<long>(1L))
{
for(long x1=static_cast<long>(0L); x1<static_cast<long>(9216L); x1+=static_cast<long>(1L))
{
#pragma GCC ivdep
for(long x2=static_cast<long>(0L); x2<static_cast<long>(32L); x2+=static_cast<long>(1L))
{
for(long x3=static_cast<long>(0L); x3<static_cast<long>(16L); x3+=static_cast<long>(16L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(x3 + (30L*x2) + (960L*x1) + (8847360L*x0)), 16);
auto tmp1 = out_ptr0[static_cast<long>(x2 + (32L*x0))];
auto tmp4 = out_ptr1[static_cast<long>(x2 + (32L*x0))];
auto tmp12 = at::vec::Vectorized<float>::loadu(in_ptr1 + static_cast<long>(x3 + (30L*x2)), 16);
auto tmp14 = at::vec::Vectorized<float>::loadu(in_ptr2 + static_cast<long>(x3 + (30L*x2)), 16);
auto tmp2 = at::vec::Vectorized<float>(tmp1);
auto tmp3 = tmp0 - tmp2;
auto tmp5 = static_cast<float>(276480.0);
auto tmp6 = tmp4 / tmp5;
auto tmp7 = static_cast<float>(1e-05);
auto tmp8 = decltype(tmp6)(tmp6 + tmp7);
auto tmp9 = 1 / std::sqrt(tmp8);
auto tmp10 = at::vec::Vectorized<float>(tmp9);
auto tmp11 = tmp3 * tmp10;
auto tmp13 = tmp11 * tmp12;
auto tmp15 = tmp13 + tmp14;
auto tmp16 = at::vec::clamp_min(tmp15, decltype(tmp15)(0));
tmp16.store(out_ptr2 + static_cast<long>(x3 + (30L*x2) + (960L*x1) + (8847360L*x0)));
}
for(long x3=static_cast<long>(16L); x3<static_cast<long>(30L); x3+=static_cast<long>(14L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(x3 + (30L*x2) + (960L*x1) + (8847360L*x0)), 14);
auto tmp1 = out_ptr0[static_cast<long>(x2 + (32L*x0))];
auto tmp4 = out_ptr1[static_cast<long>(x2 + (32L*x0))];
auto tmp12 = at::vec::Vectorized<float>::loadu(in_ptr1 + static_cast<long>(x3 + (30L*x2)), 14);
auto tmp14 = at::vec::Vectorized<float>::loadu(in_ptr2 + static_cast<long>(x3 + (30L*x2)), 14);
auto tmp2 = at::vec::Vectorized<float>(tmp1);
auto tmp3 = tmp0 - tmp2;
auto tmp5 = static_cast<float>(276480.0);
auto tmp6 = tmp4 / tmp5;
auto tmp7 = static_cast<float>(1e-05);
auto tmp8 = decltype(tmp6)(tmp6 + tmp7);
auto tmp9 = 1 / std::sqrt(tmp8);
auto tmp10 = at::vec::Vectorized<float>(tmp9);
auto tmp11 = tmp3 * tmp10;
auto tmp13 = tmp11 * tmp12;
auto tmp15 = tmp13 + tmp14;
auto tmp16 = at::vec::clamp_min(tmp15, decltype(tmp15)(0));
tmp16.store(out_ptr2 + static_cast<long>(x3 + (30L*x2) + (960L*x1) + (8847360L*x0)), 14);
}
}
}
}
}
}
}
''')
async_compile.wait(globals())
del async_compile
def call(args):
arg2_1, = args
args.clear()
assert_size_stride(arg2_1, (2, 960, 96, 96), (8847360, 1, 92160, 960))
buf0 = empty_strided_cpu((2, 32, 1, 1), (32, 1, 64, 64), torch.float32)
buf1 = empty_strided_cpu((2, 32, 1, 1), (32, 1, 64, 64), torch.float32)
buf3 = empty_strided_cpu((2, 960, 96, 96), (8847360, 1, 92160, 960), torch.float32)
cpp_fused_native_group_norm_relu_0(arg2_1, _frozen_param3, _frozen_param2, buf0, buf1, buf3)
del arg2_1
return (buf3, )
```
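The indexing effect of the split can be seen with a small sketch (mine, not from the PR): the original channel index `x2` in `[0, 960)` needs `x2 // 30` to find its group, whereas the split indices avoid any per-element division and keep the inner loads contiguous:
```python
# channels = 960, channels_per_group = 30, groups = 32 (as in the example above)
channels, group_size = 960, 30
for x2 in range(channels):
    x2g, x3 = divmod(x2, group_size)       # loop split: x2 = 30*x2g + x3
    assert x2 // group_size == x2g         # group index without a division in the inner loop
    assert x2 == group_size * x2g + x3     # flat offset is recovered linearly
```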
Pull Request resolved: https://github.com/pytorch/pytorch/pull/132389
Approved by: https://github.com/leslie-fang-intel, https://github.com/jansel
Co-authored-by: Jiong Gong <jiong.gong@intel.com>
|
||
|
|
f927bcb934 |
Revert "[Inductor] Apply loop split optimization in codegen_node (#132389)"
This reverts commit |
||
|
|
3cb5d25122 |
[Inductor] Apply loop split optimization in codegen_node (#132389)
This PR applies a loop-split optimization in codegen_node to avoid non-contiguous loads: when a vector is loaded non-contiguously because of a division in the index, we split the loop to eliminate the division.
Example:
```
import torch
import torch.nn as nn
class GNReLU(torch.nn.Module):
    def __init__(self, num_groups, num_channels):
        super(GNReLU, self).__init__()
        self.gn = nn.GroupNorm(num_groups, num_channels)
    def forward(self, x):
        return torch.nn.functional.relu(self.gn(x))
input = torch.randn(2, 960, 96, 96).to(memory_format=torch.channels_last)
m = GNReLU(32, 960).eval()
compiled_m = torch.compile(m)
with torch.no_grad():
    compiled_m(input)
```
Generated code:
- Before:
```
cpp_fused_native_group_norm_relu_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'const float*', 'float*', 'float*', 'float*'], '''
#include "/tmp/torchinductor_jiayisun/vu/cvuckxaygqfovv2zu2byqhcmiejbke7mdhf2rpgpr5mlscdev2hg.h"
extern "C" void kernel(const float* in_ptr0,
const float* in_ptr1,
const float* in_ptr2,
float* out_ptr0,
float* out_ptr1,
float* out_ptr2)
{
#pragma omp parallel num_threads(56)
{
int tid = omp_get_thread_num();
{
#pragma omp for collapse(2)
for(long x0=static_cast<long>(0L); x0<static_cast<long>(2L); x0+=static_cast<long>(1L))
{
for(long x1=static_cast<long>(0L); x1<static_cast<long>(32L); x1+=static_cast<long>(1L))
{
{
Welford<float> tmp_acc0 = Welford<float>();
Welford<at::vec::Vectorized<float>> tmp_acc0_vec = Welford<at::vec::Vectorized<float>>();
Welford<at::vec::Vectorized<float>> masked_tmp_acc0_vec = Welford<at::vec::Vectorized<float>>();
static WeightRecp<at::vec::Vectorized<float>> wrecps0(static_cast<long>(17280L));
for(long x2=static_cast<long>(0L); x2<static_cast<long>(9216L); x2+=static_cast<long>(1L))
{
for(long x3=static_cast<long>(0L); x3<static_cast<long>(16L); x3+=static_cast<long>(16L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(x3 + (30L*x1) + (960L*x2) + (8847360L*x0)), 16);
tmp_acc0_vec = welford_combine(tmp_acc0_vec, tmp0, &wrecps0);
}
for(long x3=static_cast<long>(16L); x3<static_cast<long>(30L); x3+=static_cast<long>(14L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(x3 + (30L*x1) + (960L*x2) + (8847360L*x0)), 14);
masked_tmp_acc0_vec = welford_combine(masked_tmp_acc0_vec, tmp0, 14, &wrecps0);
}
}
tmp_acc0 = welford_combine(tmp_acc0, welford_vec_reduce_all(masked_tmp_acc0_vec));
tmp_acc0 = welford_combine(tmp_acc0, welford_vec_reduce_all(tmp_acc0_vec));
out_ptr0[static_cast<long>(x1 + (32L*x0))] = static_cast<float>(tmp_acc0.mean);
out_ptr1[static_cast<long>(x1 + (32L*x0))] = static_cast<float>(tmp_acc0.m2);
}
}
}
}
{
#pragma omp for collapse(2)
for(long x0=static_cast<long>(0L); x0<static_cast<long>(2L); x0+=static_cast<long>(1L))
{
for(long x1=static_cast<long>(0L); x1<static_cast<long>(9216L); x1+=static_cast<long>(1L))
{
for(long x2=static_cast<long>(0L); x2<static_cast<long>(960L); x2+=static_cast<long>(16L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(x2 + (960L*x1) + (8847360L*x0)), 16);
auto tmp1 =
[&]
{
__at_align__ std::array<float, 16> tmpbuf;
#pragma GCC unroll 16
for (long x2_inner = 0; x2_inner < 16; x2_inner++)
{
tmpbuf[x2_inner] = out_ptr0[static_cast<long>((32L*x0) + (c10::div_floor_integer((x2 + x2_inner), 30L)))];
}
return at::vec::Vectorized<float>::loadu(tmpbuf.data(), 16);
}
()
;
auto tmp3 =
[&]
{
__at_align__ std::array<float, 16> tmpbuf;
#pragma GCC unroll 16
for (long x2_inner = 0; x2_inner < 16; x2_inner++)
{
tmpbuf[x2_inner] = out_ptr1[static_cast<long>((32L*x0) + (c10::div_floor_integer((x2 + x2_inner), 30L)))];
}
return at::vec::Vectorized<float>::loadu(tmpbuf.data(), 16);
}
()
;
auto tmp12 = at::vec::Vectorized<float>::loadu(in_ptr1 + static_cast<long>(x2), 16);
auto tmp14 = at::vec::Vectorized<float>::loadu(in_ptr2 + static_cast<long>(x2), 16);
auto tmp2 = tmp0 - tmp1;
auto tmp4 = static_cast<float>(276480.0);
auto tmp5 = at::vec::Vectorized<float>(tmp4);
auto tmp6 = tmp3 / tmp5;
auto tmp7 = static_cast<float>(1e-05);
auto tmp8 = at::vec::Vectorized<float>(tmp7);
auto tmp9 = tmp6 + tmp8;
auto tmp10 = tmp9.rsqrt();
auto tmp11 = tmp2 * tmp10;
auto tmp13 = tmp11 * tmp12;
auto tmp15 = tmp13 + tmp14;
auto tmp16 = at::vec::clamp_min(tmp15, decltype(tmp15)(0));
tmp16.store(out_ptr2 + static_cast<long>(x2 + (960L*x1) + (8847360L*x0)));
}
}
}
}
}
}
''')
async_compile.wait(globals())
del async_compile
def call(args):
arg2_1, = args
args.clear()
assert_size_stride(arg2_1, (2, 960, 96, 96), (8847360, 1, 92160, 960))
buf0 = empty_strided_cpu((2, 32, 1, 1), (32, 1, 64, 64), torch.float32)
buf1 = empty_strided_cpu((2, 32, 1, 1), (32, 1, 64, 64), torch.float32)
buf3 = empty_strided_cpu((2, 960, 96, 96), (8847360, 1, 92160, 960), torch.float32)
cpp_fused_native_group_norm_relu_0(arg2_1, _frozen_param3, _frozen_param2, buf0, buf1, buf3)
del arg2_1
return (buf3, )
```
- After:
```
cpp_fused_native_group_norm_relu_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'const float*', 'float*', 'float*', 'float*'], '''
#include "/tmp/torchinductor_jiayisun/vu/cvuckxaygqfovv2zu2byqhcmiejbke7mdhf2rpgpr5mlscdev2hg.h"
extern "C" void kernel(const float* in_ptr0,
const float* in_ptr1,
const float* in_ptr2,
float* out_ptr0,
float* out_ptr1,
float* out_ptr2)
{
#pragma omp parallel num_threads(56)
{
int tid = omp_get_thread_num();
{
#pragma omp for collapse(2)
for(long x0=static_cast<long>(0L); x0<static_cast<long>(2L); x0+=static_cast<long>(1L))
{
for(long x1=static_cast<long>(0L); x1<static_cast<long>(32L); x1+=static_cast<long>(1L))
{
{
Welford<float> tmp_acc0 = Welford<float>();
Welford<at::vec::Vectorized<float>> tmp_acc0_vec = Welford<at::vec::Vectorized<float>>();
Welford<at::vec::Vectorized<float>> masked_tmp_acc0_vec = Welford<at::vec::Vectorized<float>>();
static WeightRecp<at::vec::Vectorized<float>> wrecps0(static_cast<long>(17280L));
for(long x2=static_cast<long>(0L); x2<static_cast<long>(9216L); x2+=static_cast<long>(1L))
{
for(long x3=static_cast<long>(0L); x3<static_cast<long>(16L); x3+=static_cast<long>(16L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(x3 + (30L*x1) + (960L*x2) + (8847360L*x0)), 16);
tmp_acc0_vec = welford_combine(tmp_acc0_vec, tmp0, &wrecps0);
}
for(long x3=static_cast<long>(16L); x3<static_cast<long>(30L); x3+=static_cast<long>(14L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(x3 + (30L*x1) + (960L*x2) + (8847360L*x0)), 14);
masked_tmp_acc0_vec = welford_combine(masked_tmp_acc0_vec, tmp0, 14, &wrecps0);
}
}
tmp_acc0 = welford_combine(tmp_acc0, welford_vec_reduce_all(masked_tmp_acc0_vec));
tmp_acc0 = welford_combine(tmp_acc0, welford_vec_reduce_all(tmp_acc0_vec));
out_ptr0[static_cast<long>(x1 + (32L*x0))] = static_cast<float>(tmp_acc0.mean);
out_ptr1[static_cast<long>(x1 + (32L*x0))] = static_cast<float>(tmp_acc0.m2);
}
}
}
}
{
#pragma omp for collapse(2)
for(long x0=static_cast<long>(0L); x0<static_cast<long>(2L); x0+=static_cast<long>(1L))
{
for(long x1=static_cast<long>(0L); x1<static_cast<long>(9216L); x1+=static_cast<long>(1L))
{
#pragma GCC ivdep
for(long x2=static_cast<long>(0L); x2<static_cast<long>(32L); x2+=static_cast<long>(1L))
{
for(long x3=static_cast<long>(0L); x3<static_cast<long>(16L); x3+=static_cast<long>(16L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(x3 + (30L*x2) + (960L*x1) + (8847360L*x0)), 16);
auto tmp1 = out_ptr0[static_cast<long>(x2 + (32L*x0))];
auto tmp4 = out_ptr1[static_cast<long>(x2 + (32L*x0))];
auto tmp12 = at::vec::Vectorized<float>::loadu(in_ptr1 + static_cast<long>(x3 + (30L*x2)), 16);
auto tmp14 = at::vec::Vectorized<float>::loadu(in_ptr2 + static_cast<long>(x3 + (30L*x2)), 16);
auto tmp2 = at::vec::Vectorized<float>(tmp1);
auto tmp3 = tmp0 - tmp2;
auto tmp5 = static_cast<float>(276480.0);
auto tmp6 = tmp4 / tmp5;
auto tmp7 = static_cast<float>(1e-05);
auto tmp8 = decltype(tmp6)(tmp6 + tmp7);
auto tmp9 = 1 / std::sqrt(tmp8);
auto tmp10 = at::vec::Vectorized<float>(tmp9);
auto tmp11 = tmp3 * tmp10;
auto tmp13 = tmp11 * tmp12;
auto tmp15 = tmp13 + tmp14;
auto tmp16 = at::vec::clamp_min(tmp15, decltype(tmp15)(0));
tmp16.store(out_ptr2 + static_cast<long>(x3 + (30L*x2) + (960L*x1) + (8847360L*x0)));
}
for(long x3=static_cast<long>(16L); x3<static_cast<long>(30L); x3+=static_cast<long>(14L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(x3 + (30L*x2) + (960L*x1) + (8847360L*x0)), 14);
auto tmp1 = out_ptr0[static_cast<long>(x2 + (32L*x0))];
auto tmp4 = out_ptr1[static_cast<long>(x2 + (32L*x0))];
auto tmp12 = at::vec::Vectorized<float>::loadu(in_ptr1 + static_cast<long>(x3 + (30L*x2)), 14);
auto tmp14 = at::vec::Vectorized<float>::loadu(in_ptr2 + static_cast<long>(x3 + (30L*x2)), 14);
auto tmp2 = at::vec::Vectorized<float>(tmp1);
auto tmp3 = tmp0 - tmp2;
auto tmp5 = static_cast<float>(276480.0);
auto tmp6 = tmp4 / tmp5;
auto tmp7 = static_cast<float>(1e-05);
auto tmp8 = decltype(tmp6)(tmp6 + tmp7);
auto tmp9 = 1 / std::sqrt(tmp8);
auto tmp10 = at::vec::Vectorized<float>(tmp9);
auto tmp11 = tmp3 * tmp10;
auto tmp13 = tmp11 * tmp12;
auto tmp15 = tmp13 + tmp14;
auto tmp16 = at::vec::clamp_min(tmp15, decltype(tmp15)(0));
tmp16.store(out_ptr2 + static_cast<long>(x3 + (30L*x2) + (960L*x1) + (8847360L*x0)), 14);
}
}
}
}
}
}
}
''')
async_compile.wait(globals())
del async_compile
def call(args):
arg2_1, = args
args.clear()
assert_size_stride(arg2_1, (2, 960, 96, 96), (8847360, 1, 92160, 960))
buf0 = empty_strided_cpu((2, 32, 1, 1), (32, 1, 64, 64), torch.float32)
buf1 = empty_strided_cpu((2, 32, 1, 1), (32, 1, 64, 64), torch.float32)
buf3 = empty_strided_cpu((2, 960, 96, 96), (8847360, 1, 92160, 960), torch.float32)
cpp_fused_native_group_norm_relu_0(arg2_1, _frozen_param3, _frozen_param2, buf0, buf1, buf3)
del arg2_1
return (buf3, )
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/132389
Approved by: https://github.com/leslie-fang-intel, https://github.com/jansel
Co-authored-by: Jiong Gong <jiong.gong@intel.com>
|
||
|
|
f4641ca481 |
[Inductor] Remove VecChecker and fallback non-supported Vec op to Scalar impl with a for loop (#134569)
Fall back for non-vectorized ops to a scalar implementation plus a for loop.
Example code:
```
cpp_fused_igammac_0 = async_compile.cpp_pybinding(['const double*', 'const double*', 'double*'], '''
#include "/tmp/torchinductor_root/z4/cz4j2mmotlx3z2b7u4fbjtdt4x6plhd67ljwzg5bk7ekv4xz6y7q.h"
extern "C" void kernel(const double* in_ptr0,
const double* in_ptr1,
double* out_ptr0)
{
{
for(int64_t x0=static_cast<int64_t>(0L); x0<static_cast<int64_t>(48L); x0+=static_cast<int64_t>(8L))
{
auto tmp0 = at::vec::VectorizedN<double,2>::loadu(in_ptr0 + static_cast<int64_t>(x0), 8);
auto tmp1 = in_ptr1[static_cast<int64_t>(0L)];
auto tmp2 = at::vec::VectorizedN<double,2>(tmp1);
auto tmp3 =
[&]()
{
__at_align__ std::array<double, 8> tmpbuf0;
tmp0.store(tmpbuf0.data(), 8);
__at_align__ std::array<double, 8> tmpbuf1;
tmp2.store(tmpbuf1.data(), 8);
__at_align__ std::array<double, 8> tmpbuf_out;
for (int i = 0; i < 8; i++)
{
tmpbuf_out[i] = calc_igammac(tmpbuf0[i], tmpbuf1[i]);
}
return at::vec::VectorizedN<double, 2>::loadu(tmpbuf_out.data(), 8);
}
()
;
tmp3.store(out_ptr0 + static_cast<int64_t>(x0), 8);
}
#pragma omp simd simdlen(4)
for(int64_t x0=static_cast<int64_t>(48L); x0<static_cast<int64_t>(50L); x0+=static_cast<int64_t>(1L))
{
auto tmp0 = in_ptr0[static_cast<int64_t>(x0)];
auto tmp1 = in_ptr1[static_cast<int64_t>(0L)];
auto tmp2 = calc_igammac(tmp0, tmp1);
out_ptr0[static_cast<int64_t>(x0)] = tmp2;
}
}
}
''')
```
`frexp` is difficult to handle with the common `fallback` since it returns two `cse_var`s
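A minimal Python repro sketch that should exercise this scalar-fallback path (my sketch; shapes chosen to match the generated kernel above, and `torch.special.gammaincc` is assumed to lower to `calc_igammac`):
```python
import torch

fn = torch.compile(lambda a, b: torch.special.gammaincc(a, b))
a = torch.rand(50, dtype=torch.float64) + 0.5   # 50 elements, as in the kernel above
b = torch.rand(1, dtype=torch.float64) + 0.5    # scalar-like second operand
fn(a, b)
```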
|
||
|
|
3775fc982d |
[Inductor][CPP] Fix Index name error (#134645)
**Summary** Fix the comment: https://github.com/pytorch/pytorch/pull/122961#issuecomment-2313930242. For all of the cases we see in the 3 test suites (TorchBench, TIMM, HuggingFace) we expect:
* `_node` is an FX node with target in ["index_expr", "load", "store"]
* `_node.args[1 if _node.target == "index_expr" else 2]` is another FX node with target `get_index`
* `_node.args[1 if _node.target == "index_expr" else 2].args[0]` is a str holding the name of this index expression
This does not hold in some FB-internal test cases, per the failure log posted in the above link, so this PR adds the condition check to work around it (see the sketch after this entry). Pull Request resolved: https://github.com/pytorch/pytorch/pull/134645 Approved by: https://github.com/jgong5, https://github.com/masnesral |
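A hedged Python sketch of the defensive check described above (not the actual Inductor code; `_node` is assumed to be a `torch.fx.Node`-like object):
```python
def extract_index_name(_node):
    # Only trust the argument structure when the expected FX pattern is present.
    if getattr(_node, "target", None) not in ("index_expr", "load", "store"):
        return None
    arg_pos = 1 if _node.target == "index_expr" else 2
    if len(_node.args) <= arg_pos:
        return None
    get_index = _node.args[arg_pos]
    if getattr(get_index, "target", None) != "get_index" or not get_index.args:
        return None
    name = get_index.args[0]
    return name if isinstance(name, str) else None
```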
||
|
|
cccb121d4e |
[Inductor] add inductor config: masked_vec (#134566)
This PR adds the inductor config `masked_vec` to enable/disable masked vectorization for the tail_loop; it is enabled by default. Pull Request resolved: https://github.com/pytorch/pytorch/pull/134566 Approved by: https://github.com/jgong5, https://github.com/jansel |
||
|
|
1ff226d88c |
[inductor] support vec for atomic add (#131314)
Depends on https://github.com/pytorch/pytorch/pull/130827 to have the correct `index_expr` dtype. Support vec for atomic add via a scalar implementation.
Test Plan:
```
python test/inductor/test_cpu_repro.py -k test_scatter_using_atomic_add_vec
```
Generated code for `test_scatter_using_atomic_add_vec`:
```
cpp_fused_scatter_0 = async_compile.cpp_pybinding(['const float*', 'const int64_t*', 'const float*', 'float*'], '''
#include "/tmp/torchinductor_root/nn/cnnpkaxivwaa5rzng6qsyc4ao42vschogi3yk33ukwv3emlvxeqq.h"
extern "C" void kernel(const float* in_ptr0,
const int64_t* in_ptr1,
const float* in_ptr2,
float* out_ptr0)
{
{
for(long x0=static_cast<long>(0L); x0<static_cast<long>(16L); x0+=static_cast<long>(16L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(x0), 16);
tmp0.store(out_ptr0 + static_cast<long>(x0));
}
#pragma omp simd simdlen(8)
for(long x0=static_cast<long>(16L); x0<static_cast<long>(25L); x0+=static_cast<long>(1L))
{
auto tmp0 = in_ptr0[static_cast<long>(x0)];
out_ptr0[static_cast<long>(x0)] = tmp0;
}
}
{
for(long x0=static_cast<long>(0L); x0<static_cast<long>(16L); x0+=static_cast<long>(16L))
{
auto tmp0 = at::vec::VectorizedN<int64_t,2>::loadu(in_ptr1 + static_cast<long>(x0), 16);
auto tmp12 = at::vec::Vectorized<float>::loadu(in_ptr2 + static_cast<long>(x0), 16);
auto tmp1 = 25L;
auto tmp2 = c10::convert<int64_t>(tmp1);
auto tmp3 = at::vec::VectorizedN<int64_t,2>(tmp2);
auto tmp4 = tmp0 + tmp3;
auto tmp5 = static_cast<int64_t>(0);
auto tmp6 = at::vec::VectorizedN<int64_t,2>(tmp5);
auto tmp7 = at::vec::VecMask<int64_t,2>(tmp0 < tmp6);
auto tmp8 = decltype(tmp4)::blendv(tmp0, tmp4, tmp7.template cast<int64_t,2>());
auto tmp9 =
[&]
{
__at_align__ std::array<int64_t, 16> tmpbuf;
tmp8.store(tmpbuf.data());
return tmpbuf;
}
()
;
auto tmp10 =
[&]
{
__at_align__ std::array<int64_t, 16> tmpbuf;
#pragma GCC unroll 16
for (long x0_inner = 0; x0_inner < 16; x0_inner++)
{
tmpbuf[x0_inner] = static_cast<long>(tmp9[x0_inner]);
}
return at::vec::VectorizedN<int64_t,2>::loadu(tmpbuf.data(), 16);
}
()
;
TORCH_CHECK((at::vec::VecMask<int64_t,2>((at::vec::VectorizedN<int64_t,2>(0) <= tmp10) & (tmp10 < at::vec::VectorizedN<int64_t,2>(25L)))).all_masked(), "index out of bounds: 0 <= tmp10 < 25L");
atomic_add_vec(out_ptr0, tmp8, tmp12);
}
#pragma omp simd simdlen(8)
for(long x0=static_cast<long>(16L); x0<static_cast<long>(20L); x0+=static_cast<long>(1L))
{
auto tmp0 = in_ptr1[static_cast<long>(x0)];
auto tmp9 = in_ptr2[static_cast<long>(x0)];
auto tmp1 = 25L;
auto tmp2 = c10::convert<int64_t>(tmp1);
auto tmp3 = decltype(tmp0)(tmp0 + tmp2);
auto tmp4 = tmp0 < 0;
auto tmp5 = tmp4 ? tmp3 : tmp0;
auto tmp6 = tmp5;
auto tmp7 = c10::convert<int64_t>(tmp6);
TORCH_CHECK((0 <= tmp7) & (tmp7 < 25L), "index out of bounds: 0 <= tmp7 < 25L");
atomic_add(&out_ptr0[static_cast<long>(tmp5)], static_cast<float>(tmp9));
}
}
}
''')
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/131314 Approved by: https://github.com/jgong5, https://github.com/leslie-fang-intel |
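A repro sketch in the spirit of `test_scatter_using_atomic_add_vec` (my own; shapes chosen to match the generated kernel above, and whether the atomic-add path is actually taken depends on the scheduler):
```python
import torch

def fn(base, index, src):
    # Overlapping indices are what require atomic adds once the scatter is parallelized.
    return torch.scatter_add(base, 0, index, src)

compiled = torch.compile(fn)
base = torch.zeros(25)
index = torch.randint(0, 25, (20,))
src = torch.randn(20)
compiled(base, index, src)
```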
||
|
|
aa9f4cc733 |
[Inductor][CPP] Support vectorization of remainder (#129849)
**Summary** When checking the vectorization status across the 3 test suites, we found some operators disabled vectorization with the message `Disabled vectorization: op: remainder`. In this PR, we add vectorization support for this op.
**Test Plan**
```
python -u -m pytest -s -v test/inductor/test_cpu_repro.py -k test_vec_remainder
python -u -m pytest -s -v test/inductor/test_cpu_repro.py -k test_int_div_vec
```
Differential Revision: [D61147014](https://our.internmc.facebook.com/intern/diff/D61147014) Pull Request resolved: https://github.com/pytorch/pytorch/pull/129849 Approved by: https://github.com/jgong5, https://github.com/lezcano |
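A minimal repro sketch for the newly vectorized op (mine, not from the PR):
```python
import torch

fn = torch.compile(lambda a, b: torch.remainder(a, b))
fn(torch.randn(64), torch.randn(64))                            # float remainder
fn(torch.randint(-10, 10, (64,)), torch.randint(1, 10, (64,)))  # integer remainder
```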
||
|
|
80846caa8c |
[inductor] fix dynamic size array(vla) build error on msvc v4 (#134221)
MSVC doesn't support dynamic arrays (VLAs). Ref: https://stackoverflow.com/questions/56555406/creating-dynamic-sized-array-using-msvc-c-compiler
We tried these solutions:
1. Use std::vector instead, in a previous PR: https://github.com/pytorch/pytorch/pull/134140, but it changed the variable's type and failed UTs.
2. Use `std::unique_ptr` instead, in PR: https://github.com/pytorch/pytorch/pull/134156; @jansel reviewed and commented: https://github.com/pytorch/pytorch/pull/134156#pullrequestreview-2253091693. That makes sense: allocating memory may make the code run slower.
3. Use a fixed-size array instead, in PR: https://github.com/pytorch/pytorch/pull/134210; a fixed size cannot handle the situation where the reserved size is smaller than the CPU count.
> a. Using a min() limit failed the local test: https://github.com/pytorch/pytorch/pull/134210#issuecomment-2304447729
> b. Dynamically selecting a fixed-size or dynamic array: https://github.com/pytorch/pytorch/pull/134210#issuecomment-2304128666 . It makes the code too complex to maintain.
Discussed with the original PR (https://github.com/pytorch/pytorch/pull/115620) author @zhuhaozhe; we think:
1. MSVC is the only compiler that does not support VLAs.
2. MSVC has worse performance than the other compilers anyway, so use `std::unique_ptr` for MSVC and make it work.
3. For other compilers, keep the current `VLA` code.
4. Windows users can use `clang-cl` or `icx` to get better performance than MSVC.
5. Discussed with @jansel: we need to move the compiler check to the Python side and make the output code cleaner.
Reproduce UT:
```cmd
pytest test/inductor/test_cpu_repro.py -v -k test_reduction_with_dynamic_threads
```
Error msg:
```cmd
C:/Users/Xuhan/AppData/Local/Temp/tmpncykej5v/a4/ca4534cazplidnf7vopaaxaifqkjiyhxm3h2gsylgztputbaeybx.cpp(13): error C2131: expression did not evaluate to a constant
C:/Users/Xuhan/AppData/Local/Temp/tmpncykej5v/a4/ca4534cazplidnf7vopaaxaifqkjiyhxm3h2gsylgztputbaeybx.cpp(13): note: failure was caused by a read of a variable outside its lifetime
C:/Users/Xuhan/AppData/Local/Temp/tmpncykej5v/a4/ca4534cazplidnf7vopaaxaifqkjiyhxm3h2gsylgztputbaeybx.cpp(13): note: see usage of 'max_threads'
C:/Users/Xuhan/AppData/Local/Temp/tmpncykej5v/a4/ca4534cazplidnf7vopaaxaifqkjiyhxm3h2gsylgztputbaeybx.cpp(16): error C3863: array type 'float [max_threads]' is not assignable
```
Generated code:
```c++
#include "C:/Users/Xuhan/AppData/Local/Temp/tmpt6mxcjzi/j2/cj22tgrdgh42wbunl7gdptg2lintcziox2kmr7rdbcc6n2njrhgx.h"
extern "C" __declspec(dllexport) void kernel(const float* in_ptr0,
const float* in_ptr1,
float* out_ptr0,
float* out_ptr1)
{
{
{
float tmp_acc0 = 0;
at::vec::Vectorized<float> tmp_acc0_vec = at::vec::Vectorized<float>(0);
int max_threads = omp_get_max_threads();
float tmp_acc0_arr[max_threads];
for (int tid = 0; tid < max_threads; tid++)
{
tmp_acc0_arr[tid] = 0;
}
at::vec::Vectorized<float> tmp_acc0_vec_arr[max_threads];
for (int tid = 0; tid < max_threads; tid++)
{
tmp_acc0_vec_arr[tid] = at::vec::Vectorized<float>(0);
}
#pragma omp parallel
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/134221 Approved by: https://github.com/zhuhaozhe, https://github.com/jansel |
||
|
|
49b9f2d8b0 |
[inductor] fix signbit build fail on Windows. (#134229)
Reproduce UT:
```cmd
pytest test/inductor/test_torchinductor.py -v -k test_randint_int64_mod_cpu
```
Error message:
```cmd
cl : Command line warning D9025 : overriding '/openmp' with '/openmp:experimental'
c6airoloxwj4prmlejdyo5ybp43xa2yo5rbnpk4ttw3oifu6noor.cpp
C:/Users/Xuhan/AppData/Local/Temp/tmpx1fj2bd4/6a/c6airoloxwj4prmlejdyo5ybp43xa2yo5rbnpk4ttw3oifu6noor.cpp(23): error C2668: 'signbit': ambiguous call to overloaded function
C:\Program Files (x86)\Windows Kits\10\include\10.0.22000.0\ucrt\corecrt_math.h(309): note: could be 'bool signbit(float) noexcept'
C:\Program Files (x86)\Windows Kits\10\include\10.0.22000.0\ucrt\corecrt_math.h(314): note: or 'bool signbit(double) noexcept'
C:\Program Files (x86)\Windows Kits\10\include\10.0.22000.0\ucrt\corecrt_math.h(319): note: or 'bool signbit(long double) noexcept'
C:/Users/Xuhan/AppData/Local/Temp/tmpx1fj2bd4/6a/c6airoloxwj4prmlejdyo5ybp43xa2yo5rbnpk4ttw3oifu6noor.cpp(23): note: while trying to match the argument list '(__int64)'
C:/Users/Xuhan/AppData/Local/Temp/tmpx1fj2bd4/6a/c6airoloxwj4prmlejdyo5ybp43xa2yo5rbnpk4ttw3oifu6noor.cpp(24): error C2668: 'signbit': ambiguous call to overloaded function
C:\Program Files (x86)\Windows Kits\10\include\10.0.22000.0\ucrt\corecrt_math.h(309): note: could be 'bool signbit(float) noexcept'
C:\Program Files (x86)\Windows Kits\10\include\10.0.22000.0\ucrt\corecrt_math.h(314): note: or 'bool signbit(double) noexcept'
C:\Program Files (x86)\Windows Kits\10\include\10.0.22000.0\ucrt\corecrt_math.h(319): note: or 'bool signbit(long double) noexcept'
C:/Users/Xuhan/AppData/Local/Temp/tmpx1fj2bd4/6a/c6airoloxwj4prmlejdyo5ybp43xa2yo5rbnpk4ttw3oifu6noor.cpp(24): note: while trying to match the argument list '(int64_t)'
```
Generated code:
```c++
#include "C:/Users/Xuhan/AppData/Local/Temp/tmpcjnxnvkl/4f/c4ff4q4pxgo3yprbo2nkfopkt3qgi6rmptfpgpl2iylgtunvizwn.h"
extern "C" __declspec(dllexport) void kernel(const int64_t* in_ptr0,
int64_t* out_ptr0)
{
#pragma omp parallel num_threads(8)
{
int tid = omp_get_thread_num();
{
#pragma omp for
for(int64_t x0=static_cast<int64_t>(0LL); x0<static_cast<int64_t>(20LL); x0+=static_cast<int64_t>(1LL))
{
auto tmp0 = in_ptr0[static_cast<int64_t>(0LL)];
auto tmp1 = x0;
auto tmp2 = c10::convert<int32_t>(tmp1);
auto tmp3 = static_cast<int64_t>(-5);
auto tmp4 = static_cast<int64_t>(5);
auto tmp5 = randint64_cpu(tmp0, tmp2, tmp3, tmp4);
auto tmp6 = static_cast<int64_t>(10);
auto tmp7 = mod(tmp5, tmp6);
auto tmp8 = static_cast<int32_t>(0);
auto tmp9 = tmp7 != tmp8;
auto tmp10 = std::signbit(tmp7);
auto tmp11 = std::signbit(tmp6);
auto tmp12 = tmp10 != tmp11;
auto tmp13 = tmp9 & tmp12;
auto tmp14 = decltype(tmp7)(tmp7 + tmp6);
auto tmp15 = tmp13 ? tmp14 : tmp7;
out_ptr0[static_cast<int64_t>(x0)] = tmp15;
}
}
}
}
```
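For reference, the `mod`/`signbit` sequence above implements floored-modulo semantics (the result's sign follows the divisor): compute a truncated remainder, then add the divisor when the remainder is nonzero and its sign differs from the divisor's. A plain-Python sketch of the same correction (mine, for illustration):
```python
import math

def floored_mod(a: int, b: int) -> int:
    r = math.fmod(a, b)                 # truncated remainder, sign follows the dividend
    if r != 0 and (r < 0) != (b < 0):   # the signbit comparison in the generated code
        r += b
    return int(r)

assert floored_mod(-3, 10) == -3 % 10 == 7
```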
Fixed by casting the `std::signbit` argument to `long double`: https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/signbit?view=msvc-170
Local test passed:
![local test result](https://github.com/user-attachments/assets/e4467256-a068-40ef-a6ff-19b442e9116d)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/134229
Approved by: https://github.com/jansel
|
||
|
|
2bffbe06bd |
[Inductor][CPP] Support vectorization of load_seed and randn (#130317)
**Summary** Enable the vectorization of `load_seed` and `randn`. For now, `randn` is using the reference implementation.
**Test Plan**
```
python -u -m pytest -s -v test/inductor/test_cpu_repro.py -k test_vec_randn
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/130317 Approved by: https://github.com/jgong5 ghstack dependencies: #122961 |
||
|
|
313bc11963 |
[inductor][cpp] complete vectorization for int32/int64 (#122961)
**Summary** Implement complete vectorization of `index_expr` functionally. We also add performance heuristics to resolve the regressions posted at https://github.com/pytorch/pytorch/pull/122961#issuecomment-2041336265 by disabling vectorization of specific (Fused) scheduler nodes (see the sketch after this entry):
- Heuristic 1: when the number of non-contiguous `index_expr/load/store` ops exceeds a threshold, we disable vectorization.
- Heuristic 2: when the total number of elements along the vec dim is less than `tiling_factor/2`, we disable vectorization.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/122961 Approved by: https://github.com/jansel Co-authored-by: leslie-fang-intel <leslie.fang@intel.com> |
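An illustrative sketch of how such heuristics could be expressed (the threshold value and names are my assumptions, not the actual Inductor implementation):
```python
# Pseudo-heuristic sketch for the two rules above; `non_contig_threshold` is assumed.
def allow_vectorization(num_non_contig_ops, total_elems_vec_dim, tiling_factor,
                        non_contig_threshold=8):
    if num_non_contig_ops > non_contig_threshold:        # Heuristic 1
        return False
    if total_elems_vec_dim < tiling_factor // 2:         # Heuristic 2
        return False
    return True
```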
||
|
|
76b0284744 |
Revert "[inductor][cpp] complete vectorization for int32/int64 (#122961)"
This reverts commit |
||
|
|
318d3b39c4 |
Revert "[Inductor][CPP] Support vectorization of load_seed and randn (#130317)"
This reverts commit |
||
|
|
42e61c783c |
[Inductor][CPP] Align Half load with BFloat16 load (#132011)
Remove `static_cast<float>` for Half load to align with BFloat16.
Before:
```
extern "C" void kernel(const half* in_ptr0,
half* out_ptr0)
{
{
#pragma GCC ivdep
for(long x0=static_cast<long>(0L); x0<static_cast<long>(20L); x0+=static_cast<long>(1L))
{
auto tmp0 = static_cast<float>(in_ptr0[static_cast<long>(x0)]);
out_ptr0[static_cast<long>(x0)] = tmp0;
}
}
}
```
After:
```
extern "C" void kernel(const half* in_ptr0,
half* out_ptr0)
{
{
#pragma GCC ivdep
for(long x0=static_cast<long>(0L); x0<static_cast<long>(20L); x0+=static_cast<long>(1L))
{
auto tmp0 = in_ptr0[static_cast<long>(x0)];
out_ptr0[static_cast<long>(x0)] = tmp0;
}
}
}
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/132011
Approved by: https://github.com/jgong5, https://github.com/peterbell10
|
||
|
|
a0ef8888e6 |
[Inductor][CPP] Support vectorization of load_seed and randn (#130317)
**Summary** Enable the vectorization of `load_seed` and `randn`. For now, `randn` is using the reference implementation.
**Test Plan**
```
python -u -m pytest -s -v test/inductor/test_cpu_repro.py -k test_vec_randn
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/130317 Approved by: https://github.com/jgong5 ghstack dependencies: #122961 |
||
|
|
99b3b58f39 |
[inductor][cpp] complete vectorization for int32/int64 (#122961)
**Summary** Implement complete vectorization of `index_expr` functionally. We also add performance heuristics to resolve the regressions posted at https://github.com/pytorch/pytorch/pull/122961#issuecomment-2041336265 by disabling vectorization of specific (Fused) scheduler nodes:
- Heuristic 1: when the number of non-contiguous `index_expr/load/store` ops exceeds a threshold, we disable vectorization.
- Heuristic 2: when the total number of elements along the vec dim is less than `tiling_factor/2`, we disable vectorization.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/122961 Approved by: https://github.com/jansel Co-authored-by: leslie-fang-intel <leslie.fang@intel.com> |
||
|
|
648fc6c9c1 |
[Inductor][CPP] Refactor the tiling select into a standalone module to enhance its extensibility (#130892)
**Summary** After enabling more vectorization, we found that vectorization does not always bring performance benefits. For example, a kernel with several non-contiguous index computations or non-contiguous buffer load/store operations can experience performance regression. A typical case is what we observed in the next PR: after fully enabling vectorization of `index_expr`, we saw a performance regression of `hf_BigBird`. In this PR, we refactor the tiling selection into a standalone module to enhance its extensibility for more advanced tiling-selection heuristics. A standalone class `TilingSelect` with a `select_tiling` method has been added; `select_tiling` accepts `fn_list` and `var_sizes_list` as inputs and returns `tiling_factors` and `tiling_indices` (see the sketch after this entry). Pull Request resolved: https://github.com/pytorch/pytorch/pull/130892 Approved by: https://github.com/jgong5 |
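A hedged sketch of the interface described above (my own simplification; the real `TilingSelect` has the kernel's scheduling context available, and the list contents below are placeholders):
```python
# Sketch only: mirrors the described API shape, not the actual Inductor implementation.
class TilingSelect:
    def select_tiling(self, fn_list, var_sizes_list):
        tiling_factors = []   # e.g. [16] to vectorize the chosen loop with factor 16
        tiling_indices = []   # which loop indices the factors apply to
        # ...heuristic over fn_list / var_sizes_list goes here...
        return tiling_factors, tiling_indices
```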
||
|
|
19ff9059eb |
Revert "[Inductor][CPP] Support vectorization of remainder (#129849)"
This reverts commit
|
||
|
|
762b1b4c17 |
[inductor] [cpp] fix accuracy when template_buffer has users other than the epilogue nodes (#133073)
This PR fixes the accuracy issues when `template_buffer` has users other than the epilogue nodes. This fixes the accuracy failure of the below models using max-autotune:
- MobileBertForMaskedLM
- MobileBertForQuestionAnswering
- convnext_base
- swin_base_patch4_window7_224
## Issue 1:
Previously we always added `template_buffer` as an alias of `Y`. When the `template_buffer` has users other than the epilogue nodes, we shouldn't set it as an alias of `Y`; this PR adds a check for that case. Wrong code before the fix, where `tmp4` and `tmp9` are both stored to `Y` although we need two different buffers for them, since `tmp4` will be used by nodes other than the epilogue node:
```cpp
Y[static_cast<long>(n_start + x1 + (32L*m_start) + (32L*x0))] = tmp4; // tmp4 is the output of the template
Y[static_cast<long>(n_start + x1 + (32L*m_start) + (32L*x0))] = tmp9; // tmp9 is the output of the epilogue node
```
Correct code after the fix:
```cpp
out_ptr2[static_cast<long>(n_start + x1 + (32L*m_start) + (32L*x0))] = tmp4;
Y[static_cast<long>(n_start + x1 + (32L*m_start) + (32L*x0))] = tmp9;
```
## Issue 2:
When fixing the above issue, we found a correctness issue when `bias` is `False`. The root cause: when `bias` is `False`, the `template_buffer` has users other than the epilogue nodes, and the GEMM output buffer is localized, we need to add an extra copy epilogue to ensure that the GEMM output (a local buffer) is stored to the `template_buffer` that will be used later by other nodes. Pull Request resolved: https://github.com/pytorch/pytorch/pull/133073 Approved by: https://github.com/jgong5 ghstack dependencies: #133070 |
||
|
|
8624a571b4 |
[Inductor][CPP] Support vectorization of remainder (#129849)
**Summary** When checking the vectorization status across the 3 test suites, we found some operators disabled vectorization with the message `Disabled vectorization: op: remainder`. In this PR, we add vectorization support for this op.
**Test Plan**
```
python -u -m pytest -s -v test/inductor/test_cpu_repro.py -k test_vec_remainder
python -u -m pytest -s -v test/inductor/test_cpu_repro.py -k test_int_div_vec
```
Differential Revision: [D61147014](https://our.internmc.facebook.com/intern/diff/D61147014) Pull Request resolved: https://github.com/pytorch/pytorch/pull/129849 Approved by: https://github.com/jgong5, https://github.com/lezcano |
||
|
|
7be77658e9 |
[Inductor] support masked vectorization for the tail_loop for INT8 datatype (#131155)
This PR supports masked vectorization for the tail_loop for the torch.uint8 and torch.int8 datatypes to improve performance. BTW, I fixed the `byte` UT by setting the range of the sample inputs to [0, 255], since the range of `torch.uint8` is [0, 255]. Pull Request resolved: https://github.com/pytorch/pytorch/pull/131155 Approved by: https://github.com/jgong5, https://github.com/leslie-fang-intel, https://github.com/jansel ghstack dependencies: #130724 |
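A minimal sketch of the kind of case this helps (mine, not from the PR): an elementwise uint8 op whose length is not a multiple of the vector width, so a tail loop is generated, with inputs kept in [0, 255]:
```python
import torch

fn = torch.compile(lambda a, b: torch.minimum(a, b))
a = torch.randint(0, 255, (67,), dtype=torch.uint8)  # 67 is not a multiple of the vec width
b = torch.randint(0, 255, (67,), dtype=torch.uint8)
fn(a, b)
```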
||
|
|
370b072d8d |
[Inductor] support masked vectorization for the tail_loop of the 2d tiles kernel (#130724)
This PR supports masked vectorization for the tail_loop of the 2d tiles kernel to improve the performance.
Example:
```
import torch
def fn(a):
    return torch.permute(a, (2, 0, 1)).contiguous()
input = torch.randn(2, 20, 40)
compiled_fn = torch.compile(fn)
with torch.no_grad():
    for _ in range(3):
        compiled_fn(input)
```
Generated code:
- Before:
```
cpp_fused_clone_0 = async_compile.cpp_pybinding(['const float*', 'float*'], '''
#include "/tmp/torchinductor_jiayisun/z2/cz2ry4ghylembzwx7hkbanur76fi3mkiu7s6jm3zdi2amy5egq4b.h"
extern "C" void kernel(const float* in_ptr0,
float* out_ptr0)
{
{
#pragma GCC ivdep
for(long x0=static_cast<long>(0L); x0<static_cast<long>(32L); x0+=static_cast<long>(16L))
{
#pragma GCC ivdep
for(long x1=static_cast<long>(0L); x1<static_cast<long>(32L); x1+=static_cast<long>(16L))
{
float tmp0[16*16] __attribute__ ((aligned (16)));
at::vec::transpose_mxn<float,16,16>(in_ptr0 + static_cast<long>(x0 + (40L*x1)), static_cast<long>(40L), tmp0, 16);
for (long x0_inner = 0; x0_inner < 16; x0_inner++)
{
auto tmp1 = at::vec::Vectorized<float>::loadu(tmp0 + static_cast<long>(16L*x0_inner), 16);
tmp1.store(out_ptr0 + static_cast<long>(x1 + (40L*x0) + (40L*x0_inner)));
}
}
#pragma GCC ivdep
for(long x1=static_cast<long>(32L); x1<static_cast<long>(40L); x1+=static_cast<long>(1L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(x0 + (40L*x1)), 16);
[&]
{
__at_align__ std::array<float, 16> tmpbuf;
tmp0.store(tmpbuf.data(), 16);
#pragma GCC unroll 16
for (long x0_inner = 0; x0_inner < 16; x0_inner++)
{
out_ptr0[static_cast<long>(x1 + (40L*x0) + (40L*x0_inner))] = tmpbuf[x0_inner];
}
}
()
;
}
}
#pragma GCC ivdep
for(long x0=static_cast<long>(32L); x0<static_cast<long>(40L); x0+=static_cast<long>(1L))
{
#pragma GCC ivdep
for(long x1=static_cast<long>(0L); x1<static_cast<long>(40L); x1+=static_cast<long>(1L))
{
auto tmp0 = in_ptr0[static_cast<long>(x0 + (40L*x1))];
out_ptr0[static_cast<long>(x1 + (40L*x0))] = tmp0;
}
}
}
}
''')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (2, 20, 40), (800, 40, 1))
buf0 = empty_strided_cpu((40, 2, 20), (40, 20, 1), torch.float32)
cpp_fused_clone_0(arg0_1, buf0)
del arg0_1
return (buf0, )
```
- After:
```
cpp_fused_clone_0 = async_compile.cpp_pybinding(['const float*', 'float*'], '''
#include "/tmp/torchinductor_jiayisun/z2/cz2ry4ghylembzwx7hkbanur76fi3mkiu7s6jm3zdi2amy5egq4b.h"
extern "C" void kernel(const float* in_ptr0,
float* out_ptr0)
{
{
#pragma GCC ivdep
for(long x0=static_cast<long>(0L); x0<static_cast<long>(32L); x0+=static_cast<long>(16L))
{
#pragma GCC ivdep
for(long x1=static_cast<long>(0L); x1<static_cast<long>(32L); x1+=static_cast<long>(16L))
{
float tmp0[16*16] __attribute__ ((aligned (16)));
at::vec::transpose_mxn<float,16,16>(in_ptr0 + static_cast<long>(x0 + (40L*x1)), static_cast<long>(40L), tmp0, 16);
for (long x0_inner = 0; x0_inner < 16; x0_inner++)
{
auto tmp1 = at::vec::Vectorized<float>::loadu(tmp0 + static_cast<long>(16L*x0_inner), 16);
tmp1.store(out_ptr0 + static_cast<long>(x1 + (40L*x0) + (40L*x0_inner)));
}
}
#pragma GCC ivdep
for(long x1=static_cast<long>(32L); x1<static_cast<long>(40L); x1+=static_cast<long>(8L))
{
float tmp0[16*8] __attribute__ ((aligned (16)));
at::vec::transpose_mxn<float,8,16>(in_ptr0 + static_cast<long>(x0 + (40L*x1)), static_cast<long>(40L), tmp0, 8);
for (long x0_inner = 0; x0_inner < 16; x0_inner++)
{
auto tmp1 = at::vec::Vectorized<float>::loadu(tmp0 + static_cast<long>(8L*x0_inner), 8);
tmp1.store(out_ptr0 + static_cast<long>(x1 + (40L*x0) + (40L*x0_inner)), 8);
}
}
}
#pragma GCC ivdep
for(long x0=static_cast<long>(32L); x0<static_cast<long>(40L); x0+=static_cast<long>(8L))
{
#pragma GCC ivdep
for(long x1=static_cast<long>(0L); x1<static_cast<long>(32L); x1+=static_cast<long>(16L))
{
float tmp0[8*16] __attribute__ ((aligned (16)));
at::vec::transpose_mxn<float,16,8>(in_ptr0 + static_cast<long>(x0 + (40L*x1)), static_cast<long>(40L), tmp0, 16);
for (long x0_inner = 0; x0_inner < 8; x0_inner++)
{
auto tmp1 = at::vec::Vectorized<float>::loadu(tmp0 + static_cast<long>(16L*x0_inner), 16);
tmp1.store(out_ptr0 + static_cast<long>(x1 + (40L*x0) + (40L*x0_inner)));
}
}
#pragma GCC ivdep
for(long x1=static_cast<long>(32L); x1<static_cast<long>(40L); x1+=static_cast<long>(8L))
{
float tmp0[8*8] __attribute__ ((aligned (16)));
at::vec::transpose_mxn<float,8,8>(in_ptr0 + static_cast<long>(x0 + (40L*x1)), static_cast<long>(40L), tmp0, 8);
for (long x0_inner = 0; x0_inner < 8; x0_inner++)
{
auto tmp1 = at::vec::Vectorized<float>::loadu(tmp0 + static_cast<long>(8L*x0_inner), 8);
tmp1.store(out_ptr0 + static_cast<long>(x1 + (40L*x0) + (40L*x0_inner)), 8);
}
}
}
}
}
''')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (2, 20, 40), (800, 40, 1))
buf0 = empty_strided_cpu((40, 2, 20), (40, 20, 1), torch.float32)
cpp_fused_clone_0(arg0_1, buf0)
del arg0_1
return (buf0, )
```
Co-authored-by: CaoE <e.cao@intel.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/130724
Approved by: https://github.com/jgong5, https://github.com/leslie-fang-intel, https://github.com/jansel
|
||
|
|
78ccbad678 |
[inductor] remove dtype check/assert for reduction vec and support bool for min/max (#132473)
This PR removes the dtype check/assert for vectorized reduction and supports bool for min/max reduction.
After removing the dtype check and assertion, the following UT failed:
```
PYTORCH_OPINFO_SAMPLE_INPUT_INDEX=0 python test/inductor/test_torchinductor_opinfo.py -k TestInductorOpInfoCPU.test_comprehensive_max_reduction_no_dim_cpu_bool
```
Now it is supported; generated code:
```
cpp_fused_max_0 = async_compile.cpp_pybinding(['const bool*', 'bool*'], '''
#include "/tmp/torchinductor_root/xf/cxf75ftbahznonqovnsugw7v6sldrabizgtx3j4rhgdmu3r36wlu.h"
extern "C" void kernel(const bool* in_ptr0,
bool* out_ptr0)
{
{
{
bool tmp_acc0 = std::numeric_limits<bool>::min();
at::vec::VecMask<float,1> tmp_acc0_vec = at::vec::VecMask<float,1>::from(std::numeric_limits<bool>::min());
for(long x0=static_cast<long>(0L); x0<static_cast<long>(112L); x0+=static_cast<long>(16L))
{
auto tmp0 = at::vec::VecMask<float,1>::from(in_ptr0 + static_cast<long>(x0));
tmp_acc0_vec = tmp_acc0_vec | tmp0;
}
#pragma omp simd simdlen(8)
for(long x0=static_cast<long>(112L); x0<static_cast<long>(125L); x0+=static_cast<long>(1L))
{
auto tmp0 = in_ptr0[static_cast<long>(x0)];
tmp_acc0 = max_propagate_nan(tmp_acc0, tmp0);
}
tmp_acc0 = max_propagate_nan(tmp_acc0, tmp_acc0_vec.all_zero());
out_ptr0[static_cast<long>(0L)] = static_cast<bool>(tmp_acc0);
}
}
}
''')
```
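A repro sketch matching the UT above (mine): a no-dim max reduction over a bool tensor of 125 elements, so both the vector body and the scalar tail are generated:
```python
import torch

fn = torch.compile(lambda x: torch.max(x))
fn(torch.randint(0, 2, (125,), dtype=torch.bool))
```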
Pull Request resolved: https://github.com/pytorch/pytorch/pull/132473
Approved by: https://github.com/jgong5
|
||
|
|
636a7c4859 |
[13/N] Use std::optional (#132527)
Follows #132361 Pull Request resolved: https://github.com/pytorch/pytorch/pull/132527 Approved by: https://github.com/ezyang |
||
|
|
4faa0e3efb |
[Inductor] support masked vectorization for the tail_loop (#126526)
Currently the tail_loop always uses the scalar kernel. This PR supports masked vectorization for the tail_loop to improve the performance.
Example:
```
import torch
import torch.nn as nn
class GN(nn.Module):
    def __init__(self, num_groups, num_channels):
        super(GN, self).__init__()
        self.gn = nn.GroupNorm(num_groups, num_channels)
    def forward(self, x):
        return self.gn(x)
input = torch.randn(2, 960, 96, 96).to(memory_format=torch.channels_last)
m = GN(32, 960).eval()
compiled_m = torch.compile(m)
with torch.no_grad():
    for _ in range(3):
        compiled_m(input)
```
Generated code:
- Before:
```
cpp_fused_native_group_norm_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'const float*', 'float*', 'float*', 'float*'], '''
#include "/tmp/torchinductor_jiayisun/ky/cky2bufythacofebk7ujv36e4pxyqcqbpsy5r4vojoprjiwcwfxf.h"
extern "C" void kernel(const float* in_ptr0,
const float* in_ptr1,
const float* in_ptr2,
float* out_ptr0,
float* out_ptr1,
float* out_ptr2)
{
#pragma omp parallel num_threads(112)
{
int tid = omp_get_thread_num();
{
#pragma omp for collapse(2)
for(long x0=static_cast<long>(0L); x0<static_cast<long>(2L); x0+=static_cast<long>(1L))
{
for(long x1=static_cast<long>(0L); x1<static_cast<long>(32L); x1+=static_cast<long>(1L))
{
{
Welford<float> tmp_acc0 = Welford<float>();
Welford<at::vec::Vectorized<float>> tmp_acc0_vec = Welford<at::vec::Vectorized<float>>();
static WeightRecp<at::vec::Vectorized<float>> weight_recps(static_cast<long>(17280L));
for(long x2=static_cast<long>(0L); x2<static_cast<long>(9216L); x2+=static_cast<long>(1L))
{
for(long x3=static_cast<long>(0L); x3<static_cast<long>(16L); x3+=static_cast<long>(16L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(x3 + (30L*x1) + (960L*x2) + (8847360L*x0)), 16);
tmp_acc0_vec = welford_combine(tmp_acc0_vec, tmp0, &weight_recps);
}
#pragma omp simd simdlen(8)
for(long x3=static_cast<long>(16L); x3<static_cast<long>(30L); x3+=static_cast<long>(1L))
{
auto tmp0 = in_ptr0[static_cast<long>(x3 + (30L*x1) + (960L*x2) + (8847360L*x0))];
tmp_acc0 = welford_combine(tmp_acc0, tmp0);
}
}
tmp_acc0 = welford_combine(tmp_acc0, welford_vec_reduce_all(tmp_acc0_vec));
out_ptr0[static_cast<long>(x1 + (32L*x0))] = static_cast<float>(tmp_acc0.mean);
out_ptr1[static_cast<long>(x1 + (32L*x0))] = static_cast<float>(tmp_acc0.m2);
}
}
}
}
{
#pragma omp for collapse(2)
for(long x0=static_cast<long>(0L); x0<static_cast<long>(2L); x0+=static_cast<long>(1L))
{
for(long x1=static_cast<long>(0L); x1<static_cast<long>(9216L); x1+=static_cast<long>(1L))
{
for(long x2=static_cast<long>(0L); x2<static_cast<long>(960L); x2+=static_cast<long>(16L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(x2 + (960L*x1) + (8847360L*x0)), 16);
auto tmp1 =
[&]
{
__at_align__ std::array<float, 16> tmpbuf;
#pragma GCC unroll 16
for (long x2_inner = 0; x2_inner < 16; x2_inner++)
{
tmpbuf[x2_inner] = out_ptr0[static_cast<long>((32L*x0) + (c10::div_floor_integer((x2 + x2_inner), 30L)))];
}
return at::vec::Vectorized<float>::loadu(tmpbuf.data(), 16);
}
()
;
auto tmp3 =
[&]
{
__at_align__ std::array<float, 16> tmpbuf;
#pragma GCC unroll 16
for (long x2_inner = 0; x2_inner < 16; x2_inner++)
{
tmpbuf[x2_inner] = out_ptr1[static_cast<long>((32L*x0) + (c10::div_floor_integer((x2 + x2_inner), 30L)))];
}
return at::vec::Vectorized<float>::loadu(tmpbuf.data(), 16);
}
()
;
auto tmp12 = at::vec::Vectorized<float>::loadu(in_ptr1 + static_cast<long>(x2), 16);
auto tmp14 = at::vec::Vectorized<float>::loadu(in_ptr2 + static_cast<long>(x2), 16);
auto tmp2 = tmp0 - tmp1;
auto tmp4 = static_cast<float>(276480.0);
auto tmp5 = at::vec::Vectorized<float>(tmp4);
auto tmp6 = tmp3 / tmp5;
auto tmp7 = static_cast<float>(1e-05);
auto tmp8 = at::vec::Vectorized<float>(tmp7);
auto tmp9 = tmp6 + tmp8;
auto tmp10 = tmp9.rsqrt();
auto tmp11 = tmp2 * tmp10;
auto tmp13 = tmp11 * tmp12;
auto tmp15 = tmp13 + tmp14;
tmp15.store(out_ptr2 + static_cast<long>(x2 + (960L*x1) + (8847360L*x0)));
}
}
}
}
}
}
''')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (960, ), (1, ))
assert_size_stride(arg1_1, (960, ), (1, ))
assert_size_stride(arg2_1, (2, 960, 96, 96), (8847360, 1, 92160, 960))
buf0 = empty_strided_cpu((2, 32, 1, 1), (32, 1, 64, 64), torch.float32)
buf1 = empty_strided_cpu((2, 32, 1, 1), (32, 1, 64, 64), torch.float32)
buf3 = empty_strided_cpu((2, 960, 96, 96), (8847360, 1, 92160, 960), torch.float32)
cpp_fused_native_group_norm_0(arg2_1, arg0_1, arg1_1, buf0, buf1, buf3)
del arg0_1
del arg1_1
del arg2_1
return (buf3, )
```
- After:
```
cpp_fused_native_group_norm_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'const float*', 'float*', 'float*', 'float*'], '''
#include "/tmp/torchinductor_jiayisun/em/cemtujj65j5txpqlxc7w4pcunpmvz3qtiudkc5ocxxhcmdlknw2m.h"
extern "C" void kernel(const float* in_ptr0,
const float* in_ptr1,
const float* in_ptr2,
float* out_ptr0,
float* out_ptr1,
float* out_ptr2)
{
#pragma omp parallel num_threads(112)
{
int tid = omp_get_thread_num();
{
#pragma omp for collapse(2)
for(long x0=static_cast<long>(0L); x0<static_cast<long>(2L); x0+=static_cast<long>(1L))
{
for(long x1=static_cast<long>(0L); x1<static_cast<long>(32L); x1+=static_cast<long>(1L))
{
{
Welford<float> tmp_acc0 = Welford<float>();
Welford<at::vec::Vectorized<float>> tmp_acc0_vec = Welford<at::vec::Vectorized<float>>();
Welford<at::vec::Vectorized<float>> masked_tmp_acc0_vec = Welford<at::vec::Vectorized<float>>();
static WeightRecp<at::vec::Vectorized<float>> wrecps0(static_cast<long>(17280L));
for(long x2=static_cast<long>(0L); x2<static_cast<long>(9216L); x2+=static_cast<long>(1L))
{
for(long x3=static_cast<long>(0L); x3<static_cast<long>(16L); x3+=static_cast<long>(16L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(x3 + (30L*x1) + (960L*x2) + (8847360L*x0)), 16);
tmp_acc0_vec = welford_combine(tmp_acc0_vec, tmp0, &wrecps0);
}
for(long x3=static_cast<long>(16L); x3<static_cast<long>(30L); x3+=static_cast<long>(14L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(x3 + (30L*x1) + (960L*x2) + (8847360L*x0)), 14);
masked_tmp_acc0_vec = welford_combine(masked_tmp_acc0_vec, tmp0, 14, &wrecps0);
}
}
tmp_acc0 = welford_combine(tmp_acc0, welford_vec_reduce_all(masked_tmp_acc0_vec));
tmp_acc0 = welford_combine(tmp_acc0, welford_vec_reduce_all(tmp_acc0_vec));
out_ptr0[static_cast<long>(x1 + (32L*x0))] = static_cast<float>(tmp_acc0.mean);
out_ptr1[static_cast<long>(x1 + (32L*x0))] = static_cast<float>(tmp_acc0.m2);
}
}
}
}
{
#pragma omp for collapse(2)
for(long x0=static_cast<long>(0L); x0<static_cast<long>(2L); x0+=static_cast<long>(1L))
{
for(long x1=static_cast<long>(0L); x1<static_cast<long>(9216L); x1+=static_cast<long>(1L))
{
for(long x2=static_cast<long>(0L); x2<static_cast<long>(960L); x2+=static_cast<long>(16L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(x2 + (960L*x1) + (8847360L*x0)), 16);
auto tmp1 =
[&]
{
__at_align__ std::array<float, 16> tmpbuf;
#pragma GCC unroll 16
for (long x2_inner = 0; x2_inner < 16; x2_inner++)
{
tmpbuf[x2_inner] = out_ptr0[static_cast<long>((32L*x0) + (c10::div_floor_integer((x2 + x2_inner), 30L)))];
}
return at::vec::Vectorized<float>::loadu(tmpbuf.data(), 16);
}
()
;
auto tmp3 =
[&]
{
__at_align__ std::array<float, 16> tmpbuf;
#pragma GCC unroll 16
for (long x2_inner = 0; x2_inner < 16; x2_inner++)
{
tmpbuf[x2_inner] = out_ptr1[static_cast<long>((32L*x0) + (c10::div_floor_integer((x2 + x2_inner), 30L)))];
}
return at::vec::Vectorized<float>::loadu(tmpbuf.data(), 16);
}
()
;
auto tmp12 = at::vec::Vectorized<float>::loadu(in_ptr1 + static_cast<long>(x2), 16);
auto tmp14 = at::vec::Vectorized<float>::loadu(in_ptr2 + static_cast<long>(x2), 16);
auto tmp2 = tmp0 - tmp1;
auto tmp4 = static_cast<float>(276480.0);
auto tmp5 = at::vec::Vectorized<float>(tmp4);
auto tmp6 = tmp3 / tmp5;
auto tmp7 = static_cast<float>(1e-05);
auto tmp8 = at::vec::Vectorized<float>(tmp7);
auto tmp9 = tmp6 + tmp8;
auto tmp10 = tmp9.rsqrt();
auto tmp11 = tmp2 * tmp10;
auto tmp13 = tmp11 * tmp12;
auto tmp15 = tmp13 + tmp14;
tmp15.store(out_ptr2 + static_cast<long>(x2 + (960L*x1) + (8847360L*x0)));
}
}
}
}
}
}
''')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (960, ), (1, ))
assert_size_stride(arg1_1, (960, ), (1, ))
assert_size_stride(arg2_1, (2, 960, 96, 96), (8847360, 1, 92160, 960))
buf0 = empty_strided_cpu((2, 32, 1, 1), (32, 1, 64, 64), torch.float32)
buf1 = empty_strided_cpu((2, 32, 1, 1), (32, 1, 64, 64), torch.float32)
buf3 = empty_strided_cpu((2, 960, 96, 96), (8847360, 1, 92160, 960), torch.float32)
cpp_fused_native_group_norm_0(arg2_1, arg0_1, arg1_1, buf0, buf1, buf3)
del arg0_1
del arg1_1
del arg2_1
return (buf3, )
```
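For checking this kind of change locally, here is a minimal, hedged sketch that dumps the C++ inductor emits for a compiled callable so the hot loop can be inspected for the absence of `c10::div_floor_integer`. It assumes `torch._inductor.utils.run_and_get_cpp_code`, whose location and signature may differ across PyTorch versions.
```
# Hedged helper sketch: retrieve the generated C++ for a compiled callable so the
# loop-split (contiguous-load) form can be inspected directly.
import torch
from torch._inductor.utils import run_and_get_cpp_code  # assumed internal utility

def dump_cpp(fn, *args):
    compiled = torch.compile(fn)
    with torch.no_grad():
        _, code = run_and_get_cpp_code(compiled, *args)
    return code
```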
Co-authored-by: CaoE <e.cao@intel.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/126526
Approved by: https://github.com/jgong5, https://github.com/leslie-fang-intel, https://github.com/jansel
|
||
|
|
1e65ccc3de |
[inductor] export kernel for gemm template. (#132580)
Changes: 1. Move `get_export_declaration` to global scope. 2. Export kernel for gemm template. Pull Request resolved: https://github.com/pytorch/pytorch/pull/132580 Approved by: https://github.com/ezyang |
||
|
|
7100c36c8a |
Revert "[inductor] export kernel for gemm template. (#132580)"
This reverts commit
|
||
|
|
96471ea47c |
[inductor] support vectorization for torch.any(bool) -> bool (#132472)
Support the `any` reduction from `bool` to `bool`.
TestPlan:
```
python test/inductor/test_cpu_repro.py -k test_any_bool_vec
```
Generated code for `test_any_bool_vec`
```
cpp_fused_any_0 = async_compile.cpp_pybinding(['const float*', 'const float*', 'bool*', 'bool*'], '''
#include "/tmp/torchinductor_root/ky/cky2bufythacofebk7ujv36e4pxyqcqbpsy5r4vojoprjiwcwfxf.h"
extern "C" void kernel(const float* in_ptr0,
const float* in_ptr1,
bool* out_ptr0,
bool* out_ptr1)
{
{
{
bool tmp_acc0 = 0;
at::vec::VecMask<float,1> tmp_acc0_vec = at::vec::VecMask<float,1>::from(0);
bool tmp_acc0_arr[64];
for (int tid = 0; tid < 64; tid++)
{
tmp_acc0_arr[tid] = 0;
}
at::vec::VecMask<float,1> tmp_acc0_vec_arr[64];
for (int tid = 0; tid < 64; tid++)
{
tmp_acc0_vec_arr[tid] = at::vec::VecMask<float,1>::from(0);
}
#pragma omp parallel num_threads(64)
{
int tid = omp_get_thread_num();
bool tmp_acc0_local = 0;
at::vec::VecMask<float,1> tmp_acc0_vec_local = at::vec::VecMask<float,1>::from(0);
#pragma omp for
for(long x0=static_cast<long>(0L); x0<static_cast<long>(64L); x0+=static_cast<long>(16L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(x0), 16);
auto tmp1 = at::vec::VecMask<float,1>::from<float,1>(tmp0);
tmp_acc0_vec_local = tmp_acc0_vec_local | tmp1;
}
tmp_acc0_arr[tid] = tmp_acc0_local;
tmp_acc0_vec_arr[tid] = tmp_acc0_vec_local;
}
for (int tid = 0; tid < 64; tid++)
{
tmp_acc0 = tmp_acc0 || tmp_acc0_arr[tid];
}
for (int tid = 0; tid < 64; tid++)
{
tmp_acc0_vec = tmp_acc0_vec | tmp_acc0_vec_arr[tid];
}
tmp_acc0 = tmp_acc0 || at::vec::vec_reduce_all<bool>([](at::vec::Vectorized<bool>& x, at::vec::Vectorized<bool>& y) { return x | y; }, tmp_acc0_vec.to<bool, 1>());
out_ptr0[static_cast<long>(0L)] = static_cast<bool>(tmp_acc0);
}
}
{
{
bool tmp_acc0 = 0;
at::vec::VecMask<float,1> tmp_acc0_vec = at::vec::VecMask<float,1>::from(0);
bool tmp_acc0_arr[64];
for (int tid = 0; tid < 64; tid++)
{
tmp_acc0_arr[tid] = 0;
}
at::vec::VecMask<float,1> tmp_acc0_vec_arr[64];
for (int tid = 0; tid < 64; tid++)
{
tmp_acc0_vec_arr[tid] = at::vec::VecMask<float,1>::from(0);
}
#pragma omp parallel num_threads(64)
{
int tid = omp_get_thread_num();
bool tmp_acc0_local = 0;
at::vec::VecMask<float,1> tmp_acc0_vec_local = at::vec::VecMask<float,1>::from(0);
#pragma omp for
for(long x0=static_cast<long>(0L); x0<static_cast<long>(64L); x0+=static_cast<long>(16L))
{
auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr1 + static_cast<long>(x0), 16);
auto tmp1 = at::vec::VecMask<float,1>::from<float,1>(tmp0);
tmp_acc0_vec_local = tmp_acc0_vec_local | tmp1;
}
tmp_acc0_arr[tid] = tmp_acc0_local;
tmp_acc0_vec_arr[tid] = tmp_acc0_vec_local;
}
for (int tid = 0; tid < 64; tid++)
{
tmp_acc0 = tmp_acc0 || tmp_acc0_arr[tid];
}
for (int tid = 0; tid < 64; tid++)
{
tmp_acc0_vec = tmp_acc0_vec | tmp_acc0_vec_arr[tid];
}
tmp_acc0 = tmp_acc0 || at::vec::vec_reduce_all<bool>([](at::vec::Vectorized<bool>& x, at::vec::Vectorized<bool>& y) { return x | y; }, tmp_acc0_vec.to<bool, 1>());
out_ptr1[static_cast<long>(0L)] = static_cast<bool>(tmp_acc0);
}
}
}
''')
```
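A hedged repro sketch in the spirit of the test above (shapes and the exact bool-producing op are illustrative; the real `test_any_bool_vec` may differ):
```
# Two independent bool -> bool `any` reductions over length-64 inputs,
# mirroring the two kernels in the generated code above.
import torch

def fn(a, b):
    return (a > 0).any(), (b > 0).any()

a = torch.randn(64)
b = torch.zeros(64)
compiled = torch.compile(fn)
got = compiled(a, b)
ref = fn(a, b)
assert all(bool(g) == bool(r) for g, r in zip(got, ref))
```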
Pull Request resolved: https://github.com/pytorch/pytorch/pull/132472
Approved by: https://github.com/jgong5
|
||
|
|
ae44b8f410 |
[inductor] support vectorization for torch.argmax/min(float/int64_t)-> int64_t (#131016)
Support the argmin/argmax reductions with a scalar implementation. TestPlan: ``` python test/inductor/test_cpu_repro.py -k test_argmax_argmin_with_nan_value python test/inductor/test_cpu_repro.py -k test_argmin python test/inductor/test_cpu_repro.py -k test_reduction_cpu_only ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/131016 Approved by: https://github.com/jgong5, https://github.com/jansel |
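A hedged repro sketch for this reduction path (shapes are illustrative; the listed tests cover the NaN and dtype cases):
```
# argmax/argmin over the last dim, including a NaN to exercise NaN handling.
import torch

def fn(x):
    return x.argmax(dim=-1), x.argmin(dim=-1)

x = torch.randn(8, 128)
x[0, 3] = float("nan")
compiled = torch.compile(fn)
for got, ref in zip(compiled(x), fn(x)):
    torch.testing.assert_close(got, ref)
```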
||
|
|
87d46d70d7 |
[inductor] export kernel for gemm template. (#132580)
Changes: 1. Move `get_export_declaration` to `cpp_utils.py` as a basic function. 2. Export kernel for gemm template. Pull Request resolved: https://github.com/pytorch/pytorch/pull/132580 Approved by: https://github.com/ezyang |
||
|
|
6ec4af6865 |
[Inductor][CPP] Add vectorization support for double (#131886)
Before:
```
extern "C" void kernel(const double* in_ptr0, double* out_ptr0)
{
#pragma omp parallel num_threads(112)
{
int tid = omp_get_thread_num();
{
#pragma omp for
for(long x0=static_cast<long>(0L); x0<static_cast<long>(1024L); x0+=static_cast<long>(1L))
{
auto tmp0 = in_ptr0[static_cast<long>(x0)];
auto tmp1 = decltype(tmp0)(tmp0 * tmp0);
out_ptr0[static_cast<long>(x0)] = tmp1;
}
}
}
}
```
After:
```
extern "C" void kernel(const double* in_ptr0, double* out_ptr0)
{
#pragma omp parallel num_threads(112)
{
int tid = omp_get_thread_num();
{
#pragma omp for
for(long x0=static_cast<long>(0L); x0<static_cast<long>(1024L); x0+=static_cast<long>(16L))
{
auto tmp0 = at::vec::VectorizedN<double,2>::loadu(in_ptr0 + static_cast<long>(x0), 16);
auto tmp1 = tmp0 * tmp0;
tmp1.store(out_ptr0 + static_cast<long>(x0), 16);
}
}
}
}
```
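A hedged sketch of the kind of workload that produces a kernel like the one above: an elementwise square over a 1024-element float64 tensor.
```
import torch

def square(x):
    # elementwise square; with this change the fp64 path vectorizes via VectorizedN<double, 2>
    return x * x

x = torch.randn(1024, dtype=torch.float64)
compiled = torch.compile(square)
torch.testing.assert_close(compiled(x), square(x))
```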
Pull Request resolved: https://github.com/pytorch/pytorch/pull/131886
Approved by: https://github.com/jgong5, https://github.com/peterbell10
|
||
|
|
a4013e8b72 |
[inductor] cpp codegen alignas for all OSs. (#132387)
Changes: 1. Make cpp codegen `alignas` work on all OSs. Pull Request resolved: https://github.com/pytorch/pytorch/pull/132387 Approved by: https://github.com/jgong5, https://github.com/desertfire |
||
|
|
aa1488fe02 |
[inductor] turn on enable_kernel_profile on Windows. (#132025)
Enable `TORCHINDUCTOR_CPP_ENABLE_KERNEL_PROFILE` for inductor on Windows. Local test passed. Pull Request resolved: https://github.com/pytorch/pytorch/pull/132025 Approved by: https://github.com/jgong5, https://github.com/jansel |
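A hedged sketch of how this is typically exercised. The environment variable name comes from the commit; the surrounding profiling flow is ordinary `torch.profiler` usage assumed here, not taken from the PR, and the variable must be set before compilation for it to take effect.
```
# Enable per-kernel timing in the generated C++ before compiling, then profile
# a compiled function so the inductor CPP kernels show up in the trace.
import os
os.environ["TORCHINDUCTOR_CPP_ENABLE_KERNEL_PROFILE"] = "1"

import torch

compiled = torch.compile(lambda x: (x + 1).relu())
x = torch.randn(1024)
with torch.profiler.profile(activities=[torch.profiler.ProfilerActivity.CPU]) as prof:
    compiled(x)
print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10))
```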
||
|
|
f8e4060484 |
[Inductor][CPP] Enhance cppcsevar data type deduce (#130827)
**Summary**
Previously, we used `data_type_propagation` at the start of `codegen` to deduce the data type of each node and save this information in `node.meta[OptimizationContext.key]`. Then, we used this node metadata to update the cppcsevar data type in `update_on_args`. However, this method is not always correct. For example, in the codegen of `indirect_indexing` (see [here](
|
||
|
|
8b507a922a |
Mode to emulate amp numerics (#131595)
```
# Mode to emulate pytorch eager numerics for lower precision (fp16, bf16)
# Pytorch eager computes bf16/fp16 by upcasting inputs to fp32 and downcasting after
# For multiple, fused pointwise nodes, inductor will elide the intermediary upcasts and downcasts
# Typically this should be closer to fp64 ref numerics. However, it can be useful for debugging
# to emulate the eager numerics.
```
We add extra upcasts and downcasts for pointwise nodes that correspond to casts present in the original user program (excluding pointwise nodes emitted during decomposition). Since this is mostly for debugging, the information is recorded in the node `meta` so that this mode has no unintended side effects such as changing pattern matching. In theory there could also be extra casts around fused reduction -> reduction, although I haven't seen that much in practice; it could be handled as a follow-up. Note: this currently works only with the CUDA backend. This mode was sufficient to eliminate the compile differences from https://fb.workplace.com/groups/385893200869952/posts/464263173032954/?comment_id=465199259606012&reply_comment_id=465676792891592. Pull Request resolved: https://github.com/pytorch/pytorch/pull/131595 Approved by: https://github.com/shunting314, https://github.com/bdhirsh, https://github.com/jansel |
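A hedged illustration (plain eager ops, not the inductor mode itself) of why per-op bf16 rounding in eager can diverge from a fused computation that keeps intermediates in fp32:
```
import torch

x = torch.randn(1024, dtype=torch.bfloat16)

# Eager-style: each pointwise op effectively rounds its result back to bf16.
eager_like = ((x.float() * 3.0).bfloat16().float() + 1.0).bfloat16()

# Fused-style: the intermediate downcast/upcast pair is elided; one rounding at the end.
fused_like = (x.float() * 3.0 + 1.0).bfloat16()

print((eager_like != fused_like).any())  # can be True due to the extra rounding step
```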
||
|
|
945bf78894 |
Revert "[BE] typing for decorators - fx/_compatibility (#131568)"
This reverts commit
|
||
|
|
193f62fde9 |
[BE] typing for decorators - fx/_compatibility (#131568)
See #131429 Pull Request resolved: https://github.com/pytorch/pytorch/pull/131568 Approved by: https://github.com/justinchuby, https://github.com/oulgen, https://github.com/zou3519 |
||
|
|
5772c13f56 |
Dont wrap negative indexing in scatter reduce (#131503)
Fix for https://github.com/pytorch/pytorch/issues/131321 Pull Request resolved: https://github.com/pytorch/pytorch/pull/131503 Approved by: https://github.com/shunting314 |
||
|
|
b6d477fd56 |
[BE][Easy][16/19] enforce style for empty lines in import segments in torch/_i*/ (#129768)
See https://github.com/pytorch/pytorch/pull/129751#issue-2380881501. Most changes are auto-generated by linter. You can review these PRs via: ```bash git diff --ignore-all-space --ignore-blank-lines HEAD~1 ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/129768 Approved by: https://github.com/jansel |
||
|
|
27c2a0d63b |
[inductor] Separate Buffer and Operation into two concepts (#130831)
Resubmit of #128893 Currently a buffer represents both a tensor with physical storage and a computation that produces the tensor as a result. This PR attempts to split these into two different concepts in the scheduler. This should allow us to have multiple outputs from a single operation. Differential Revision: [D59876059](https://our.internmc.facebook.com/intern/diff/D59876059) Pull Request resolved: https://github.com/pytorch/pytorch/pull/130831 Approved by: https://github.com/lezcano |
||
|
|
b7d2abd766 |
Fix vectorized ops.masked (#130130)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/130130 Approved by: https://github.com/jgong5, https://github.com/lezcano |
||
|
|
705da70f2c |
[inductor][cpp] align dtype convert cache between vec and scalar kernels (#130677)
The conversion cache used for fixing https://github.com/pytorch/pytorch/issues/115260 depended on "store", which might be removed and ignored. This led to inconsistent code between the vec and scalar kernels: we generate the scalar kernel first, followed by the vector kernel, and a store buffer removed during scalar codegen can affect the vector kernel codegen. This PR moves the caching from "store" to the "to_dtype" calls, which are not affected by removed buffers.

`pytest -k test_consistent_remove_buffers test/inductor/test_cpu_repro.py`

before
```c++
extern "C" void kernel(const bfloat16* in_ptr0, bfloat16* out_ptr1)
{
    {
        for(long x0=static_cast<long>(0L); x0<static_cast<long>(64L); x0+=static_cast<long>(16L))
        {
            auto tmp0 = at::vec::Vectorized<bfloat16>::loadu(in_ptr0 + static_cast<long>(x0), 16);
            auto tmp1 = at::vec::convert<float>(tmp0);
            auto tmp2 = tmp1 + tmp1;
            auto tmp3 = at::vec::convert<bfloat16>(tmp2);
            auto tmp4 = at::vec::convert<float>(tmp3);
            auto tmp5 = tmp1 + tmp4;
            auto tmp6 = at::vec::convert<bfloat16>(tmp5);
            tmp6.store(out_ptr1 + static_cast<long>(x0), 16);
        }
        #pragma omp simd simdlen(8)
        for(long x0=static_cast<long>(64L); x0<static_cast<long>(65L); x0+=static_cast<long>(1L))
        {
            auto tmp0 = in_ptr0[static_cast<long>(x0)];
            auto tmp1 = c10::convert<float>(tmp0);
            auto tmp2 = decltype(tmp1)(tmp1 + tmp1);
            auto tmp3 = c10::convert<bfloat16>(tmp2);
            auto tmp4 = decltype(tmp1)(tmp1 + tmp2);
            auto tmp5 = c10::convert<bfloat16>(tmp4);
            out_ptr1[static_cast<long>(x0)] = tmp5;
        }
    }
}
```
after
```c++
extern "C" void kernel(const bfloat16* in_ptr0, bfloat16* out_ptr1)
{
    {
        for(long x0=static_cast<long>(0L); x0<static_cast<long>(64L); x0+=static_cast<long>(16L))
        {
            auto tmp0 = at::vec::Vectorized<bfloat16>::loadu(in_ptr0 + static_cast<long>(x0), 16);
            auto tmp1 = at::vec::convert<float>(tmp0);
            auto tmp2 = tmp1 + tmp1;
            auto tmp3 = at::vec::convert<bfloat16>(tmp2);
            auto tmp4 = tmp1 + tmp2;
            auto tmp5 = at::vec::convert<bfloat16>(tmp4);
            tmp5.store(out_ptr1 + static_cast<long>(x0), 16);
        }
        #pragma omp simd simdlen(8)
        for(long x0=static_cast<long>(64L); x0<static_cast<long>(65L); x0+=static_cast<long>(1L))
        {
            auto tmp0 = in_ptr0[static_cast<long>(x0)];
            auto tmp1 = c10::convert<float>(tmp0);
            auto tmp2 = decltype(tmp1)(tmp1 + tmp1);
            auto tmp3 = c10::convert<bfloat16>(tmp2);
            auto tmp4 = decltype(tmp1)(tmp1 + tmp2);
            auto tmp5 = c10::convert<bfloat16>(tmp4);
            out_ptr1[static_cast<long>(x0)] = tmp5;
        }
    }
}
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/130677 Approved by: https://github.com/leslie-fang-intel |
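A hedged repro sketch matching the shape of the kernels above: a 65-element bf16 input so both the vector path and the scalar tail are generated, and an intermediate whose store may be removed.
```
import torch

def fn(x):
    y = x + x        # intermediate whose store may be removed
    return x + y     # reuses the upcast fp32 value of x

x = torch.randn(65, dtype=torch.bfloat16)
compiled = torch.compile(fn)
torch.testing.assert_close(compiled(x), fn(x))
```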
||
|
|
81322aee74 |
[Inductor][CPP] Support more than one LocalBuffer (#129121)
**Summary** Support more than one local buffer in an outer-loop-fused node, as well as the case where multiple global buffers share the same local buffer. **TestPlan** ``` python -u -m pytest -s -v inductor/test_cpu_repro.py -k test_two_local_buffers_in_outer_loop_fusion python -u -m pytest -s -v inductor/test_cpu_repro.py -k test_share_local_buffers_in_outer_loop_fusion ``` **Next Step** - [✓] Support more than one Local Buffer/Global Buffer Pull Request resolved: https://github.com/pytorch/pytorch/pull/129121 Approved by: https://github.com/jgong5, https://github.com/peterbell10 ghstack dependencies: #126967 |
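A hedged sketch of the kind of pattern that benefits: a softmax-style chain with two reductions over the same rows, where per-row intermediates can be kept in thread-local buffers by the CPP backend. The function and shapes are illustrative, not the tests named above.
```
import torch

def softmax_like(x):
    m = x.amax(dim=-1, keepdim=True)        # first reduction over the row
    e = (x - m).exp()
    return e / e.sum(dim=-1, keepdim=True)  # second reduction over the same row

x = torch.randn(8, 2048, dtype=torch.bfloat16)
compiled = torch.compile(softmax_like)
torch.testing.assert_close(compiled(x), softmax_like(x), rtol=2e-2, atol=2e-2)
```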
||
|
|
adaa0fea5a |
[Inductor][CPP] Enable Local Buffer for Outer loop fusion (#126967)
**Summary** Currently, the Inductor CPP backend [generated code](https://gist.github.com/leslie-fang-intel/98f91d43dabed581a1ffe23daf133a65#file-bf16-softmax-generated-code-wo-local-buffer-py) for `Softmax` with BF16 data type is significantly slower than the [ATen Implementation]( |