Much less code. A follow-up PR will make these repro files even smaller; small is important since it reduces the time users need to understand what the repro is doing. Here's a sample:

```
(/home/bobren/local/a/pytorch-env) [21:34] devgpu009:/home/bobren/local/a/pytorch/tools/experimental/dynamic_shapes/torchfuzz [130] python fuzzer.py --seed 42
Running single fuzz_and_execute...
Using seed: 42, max_depth: 10
Running generated program...
Selected CUDA_VISIBLE_DEVICES=2
=== Program Output ===
✅ eager success
✅ compile success

===============================
=== Program Source ===
import torch
import sys
import os
fuzzer_dir = r'/home/bobren/local/a/pytorch/tools/experimental/dynamic_shapes/torchfuzz'
if fuzzer_dir not in sys.path:
    sys.path.insert(0, fuzzer_dir)
from tensor_fuzzer import fuzz_scalar, fuzz_tensor_simple, ScalarSpec, TensorSpec

def fuzzed_program(arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7, arg_8, arg_9, arg_10, arg_11, arg_12, arg_13, arg_14, arg_15, arg_16, arg_17, arg_18, arg_19, arg_20, arg_21, arg_22, arg_23, arg_24, arg_25, arg_26):
    # Node node_4: arg (depth 6)
    var_node_4 = arg_0 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_7: constant (depth 4)
    var_node_7 = torch.full((1,), (-0.8353595860703585-0.8384634248041143j), dtype=torch.complex128) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_8: arg (depth 4)
    var_node_8 = arg_1 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_6: tensor_pointwise (depth 5)
    var_node_6 = torch.ops.aten.mul(var_node_7, var_node_8) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_9: constant (depth 5)
    var_node_9 = torch.full((1,), (-0.32478860712861235+0.033909682598544454j), dtype=torch.complex128) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_5: tensor_pointwise (depth 6)
    var_node_5 = torch.ops.aten.mul(var_node_6, var_node_9) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_3: tensor_pointwise (depth 7)
    var_node_3 = torch.ops.aten.sub(var_node_4, var_node_5) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_11: arg (depth 6)
    var_node_11 = arg_2 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_18: constant (depth 0)
    var_node_18 = torch.full((1,), (0.12855308616305575+1.5268033634325642j), dtype=torch.complex128) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_19: arg (depth 0)
    var_node_19 = arg_3 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_17: tensor_pointwise (depth 1)
    var_node_17 = torch.ops.aten.mul(var_node_18, var_node_19) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_21: arg (depth 0)
    var_node_21 = arg_4 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_22: arg (depth 0)
    var_node_22 = arg_5 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_20: tensor_pointwise (depth 1)
    var_node_20 = torch.ops.aten.sub(var_node_21, var_node_22) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_16: tensor_pointwise (depth 2)
    var_node_16 = torch.ops.aten.add(var_node_17, var_node_20) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_25: arg (depth 0)
    var_node_25 = arg_6 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_26: arg (depth 0)
    var_node_26 = arg_7 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_24: tensor_pointwise (depth 1)
    var_node_24 = torch.ops.aten.add(var_node_25, var_node_26) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_27: constant (depth 1)
    var_node_27 = torch.full((1,), (-0.6315711191260084+1.342004076501214j), dtype=torch.complex128) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_23: tensor_pointwise (depth 2)
    var_node_23 = torch.ops.aten.mul(var_node_24, var_node_27) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_15: tensor_pointwise (depth 3)
    var_node_15 = torch.ops.aten.mul(var_node_16, var_node_23) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_28: constant (depth 3)
    var_node_28 = torch.full((1,), (1.064498531874825-0.37289464356501284j), dtype=torch.complex128) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_14: tensor_pointwise (depth 4)
    var_node_14 = torch.ops.aten.mul(var_node_15, var_node_28) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_30: arg (depth 3)
    var_node_30 = arg_8 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_32: arg (depth 2)
    var_node_32 = arg_9 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_33: constant (depth 2)
    var_node_33 = torch.full((1,), (1.5815627438573372+0.5124667911691704j), dtype=torch.complex128) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_31: tensor_pointwise (depth 3)
    var_node_31 = torch.ops.aten.div(var_node_32, var_node_33) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_29: tensor_pointwise (depth 4)
    var_node_29 = torch.ops.aten.div(var_node_30, var_node_31) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_13: tensor_pointwise (depth 5)
    var_node_13 = torch.ops.aten.div(var_node_14, var_node_29) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_39: arg (depth 0)
    var_node_39 = arg_10 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_40: constant (depth 0)
    var_node_40 = torch.full((1,), (-0.5987350493494642-0.5711360569376475j), dtype=torch.complex128) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_38: tensor_pointwise (depth 1)
    var_node_38 = torch.ops.aten.mul(var_node_39, var_node_40) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_41: arg (depth 1)
    var_node_41 = arg_11 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_37: tensor_pointwise (depth 2)
    var_node_37 = torch.ops.aten.add(var_node_38, var_node_41) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_42: constant (depth 2)
    var_node_42 = torch.full((1,), (0.7246044564672116-0.5930730980273312j), dtype=torch.complex128) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_36: tensor_pointwise (depth 3)
    var_node_36 = torch.ops.aten.mul(var_node_37, var_node_42) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_43: constant (depth 3)
    var_node_43 = torch.full((1,), (-0.7582976293117148+1.1880929376258396j), dtype=torch.complex128) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_35: tensor_pointwise (depth 4)
    var_node_35 = torch.ops.aten.mul(var_node_36, var_node_43) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_45: constant (depth 3)
    var_node_45 = torch.full((1,), (1.0896212896322774+0.3124038130417098j), dtype=torch.complex128) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_46: arg (depth 3)
    var_node_46 = arg_12 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_44: tensor_pointwise (depth 4)
    var_node_44 = torch.ops.aten.add(var_node_45, var_node_46) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_34: tensor_pointwise (depth 5)
    var_node_34 = torch.ops.aten.div(var_node_35, var_node_44) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_12: tensor_pointwise (depth 6)
    var_node_12 = torch.ops.aten.div(var_node_13, var_node_34) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_10: tensor_pointwise (depth 7)
    var_node_10 = torch.ops.aten.mul(var_node_11, var_node_12) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_2: tensor_pointwise (depth 8)
    var_node_2 = torch.ops.aten.div(var_node_3, var_node_10) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_48: constant (depth 7)
    var_node_48 = torch.full((1,), (-1.047745491289218+0.279447315087422j), dtype=torch.complex128) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_54: arg (depth 2)
    var_node_54 = arg_13 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_55: arg (depth 2)
    var_node_55 = arg_14 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_53: tensor_pointwise (depth 3)
    var_node_53 = torch.ops.aten.div(var_node_54, var_node_55) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_56: arg (depth 3)
    var_node_56 = arg_15 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_52: tensor_pointwise (depth 4)
    var_node_52 = torch.ops.aten.div(var_node_53, var_node_56) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_59: arg (depth 2)
    var_node_59 = arg_16 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_60: arg (depth 2)
    var_node_60 = arg_17 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_58: tensor_pointwise (depth 3)
    var_node_58 = torch.ops.aten.div(var_node_59, var_node_60) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_61: constant (depth 3)
    var_node_61 = torch.full((1,), (-0.7386327586576402-0.027025998767172658j), dtype=torch.complex128) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_57: tensor_pointwise (depth 4)
    var_node_57 = torch.ops.aten.add(var_node_58, var_node_61) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_51: tensor_pointwise (depth 5)
    var_node_51 = torch.ops.aten.sub(var_node_52, var_node_57) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_64: arg (depth 3)
    var_node_64 = arg_18 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_67: arg (depth 1)
    var_node_67 = arg_19 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_68: constant (depth 1)
    var_node_68 = torch.full((1,), (-0.6840241429755998+1.327637020136433j), dtype=torch.complex128) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_66: tensor_pointwise (depth 2)
    var_node_66 = torch.ops.aten.mul(var_node_67, var_node_68) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_69: arg (depth 2)
    var_node_69 = arg_20 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_65: tensor_pointwise (depth 3)
    var_node_65 = torch.ops.aten.sub(var_node_66, var_node_69) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_63: tensor_pointwise (depth 4)
    var_node_63 = torch.ops.aten.sub(var_node_64, var_node_65) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_70: arg (depth 4)
    var_node_70 = arg_21 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_62: tensor_pointwise (depth 5)
    var_node_62 = torch.ops.aten.sub(var_node_63, var_node_70) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_50: tensor_pointwise (depth 6)
    var_node_50 = torch.ops.aten.mul(var_node_51, var_node_62) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_76: constant (depth 1)
    var_node_76 = torch.full((1,), (1.864651314238342+0.27066487315113186j), dtype=torch.complex128) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_77: arg (depth 1)
    var_node_77 = arg_22 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_75: tensor_pointwise (depth 2)
    var_node_75 = torch.ops.aten.mul(var_node_76, var_node_77) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_78: arg (depth 2)
    var_node_78 = arg_23 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_74: tensor_pointwise (depth 3)
    var_node_74 = torch.ops.aten.add(var_node_75, var_node_78) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_79: arg (depth 3)
    var_node_79 = arg_24 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_73: tensor_pointwise (depth 4)
    var_node_73 = torch.ops.aten.mul(var_node_74, var_node_79) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_80: arg (depth 4)
    var_node_80 = arg_25 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_72: tensor_pointwise (depth 5)
    var_node_72 = torch.ops.aten.mul(var_node_73, var_node_80) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_82: constant (depth 4)
    var_node_82 = torch.full((1,), (1.6341547018841247+0.3096989611326181j), dtype=torch.complex128) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_84: constant (depth 3)
    var_node_84 = torch.full((1,), (0.9609065596935821+0.2920229825681946j), dtype=torch.complex128) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_85: arg (depth 3)
    var_node_85 = arg_26 # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_83: tensor_pointwise (depth 4)
    var_node_83 = torch.ops.aten.add(var_node_84, var_node_85) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_81: tensor_pointwise (depth 5)
    var_node_81 = torch.ops.aten.sub(var_node_82, var_node_83) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_71: tensor_pointwise (depth 6)
    var_node_71 = torch.ops.aten.sub(var_node_72, var_node_81) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_49: tensor_pointwise (depth 7)
    var_node_49 = torch.ops.aten.mul(var_node_50, var_node_71) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_47: tensor_pointwise (depth 8)
    var_node_47 = torch.ops.aten.add(var_node_48, var_node_49) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_1: tensor_pointwise (depth 9)
    var_node_1 = torch.ops.aten.add(var_node_2, var_node_47) # size=(1,), stride=(1,), dtype=complex128, device=cuda
    # Node node_0: torch.ops.aten.item (depth 10)
    var_node_0 = var_node_1.item() # dtype=complex128
    # Final result from root node
    return var_node_0

arg_0 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10042)
arg_1 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10043)
arg_2 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10044)
arg_3 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10045)
arg_4 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10046)
arg_5 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10047)
arg_6 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10048)
arg_7 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10049)
arg_8 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10050)
arg_9 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10051)
arg_10 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10052)
arg_11 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10053)
arg_12 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10054)
arg_13 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10055)
arg_14 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10056)
arg_15 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10057)
arg_16 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10058)
arg_17 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10059)
arg_18 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10060)
arg_19 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10061)
arg_20 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10062)
arg_21 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10063)
arg_22 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10064)
arg_23 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10065)
arg_24 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10066)
arg_25 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10067)
arg_26 = fuzz_tensor_simple((1,), (1,), torch.complex128, seed=10068)

import torch
import sys
torch._dynamo.config.capture_scalar_outputs = True
args = (arg_0, arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7, arg_8, arg_9, arg_10, arg_11, arg_12, arg_13, arg_14, arg_15, arg_16, arg_17, arg_18, arg_19, arg_20, arg_21, arg_22, arg_23, arg_24, arg_25, arg_26)
result_original = fuzzed_program(*args)
print('✅ eager success')
sys.exit(1)
compiled_program = torch.compile(fuzzed_program, fullgraph=False, dynamic=True)
result_compiled = compiled_program(*args)
print('✅ compile success')
======================
Program exited with code: 1
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/163743
Approved by: https://github.com/pianpwk
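For orientation, every generated repro follows the same skeleton: build seeded args, run the program eagerly, then run it under `torch.compile` and compare. Here is a minimal hand-written sketch of that pattern (the op chain and names are illustrative stand-ins, not fuzzer output):

```python
import torch

torch._dynamo.config.capture_scalar_outputs = True

def fuzzed_program(arg_0):
    # A chain of aten pointwise ops ending in a scalar .item() at the root.
    var_node_1 = torch.ops.aten.mul(arg_0, arg_0)
    return var_node_1.item()

arg_0 = torch.randn((1,), dtype=torch.complex128)  # stand-in for fuzz_tensor_simple
result_original = fuzzed_program(arg_0)  # eager run
compiled_program = torch.compile(fuzzed_program, fullgraph=False, dynamic=True)
result_compiled = compiled_program(arg_0)  # compiled run
```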
"""
|
|
Program runner utilities for PyTorch fuzzer.
|
|
This module handles running and testing generated PyTorch programs.
|
|
"""
|
|
|
|
import os
|
|
import random
|
|
import subprocess
|
|
import sys
|
|
|
|
|
|
class ProgramRunner:
    """Runs generated PyTorch programs and handles output/error reporting."""

    def __init__(self):
        pass

    def run_program(self, program_path):
        """
        Run a generated Python program and handle output/errors.

        Args:
            program_path: Path to the Python program to run

        Returns:
            bool: True if the program ran successfully; on failure the
            program's source is printed and the process exits with code 1
        """
        abs_path = os.path.abspath(program_path)
        print(f"Running: {abs_path}")

        # Select a random CUDA device if available
        cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES")
        if cuda_visible_devices:
            devices = [d.strip() for d in cuda_visible_devices.split(",") if d.strip()]
        else:
            # Default to all GPUs if not set
            try:
                import torch

                num_gpus = torch.cuda.device_count()
                devices = [str(i) for i in range(num_gpus)]
            except ImportError:
                devices = []
        if devices:
            selected_device = random.choice(devices)
            env = os.environ.copy()
            env["CUDA_VISIBLE_DEVICES"] = selected_device
            print(f"Selected CUDA_VISIBLE_DEVICES={selected_device}")
        else:
            env = None  # No GPU available or torch not installed

        try:
            result = subprocess.run(
                [sys.executable, abs_path],
                capture_output=True,
                text=True,
                check=True,
                env=env,
            )
            print("=== Program Output ===")
            print(result.stdout)
            print(result.stderr)
            return True

        except subprocess.CalledProcessError as e:
            print("=== Program Output (Failure) ===")
            print(e.stdout)
            print(e.stderr)
            print("===============================")
            print("=== Program Source ===")
            with open(abs_path) as f:
                print(f.read())
            print("======================")
            print(f"Program exited with code: {e.returncode}")
            sys.exit(1)

    def run_and_validate(self, program_path):
        """
        Run a program and return detailed results for validation.

        Args:
            program_path: Path to the Python program to run

        Returns:
            dict: Dictionary with 'success', 'stdout', 'stderr', 'returncode'
        """
        abs_path = os.path.abspath(program_path)

        # Select a random CUDA device if available
        cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES")
        if cuda_visible_devices:
            devices = [d.strip() for d in cuda_visible_devices.split(",") if d.strip()]
        else:
            try:
                import torch

                num_gpus = torch.cuda.device_count()
                devices = [str(i) for i in range(num_gpus)]
            except ImportError:
                devices = []
        if devices:
            selected_device = random.choice(devices)
            env = os.environ.copy()
            env["CUDA_VISIBLE_DEVICES"] = selected_device
            print(f"Selected CUDA_VISIBLE_DEVICES={selected_device}")
        else:
            env = None

        try:
            result = subprocess.run(
                [sys.executable, abs_path],
                capture_output=True,
                text=True,
                check=True,
                env=env,
            )
            return {
                "success": True,
                "stdout": result.stdout,
                "stderr": result.stderr,
                "returncode": result.returncode,
            }

        except subprocess.CalledProcessError as e:
            return {
                "success": False,
                "stdout": e.stdout,
                "stderr": e.stderr,
                "returncode": e.returncode,
            }
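For context, a minimal usage sketch of the two entry points (the module and repro file names below are assumptions for illustration; in practice `fuzzer.py` drives these helpers):

```python
# Hypothetical driver; "runner" and "repro.py" are assumed names, not part of this file.
from runner import ProgramRunner

runner = ProgramRunner()

# Prints the program's output; on failure it dumps the repro source and exits the process.
runner.run_program("repro.py")

# Returns a result dict instead of exiting, so callers can branch on the outcome.
result = runner.run_and_validate("repro.py")
if not result["success"]:
    print(f"repro failed with code {result['returncode']}")
    print(result["stderr"])
```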