diff --git a/tools/experimental/dynamic_shapes/torchfuzz/codegen.py b/tools/experimental/dynamic_shapes/torchfuzz/codegen.py
index 31f2db8e574..585cd152e04 100644
--- a/tools/experimental/dynamic_shapes/torchfuzz/codegen.py
+++ b/tools/experimental/dynamic_shapes/torchfuzz/codegen.py
@@ -55,11 +55,6 @@ def convert_graph_to_python_code(
         op_name = node.op_name
         output_spec = node.output_spec
 
-        # Generate comment for this operation
-        generated_code_lines.append(
-            f"    # Node {node_id}: {op_name} (depth {node.depth})"
-        )
-
         # Generate output variable name
         output_var_name = f"var_{node_id}"
 
@@ -91,7 +86,6 @@ def convert_graph_to_python_code(
 
         # Add proper indentation for function body
        generated_code_lines.extend(["    " + line for line in operation_lines])
-        generated_code_lines.append("")
 
         # Track this node's variable
         node_variables[node_id] = (output_var_name, output_spec)
@@ -110,72 +104,61 @@ def convert_graph_to_python_code(
     else:
         function_signature = "def fuzzed_program()"
 
-    # Build the complete code
-    fuzzer_dir = os.path.dirname(os.path.abspath(__file__))
+    # Build the complete code - all imports at the top
     code_lines = [
         "import torch",
-        "import sys",
-        "import os",
-        "# Add fuzzer directory to path so we can import tensor_fuzzer",
-        f"fuzzer_dir = r'{fuzzer_dir}'",
-        "if fuzzer_dir not in sys.path:",
-        "    sys.path.insert(0, fuzzer_dir)",
-        "from tensor_fuzzer import fuzz_scalar, fuzz_tensor_simple, ScalarSpec, TensorSpec",
+        "torch._dynamo.config.capture_scalar_outputs = True",
         "",
-        "# Generated fuzzed program code (topological order from operation graph)",
-        f"# Graph has {len(operation_graph.nodes)} nodes",
-        "",
-        function_signature + ":",
     ]
 
+    # Add single seed at the top if seed is provided
+    if seed is not None:
+        code_lines.append(f"torch.manual_seed({seed})")
+        code_lines.append("")
+
+    code_lines.append(function_signature + ":")
+
     # Add the generated operation code
     code_lines.extend(generated_code_lines)
 
     # Add return statement
     code_lines.extend(
         [
-            "    # Final result from root node",
             f"    return {final_var_name}",
             "",
         ]
     )
 
-    # Generate argument creation code with deterministic seeds
+    # Generate argument creation code without individual seeds
     if arg_operations:
-        code_lines.append("# Create arguments for the fuzzed program")
         for i, (node_id, spec) in enumerate(arg_operations):
             arg_name = f"arg_{i}"
-            # Use a deterministic seed based on the argument index and main seed
-            arg_seed = (seed + 10000 + i) if seed is not None else None
 
             if isinstance(spec, ScalarSpec):
                 dtype_str = f"torch.{spec.dtype}".replace("torch.torch.", "torch.")
-                if arg_seed is not None:
-                    code_lines.extend(
-                        [
-                            f"scalar_spec = ScalarSpec(dtype={dtype_str})",
-                            f"{arg_name} = fuzz_scalar(scalar_spec, seed={arg_seed})",
-                        ]
-                    )
-                else:
-                    code_lines.extend(
-                        [
-                            f"scalar_spec = ScalarSpec(dtype={dtype_str})",
-                            f"{arg_name} = fuzz_scalar(scalar_spec)",
-                        ]
-                    )
+                code_lines.append(
+                    f"{arg_name} = torch.tensor(torch.randn(()), dtype={dtype_str}).item()"
+                )
+
             elif isinstance(spec, TensorSpec):
                 size_str = str(spec.size)
                 stride_str = str(spec.stride)
                 dtype_str = f"torch.{spec.dtype}".replace("torch.torch.", "torch.")
-                if arg_seed is not None:
-                    code_lines.append(
-                        f"{arg_name} = fuzz_tensor_simple({size_str}, {stride_str}, {dtype_str}, seed={arg_seed})"
-                    )
+
+                # Calculate storage size needed for the strided tensor:
+                # the farthest element sits at offset sum((dim_size - 1) * |stride|),
+                # so the backing storage needs that many elements plus one.
+                if spec.size:
+                    storage_size = 1
+                    for dim_size, stride in zip(spec.size, spec.stride):
+                        if dim_size > 1:
+                            storage_size += (dim_size - 1) * abs(stride)
                 else:
-                    code_lines.append(
-                        f"{arg_name} = fuzz_tensor_simple({size_str}, {stride_str}, {dtype_str})"
-                    )
+                    storage_size = 1
+
+                code_lines.append(
+                    f"{arg_name} = torch.as_strided(torch.randn({storage_size}).to({dtype_str}), {size_str}, {stride_str})"
+                )
 
     # Generate the final execution with both normal and compiled versions
     if arg_operations:
@@ -191,9 +174,6 @@ def convert_graph_to_python_code(
 
     code_lines.extend(
         [
-            "import torch",
-            "import sys",
-            "torch._dynamo.config.capture_scalar_outputs = True",
             "",
             f"args = {args_tuple}",
             "result_original = fuzzed_program(*args)",
@@ -201,7 +181,6 @@
             "compiled_program = torch.compile(fuzzed_program, fullgraph=False, dynamic=True)",
             "result_compiled = compiled_program(*args)",
             "print('✅ compile success')",
-            "",
         ]
     )
 
diff --git a/tools/experimental/dynamic_shapes/torchfuzz/fuzzer.py b/tools/experimental/dynamic_shapes/torchfuzz/fuzzer.py
index 221206ddc99..239af3a3fad 100644
--- a/tools/experimental/dynamic_shapes/torchfuzz/fuzzer.py
+++ b/tools/experimental/dynamic_shapes/torchfuzz/fuzzer.py
@@ -47,7 +47,7 @@ def fuzz_and_execute(
-    # Generate max_depth if not provided (range 3-12)
+    # Generate max_depth if not provided (range 2-4)
     if max_depth is None:
         random.seed(seed + 999)  # Use seed offset for consistent depth selection
-        max_depth = random.randint(3, 12)
+        max_depth = random.randint(2, 4)
     else:
        # Clamp max_depth to valid range
        max_depth = max(1, max_depth)
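
For reference, a minimal sketch (not part of the patch) of the kind of standalone program the updated codegen emits, assuming a trivial two-argument graph. The seed value, the multiply in the function body, and the size (2, 3) / stride (3, 1) tensor spec are illustrative placeholders; the scaffolding lines mirror the string templates in codegen.py above.

import torch

torch._dynamo.config.capture_scalar_outputs = True

torch.manual_seed(42)

def fuzzed_program(arg_0, arg_1):
    var_node_0 = arg_0 * arg_1  # illustrative operation body
    return var_node_0

# Scalar argument: sampled once, converted to a Python scalar via .item()
arg_0 = torch.tensor(torch.randn(()), dtype=torch.float32).item()
# Tensor argument: size (2, 3) with stride (3, 1) needs 1 + 1*3 + 2*1 = 6 storage elements
arg_1 = torch.as_strided(torch.randn(6).to(torch.float32), (2, 3), (3, 1))

args = (arg_0, arg_1)
result_original = fuzzed_program(*args)
compiled_program = torch.compile(fuzzed_program, fullgraph=False, dynamic=True)
result_compiled = compiled_program(*args)
print('✅ compile success')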