From 454f20d6fc99b76a2ec5ea9bc37b809c64253010 Mon Sep 17 00:00:00 2001 From: aseembits93 Date: Thu, 22 Jan 2026 16:39:30 -0800 Subject: [PATCH 1/6] first draft, need to refine --- docs/docs.json | 3 +- docs/support-for-jit/index.mdx | 85 ++++++++++++++++++++++++++++++++++ 2 files changed, 87 insertions(+), 1 deletion(-) create mode 100644 docs/support-for-jit/index.mdx diff --git a/docs/docs.json b/docs/docs.json index a36fc82dc..43949abb2 100644 --- a/docs/docs.json +++ b/docs/docs.json @@ -66,7 +66,8 @@ "group": "🧠 Core Concepts", "pages": [ "codeflash-concepts/how-codeflash-works", - "codeflash-concepts/benchmarking" + "codeflash-concepts/benchmarking", + "support-for-jit/index" ] }, { diff --git a/docs/support-for-jit/index.mdx b/docs/support-for-jit/index.mdx new file mode 100644 index 000000000..3dfc68917 --- /dev/null +++ b/docs/support-for-jit/index.mdx @@ -0,0 +1,85 @@ +--- +title: "Support for Just-in-Time Compilation" +description: "Learn how Codeflash optimizes code using JIT compilation with Numba, PyTorch, TensorFlow, and JAX" +icon: "bolt" +sidebarTitle: "JIT Compilation" +keywords: ["JIT", "just-in-time", "numba", "pytorch", "tensorflow", "jax", "GPU", "CUDA", "compilation", "performance"] +--- + +# Support for Just-in-Time Compilation + +Codeflash supports optimizing code using Just-in-Time (JIT) compilation. This allows Codeflash to suggest optimizations that leverage JIT compilers from popular frameworks including **Numba**, **PyTorch**, **TensorFlow**, and **JAX**. + +## Supported JIT Frameworks + +Each framework uses different compilation strategies to accelerate Python code: + +### Numba + +Numba compiles Python functions to optimized machine code using the LLVM compiler infrastructure. Codeflash can suggest Numba optimizations that use: + +- **`@jit` / `@njit`** - General-purpose JIT compilation with `nopython` mode for removing Python interpreter overhead +- **`parallel=True`** - Enables automatic SIMD parallelization +- **`fastmath=True`** - Uses aggressive floating-point optimizations via LLVM's fastmath flag +- **`@vectorize` / `@guvectorize`** - Creates NumPy universal functions (ufuncs) +- **`@cuda.jit`** - Compiles functions to run on NVIDIA GPUs + +### PyTorch + +PyTorch provides multiple compilation approaches: + +- **`torch.compile()`** - The recommended compilation API that uses TorchDynamo to trace operations and create optimized CUDA graphs +- **`torch.jit.script`** - Compiles functions using TorchScript +- **`torch.jit.trace`** - Traces tensor operations to create optimized execution graphs + +### TensorFlow + +TensorFlow uses the XLA (Accelerated Linear Algebra) backend for JIT compilation: + +- **`@tf.function`** - Compiles Python functions into optimized TensorFlow graphs using XLA + +### JAX + +JAX captures side-effect-free operations and optimizes them: + +- **`@jax.jit`** - JIT compiles functions using XLA, with automatic operation fusion for improved performance + +## How Codeflash Optimizes with JIT + +When Codeflash identifies a function that could benefit from JIT compilation, it: + +1. **Rewrites the code** in a JIT-compatible format, which may involve breaking down complex functions into separate JIT-compiled components +2. **Generates appropriate tests** that are compatible with JIT-compiled code, carefully handling data types since JIT compilers have stricter type requirements +3. 
**Adds GPU synchronization calls** for accurate profiling when code runs on GPU, since GPU operations are inherently non-blocking + +## Accurate Benchmarking with GPU Code + +Since GPU operations execute asynchronously, Codeflash automatically inserts synchronization barriers before measuring performance. This ensures timing measurements reflect actual computation time rather than just the time to queue operations: + +- **PyTorch**: Uses `torch.cuda.synchronize()` or `torch.mps.synchronize()` depending on the device +- **JAX**: Uses `jax.block_until_ready()` to wait for computation to complete +- **TensorFlow**: Uses `tf.test.experimental.sync_devices()` for device synchronization + +## When JIT Compilation Helps + +JIT compilation is most effective for: + +- Numerical computations with loops that can't be easily vectorized +- Custom algorithms not covered by existing optimized libraries +- Functions that are called repeatedly with consistent input types +- Code that benefits from hardware-specific optimizations (SIMD, GPU acceleration) + +## When JIT Compilation May Not Help + +JIT compilation may not provide speedups when: + +- The code already uses highly optimized libraries (e.g., NumPy with MKL, cuBLAS, cuDNN) +- Functions have variable input types or shapes that prevent effective compilation +- The compilation overhead exceeds the runtime savings for short-running functions +- The code relies heavily on Python objects or dynamic features that JIT compilers can't optimize + +## Configuration + +JIT compilation support is **enabled automatically** in Codeflash. You don't need to modify any configuration to enable JIT-based optimizations. Codeflash will automatically detect when JIT compilation could improve performance and suggest appropriate optimizations. + +When running tests with coverage measurement, Codeflash temporarily disables JIT compilation to ensure accurate coverage data, then re-enables it for performance benchmarking. From 9fe6ef797aa8fd8d33a72530027542eda8fff35c Mon Sep 17 00:00:00 2001 From: aseembits93 Date: Thu, 22 Jan 2026 17:20:52 -0800 Subject: [PATCH 2/6] todo write more about the flags in torch/tensorflow and jax --- docs/support-for-jit/index.mdx | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/docs/support-for-jit/index.mdx b/docs/support-for-jit/index.mdx index 3dfc68917..9aa91a6a4 100644 --- a/docs/support-for-jit/index.mdx +++ b/docs/support-for-jit/index.mdx @@ -8,49 +8,58 @@ keywords: ["JIT", "just-in-time", "numba", "pytorch", "tensorflow", "jax", "GPU" # Support for Just-in-Time Compilation -Codeflash supports optimizing code using Just-in-Time (JIT) compilation. This allows Codeflash to suggest optimizations that leverage JIT compilers from popular frameworks including **Numba**, **PyTorch**, **TensorFlow**, and **JAX**. +Codeflash supports optimizing numerical code using Just-in-Time (JIT) compilation via leveraging JIT compilers from popular frameworks including **Numba**, **PyTorch**, **TensorFlow**, and **JAX**. ## Supported JIT Frameworks Each framework uses different compilation strategies to accelerate Python code: -### Numba +### Numba (CPU Code) Numba compiles Python functions to optimized machine code using the LLVM compiler infrastructure. 
Codeflash can suggest Numba optimizations that use: -- **`@jit` / `@njit`** - General-purpose JIT compilation with `nopython` mode for removing Python interpreter overhead -- **`parallel=True`** - Enables automatic SIMD parallelization -- **`fastmath=True`** - Uses aggressive floating-point optimizations via LLVM's fastmath flag -- **`@vectorize` / `@guvectorize`** - Creates NumPy universal functions (ufuncs) -- **`@cuda.jit`** - Compiles functions to run on NVIDIA GPUs +- **`@jit`** - General-purpose JIT compilation with optional flags. + - **`noython=True`** - Compiles to machine code without falling back to the python interpreter. + - **`parallel=True`** - Enables automatic thread-level parallelization of the function across multiple CPU cores (no GIL!). + - **`fastmath=True`** - Uses aggressive floating-point optimizations via LLVM's fastmath flag + - **`cache=True`** - Numba writes the result of function compilation to disk which significantly reduces future compilation times. ### PyTorch PyTorch provides multiple compilation approaches: - **`torch.compile()`** - The recommended compilation API that uses TorchDynamo to trace operations and create optimized CUDA graphs -- **`torch.jit.script`** - Compiles functions using TorchScript -- **`torch.jit.trace`** - Traces tensor operations to create optimized execution graphs + - **`noython=True`** - Compiles to machine code without falling back to the python interpreter. + - **`parallel=True`** - Enables automatic thread-level parallelization of the function across multiple CPU cores (no GIL!). + - **`fastmath=True`** - Uses aggressive floating-point optimizations via LLVM's fastmath flag + - **`cache=True`** - Numba writes the result of function compilation to disk which significantly reduces future compilation times. ### TensorFlow TensorFlow uses the XLA (Accelerated Linear Algebra) backend for JIT compilation: - **`@tf.function`** - Compiles Python functions into optimized TensorFlow graphs using XLA + - **`noython=True`** - Compiles to machine code without falling back to the python interpreter. + - **`parallel=True`** - Enables automatic thread-level parallelization of the function across multiple CPU cores (no GIL!). + - **`fastmath=True`** - Uses aggressive floating-point optimizations via LLVM's fastmath flag + - **`cache=True`** - Numba writes the result of function compilation to disk which significantly reduces future compilation times. ### JAX JAX captures side-effect-free operations and optimizes them: - **`@jax.jit`** - JIT compiles functions using XLA, with automatic operation fusion for improved performance + - **`noython=True`** - Compiles to machine code without falling back to the python interpreter. + - **`parallel=True`** - Enables automatic thread-level parallelization of the function across multiple CPU cores (no GIL!). + - **`fastmath=True`** - Uses aggressive floating-point optimizations via LLVM's fastmath flag + - **`cache=True`** - Numba writes the result of function compilation to disk which significantly reduces future compilation times. ## How Codeflash Optimizes with JIT When Codeflash identifies a function that could benefit from JIT compilation, it: -1. **Rewrites the code** in a JIT-compatible format, which may involve breaking down complex functions into separate JIT-compiled components -2. **Generates appropriate tests** that are compatible with JIT-compiled code, carefully handling data types since JIT compilers have stricter type requirements -3. 
**Adds GPU synchronization calls** for accurate profiling when code runs on GPU, since GPU operations are inherently non-blocking +1. **Rewrites the code** in a JIT-compatible format, which may involve breaking down complex functions into separate JIT-compiled components. +2. **Generates appropriate tests** that are compatible with JIT-compiled code, carefully handling data types since JIT compilers have stricter input type requirements. ## Accurate Benchmarking with GPU Code From 85344f5fd4ca8e5e6bef80832f4c19eae560f705 Mon Sep 17 00:00:00 2001 From: aseembits93 Date: Thu, 22 Jan 2026 17:26:25 -0800 Subject: [PATCH 3/6] keep editing --- docs/support-for-jit/index.mdx | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/support-for-jit/index.mdx b/docs/support-for-jit/index.mdx index 9aa91a6a4..ee03d5220 100644 --- a/docs/support-for-jit/index.mdx +++ b/docs/support-for-jit/index.mdx @@ -18,9 +18,8 @@ Each framework uses different compilation strategies to accelerate Python code: Numba compiles Python functions to optimized machine code using the LLVM compiler infrastructure. Codeflash can suggest Numba optimizations that use: -- **`@jit`** - General-purpose JIT compilation with optional flags. +- **`@jit`** - General-purpose JIT compilation with optional flags. Here is a non-exhaustive options which codeflash would apply on the function to optimize it via numba jit compilation. - **`noython=True`** - Compiles to machine code without falling back to the python interpreter. - - **`parallel=True`** - Enables automatic thread-level parallelization of the function across multiple CPU cores (no GIL!). - **`fastmath=True`** - Uses aggressive floating-point optimizations via LLVM's fastmath flag - **`cache=True`** - Numba writes the result of function compilation to disk which significantly reduces future compilation times. From eb9b3dff1ad69cdb0b523bcce13a2a76799fe8ce Mon Sep 17 00:00:00 2001 From: aseembits93 Date: Fri, 23 Jan 2026 18:32:05 -0800 Subject: [PATCH 4/6] add examples --- docs/support-for-jit/index.mdx | 243 ++++++++++++++++++++++++++++----- 1 file changed, 207 insertions(+), 36 deletions(-) diff --git a/docs/support-for-jit/index.mdx b/docs/support-for-jit/index.mdx index ee03d5220..2c7f0b1aa 100644 --- a/docs/support-for-jit/index.mdx +++ b/docs/support-for-jit/index.mdx @@ -18,40 +18,36 @@ Each framework uses different compilation strategies to accelerate Python code: Numba compiles Python functions to optimized machine code using the LLVM compiler infrastructure. Codeflash can suggest Numba optimizations that use: -- **`@jit`** - General-purpose JIT compilation with optional flags. Here is a non-exhaustive options which codeflash would apply on the function to optimize it via numba jit compilation. - - **`noython=True`** - Compiles to machine code without falling back to the python interpreter. - - **`fastmath=True`** - Uses aggressive floating-point optimizations via LLVM's fastmath flag - - **`cache=True`** - Numba writes the result of function compilation to disk which significantly reduces future compilation times. +- **`@jit`** - General-purpose JIT compilation with optional flags. + - **`nopython=True`** - Compiles to machine code without falling back to the Python interpreter. + - **`fastmath=True`** - Uses aggressive floating-point optimizations via LLVM's fastmath flag. + - **`cache=True`** - cache compiled function to disk which reduces future runtimes. + - **`parallel=True`** - Parallalizes code inside loops. 
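
A minimal sketch of how these flags combine on a loop-heavy kernel (illustrative only; `pairwise_l2` is a hypothetical function, not part of this patch):

```python
import numpy as np
from numba import jit, prange


# Hypothetical example: all four flags listed above applied to one kernel.
@jit(nopython=True, fastmath=True, cache=True, parallel=True)
def pairwise_l2(points):
    n, dims = points.shape
    out = np.empty((n, n), dtype=np.float64)
    for i in prange(n):  # prange + parallel=True spreads rows across CPU cores
        for j in range(n):
            acc = 0.0
            for k in range(dims):
                d = points[i, k] - points[j, k]
                acc += d * d
            out[i, j] = np.sqrt(acc)
    return out


# First call triggers compilation; cache=True persists the compiled kernel to disk.
distances = pairwise_l2(np.random.rand(512, 3))
```
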
### PyTorch -PyTorch provides multiple compilation approaches: +PyTorch provides JIT compilation through `torch.compile()`, the recommended compilation API introduced in PyTorch 2.0. It uses TorchDynamo to capture Python bytecode and TorchInductor to generate optimized kernels. -- **`torch.compile()`** - The recommended compilation API that uses TorchDynamo to trace operations and create optimized CUDA graphs - - **`noython=True`** - Compiles to machine code without falling back to the python interpreter. - - **`parallel=True`** - Enables automatic thread-level parallelization of the function across multiple CPU cores (no GIL!). - - **`fastmath=True`** - Uses aggressive floating-point optimizations via LLVM's fastmath flag - - **`cache=True`** - Numba writes the result of function compilation to disk which significantly reduces future compilation times. +- **`torch.compile()`** - Compiles a function or module for optimized execution. + - **`mode`** - Controls the compilation strategy: + - `"default"` - Balanced compilation with moderate optimization. + - `"reduce-overhead"` - Minimizes Python overhead using CUDA graphs, ideal for small batches. + - `"max-autotune"` - Spends more time autotuning to find the fastest kernels. + - **`fullgraph=True`** - Requires the entire function to be captured as a single graph. Raises an error if graph breaks occur, useful for ensuring complete optimization. + - **`dynamic=True`** - Enables dynamic shape support, allowing the compiled function to handle varying input sizes without recompilation. ### TensorFlow -TensorFlow uses the XLA (Accelerated Linear Algebra) backend for JIT compilation: +TensorFlow uses `@tf.function` to compile Python functions into optimized TensorFlow graphs. When combined with XLA (Accelerated Linear Algebra), it can generate highly optimized machine code for both CPU and GPU. -- **`@tf.function`** - Compiles Python functions into optimized TensorFlow graphs using XLA - - **`noython=True`** - Compiles to machine code without falling back to the python interpreter. - - **`parallel=True`** - Enables automatic thread-level parallelization of the function across multiple CPU cores (no GIL!). - - **`fastmath=True`** - Uses aggressive floating-point optimizations via LLVM's fastmath flag - - **`cache=True`** - Numba writes the result of function compilation to disk which significantly reduces future compilation times. +- **`@tf.function`** - Converts Python functions into TensorFlow graphs for optimized execution. + - **`jit_compile=True`** - Enables XLA compilation, which performs whole-function optimization including operation fusion, memory layout optimization, and target-specific code generation. ### JAX -JAX captures side-effect-free operations and optimizes them: +JAX uses XLA to JIT compile pure functions into optimized machine code. It emphasizes functional programming patterns and captures side-effect-free operations for optimization. -- **`@jax.jit`** - JIT compiles functions using XLA, with automatic operation fusion for improved performance - - **`noython=True`** - Compiles to machine code without falling back to the python interpreter. - - **`parallel=True`** - Enables automatic thread-level parallelization of the function across multiple CPU cores (no GIL!). - - **`fastmath=True`** - Uses aggressive floating-point optimizations via LLVM's fastmath flag - - **`cache=True`** - Numba writes the result of function compilation to disk which significantly reduces future compilation times. 
+- **`@jax.jit`** - JIT compiles functions using XLA with automatic operation fusion. ## How Codeflash Optimizes with JIT @@ -60,34 +56,209 @@ When Codeflash identifies a function that could benefit from JIT compilation, it 1. **Rewrites the code** in a JIT-compatible format, which may involve breaking down complex functions into separate JIT-compiled components. 2. **Generates appropriate tests** that are compatible with JIT-compiled code, carefully handling data types since JIT compilers have stricter input type requirements. -## Accurate Benchmarking with GPU Code +## Accurate Benchmarking on Non-CPU devices Since GPU operations execute asynchronously, Codeflash automatically inserts synchronization barriers before measuring performance. This ensures timing measurements reflect actual computation time rather than just the time to queue operations: -- **PyTorch**: Uses `torch.cuda.synchronize()` or `torch.mps.synchronize()` depending on the device -- **JAX**: Uses `jax.block_until_ready()` to wait for computation to complete -- **TensorFlow**: Uses `tf.test.experimental.sync_devices()` for device synchronization +- **PyTorch**: Uses `torch.cuda.synchronize()` (NVIDIA GPUs) or `torch.mps.synchronize()` (MacOS Metal Performance Shaders) depending on the device. +- **JAX**: Uses `jax.block_until_ready()` to wait for computation to complete. +- **TensorFlow**: Uses `tf.test.experimental.sync_devices()` for device synchronization. ## When JIT Compilation Helps JIT compilation is most effective for: -- Numerical computations with loops that can't be easily vectorized -- Custom algorithms not covered by existing optimized libraries -- Functions that are called repeatedly with consistent input types +- Numerical computations with loops that can't be easily vectorized. +- Custom algorithms not covered by existing optimized libraries. +- Functions that are called repeatedly with consistent input types. - Code that benefits from hardware-specific optimizations (SIMD, GPU acceleration) +### Example + +#### Function Definition + +```python +import torch +def complex_activation(x): + """A custom activation with many small operations - compile makes a huge difference""" + # Many sequential element-wise ops create kernel launch overhead + x = torch.sin(x) + x = x * torch.cos(x) + x = x + torch.exp(-x.abs()) + x = x / (1 + x.pow(2)) + x = torch.tanh(x) * torch.sigmoid(x) + x = x - 0.5 * x.pow(3) + return x +``` + +#### Benchmarking Snippet (replace `cuda` with `mps` to run on your Mac) + +```python +import time +# Create compiled version +complex_activation_compiled = torch.compile(complex_activation) + +# Benchmark +x = torch.randn(1000, 1000, device='cuda') + +# Warmup +for _ in range(10): + _ = complex_activation(x) + _ = complex_activation_compiled(x) + +# Time uncompiled +torch.cuda.synchronize() +start = time.time() +for _ in range(100): + y = complex_activation(x) +torch.cuda.synchronize() +uncompiled_time = time.time() - start + +# Time compiled +torch.cuda.synchronize() +start = time.time() +for _ in range(100): + y = complex_activation_compiled(x) +torch.cuda.synchronize() +compiled_time = time.time() - start + +print(f"Uncompiled: {uncompiled_time:.4f}s") +print(f"Compiled: {compiled_time:.4f}s") +print(f"Speedup: {uncompiled_time/compiled_time:.2f}x") +``` + +Expected Output on CUDA + +``` +Uncompiled: 0.0176s +Compiled: 0.0063s +Speedup: 2.80x +``` + +Here, JIT compilation via `torch.compile` is the only viable option because +1. 
Already vectorized - All operations are already PyTorch tensor ops. +2. Multiple Kernel Launches - Uncompiled code launches ~10 separate kernels. torch.compile fuses them into 1-2 kernels, eliminating kernel launch overhead. +3. No algorithmic improvement - The computation itself is already optimal. +4. Python overhead elimination - Removes Python interpreter overhead between operations. + + ## When JIT Compilation May Not Help JIT compilation may not provide speedups when: -- The code already uses highly optimized libraries (e.g., NumPy with MKL, cuBLAS, cuDNN) -- Functions have variable input types or shapes that prevent effective compilation -- The compilation overhead exceeds the runtime savings for short-running functions -- The code relies heavily on Python objects or dynamic features that JIT compilers can't optimize +- The code already uses highly optimized libraries (e.g., NumPy with MKL, cuBLAS, cuDNN). +- Functions have variable input types or shapes that prevent effective compilation. +- The compilation overhead exceeds the runtime savings for short-running functions. +- The code relies heavily on Python objects or dynamic features that JIT compilers can't optimize. -## Configuration +### Example + +#### Function Definition + +``` +def adaptive_processing(x, threshold=0.5): + """Function with data-dependent control flow - compile struggles here""" + # Check how many values exceed threshold (data-dependent!) + mask = x > threshold + num_large = mask.sum().item() # .item() causes graph break + + if num_large > x.numel() * 0.3: + # Path 1: Many large values - use expensive operation + result = torch.matmul(x, x.T) # Already optimized by cuBLAS + result = result.mean(dim=0) + else: + # Path 2: Few large values - use cheap operation + result = x.mean(dim=1) + + return result +``` + +#### Benchmarking Snippet (replace `cuda` with `mps` to run on your Mac) + +``` +# Create compiled version +adaptive_processing_compiled = torch.compile(adaptive_processing) -JIT compilation support is **enabled automatically** in Codeflash. You don't need to modify any configuration to enable JIT-based optimizations. Codeflash will automatically detect when JIT compilation could improve performance and suggest appropriate optimizations. +# Test with data that causes branch variation +x = torch.randn(500, 500, device='cuda') + +# Warmup +for _ in range(10): + _ = adaptive_processing(x) + _ = adaptive_processing_compiled(x) + +# Benchmark with varying data (causes recompilation) +torch.cuda.synchronize() +start = time.time() +for i in range(100): + # Vary the data to trigger different branches + x_test = torch.randn(500, 500, device='cuda') + (i % 2) + y = adaptive_processing(x_test) +torch.cuda.synchronize() +uncompiled_time = time.time() - start + +torch.cuda.synchronize() +start = time.time() +for i in range(100): + x_test = torch.randn(500, 500, device='cuda') + (i % 2) + y = adaptive_processing_compiled(x_test) # Recompiles frequently! +torch.cuda.synchronize() +compiled_time = time.time() - start + +print(f"Uncompiled: {uncompiled_time:.4f}s") +print(f"Compiled: {compiled_time:.4f}s") +print(f"Slowdown: {compiled_time/uncompiled_time:.2f}x") +``` + +Expected Output on CUDA + +``` +Uncompiled: 0.0296s +Compiled: 0.2847s +Slowdown: 9.63x +``` + +Why `torch.compile` is detrimental here: + +1. Graph breaks - `.item()` forces a graph break, negating compile benefits. +2. Recompilation overhead - Different branches cause expensive recompilation each time. +3. 
Dynamic control flow - Data-dependent conditionals can't be optimized away. +4. Already optimized ops - `matmul` already uses `cuBLAS`; compile adds overhead without benefit. + +#### Better Optimization Strategy + +```python +def optimized_version(x, threshold=0.5): + """Remove data-dependent control flow - vectorize instead""" + mask = (x > threshold).float() + weight = (mask.mean() > 0.3).float() # Keep on GPU + + # Compute both paths, blend based on weight (branchless) + expensive = torch.matmul(x, x.T).mean(dim=0) + cheap = x.mean(dim=1).squeeze() + + # Pad cheap result to match expensive dimensions + cheap_padded = cheap.expand(expensive.shape[0]) + + result = weight * expensive + (1 - weight) * cheap_padded + return result +``` + +Expected Output on CUDA + +``` +Optimized: 0.0277s +Speedup compared to Uncompiled: 1.57x +``` + + +Key improvements: + +1. Eliminate `.item()` - Keep computation on GPU. +2. Branchless execution - Compute both paths, blend results. +3. Vectorization - Replace conditionals with masked operations. +4. Reduce Python overhead - Minimize host-device synchronization. + +## Configuration -When running tests with coverage measurement, Codeflash temporarily disables JIT compilation to ensure accurate coverage data, then re-enables it for performance benchmarking. +JIT compilation support is **enabled automatically** in Codeflash. You don't need to modify any configuration to enable JIT-based optimizations. Codeflash will automatically detect when JIT compilation could improve performance and suggest appropriate optimizations. \ No newline at end of file From 15f4b6dd0e02373959da51a3e6ecfc94069dc67a Mon Sep 17 00:00:00 2001 From: aseembits93 Date: Fri, 23 Jan 2026 18:47:08 -0800 Subject: [PATCH 5/6] typos --- docs/support-for-jit/index.mdx | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/docs/support-for-jit/index.mdx b/docs/support-for-jit/index.mdx index 2c7f0b1aa..d84a75912 100644 --- a/docs/support-for-jit/index.mdx +++ b/docs/support-for-jit/index.mdx @@ -22,7 +22,7 @@ Numba compiles Python functions to optimized machine code using the LLVM compile - **`nopython=True`** - Compiles to machine code without falling back to the Python interpreter. - **`fastmath=True`** - Uses aggressive floating-point optimizations via LLVM's fastmath flag. - **`cache=True`** - cache compiled function to disk which reduces future runtimes. - - **`parallel=True`** - Parallalizes code inside loops. + - **`parallel=True`** - Parallelizes code inside loops. ### PyTorch @@ -53,12 +53,14 @@ JAX uses XLA to JIT compile pure functions into optimized machine code. It empha When Codeflash identifies a function that could benefit from JIT compilation, it: -1. **Rewrites the code** in a JIT-compatible format, which may involve breaking down complex functions into separate JIT-compiled components. -2. **Generates appropriate tests** that are compatible with JIT-compiled code, carefully handling data types since JIT compilers have stricter input type requirements. +1. Rewrites the code in a JIT-compatible format, which may involve breaking down complex functions into separate JIT-compiled components. +2. Generates appropriate tests that are compatible with JIT-compiled code, carefully handling data types since JIT compilers have stricter input type requirements. +3. Disables JIT compilation while running coverage and tracer to get accurate coverage and trace information. 
Both of them rely on Python bytecode execution but JIT compiled code stops running as Python bytecode. +4. Disables Line Profiler information collection whenever presented with JIT compiled code. It could be possible to disable JIT compilation and run the line profiler, but that would lead to inaccurate information which could misguide the optimization process. ## Accurate Benchmarking on Non-CPU devices -Since GPU operations execute asynchronously, Codeflash automatically inserts synchronization barriers before measuring performance. This ensures timing measurements reflect actual computation time rather than just the time to queue operations: +Since Non-CPU operations execute asynchronously, Codeflash automatically inserts synchronization barriers before measuring performance. This ensures timing measurements reflect actual computation time rather than just the time to queue operations: - **PyTorch**: Uses `torch.cuda.synchronize()` (NVIDIA GPUs) or `torch.mps.synchronize()` (MacOS Metal Performance Shaders) depending on the device. - **JAX**: Uses `jax.block_until_ready()` to wait for computation to complete. @@ -71,7 +73,7 @@ JIT compilation is most effective for: - Numerical computations with loops that can't be easily vectorized. - Custom algorithms not covered by existing optimized libraries. - Functions that are called repeatedly with consistent input types. -- Code that benefits from hardware-specific optimizations (SIMD, GPU acceleration) +- Code that benefits from hardware-specific optimizations (SIMD, GPU acceleration). ### Example @@ -146,7 +148,7 @@ Here, JIT compilation via `torch.compile` is the only viable option because JIT compilation may not provide speedups when: -- The code already uses highly optimized libraries (e.g., NumPy with MKL, cuBLAS, cuDNN). +- The code already uses highly optimized libraries (e.g., `NumPy` with `MKL`, `cuBLAS`, `cuDNN`). - Functions have variable input types or shapes that prevent effective compilation. - The compilation overhead exceeds the runtime savings for short-running functions. - The code relies heavily on Python objects or dynamic features that JIT compilers can't optimize. 
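
The `torch.compile()` flags described in the PyTorch section (`mode`, `fullgraph`, `dynamic`) can be combined as below; this is an illustrative sketch, not code from the patches, and `gelu_ish` is a made-up function:

```python
import torch


def gelu_ish(x):
    # Small element-wise function of the kind the docs benchmark with torch.compile.
    return 0.5 * x * (1.0 + torch.tanh(0.79788456 * (x + 0.044715 * x.pow(3))))


# mode trades compile time for runtime, fullgraph=True errors on graph breaks instead
# of silently falling back, and dynamic=True avoids recompiling when shapes change.
compiled = torch.compile(gelu_ish, mode="reduce-overhead", fullgraph=True, dynamic=True)

device = "cuda" if torch.cuda.is_available() else "cpu"
x = torch.randn(2048, 2048, device=device)
y = compiled(x)  # first call compiles; later calls reuse the optimized kernels
```
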
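
For the JAX guidance above, a hedged sketch of `@jax.jit` together with the `block_until_ready()` synchronization that the benchmarking section relies on (illustrative only; `fused_ops` is a hypothetical function):

```python
import time

import jax
import jax.numpy as jnp


@jax.jit
def fused_ops(x):
    # Several element-wise ops that XLA fuses into a small number of kernels.
    y = jnp.sin(x) * jnp.cos(x)
    y = y + jnp.exp(-jnp.abs(y))
    return jnp.tanh(y) / (1.0 + y * y)


x = jnp.ones((1000, 1000))
fused_ops(x).block_until_ready()  # warm-up call pays the one-time compilation cost

start = time.time()
fused_ops(x).block_until_ready()  # block so the timing covers compute, not just dispatch
print(f"steady-state call: {time.time() - start:.6f}s")
```
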
From 06f5460803a56a2c4956ebab7458760e1dedf8c1 Mon Sep 17 00:00:00 2001 From: aseembits93 Date: Mon, 26 Jan 2026 10:01:07 -0800 Subject: [PATCH 6/6] start testing --- code_to_optimize/complex_activation.py | 11 +++ .../tests/pytest/test_complex_activation.py | 88 +++++++++++++++++++ 2 files changed, 99 insertions(+) create mode 100644 code_to_optimize/complex_activation.py create mode 100644 code_to_optimize/tests/pytest/test_complex_activation.py diff --git a/code_to_optimize/complex_activation.py b/code_to_optimize/complex_activation.py new file mode 100644 index 000000000..d9ed216d3 --- /dev/null +++ b/code_to_optimize/complex_activation.py @@ -0,0 +1,11 @@ +import torch +def complex_activation(x): + """A custom activation with many small operations - compile makes a huge difference""" + # Many sequential element-wise ops create kernel launch overhead + x = torch.sin(x) + x = x * torch.cos(x) + x = x + torch.exp(-x.abs()) + x = x / (1 + x.pow(2)) + x = torch.tanh(x) * torch.sigmoid(x) + x = x - 0.5 * x.pow(3) + return x \ No newline at end of file diff --git a/code_to_optimize/tests/pytest/test_complex_activation.py b/code_to_optimize/tests/pytest/test_complex_activation.py new file mode 100644 index 000000000..3fa8f0d12 --- /dev/null +++ b/code_to_optimize/tests/pytest/test_complex_activation.py @@ -0,0 +1,88 @@ +""" +Unit tests for complex_activation function. + +Tests run on CUDA device with a single tensor shape. +""" + +import pytest +import torch + +from code_to_optimize.complex_activation import complex_activation + + +@pytest.fixture +def cuda_device(): + """Return CUDA device, skip if not available.""" + if not torch.cuda.is_available(): + pytest.skip("CUDA not available") + return torch.device("cuda") + + +@pytest.fixture +def input_tensor(cuda_device): + """Create a fixed-shape input tensor on CUDA.""" + torch.manual_seed(42) + return torch.randn(32, 64, device=cuda_device, dtype=torch.float32) + + +class TestComplexActivation: + """Tests for the complex_activation function.""" + + def test_output_shape(self, input_tensor): + """Test that output shape matches input shape.""" + result = complex_activation(input_tensor) + assert result.shape == input_tensor.shape + + def test_output_dtype(self, input_tensor): + """Test that output dtype matches input dtype.""" + result = complex_activation(input_tensor) + assert result.dtype == input_tensor.dtype + + def test_output_device(self, input_tensor, cuda_device): + """Test that output is on the same device as input.""" + result = complex_activation(input_tensor) + assert result.device.type == cuda_device.type + + def test_deterministic(self, input_tensor): + """Test that the function produces deterministic results.""" + result1 = complex_activation(input_tensor.clone()) + result2 = complex_activation(input_tensor.clone()) + torch.testing.assert_close(result1, result2) + + def test_output_is_finite(self, input_tensor): + """Test that output contains no NaN or Inf values.""" + result = complex_activation(input_tensor) + assert torch.isfinite(result).all() + + def test_output_bounded(self, input_tensor): + """Test that output values are bounded (activation should not explode).""" + result = complex_activation(input_tensor) + assert result.abs().max() < 10.0 + + def test_zero_input(self, cuda_device): + """Test behavior with zero input.""" + x = torch.zeros(32, 64, device=cuda_device, dtype=torch.float32) + result = complex_activation(x) + assert torch.isfinite(result).all() + assert result.shape == x.shape + + def 
test_positive_input(self, cuda_device): + """Test behavior with all positive inputs.""" + x = torch.abs(torch.randn(32, 64, device=cuda_device, dtype=torch.float32)) + 0.1 + result = complex_activation(x) + assert torch.isfinite(result).all() + + def test_negative_input(self, cuda_device): + """Test behavior with all negative inputs.""" + x = -torch.abs(torch.randn(32, 64, device=cuda_device, dtype=torch.float32)) - 0.1 + result = complex_activation(x) + assert torch.isfinite(result).all() + + def test_gradient_flow(self, cuda_device): + """Test that gradients can flow through the activation.""" + x = torch.randn(32, 64, device=cuda_device, dtype=torch.float32, requires_grad=True) + result = complex_activation(x) + loss = result.sum() + loss.backward() + assert x.grad is not None + assert torch.isfinite(x.grad).all() \ No newline at end of file
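
One way to extend these tests toward the JIT workflow the docs describe is an equivalence check between the eager function and a `torch.compile`d variant; this is a hedged sketch (the test name and tolerances are assumptions, not part of PATCH 6):

```python
import pytest
import torch

from code_to_optimize.complex_activation import complex_activation


@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
def test_compiled_matches_eager():
    """Compiled and eager versions should agree within floating-point tolerance."""
    torch.manual_seed(0)
    x = torch.randn(32, 64, device="cuda", dtype=torch.float32)

    expected = complex_activation(x.clone())      # eager reference
    compiled = torch.compile(complex_activation)  # JIT-compiled candidate

    _ = compiled(x.clone())  # warm-up so the comparison uses the optimized kernels
    torch.testing.assert_close(compiled(x.clone()), expected, rtol=1e-4, atol=1e-5)
```
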