From 5c71b419b2756bb9e613e30db1ba8b5aac8868e6 Mon Sep 17 00:00:00 2001
From: Songhao Jia
Date: Fri, 30 Jan 2026 11:59:10 -0800
Subject: [PATCH] rename ExecutorTorch to ExecuTorch (#17060)

Summary: as title.

Reviewed By: rascani, GregoryComer

Differential Revision: D91863418
---
 backends/aoti/README.md                            |  2 +-
 backends/apple/metal/CMakeLists.txt                |  2 +-
 backends/apple/metal/runtime/metal_backend.cpp     | 12 ++++++------
 backends/apple/metal/runtime/shims/et_metal_ops.h  |  8 ++++----
 backends/apple/metal/runtime/shims/et_metal_ops.mm |  8 ++++----
 backends/apple/metal/runtime/shims/types.h         |  4 ++--
 .../cadence/generic/operators/cadence_type_util.h  |  4 ++--
 backends/cuda/CMakeLists.txt                       |  2 +-
 backends/cuda/runtime/shims/tensor_attribute.h     |  2 +-
 examples/arm/zephyr/CMakeLists.txt                 |  2 +-
 examples/openvino/stable_diffusion/export_lcm.py   |  6 +++---
 11 files changed, 26 insertions(+), 26 deletions(-)

diff --git a/backends/aoti/README.md b/backends/aoti/README.md
index 74b45a35e5d..7c38b125075 100644
--- a/backends/aoti/README.md
+++ b/backends/aoti/README.md
@@ -1,6 +1,6 @@
 # AOTI Common Library
 
-This directory contains **common library components** for AOTI (Ahead-of-Time Inference) driven backends in ExecutorTorch, **not a standalone backend**.
+This directory contains **common library components** for AOTI (Ahead-of-Time Inference) driven backends in ExecuTorch, **not a standalone backend**.
 
 ## Purpose
 
diff --git a/backends/apple/metal/CMakeLists.txt b/backends/apple/metal/CMakeLists.txt
index 587098f8609..713f743806e 100644
--- a/backends/apple/metal/CMakeLists.txt
+++ b/backends/apple/metal/CMakeLists.txt
@@ -49,7 +49,7 @@ add_library(metal_backend STATIC ${_aoti_metal_sources})
 target_include_directories(
   metal_backend PUBLIC $
                        $
-  # PyTorch AOTI headers from ExecutorTorch's torch detection
+  # PyTorch AOTI headers from ExecuTorch's torch detection
   ${TORCH_INCLUDE_DIRS}
 )
 
diff --git a/backends/apple/metal/runtime/metal_backend.cpp b/backends/apple/metal/runtime/metal_backend.cpp
index dfa148fd437..b395145675a 100644
--- a/backends/apple/metal/runtime/metal_backend.cpp
+++ b/backends/apple/metal/runtime/metal_backend.cpp
@@ -418,7 +418,7 @@ class ET_EXPERIMENTAL MetalBackend final
     int32_t mps_device_type = aoti_torch_device_type_mps(); // Returns 13
 
-    // NOTE: ExecutorTorch tensors are always on CPU/host memory
+    // NOTE: ExecuTorch tensors are always on CPU/host memory
     // We need to create GPU copies for Metal kernel execution
     std::vector gpu_inputs(
         n_inputs); // GPU copies for kernel execution
@@ -427,14 +427,14 @@ class ET_EXPERIMENTAL MetalBackend final
 
     ET_LOG(Debug, "MetalBackend input/output vectors generated");
 
-    // Process input tensors: ExecutorTorch provides CPU tensors, create GPU
+    // Process input tensors: ExecuTorch provides CPU tensors, create GPU
     // copies
     for (int i = 0; i < n_inputs; i++) {
       ET_LOG(Debug, "Processing input %d from args to inputs vector", i);
       ET_LOG(
           Debug, "is %d input a tensor input? %d", i, int(args[i]->isTensor()));
%d", i, int(args[i]->isTensor())); - // Get tensor dimensions and properties from ExecutorTorch CPU tensor + // Get tensor dimensions and properties from ExecuTorch CPU tensor auto cpu_tensor = &(args[i]->toTensor()); auto sizes = cpu_tensor->sizes(); auto scalar_type = cpu_tensor->scalar_type(); @@ -509,10 +509,10 @@ class ET_EXPERIMENTAL MetalBackend final ET_LOG(Debug, "MetalBackend GPU inputs generated"); - // Process output tensors: create GPU counterparts for ExecutorTorch CPU + // Process output tensors: create GPU counterparts for ExecuTorch CPU // tensors for (int i = 0; i < n_outputs; i++) { - // Get output tensor dimensions from ExecutorTorch CPU tensor + // Get output tensor dimensions from ExecuTorch CPU tensor auto cpu_output_tensor = &(args[i + n_inputs]->toTensor()); auto sizes = cpu_output_tensor->sizes(); auto scalar_type = cpu_output_tensor->scalar_type(); @@ -618,7 +618,7 @@ class ET_EXPERIMENTAL MetalBackend final ET_LOG(Debug, "Copied GPU output %d back to CPU", i); } - // Clean up GPU tensors that we created (ExecutorTorch tensors are always + // Clean up GPU tensors that we created (ExecuTorch tensors are always // CPU, so all GPU tensors are our copies) for (int i = 0; i < n_inputs; i++) { // All GPU input tensors were created by us, delete them diff --git a/backends/apple/metal/runtime/shims/et_metal_ops.h b/backends/apple/metal/runtime/shims/et_metal_ops.h index fcc6dfc03da..25c75f49b60 100644 --- a/backends/apple/metal/runtime/shims/et_metal_ops.h +++ b/backends/apple/metal/runtime/shims/et_metal_ops.h @@ -19,7 +19,7 @@ extern "C" { #endif /** - * ExecutorTorch implementation of aoti_torch_mps_mm_out. + * ExecuTorch implementation of aoti_torch_mps_mm_out. * Performs simple matrix multiplication: out = self @ mat2 */ AOTITorchError aoti_torch_mps_mm_out( @@ -28,7 +28,7 @@ AOTITorchError aoti_torch_mps_mm_out( AOTITensorHandle mat2); /** - * ExecutorTorch implementation of aoti_torch_mps_bmm_out. + * ExecuTorch implementation of aoti_torch_mps_bmm_out. * Performs batched matrix multiplication: out = self @ mat2 * All tensors must be 3-D with matching batch dimensions. */ @@ -38,7 +38,7 @@ AOTITorchError aoti_torch_mps_bmm_out( AOTITensorHandle mat2); /** - * ExecutorTorch implementation of aoti_torch_mps_convolution. + * ExecuTorch implementation of aoti_torch_mps_convolution. * Performs 2D convolution operation - matches PyTorch AOTI signature */ AOTITorchError aoti_torch_mps_convolution( @@ -58,7 +58,7 @@ AOTITorchError aoti_torch_mps_convolution( AOTITensorHandle* ret0); /** - * ExecutorTorch implementation of + * ExecuTorch implementation of * aoti_torch_mps__scaled_dot_product_attention_math_for_mps. 
 * dot product attention calculation - matches PyTorch AOTI signature
 */
diff --git a/backends/apple/metal/runtime/shims/et_metal_ops.mm b/backends/apple/metal/runtime/shims/et_metal_ops.mm
index 21989fa5665..a061bd46b23 100644
--- a/backends/apple/metal/runtime/shims/et_metal_ops.mm
+++ b/backends/apple/metal/runtime/shims/et_metal_ops.mm
@@ -366,7 +366,7 @@ AOTITorchError aoti_torch_mps_mm_out(
 
   @autoreleasepool {
     try {
-      // Convert AOTITensorHandle to ExecutorTorch tensors
+      // Convert AOTITensorHandle to ExecuTorch tensors
       auto out_tensor = reinterpret_cast(out);
       auto self_tensor = reinterpret_cast(self);
       auto mat2_tensor = reinterpret_cast(mat2);
@@ -639,7 +639,7 @@ AOTITorchError aoti_torch_mps_bmm_out(
 
   @autoreleasepool {
     try {
-      // Convert AOTITensorHandle to ExecutorTorch tensors
+      // Convert AOTITensorHandle to ExecuTorch tensors
       auto out_tensor = reinterpret_cast(out);
       auto self_tensor = reinterpret_cast(self);
       auto mat2_tensor = reinterpret_cast(mat2);
@@ -942,7 +942,7 @@ AOTITorchError aoti_torch_mps_convolution(
 
   @autoreleasepool {
     try {
-      // Convert AOTITensorHandle to ExecutorTorch tensors
+      // Convert AOTITensorHandle to ExecuTorch tensors
       auto input_tensor = reinterpret_cast(input);
       auto weight_tensor = reinterpret_cast(weight);
 
@@ -1479,7 +1479,7 @@ AOTITorchError aoti_torch_mps__scaled_dot_product_attention_math_for_mps(
 
   try {
     @autoreleasepool {
-      // Convert AOTITensorHandle to ExecutorTorch tensors
+      // Convert AOTITensorHandle to ExecuTorch tensors
       auto* query_tensor = reinterpret_cast(query);
       auto* key_tensor = reinterpret_cast(key);
       auto* value_tensor = reinterpret_cast(value);
diff --git a/backends/apple/metal/runtime/shims/types.h b/backends/apple/metal/runtime/shims/types.h
index 07d377d7499..da368d3533f 100644
--- a/backends/apple/metal/runtime/shims/types.h
+++ b/backends/apple/metal/runtime/shims/types.h
@@ -16,14 +16,14 @@ namespace executorch {
 namespace backends {
 namespace metal {
 
-// Common using declarations for ExecutorTorch types
+// Common using declarations for ExecuTorch types
 using executorch::runtime::Error;
 using executorch::runtime::etensor::Tensor;
 
 extern "C" {
 
 // Common AOTI type aliases
-// Note: AOTITensorHandle is aliased to Tensor* for ExecutorTorch compatibility
+// Note: AOTITensorHandle is aliased to Tensor* for ExecuTorch compatibility
 using AOTITensorHandle = Tensor*;
 using AOTIRuntimeError = Error;
 using AOTITorchError = Error;
diff --git a/backends/cadence/generic/operators/cadence_type_util.h b/backends/cadence/generic/operators/cadence_type_util.h
index 43852277031..d80d816c550 100644
--- a/backends/cadence/generic/operators/cadence_type_util.h
+++ b/backends/cadence/generic/operators/cadence_type_util.h
@@ -25,7 +25,7 @@
  *
  * Where MACRO is defined as: #define MACRO(ctype, name) ...
  * - ctype: C++ type (uint8_t or int8_t)
- * - name: ExecutorTorch ScalarType name suffix (Byte or Char)
+ * - name: ExecuTorch ScalarType name suffix (Byte or Char)
  *
  * Example:
  *   #define HANDLE_TYPE(ctype, name) \
@@ -52,7 +52,7 @@
  *
  * Where MACRO is defined as: #define MACRO(ctype, name) ...
  * - ctype: C++ type (uint8_t, int8_t, or int16_t)
- * - name: ExecutorTorch ScalarType name suffix (Byte, Char, or Short)
+ * - name: ExecuTorch ScalarType name suffix (Byte, Char, or Short)
  *
  * This macro includes int16_t support for operators that can handle 16-bit
 * quantized values (e.g., quantized_linear, quantized_fully_connected).
diff --git a/backends/cuda/CMakeLists.txt b/backends/cuda/CMakeLists.txt
index b5d8b0748a1..df4fc219fc5 100644
--- a/backends/cuda/CMakeLists.txt
+++ b/backends/cuda/CMakeLists.txt
@@ -33,7 +33,7 @@ set(CUDA_USE_STATIC_CUDA_RUNTIME OFF)
 
 find_package(CUDAToolkit REQUIRED)
 
-# Use ExecutorTorch's standard way to find PyTorch libraries for AOTI
+# Use ExecuTorch's standard way to find PyTorch libraries for AOTI
 include(${EXECUTORCH_ROOT}/tools/cmake/Utils.cmake)
 find_package_torch()
 
diff --git a/backends/cuda/runtime/shims/tensor_attribute.h b/backends/cuda/runtime/shims/tensor_attribute.h
index 683f270ccda..efa15ca072a 100644
--- a/backends/cuda/runtime/shims/tensor_attribute.h
+++ b/backends/cuda/runtime/shims/tensor_attribute.h
@@ -15,7 +15,7 @@
 
 namespace executorch::backends::cuda {
 
-// Common using declarations for ExecutorTorch types
+// Common using declarations for ExecuTorch types
 using executorch::runtime::Error;
 using executorch::runtime::etensor::Tensor;
 
diff --git a/examples/arm/zephyr/CMakeLists.txt b/examples/arm/zephyr/CMakeLists.txt
index 8255e6db83e..dfc97c32600 100644
--- a/examples/arm/zephyr/CMakeLists.txt
+++ b/examples/arm/zephyr/CMakeLists.txt
@@ -117,7 +117,7 @@ if(NOT DEFINED EXECUTORCH_DIR)
   else()
     message(
       FATAL_ERROR
-      "ExecutorTorch module not found. Ensure it's properly configured in your Zephyr workspace."
+      "ExecuTorch module not found. Ensure it's properly configured in your Zephyr workspace."
     )
   endif()
 else()
diff --git a/examples/openvino/stable_diffusion/export_lcm.py b/examples/openvino/stable_diffusion/export_lcm.py
index 3917b1abf6d..d0d678c0b75 100644
--- a/examples/openvino/stable_diffusion/export_lcm.py
+++ b/examples/openvino/stable_diffusion/export_lcm.py
@@ -62,7 +62,7 @@ def export_text_encoder(self, output_path: str, device: str = "CPU") -> bool:
             exported_program, partitioner=[partitioner]
         )
 
-        # Convert to ExecutorTorch program
+        # Convert to ExecuTorch program
         executorch_program = edge_manager.to_executorch(
             config=ExecutorchBackendConfig()
         )
@@ -102,7 +102,7 @@ def export_unet(self, output_path: str, device: str = "CPU") -> bool:
             exported_program, partitioner=[partitioner]
         )
 
-        # Convert to ExecutorTorch program
+        # Convert to ExecuTorch program
         executorch_program = edge_manager.to_executorch(
             config=ExecutorchBackendConfig()
         )
@@ -142,7 +142,7 @@ def export_vae_decoder(self, output_path: str, device: str = "CPU") -> bool:
             exported_program, partitioner=[partitioner]
         )
 
-        # Convert to ExecutorTorch program
+        # Convert to ExecuTorch program
         executorch_program = edge_manager.to_executorch(
             config=ExecutorchBackendConfig()
         )