2 changes: 1 addition & 1 deletion backends/aoti/README.md
@@ -1,6 +1,6 @@
 # AOTI Common Library
 
-This directory contains **common library components** for AOTI (Ahead-of-Time Inference) driven backends in ExecutorTorch, **not a standalone backend**.
+This directory contains **common library components** for AOTI (Ahead-of-Time Inference) driven backends in ExecuTorch, **not a standalone backend**.
 
 ## Purpose
2 changes: 1 addition & 1 deletion backends/apple/metal/CMakeLists.txt
@@ -49,7 +49,7 @@ add_library(metal_backend STATIC ${_aoti_metal_sources})
 target_include_directories(
   metal_backend
   PUBLIC $<BUILD_INTERFACE:${EXECUTORCH_ROOT}> $<INSTALL_INTERFACE:include>
-         # PyTorch AOTI headers from ExecutorTorch's torch detection
+         # PyTorch AOTI headers from ExecuTorch's torch detection
          ${TORCH_INCLUDE_DIRS}
 )
12 changes: 6 additions & 6 deletions backends/apple/metal/runtime/metal_backend.cpp
@@ -418,7 +418,7 @@ class ET_EXPERIMENTAL MetalBackend final
 
     int32_t mps_device_type = aoti_torch_device_type_mps(); // Returns 13
 
-    // NOTE: ExecutorTorch tensors are always on CPU/host memory
+    // NOTE: ExecuTorch tensors are always on CPU/host memory
     // We need to create GPU copies for Metal kernel execution
     std::vector<AOTITensorHandle> gpu_inputs(
         n_inputs); // GPU copies for kernel execution
@@ -427,14 +427,14 @@ class ET_EXPERIMENTAL MetalBackend final
 
     ET_LOG(Debug, "MetalBackend input/output vectors generated");
 
-    // Process input tensors: ExecutorTorch provides CPU tensors, create GPU
+    // Process input tensors: ExecuTorch provides CPU tensors, create GPU
     // copies
     for (int i = 0; i < n_inputs; i++) {
       ET_LOG(Debug, "Processing input %d from args to inputs vector", i);
       ET_LOG(
           Debug, "is %d input a tensor input? %d", i, int(args[i]->isTensor()));
 
-      // Get tensor dimensions and properties from ExecutorTorch CPU tensor
+      // Get tensor dimensions and properties from ExecuTorch CPU tensor
       auto cpu_tensor = &(args[i]->toTensor());
       auto sizes = cpu_tensor->sizes();
       auto scalar_type = cpu_tensor->scalar_type();
@@ -509,10 +509,10 @@ class ET_EXPERIMENTAL MetalBackend final
 
     ET_LOG(Debug, "MetalBackend GPU inputs generated");
 
-    // Process output tensors: create GPU counterparts for ExecutorTorch CPU
+    // Process output tensors: create GPU counterparts for ExecuTorch CPU
     // tensors
     for (int i = 0; i < n_outputs; i++) {
-      // Get output tensor dimensions from ExecutorTorch CPU tensor
+      // Get output tensor dimensions from ExecuTorch CPU tensor
      auto cpu_output_tensor = &(args[i + n_inputs]->toTensor());
       auto sizes = cpu_output_tensor->sizes();
       auto scalar_type = cpu_output_tensor->scalar_type();
@@ -618,7 +618,7 @@ class ET_EXPERIMENTAL MetalBackend final
       ET_LOG(Debug, "Copied GPU output %d back to CPU", i);
     }
 
-    // Clean up GPU tensors that we created (ExecutorTorch tensors are always
+    // Clean up GPU tensors that we created (ExecuTorch tensors are always
     // CPU, so all GPU tensors are our copies)
     for (int i = 0; i < n_inputs; i++) {
       // All GPU input tensors were created by us, delete them
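The NOTE comments in this file spell out the backend's contract: ExecuTorch hands it CPU tensors, it mirrors them on the GPU, runs the kernels, copies results back, and frees its copies. Below is a minimal, self-contained sketch of that lifecycle; the types and helpers are invented stand-ins, not the real AOTI shim APIs:

```cpp
#include <cstddef>
#include <vector>

// Invented stand-ins: "CPU" and "GPU" tensors are both plain float buffers
// here, so this sketch compiles and runs anywhere.
struct CpuTensor { std::vector<float> data; };
struct GpuTensor { std::vector<float> data; };

GpuTensor* copy_to_gpu(const CpuTensor& c) { return new GpuTensor{c.data}; }
void copy_to_cpu(const GpuTensor& g, CpuTensor& c) { c.data = g.data; }
void run_kernels(std::vector<GpuTensor*>& ins, std::vector<GpuTensor*>& outs) {
  // Placeholder for the AOTI-compiled Metal kernels.
  (void)ins; (void)outs;
}

void execute(std::vector<CpuTensor>& inputs, std::vector<CpuTensor>& outputs) {
  // 1. The runtime hands the backend CPU tensors; mirror them on the GPU.
  std::vector<GpuTensor*> gpu_inputs, gpu_outputs;
  for (auto& in : inputs) gpu_inputs.push_back(copy_to_gpu(in));
  for (auto& out : outputs) gpu_outputs.push_back(copy_to_gpu(out));

  // 2. Execute the compiled kernels against the GPU copies.
  run_kernels(gpu_inputs, gpu_outputs);

  // 3. Copy results back into the caller-visible CPU tensors.
  for (size_t i = 0; i < outputs.size(); ++i) {
    copy_to_cpu(*gpu_outputs[i], outputs[i]);
  }

  // 4. Every GPU tensor was created by this backend, so free them all.
  for (auto* t : gpu_inputs) delete t;
  for (auto* t : gpu_outputs) delete t;
}
```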
8 changes: 4 additions & 4 deletions backends/apple/metal/runtime/shims/et_metal_ops.h
@@ -19,7 +19,7 @@ extern "C" {
 #endif
 
 /**
- * ExecutorTorch implementation of aoti_torch_mps_mm_out.
+ * ExecuTorch implementation of aoti_torch_mps_mm_out.
  * Performs simple matrix multiplication: out = self @ mat2
  */
 AOTITorchError aoti_torch_mps_mm_out(
@@ -28,7 +28,7 @@ AOTITorchError aoti_torch_mps_mm_out(
     AOTITensorHandle mat2);
 
 /**
- * ExecutorTorch implementation of aoti_torch_mps_bmm_out.
+ * ExecuTorch implementation of aoti_torch_mps_bmm_out.
  * Performs batched matrix multiplication: out = self @ mat2
  * All tensors must be 3-D with matching batch dimensions.
  */
@@ -38,7 +38,7 @@ AOTITorchError aoti_torch_mps_bmm_out(
     AOTITensorHandle mat2);
 
 /**
- * ExecutorTorch implementation of aoti_torch_mps_convolution.
+ * ExecuTorch implementation of aoti_torch_mps_convolution.
  * Performs 2D convolution operation - matches PyTorch AOTI signature
  */
 AOTITorchError aoti_torch_mps_convolution(
@@ -58,7 +58,7 @@ AOTITorchError aoti_torch_mps_convolution(
     AOTITensorHandle* ret0);
 
 /**
- * ExecutorTorch implementation of
+ * ExecuTorch implementation of
  * aoti_torch_mps__scaled_dot_product_attention_math_for_mps. Performs scaled
  * dot product attention calculation - matches PyTorch AOTI signature
  */
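These shims are plain C-ABI entry points that report failure through their return value, so a caller checks and propagates the error. A minimal sketch under two assumptions: the include paths mirror the file locations in this diff, and the wrapper name is invented (real call sites are AOTI-generated, not hand-written):

```cpp
#include <executorch/backends/apple/metal/runtime/shims/et_metal_ops.h>
#include <executorch/backends/apple/metal/runtime/shims/types.h>

using executorch::backends::metal::AOTITensorHandle;
using executorch::backends::metal::AOTITorchError;
using executorch::runtime::Error;

// out = self @ mat2; all three handles are pre-allocated by the caller.
AOTITorchError matmul_via_shim(
    AOTITensorHandle out,
    AOTITensorHandle self,
    AOTITensorHandle mat2) {
  AOTITorchError err = aoti_torch_mps_mm_out(out, self, mat2);
  if (err != Error::Ok) {
    return err; // surface the shim failure to the caller
  }
  return Error::Ok;
}
```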
8 changes: 4 additions & 4 deletions backends/apple/metal/runtime/shims/et_metal_ops.mm
@@ -366,7 +366,7 @@ AOTITorchError aoti_torch_mps_mm_out(
 
   @autoreleasepool {
     try {
-      // Convert AOTITensorHandle to ExecutorTorch tensors
+      // Convert AOTITensorHandle to ExecuTorch tensors
       auto out_tensor = reinterpret_cast<Tensor*>(out);
       auto self_tensor = reinterpret_cast<Tensor*>(self);
       auto mat2_tensor = reinterpret_cast<Tensor*>(mat2);
@@ -639,7 +639,7 @@ AOTITorchError aoti_torch_mps_bmm_out(
 
   @autoreleasepool {
     try {
-      // Convert AOTITensorHandle to ExecutorTorch tensors
+      // Convert AOTITensorHandle to ExecuTorch tensors
       auto out_tensor = reinterpret_cast<Tensor*>(out);
       auto self_tensor = reinterpret_cast<Tensor*>(self);
       auto mat2_tensor = reinterpret_cast<Tensor*>(mat2);
@@ -942,7 +942,7 @@ AOTITorchError aoti_torch_mps_convolution(
 
   @autoreleasepool {
     try {
-      // Convert AOTITensorHandle to ExecutorTorch tensors
+      // Convert AOTITensorHandle to ExecuTorch tensors
       auto input_tensor = reinterpret_cast<Tensor*>(input);
       auto weight_tensor = reinterpret_cast<Tensor*>(weight);
 
@@ -1479,7 +1479,7 @@ AOTITorchError aoti_torch_mps__scaled_dot_product_attention_math_for_mps(
 
   try {
     @autoreleasepool {
-      // Convert AOTITensorHandle to ExecutorTorch tensors
+      // Convert AOTITensorHandle to ExecuTorch tensors
       auto* query_tensor = reinterpret_cast<Tensor*>(query);
       auto* key_tensor = reinterpret_cast<Tensor*>(key);
       auto* value_tensor = reinterpret_cast<Tensor*>(value);
4 changes: 2 additions & 2 deletions backends/apple/metal/runtime/shims/types.h
@@ -16,14 +16,14 @@ namespace executorch {
 namespace backends {
 namespace metal {
 
-// Common using declarations for ExecutorTorch types
+// Common using declarations for ExecuTorch types
 using executorch::runtime::Error;
 using executorch::runtime::etensor::Tensor;
 
 extern "C" {
 
 // Common AOTI type aliases
-// Note: AOTITensorHandle is aliased to Tensor* for ExecutorTorch compatibility
+// Note: AOTITensorHandle is aliased to Tensor* for ExecuTorch compatibility
 using AOTITensorHandle = Tensor*;
 using AOTIRuntimeError = Error;
 using AOTITorchError = Error;
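A short sketch of what this alias buys (the helper function and include path are illustrative, not part of the patch): because AOTITensorHandle is literally Tensor*, crossing the C ABI boundary is a cast rather than a conversion or a copy, which is exactly the reinterpret_cast pattern visible in et_metal_ops.mm above:

```cpp
#include <cstdint>

#include <executorch/backends/apple/metal/runtime/shims/types.h>

using executorch::backends::metal::AOTITensorHandle;
using executorch::runtime::etensor::Tensor;

// Assumes a tensor with at least one dimension; no wrapper object is needed
// to go from the opaque handle back to an ExecuTorch tensor.
int64_t first_dim_size(AOTITensorHandle handle) {
  Tensor* tensor = reinterpret_cast<Tensor*>(handle);
  return tensor->sizes()[0];
}
```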
4 changes: 2 additions & 2 deletions backends/cadence/generic/operators/cadence_type_util.h
@@ -25,7 +25,7 @@
  *
  * Where MACRO is defined as: #define MACRO(ctype, name) ...
  * - ctype: C++ type (uint8_t or int8_t)
- * - name: ExecutorTorch ScalarType name suffix (Byte or Char)
+ * - name: ExecuTorch ScalarType name suffix (Byte or Char)
  *
  * Example:
  * #define HANDLE_TYPE(ctype, name) \
@@ -52,7 +52,7 @@
  *
  * Where MACRO is defined as: #define MACRO(ctype, name) ...
  * - ctype: C++ type (uint8_t, int8_t, or int16_t)
- * - name: ExecutorTorch ScalarType name suffix (Byte, Char, or Short)
+ * - name: ExecuTorch ScalarType name suffix (Byte, Char, or Short)
  *
  * This macro includes int16_t support for operators that can handle 16-bit
  * quantized values (e.g., quantized_linear, quantized_fully_connected).
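The contract these comments document is the classic X-macro pattern: a list macro invokes a user-supplied MACRO once per (ctype, name) pair. A runnable sketch follows; the list macro's real name isn't visible in this hunk, so the one below is invented:

```cpp
#include <cstdio>
#include <cstdint>

// Stand-in for the real "for-all types" macro: invokes MACRO once per
// (C++ type, ScalarType name suffix) pair.
#define FORALL_EXAMPLE_QUANTIZED_TYPES(MACRO) \
  MACRO(uint8_t, Byte)                        \
  MACRO(int8_t, Char)                         \
  MACRO(int16_t, Short)

// A handler expanded once per type; here it just prints the pairing.
#define HANDLE_TYPE(ctype, name) \
  std::printf("ScalarType::" #name " maps to " #ctype "\n");

int main() {
  FORALL_EXAMPLE_QUANTIZED_TYPES(HANDLE_TYPE)
  return 0;
}
```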
2 changes: 1 addition & 1 deletion backends/cuda/CMakeLists.txt
@@ -33,7 +33,7 @@ set(CUDA_USE_STATIC_CUDA_RUNTIME OFF)
 
 find_package(CUDAToolkit REQUIRED)
 
-# Use ExecutorTorch's standard way to find PyTorch libraries for AOTI
+# Use ExecuTorch's standard way to find PyTorch libraries for AOTI
 include(${EXECUTORCH_ROOT}/tools/cmake/Utils.cmake)
 find_package_torch()
 
2 changes: 1 addition & 1 deletion backends/cuda/runtime/shims/tensor_attribute.h
@@ -15,7 +15,7 @@
 
 namespace executorch::backends::cuda {
 
-// Common using declarations for ExecutorTorch types
+// Common using declarations for ExecuTorch types
 using executorch::runtime::Error;
 using executorch::runtime::etensor::Tensor;
 
2 changes: 1 addition & 1 deletion examples/arm/zephyr/CMakeLists.txt
@@ -117,7 +117,7 @@ if(NOT DEFINED EXECUTORCH_DIR)
   else()
     message(
       FATAL_ERROR
-        "ExecutorTorch module not found. Ensure it's properly configured in your Zephyr workspace."
+        "ExecuTorch module not found. Ensure it's properly configured in your Zephyr workspace."
     )
   endif()
 else()
6 changes: 3 additions & 3 deletions examples/openvino/stable_diffusion/export_lcm.py
@@ -62,7 +62,7 @@ def export_text_encoder(self, output_path: str, device: str = "CPU") -> bool:
                 exported_program, partitioner=[partitioner]
             )
 
-            # Convert to ExecutorTorch program
+            # Convert to ExecuTorch program
             executorch_program = edge_manager.to_executorch(
                 config=ExecutorchBackendConfig()
             )
@@ -102,7 +102,7 @@ def export_unet(self, output_path: str, device: str = "CPU") -> bool:
                 exported_program, partitioner=[partitioner]
             )
 
-            # Convert to ExecutorTorch program
+            # Convert to ExecuTorch program
             executorch_program = edge_manager.to_executorch(
                 config=ExecutorchBackendConfig()
             )
@@ -142,7 +142,7 @@ def export_vae_decoder(self, output_path: str, device: str = "CPU") -> bool:
                 exported_program, partitioner=[partitioner]
             )
 
-            # Convert to ExecutorTorch program
+            # Convert to ExecuTorch program
             executorch_program = edge_manager.to_executorch(
                 config=ExecutorchBackendConfig()
             )