73 changes: 73 additions & 0 deletions backends/apple/metal/runtime/shims/et_metal_ops.h
@@ -0,0 +1,73 @@
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/

#pragma once

#include <executorch/backends/apple/metal/runtime/shims/types.h>

namespace executorch {
namespace backends {
namespace metal {

#ifdef __cplusplus
extern "C" {
#endif

/**
 * ExecuTorch implementation of aoti_torch_mps_mm_out.
 * Performs a simple matrix multiplication: out = self @ mat2
 */
AOTITorchError aoti_torch_mps_mm_out(
    AOTITensorHandle out,
    AOTITensorHandle self,
    AOTITensorHandle mat2);

Contributor: Do the custom ops use a caching mechanism like the one in ETMetalShaderLibrary?

Contributor Author: No, not yet. These fallback ops are implemented with MPSGraph, so what we would cache here is the graph itself. This is something I want to look into later when optimizing performance, but it deserves dedicated time. In particular, I have never understood why MPSGraph operations carry non-trivial CPU overhead in PyTorch, in spite of PyTorch already having a caching mechanism for MPSGraphs.
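As context for the thread above, here is a minimal sketch of what such a graph cache could look like; CachedGraph, the key scheme, and all names are assumptions for illustration, not the ExecuTorch implementation:

// Hypothetical sketch only: cache built MPSGraph objects keyed by op name
// plus input shapes, so repeated calls with the same signature can skip
// graph construction. CachedGraph stands in for the real graph handle;
// ownership and thread safety are elided for brevity.
#include <cstdint>
#include <sstream>
#include <string>
#include <unordered_map>

struct CachedGraph; // opaque stand-in for a compiled MPSGraph (assumption)

static std::unordered_map<std::string, CachedGraph*>& graph_cache() {
  static std::unordered_map<std::string, CachedGraph*> cache;
  return cache;
}

// Key the cache on the op name plus the runtime sizes of one input tensor;
// a real key would also cover the other inputs, dtypes, and op attributes.
static std::string make_cache_key(
    const char* op_name, const int64_t* sizes, size_t rank) {
  std::ostringstream key;
  key << op_name;
  for (size_t i = 0; i < rank; ++i) {
    key << ':' << sizes[i];
  }
  return key.str();
}

For reference, PyTorch's MPS backend keys its MPSGraph cache on a string derived from the op and tensor shapes in roughly this way, which is the caching mechanism the reply alludes to.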

/**
 * ExecuTorch implementation of aoti_torch_mps_convolution.
 * Performs a 2D convolution; matches the PyTorch AOTI signature.
 */
AOTITorchError aoti_torch_mps_convolution(
    AOTITensorHandle input,
    AOTITensorHandle weight,
    AOTITensorHandle* bias,
    const int64_t* stride,
    int64_t stride_len_,
    const int64_t* padding,
    int64_t padding_len_,
    const int64_t* dilation,
    int64_t dilation_len_,
    int32_t transposed,
    const int64_t* output_padding,
    int64_t output_padding_len_,
    int64_t groups,
    AOTITensorHandle* ret0);
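The declaration above follows the AOTI shim convention of passing each int64_t attribute array together with its element count, with bias as a nullable handle pointer. A hypothetical call-site sketch under that reading (run_conv2d_3x3 and the chosen attribute values are illustrative, not part of the API):

// Hypothetical call-site sketch: every int64_t attribute array is paired
// with its length, bias travels as a nullable handle pointer, and the
// result tensor comes back through ret0.
#include <executorch/backends/apple/metal/runtime/shims/et_metal_ops.h>

using namespace executorch::backends::metal;

AOTITorchError run_conv2d_3x3(
    AOTITensorHandle input,
    AOTITensorHandle weight,
    AOTITensorHandle* bias, // may be null when the conv has no bias
    AOTITensorHandle* out) {
  const int64_t stride[2] = {1, 1};
  const int64_t padding[2] = {1, 1}; // "same" padding for a 3x3 kernel
  const int64_t dilation[2] = {1, 1};
  const int64_t output_padding[2] = {0, 0}; // only used when transposed
  return aoti_torch_mps_convolution(
      input, weight, bias,
      stride, /*stride_len_=*/2,
      padding, /*padding_len_=*/2,
      dilation, /*dilation_len_=*/2,
      /*transposed=*/0,
      output_padding, /*output_padding_len_=*/2,
      /*groups=*/1,
      out);
}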

/**
 * ExecuTorch implementation of
 * aoti_torch_mps__scaled_dot_product_attention_math_for_mps.
 * Performs the scaled dot product attention computation; matches the
 * PyTorch AOTI signature.
 */
AOTITorchError aoti_torch_mps__scaled_dot_product_attention_math_for_mps(
    AOTITensorHandle query,
    AOTITensorHandle key,
    AOTITensorHandle value,
    AOTITensorHandle* attn_mask,
    double dropout_p,
    int32_t is_causal,
    AOTITensorHandle* dropout_mask,
    double* scale,
    AOTITensorHandle* ret0,
    AOTITensorHandle* ret1);
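Here attn_mask, dropout_mask, and scale are pointer parameters, which in the AOTI shim convention marks them as optional. A hypothetical call-site sketch for a causal, no-dropout configuration (run_causal_attention and all argument choices are illustrative; the meaning of ret1 is an assumption based on the corresponding PyTorch op, which returns the attention weights alongside the output):

// Hypothetical call-site sketch: optional arguments are passed as null
// pointers; ret0 receives the attention output and ret1 presumably the
// attention weights, mirroring the PyTorch math variant.
#include <executorch/backends/apple/metal/runtime/shims/et_metal_ops.h>

using namespace executorch::backends::metal;

AOTITorchError run_causal_attention(
    AOTITensorHandle query,
    AOTITensorHandle key,
    AOTITensorHandle value,
    AOTITensorHandle* out,
    AOTITensorHandle* attn_weights) {
  return aoti_torch_mps__scaled_dot_product_attention_math_for_mps(
      query, key, value,
      /*attn_mask=*/nullptr, // no explicit mask; causality flag used instead
      /*dropout_p=*/0.0,
      /*is_causal=*/1,
      /*dropout_mask=*/nullptr,
      /*scale=*/nullptr, // null should fall back to default softmax scaling
      out, attn_weights);
}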

#ifdef __cplusplus
} // extern "C"
#endif

} // namespace metal
} // namespace backends
} // namespace executorch