
Commit 4d46f0a

Update base for Update on "Remove usages of 'to_edge_with_preserve_ops'"

Replace them with plain `to_edge`.

Differential Revision: [D78311705](https://our.internmc.facebook.com/intern/diff/D78311705/)

[ghstack-poisoned]
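For context, a minimal before/after sketch of the migration this stack performs. `to_edge`, `EdgeCompileConfig`, and `torch.export.export` are real entry points; how preserved ops are configured after this change is not shown in this diff, so the config line below is an assumption, not the confirmed replacement API.

```python
import torch
from executorch.exir import EdgeCompileConfig, to_edge


class AddOne(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x + 1


exported = torch.export.export(AddOne(), (torch.ones(2),))

# Before: edge = to_edge_with_preserve_ops(exported, preserve_ops=[...])
# After:  plain to_edge; any op-preservation knobs would live on the
# compile config instead (assumed here, not confirmed by this diff).
edge = to_edge(exported, compile_config=EdgeCompileConfig())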
2 parents: d92fa11 + 46ca782

131 files changed, +10013 −1297 lines changed


.ci/docker/ci_commit_pins/pytorch.txt

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-7cda4017ddda554752e89069ae205be5e8388f59
+90f1e7bed15ca5e48c61c5b6dc5ad4810524f82f

.ci/scripts/unittest-buck2.sh

Lines changed: 1 addition & 1 deletion
@@ -10,7 +10,7 @@ set -eux
 # TODO: can't query cadence & vulkan backends
 # TODO: can't query //kernels/prim_ops because of non-buckified stuff in OSS.
 buck2 query "//backends/apple/... + //backends/example/... + \
-  //backends/mediatek/... + //backends/test/... + //backends/transforms/... + \
+  //backends/mediatek/... + //backends/transforms/... + \
   //backends/xnnpack/... + //configurations/... + //kernels/aten/... + \
   //kernels/optimized/... + //kernels/portable/... + //kernels/quantized/... + \
   //kernels/test/... + //runtime/... + //schema/... + //test/... + //util/..."

.github/workflows/android-release-artifacts.yml

Lines changed: 1 addition & 3 deletions
@@ -47,15 +47,13 @@ jobs:
     name: build-aar
     needs: check-if-aar-exists
     if: ${{ !github.event.pull_request.head.repo.fork }}
-    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@release/2.7
+    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
     secrets: inherit
     permissions:
       id-token: write
       contents: read
     with:
       secrets-env: EXECUTORCH_MAVEN_SIGNING_KEYID EXECUTORCH_MAVEN_SIGNING_PASSWORD EXECUTORCH_MAVEN_CENTRAL_PASSWORD EXECUTORCH_MAVEN_CENTRAL_USERNAME EXECUTORCH_MAVEN_SIGNING_GPG_KEY_CONTENTS
-      # As this job has access to Maven credential, run this on a fresh ephemeral runner
-      runner: ephemeral.linux.2xlarge
       docker-image: executorch-ubuntu-22.04-clang12-android
       submodules: 'recursive'
       ref: ${{ github.sha }}

.github/workflows/trunk.yml

Lines changed: 1 addition & 1 deletion
@@ -197,7 +197,7 @@ jobs:
       docker-image: executorch-ubuntu-22.04-arm-sdk
       submodules: 'recursive'
       ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
-      timeout: 90
+      timeout: 120
       script: |
         # The generic Linux job chooses to use base env, not the one setup by the image
         CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")

backends/arm/quantizer/arm_quantizer.py

Lines changed: 1 addition & 1 deletion
@@ -60,7 +60,7 @@


 @functools.lru_cache
 def get_symmetric_quantization_config(
-    is_per_channel: bool = False,
+    is_per_channel: bool = True,
     is_qat: bool = False,
     is_dynamic: bool = False,
     act_qmin: int = -128,
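This default flip means callers of `get_symmetric_quantization_config` now get per-channel weight quantization unless they opt out, which is exactly what the test updates further below do. A minimal usage sketch (import path inferred from this file's location):

```python
from executorch.backends.arm.quantizer.arm_quantizer import (
    get_symmetric_quantization_config,
)

# Per-channel weight quantization is now on by default.
config = get_symmetric_quantization_config()

# Callers that relied on the old per-tensor default must opt out
# explicitly, as the QAT and multihead-attention tests below now do.
per_tensor_config = get_symmetric_quantization_config(is_per_channel=False)
```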

backends/arm/scripts/mlsdk_utils.sh

Lines changed: 121 additions & 0 deletions
@@ -0,0 +1,121 @@
+#!/usr/bin/env bash
+# Copyright 2025 Arm Limited and/or its affiliates.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+set -euo pipefail
+
+# TODO
+mlsdk_manifest_url=""
+
+script_dir=$(cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd)
+
+source ${script_dir}/utils.sh
+
+usage() { echo "Usage: $0 [-u <mlsdk-manifest-url>]" 1>&2; exit 1; }
+
+while getopts ":u:" opt; do
+    case "${opt}" in
+        u)
+            mlsdk_manifest_url=${OPTARG}
+            ;;
+        *)
+            usage
+            ;;
+    esac
+done
+
+function download_ai_mlsdk_manifest() {
+    local _dada_dir="$1"
+
+    if [[ -z "${_dada_dir}" ]]; then
+        echo "Error: _dada_dir parameter missing?"
+        return 1
+    fi
+
+    if [[ -z "${mlsdk_manifest_url}" ]]; then
+        echo "Error: mlsdk_manifest_url parameter missing?"
+        return 1
+    fi
+
+    if [[ ! -d "${_dada_dir}" ]]; then
+        mkdir -p "$_dada_dir"
+        pushd "$_dada_dir" || exit 1
+
+        curl https://storage.googleapis.com/git-repo-downloads/repo > repo
+        chmod u+x repo
+        ./repo init --no-repo-verify --depth=1 --manifest-url ${mlsdk_manifest_url} -g model-converter,emulation-layer,vgf-library
+        ./repo sync
+
+        popd
+    fi
+}
+
+function setup_model_converter() {
+    local work_dir="$1"
+    local manifest_dir="$2"
+    local enable_vgf_lib="$3"
+    local enable_emulation_layer="$4"
+
+    if [[ -z "$work_dir" ]]; then
+        echo "Error: work_dir parameter is required."
+        return 1
+    fi
+
+    if [[ -z "$manifest_dir" ]]; then
+        echo "Error: manifest_dir parameter is required."
+        return 1
+    fi
+
+    mkdir -p "$work_dir"
+    pushd "$work_dir" || exit 1
+
+    download_ai_mlsdk_manifest ${manifest_dir}
+
+    pushd "$manifest_dir"
+
+    # model-converter
+    # TODO: Remove macOS patch after mlsdk fully supports macOS
+    if [[ "$(uname)" == "Darwin" ]]; then
+        sed -i '' '/^ *print(f"Unsupported host platform/ i\
+    if system == "Darwin":\
+        # Use default Apple toolchain (Clang) on macOS\
+        return True\
+\
+' sw/model-converter/scripts/build.py
+    fi
+    python sw/model-converter/scripts/build.py -j$(nproc)
+
+    # libvgf
+    if [[ "${enable_vgf_lib}" -eq 1 ]]; then
+        # TODO: Remove macOS patch after mlsdk fully supports macOS
+        if [[ "$(uname)" == "Darwin" ]]; then
+            sed -i '' '/^ *print(f"ERROR: Unsupported host platform/ i\
+    if system == "Darwin":\
+        # Use default Apple toolchain (Clang) on macOS\
+        return True\
+\
+' sw/vgf-lib/scripts/build.py
+        fi
+        python sw/vgf-lib/scripts/build.py -j$(nproc)
+    fi
+
+    # emu layer
+    if [[ "${enable_emulation_layer}" -eq 1 ]]; then
+        pushd sw/emulation-layer
+        cmake -B build \
+            -DGLSLANG_PATH=../../dependencies/glslang \
+            -DSPIRV_CROSS_PATH=../../dependencies/SPIRV-Cross \
+            -DSPIRV_HEADERS_PATH=../../dependencies/SPIRV-Headers \
+            -DSPIRV_TOOLS_PATH=../../dependencies/SPIRV-Tools \
+            -DVULKAN_HEADERS_PATH=../../dependencies/Vulkan-Headers
+        cmake --build build
+        popd
+    fi
+
+    popd
+}
+
+#setup_model_converter() $1
+# `"$manifest_dir"'

backends/arm/test/common.py

Lines changed: 8 additions & 0 deletions
@@ -18,6 +18,7 @@
     arm_executor_runner_exists,
     corstone300_installed,
     corstone320_installed,
+    model_converter_installed,
 )
 from executorch.backends.arm.tosa_specification import TosaSpecification
 from executorch.exir.backend.compile_spec_schema import CompileSpec
@@ -245,6 +246,13 @@ def get_u85_compile_spec_unbuilt(
 )
 """Xfails a test if Corsone320 FVP is not installed, or if the executor runner is not built"""

+SkipIfNoModelConverter = pytest.mark.skipif(
+    condition=not (model_converter_installed()),
+    raises=FileNotFoundError,
+    reason="Did not find model-converter on path",
+)
+"""Xfails a test if model-converter is not installed"""
+
 xfail_type = str | tuple[str, type[Exception]]
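The new marker mirrors the existing Corstone FVP skip markers; the `test_add.py` changes below switch the VGF tests over to it. A minimal usage sketch (hypothetical test name, body elided):

```python
from executorch.backends.arm.test import common


@common.SkipIfNoModelConverter
def test_my_op_vgf_fp():
    # Skipped when model-converter is not found on the path; per the
    # marker's docstring, a FileNotFoundError at runtime is xfailed.
    ...
```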
backends/arm/test/misc/test_bn_relu_folding_qat.py

Lines changed: 3 additions & 1 deletion
@@ -59,7 +59,9 @@ def test_qat_tosa_BI(model: torch.nn.Module):
         "quantize",
         Quantize(
             quantizer=quantizer,
-            quantization_config=get_symmetric_quantization_config(is_qat=True),
+            quantization_config=get_symmetric_quantization_config(
+                is_qat=True, is_per_channel=False
+            ),
             is_qat=True,
         ),
     )

backends/arm/test/ops/test_add.py

Lines changed: 2 additions & 4 deletions
@@ -7,8 +7,6 @@

 from typing import Tuple

-import pytest
-
 import torch
 from executorch.backends.arm.arm_backend import get_tosa_spec
 from executorch.backends.arm.quantizer import arm_quantizer
@@ -190,7 +188,7 @@ def test_add_tensor_u85_BI_2(test_data: input_t2):


 @common.parametrize("test_data", Add.test_data)
-@pytest.mark.skip(reason="Model converter not yet made available")
+@common.SkipIfNoModelConverter
 def test_add_tensor_vgf_fp(test_data: input_t1):
     pipeline = VgfPipeline[input_t1](
         Add(), test_data(), aten_op, exir_op, tosa_version="TOSA-1.0+FP"
@@ -199,7 +197,7 @@ def test_add_tensor_vgf_fp(test_data: input_t1):


 @common.parametrize("test_data", Add.test_data)
-@pytest.mark.skip(reason="Model converter not yet made available")
+@common.SkipIfNoModelConverter
 def test_add_tensor_vgf_int(test_data: input_t1):
     pipeline = VgfPipeline[input_t1](
         Add(),

backends/arm/test/ops/test_multihead_attention.py

Lines changed: 12 additions & 1 deletion
@@ -53,7 +53,14 @@ def test_multihead_attention_tosa_MI(test_data: input_t1):
 )
 def test_multihead_attention_tosa_BI(test_data):
     test_data, module = test_data()
-    pipeline = TosaPipelineBI(module, (*test_data, *test_data, *test_data), [], [])
+    pipeline = TosaPipelineBI(
+        module,
+        (*test_data, *test_data, *test_data),
+        [],
+        [],
+        # TODO: Per-channel quantization is broken (MLETORCH-1144)
+        per_channel_quantization=False,
+    )
     pipeline.run()


@@ -72,6 +79,8 @@ def test_multihead_attention_u55_BI(test_data: input_t1):
         [],
         use_to_edge_transform_and_lower=True,
         run_on_fvp=True,
+        # TODO: Per-channel quantization is broken (MLETORCH-1144)
+        per_channel_quantization=False,
     )
     pipeline.pop_stage("check_count.exir")
     pipeline.run()
@@ -92,5 +101,7 @@
         [],
         use_to_edge_transform_and_lower=True,
         run_on_fvp=True,
+        # TODO: Per-channel quantization is broken (MLETORCH-1144)
+        per_channel_quantization=False,
     )
     pipeline.run()
