Skip to content

Commit ef17021

Browse files
srinathava (Srinath Avadhanula)
and
Srinath Avadhanula
authored
Green commit tracker (#7)
`bazel test //test/...` passes with these commits/changes. --------- Co-authored-by: Srinath Avadhanula <srinath.avadhanula@getcruise.com>
1 parent 9350e60 commit ef17021

File tree

10 files changed

+98
-52
lines changed

10 files changed

+98
-52
lines changed

.github/workflows/bazelBuildAndTestTcp.yml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,11 @@ jobs:
5656
find . -type f -name "*.cpp" -o -name "*.h" | xargs clang-format -i
5757
if [ -n "$(git status --porcelain)" ]; then
5858
echo "Please run 'find . -type f -name "*.cpp" -o -name "*.h" | xargs clang-format -i' and commit changes."
59+
echo "git reports the following changes: "
60+
echo "$(git status --porcelain)"
61+
echo "$(git diff -u)"
62+
docker run --rm mlir-tcp:ci clang-format --version
63+
docker run --rm mlir-tcp:ci uname -a
5964
exit 1
6065
fi
6166

deps.bzl

Lines changed: 25 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -22,8 +22,8 @@ def third_party_deps():
2222
path = local_llvm_repo_path(),
2323
)
2424
else:
25-
LLVM_COMMIT = "5d6d982df61d16b6d498e6d59dd91c059679d3d8"
26-
LLVM_SHA256 = "834184126812eecbdb2ed30de255554a6529295afaf44e9dfd3851d61195dbb5"
25+
LLVM_COMMIT = "72144d119a7291f8b6b8e022a2947fbe31e66afc"
26+
LLVM_SHA256 = "2caacb6925a13cb5886a5d7f225fa408b80ca8e1efe0736186954b2abc4ee1c3"
2727
http_archive(
2828
name = "llvm-raw",
2929
build_file_content = "# empty",
@@ -39,14 +39,17 @@ def third_party_deps():
3939
path = local_torch_mlir_repo_path(),
4040
)
4141
else:
42-
TORCH_MLIR_COMMIT = "169032010793ee7fe3e305ab920e4119fdfc3b11"
43-
TORCH_MLIR_SHA256 = "0f25459b0d6828983c8aa78d139adad4325508bff150b57e97345e9798377dd3"
42+
TORCH_MLIR_COMMIT = "9f2ba5abaa85cefd95cc85579fafd0c53c1101e8"
43+
TORCH_MLIR_SHA256 = "09444281839eeae4aff42c029d87b1728f307fa26511b896ff448d51aaa98049"
4444
http_archive(
4545
name = "torch-mlir-raw",
4646
build_file_content = "# empty",
4747
sha256 = TORCH_MLIR_SHA256,
4848
strip_prefix = "torch-mlir-" + TORCH_MLIR_COMMIT,
4949
urls = ["https://github.com/llvm/torch-mlir/archive/{commit}.tar.gz".format(commit = TORCH_MLIR_COMMIT)],
50+
patches = [
51+
"//third_party/patches:torch-mlir.1.patch",
52+
],
5053
)
5154

5255
if use_local_stablehlo_repo():
@@ -55,8 +58,8 @@ def third_party_deps():
5558
path = local_stablehlo_repo_path(),
5659
)
5760
else:
58-
STABLEHLO_COMMIT = "b62dc66da9946b4c400c0d99c9d5bb8e04edaee6"
59-
STABLEHLO_SHA256 = "a51842f5cbcccc2dc74de232793e6fdc0b4403b616281a73bbc704cd227b50db"
61+
STABLEHLO_COMMIT = "a54938f0651d3b4b7be9771848eda2463c92a8e7"
62+
STABLEHLO_SHA256 = "edab2288f0b19e3efbf08815d17d4efb106984aa6fe02fed0cb2165284e6a5b7"
6063
http_archive(
6164
name = "stablehlo",
6265
sha256 = STABLEHLO_SHA256,
@@ -168,3 +171,19 @@ def third_party_deps():
168171
strip_prefix = "cnpy-4e8810b1a8637695171ed346ce68f6984e585ef4",
169172
urls = ["https://github.com/rogersce/cnpy/archive/4e8810b1a8637695171ed346ce68f6984e585ef4.tar.gz"],
170173
)
174+
175+
http_archive(
176+
name = "nanobind",
177+
build_file = "@llvm-raw//utils/bazel/third_party_build:nanobind.BUILD",
178+
sha256 = "bb35deaed7efac5029ed1e33880a415638352f757d49207a8e6013fefb6c49a7",
179+
strip_prefix = "nanobind-2.4.0",
180+
url = "https://github.com/wjakob/nanobind/archive/refs/tags/v2.4.0.tar.gz",
181+
)
182+
183+
http_archive(
184+
name = "robin_map",
185+
build_file = "@llvm-raw//utils/bazel/third_party_build:robin_map.BUILD",
186+
sha256 = "a8424ad3b0affd4c57ed26f0f3d8a29604f0e1f2ef2089f497f614b1c94c7236",
187+
strip_prefix = "robin-map-1.3.0",
188+
url = "https://github.com/Tessil/robin-map/archive/refs/tags/v1.3.0.tar.gz",
189+
)

lib/Pipeline/Pipeline.cpp

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -80,10 +80,10 @@ static void createTcpToLlvmPipeline(OpPassManager &pm) {
8080

8181
// One-shot bufferize tensor -> memref, from
8282
// https://mlir.llvm.org/docs/Bufferization/.
83-
bufferization::OneShotBufferizationOptions bufferizationOptions;
83+
bufferization::OneShotBufferizePassOptions bufferizationOptions;
8484
bufferizationOptions.bufferizeFunctionBoundaries = true;
85-
bufferizationOptions.setFunctionBoundaryTypeConversion(
86-
bufferization::LayoutMapOption::IdentityLayoutMap);
85+
bufferizationOptions.functionBoundaryTypeConversion =
86+
bufferization::LayoutMapOption::IdentityLayoutMap;
8787
pm.addPass(bufferization::createOneShotBufferizePass(bufferizationOptions));
8888
// Buffer deallocation pipeline for automatically inserting
8989
// buffer deallocation ops after one-shot bufferization.
@@ -95,14 +95,14 @@ static void createTcpToLlvmPipeline(OpPassManager &pm) {
9595
pm.addPass(bufferization::createLowerDeallocationsPass());
9696
pm.addPass(createCSEPass());
9797
pm.addPass(createCanonicalizerPass());
98-
pm.addPass(createBufferizationToMemRefPass());
98+
pm.addPass(createConvertBufferizationToMemRefPass());
9999

100100
// Blanket-convert any remaining linalg ops to loops if any remain.
101101
pm.addNestedPass<func::FuncOp>(createConvertLinalgToLoopsPass());
102102
// Blanket-convert any remaining affine ops if any remain.
103103
pm.addPass(createLowerAffinePass());
104104
// Convert SCF to CF (always needed).
105-
pm.addPass(createConvertSCFToCFPass());
105+
pm.addPass(createSCFToControlFlowPass());
106106

107107
// Sprinkle some cleanups.
108108
pm.addPass(createCanonicalizerPass());

requirements_lock.txt

Lines changed: 24 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -157,28 +157,30 @@ sympy==1.13.3 \
157157
--hash=sha256:54612cf55a62755ee71824ce692986f23c88ffa77207b30c1368eda4a7060f73 \
158158
--hash=sha256:b27fd2c6530e0ab39e275fc9b683895367e51d5da91baa8d3d64db2565fec4d9
159159
# via torch
160-
torch==2.7.0.dev20250221+cpu \
161-
--hash=sha256:03ed7e7e5186f2f0cadea798d1bbb249ca342618e90542b9634463c194ad8999 \
162-
--hash=sha256:08c6edef82a3a11afcc8ec86910eb41837ca7e646416280de6ef352f6e216370 \
163-
--hash=sha256:0fd00b3a1198f610fa706074243097d4448ee0a0efca96b809107c60af0a3f98 \
164-
--hash=sha256:17672aba174f465fe90bee6972ee9a542980b59f410772e68091a5d044604b9d \
165-
--hash=sha256:17db41853a494b1eb1b3c23e383297b2e4678d6629dfa539e8a9f71c4b05b32d \
166-
--hash=sha256:1b3c5bc3a52cdacee11794a5221aea89a6207439a24aedfd458d79b6b5d38ad1 \
167-
--hash=sha256:4201870a7d363dfb0c2015a26d847f71248685f352234c71834923f1b477b7ed \
168-
--hash=sha256:494d8fa9c469cdcb042f61cfbf2c505ebfd344e923af8ba7f6223cd8b2a7742c \
169-
--hash=sha256:5218559bd4c044977b3240aa0a2e188f7694865ef24611430bcb701ec10f5276 \
170-
--hash=sha256:73f82e45e5f1707100751bd0bedbb8bf242e32268913959dbe9f0e4ab3b3cb99 \
171-
--hash=sha256:9f80431e71a2e7d7795220eb9d549cf0490e6c5b36f0886539f7c9ef11b23a39 \
172-
--hash=sha256:a89ed8084b88720fed36655cd8fe49c1b5c135483c61fb9ac5231210a502ef9c \
173-
--hash=sha256:ac88604bf2dd8e4a53ed07877fb9e845a7c3c8a03aeeefe19f381655f19b056c \
174-
--hash=sha256:ae3698e5caa6ddf1ad40712924f45788e15e2a838793969b0477868e02953011 \
175-
--hash=sha256:aee87be29d490521806a414191e9e9afbc27e55167e79dbb66d06c18d87e0079 \
176-
--hash=sha256:b3a63e0b2e8c495d0781735720a063c8924014cfcae8a6f63a360a696b657730 \
177-
--hash=sha256:b8ac59ca5484c438a49b4d6a4ad3721256071c80687220ae8ffa355ea5c745ee \
178-
--hash=sha256:c61d02f308414e7a2b95972e2b950168af86d5b24dcedb332e29ecb8419b64cd \
179-
--hash=sha256:d9f4236bfb9b4dc39e7569ff5a2fb39f4b28b3f79dc2ff6b6ce70f7ca67fd40a \
180-
--hash=sha256:f15bc3e3e51227f18068da9cd5b8153734385d817723c153bb0cdea7285b1eae \
181-
--hash=sha256:f7433418e166b7a3e87e43f3e64115207a84dc50db8dc3b9a51428e737edcac7
160+
torch==2.8.0.dev20250506+cpu \
161+
--hash=sha256:02abfdcdbb9ca15e3c561d31b1617f9d88f978af49b3b76cc048a5159c4bbb19 \
162+
--hash=sha256:0304c11aa1a404a664a776dea4b61dab31707d5fecc1e165ea17b1c780049911 \
163+
--hash=sha256:081ecdc2ced1285b92cce4684922710af244ccf4e4430d36c746f025e6872a30 \
164+
--hash=sha256:0bdc6883695004803ea0e062382d21e432168d7ee93e6f77375d34fc43778ca8 \
165+
--hash=sha256:1c82f3cd449bee2adcfc8c1dc25b087fc3ed9eba239ea46449e1a087ddbf5f97 \
166+
--hash=sha256:370ae6fb1c8c132c4578973eb6066f14d10fb6cdc05a89e44660fec15bbce9a4 \
167+
--hash=sha256:3c68844186c4d43db95f096b120b91c530c4e92540eeeece90e59fd6ec078f03 \
168+
--hash=sha256:4017473f0a77cd2774a3c8245032fb9979ac08f92831f94f70d9e22612e2d5c1 \
169+
--hash=sha256:4575a76e5459285311d1f94fb8835fec81d5509321192716fcff8631aa258ae3 \
170+
--hash=sha256:48c682f8f369b573045d5922e989812b77183f4020a750b3339c3e64e42fd733 \
171+
--hash=sha256:4a64fd103df112e2dbfb00ab04ffef839bc1838caa40ff8bf86647eb39daa7ad \
172+
--hash=sha256:5f2a251b87dc7a359fe5b83772cb2830e01b0d75a585edc1ffe659a3e59ae17b \
173+
--hash=sha256:690f44ae8974588810a6c58052e908fb1abc7c3d34e335faccec0baba852596b \
174+
--hash=sha256:810c8106d575256c6e429e26a8edf58e4ab43fea0b10c4d56eed011f0712ee90 \
175+
--hash=sha256:8701a35246db0aa148ea3bb6edb022a639c16115912d2dc90cbad9a56c0ded2e \
176+
--hash=sha256:a5974f2958d12d01577e206417ee4d04dc2f2275505d266323cf23e828e46d96 \
177+
--hash=sha256:b17959e888c65cef0765bfef3e4813f3dad7d3d55f73c976ca33a47d2ff875b5 \
178+
--hash=sha256:b91059dce8f9c97fce586b1367c91c64912ba0866e2213510a3ffe522cee3aee \
179+
--hash=sha256:c8a7058db5c6c478d2f93a14f911dcc045d5470ed0920797ec5a6008a0bce354 \
180+
--hash=sha256:ce7960db4fb7899626a4a94c361b0d2c80c9b3bd6907b929380a176df27b9908 \
181+
--hash=sha256:e23ba269a7f189dc65c1b0ff937beb0630dfbe9a810cd307d284a51cbc8409d6 \
182+
--hash=sha256:e2cccdcc64938ede25afc43efaa4e70fdf45709c3f2b48549adc0d163aa7fadf \
183+
--hash=sha256:fb30a20142ed498569649208d67f03e9e9f345be79ab340ceec734439a475d9a
182184
# via -r requirements.txt
183185
torch-mlir==20250127.357 \
184186
--hash=sha256:43c2362b6a5265405ac5d2291982d6b0d83afafc7ee37165f4cc6b845dec4c15 \

test/Pipeline/tcp_to_llvm_pipeline.mlir

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@
22

33
// CHECK-LABEL: llvm.func @main
44
// CHECK: llvm.mlir.constant
5-
// CHECK: llvm.mlir.undef
65
// CHECK: llvm.insertvalue
76
// CHECK: llvm.extractvalue
87
// CHECK: llvm.alloca

test/python_lit/fx_import/basic_test.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -24,18 +24,17 @@ def run(f):
2424
@run
2525
# CHECK-LABEL: test_import_frozen_exported_program
2626
# CHECK: func.func @main(%[[ARG0:[a-zA-Z0-9]+]]: !torch.vtensor<[3,4],f32>) -> !torch.vtensor<[3,4],f32>
27-
# CHECK-DAG: %[[a:.+]] = torch.vtensor.literal(dense_resource<torch_tensor_1_4_torch.float32> : tensor<1x4xf32>) : !torch.vtensor<[1,4],f32>
27+
# CHECK-DAG: %[[tanh:.+]] = torch.aten.tanh %[[ARG0]]
28+
# CHECK-DAG: %[[a:.+]] = torch.aten.rand{{.*}} -> !torch.vtensor<[1,4],f32>
2829
# CHECK-DAG: %[[b:.+]] = torch.vtensor.literal(dense_resource<torch_tensor_3_1_torch.float32> : tensor<3x1xf32>) : !torch.vtensor<[3,1],f32>
2930
# CHECK-DAG: %[[p:.+]] = torch.vtensor.literal(dense<{{.*>+}} : tensor<1x1xf32>) : !torch.vtensor<[1,1],f32>
30-
# CHECK-DAG: %[[tanh:.+]] = torch.aten.tanh %[[ARG0]]
3131
# CHECK-DAG: %[[mul_a:.+]] = torch.aten.mul.Tensor %[[tanh]], %[[a]]
3232
# CHECK-DAG: %[[mul_b:.+]] = torch.aten.mul.Tensor %[[mul_a]], %[[b]]
3333
# CHECK-DAG: %[[mul_p:.+]] = torch.aten.mul.Tensor %[[mul_b]], %[[p]]
3434
# CHECK: return %[[mul_p]]
3535
#
3636
# Validate dialect resources exist.
3737
# CHECK: dialect_resources:
38-
# CHECK-DAG: torch_tensor_1_4_torch.float32
3938
# CHECK-DAG: torch_tensor_3_1_torch.float32
4039
def test_import_frozen_exported_program():
4140
# Tests the basic structural premises of import_frozen_exported_program,

test/python_lit/fx_import/custom_op_test.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -26,15 +26,15 @@ def run(f):
2626
# CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: !torch.vtensor<[?,?,3],f32>,
2727
# CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: !torch.vtensor<[?,?,3],f32>,
2828
# CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]]: !torch.vtensor<[?,?,3],f32>) -> !torch.vtensor<[?,?,3],f32> {
29-
# CHECK: %[[S0:.+]] = torch.symbolic_int "s0" {min_val = 5, max_val = 10} : !torch.int
30-
# CHECK: %[[S1:.+]] = torch.symbolic_int "s1" {min_val = {{[0-9]+}}, max_val = 100} : !torch.int
31-
# CHECK: %[[S2:.+]] = torch.symbolic_int "s3" {min_val = {{[0-9]+}}, max_val = 50} : !torch.int
32-
# CHECK: %[[S3:.+]] = torch.symbolic_int "s5" {min_val = {{[0-9]+}}, max_val = {{[0-9]+}}} : !torch.int
33-
# CHECK: torch.bind_symbolic_shape %[[ARG0]], [%[[S0]], %[[S1]]], affine_map<()[s0, s1] -> (s0, s1, 3)> : !torch.vtensor<[?,?,3],f32>
29+
# CHECK: %[[S0:.+]] = torch.symbolic_int "s{{.*}}" {min_val = 5, max_val = 10} : !torch.int
30+
# CHECK: %[[S1:.+]] = torch.symbolic_int "s{{.*}}" {min_val = {{[0-9]+}}, max_val = 100} : !torch.int
31+
# CHECK: %[[S2:.+]] = torch.symbolic_int "s{{.*}}" {min_val = {{[0-9]+}}, max_val = 50} : !torch.int
32+
# CHECK: %[[S3:.+]] = torch.symbolic_int "s{{.*}}" {min_val = {{[0-9]+}}, max_val = {{[0-9]+}}} : !torch.int
33+
# CHECK: torch.bind_symbolic_shape %[[ARG0]], [%[[S1]], %[[S0]]], affine_map<()[s0, s1] -> (s1, s0, 3)> : !torch.vtensor<[?,?,3],f32>
3434
# CHECK: torch.bind_symbolic_shape %[[ARG1]], [%[[S0]], %[[S2]]], affine_map<()[s0, s1] -> (s0, s1, 3)> : !torch.vtensor<[?,?,3],f32>
35-
# CHECK: torch.bind_symbolic_shape %[[ARG2]], [%[[S0]], %[[S3]]], affine_map<()[s0, s1] -> (s0, s1, 3)> : !torch.vtensor<[?,?,3],f32>
35+
# CHECK: torch.bind_symbolic_shape %[[ARG2]], [%[[S3]], %[[S0]]], affine_map<()[s0, s1] -> (s1, s0, 3)> : !torch.vtensor<[?,?,3],f32>
3636
# CHECK: %[[OP:.+]] = torch.operator "torch.my_custom_library.tanh_sigmoid_cat_op"(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (!torch.vtensor<[?,?,3],f32>, !torch.vtensor<[?,?,3],f32>, !torch.vtensor<[?,?,3],f32>) -> !torch.vtensor<[?,?,3],f32>
37-
# CHECK: torch.bind_symbolic_shape %[[OP]], [%[[S0]], %[[S1]], %[[S2]], %[[S3]]], affine_map<()[s0, s1, s2, s3] -> (s0, s2 + s3 + s1 * 2, 3)> : !torch.vtensor<[?,?,3],f32>
37+
# CHECK: torch.bind_symbolic_shape %[[OP]], [%[[S1]], %[[S3]], %[[S0]], %[[S2]]], affine_map<()[s0, s1, s2, s3] -> (s2, s1 + s3 + s0 * 2, 3)> : !torch.vtensor<[?,?,3],f32>
3838
# CHECK: return %[[OP]] : !torch.vtensor<[?,?,3],f32>
3939
def test_tanh_sigmoid_cat_custom_op():
4040

test/python_lit/fx_import/symbolic_shape_expr_test.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -26,20 +26,20 @@ def run(f):
2626
# CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: !torch.vtensor<[?,?,3],f32>,
2727
# CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: !torch.vtensor<[?,?,3],f32>,
2828
# CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]]: !torch.vtensor<[?,?,3],f32>) -> !torch.vtensor<[?,?,3],f32> {
29-
# CHECK: %[[S0:.+]] = torch.symbolic_int "s0" {min_val = 5, max_val = 10} : !torch.int
30-
# CHECK: %[[S1:.+]] = torch.symbolic_int "s1" {min_val = {{[0-9]+}}, max_val = 100} : !torch.int
31-
# CHECK: %[[S2:.+]] = torch.symbolic_int "s3" {min_val = {{[0-9]+}}, max_val = 50} : !torch.int
32-
# CHECK: %[[S3:.+]] = torch.symbolic_int "s5" {min_val = {{[0-9]+}}, max_val = {{[0-9]+}}} : !torch.int
33-
# CHECK: torch.bind_symbolic_shape %[[ARG0]], [%[[S0]], %[[S1]]], affine_map<()[s0, s1] -> (s0, s1, 3)> : !torch.vtensor<[?,?,3],f32>
29+
# CHECK: %[[S0:.+]] = torch.symbolic_int "s{{[0-9]+}}" {min_val = 5, max_val = 10} : !torch.int
30+
# CHECK: %[[S1:.+]] = torch.symbolic_int "s{{[0-9]+}}" {min_val = {{[0-9]+}}, max_val = 100} : !torch.int
31+
# CHECK: %[[S2:.+]] = torch.symbolic_int "s{{[0-9]+}}" {min_val = {{[0-9]+}}, max_val = 50} : !torch.int
32+
# CHECK: %[[S3:.+]] = torch.symbolic_int "s{{[0-9]+}}" {min_val = {{[0-9]+}}, max_val = {{[0-9]+}}} : !torch.int
33+
# CHECK: torch.bind_symbolic_shape %[[ARG0]], [%[[S1]], %[[S0]]], affine_map<()[s0, s1] -> (s1, s0, 3)> : !torch.vtensor<[?,?,3],f32>
3434
# CHECK: torch.bind_symbolic_shape %[[ARG1]], [%[[S0]], %[[S2]]], affine_map<()[s0, s1] -> (s0, s1, 3)> : !torch.vtensor<[?,?,3],f32>
35-
# CHECK: torch.bind_symbolic_shape %[[ARG2]], [%[[S0]], %[[S3]]], affine_map<()[s0, s1] -> (s0, s1, 3)> : !torch.vtensor<[?,?,3],f32>
35+
# CHECK: torch.bind_symbolic_shape %[[ARG2]], [%[[S3]], %[[S0]]], affine_map<()[s0, s1] -> (s1, s0, 3)> : !torch.vtensor<[?,?,3],f32>
3636
# CHECK: %[[TANH:.+]] = torch.aten.tanh %[[ARG0]] : !torch.vtensor<[?,?,3],f32> -> !torch.vtensor<[?,?,3],f32>
37-
# CHECK: torch.bind_symbolic_shape %[[TANH]], [%[[S0]], %[[S1]]], affine_map<()[s0, s1] -> (s0, s1, 3)> : !torch.vtensor<[?,?,3],f32>
37+
# CHECK: torch.bind_symbolic_shape %[[TANH]], [%[[S1]], %[[S0]]], affine_map<()[s0, s1] -> (s1, s0, 3)> : !torch.vtensor<[?,?,3],f32>
3838
# CHECK: %[[SIG:.+]] = torch.aten.sigmoid %[[ARG1]] : !torch.vtensor<[?,?,3],f32> -> !torch.vtensor<[?,?,3],f32>
3939
# CHECK: torch.bind_symbolic_shape %[[SIG]], [%[[S0]], %[[S2]]], affine_map<()[s0, s1] -> (s0, s1, 3)> : !torch.vtensor<[?,?,3],f32>
4040
# CHECK: %[[LIST:.+]] = torch.prim.ListConstruct %[[TANH]], %[[TANH]], %[[SIG]], %[[ARG2]] : (!torch.vtensor<[?,?,3],f32>, !torch.vtensor<[?,?,3],f32>, !torch.vtensor<[?,?,3],f32>, !torch.vtensor<[?,?,3],f32>) -> !torch.list<vtensor>
4141
# CHECK: %[[CAT:.+]] = torch.aten.cat %[[LIST]], {{.*}} : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[?,?,3],f32>
42-
# CHECK: torch.bind_symbolic_shape %[[CAT]], [%[[S0]], %[[S1]], %[[S2]], %[[S3]]], affine_map<()[s0, s1, s2, s3] -> (s0, s2 + s3 + s1 * 2, 3)> : !torch.vtensor<[?,?,3],f32>
42+
# CHECK: torch.bind_symbolic_shape %[[CAT]], [%[[S1]], %[[S3]], %[[S0]], %[[S2]]], affine_map<()[s0, s1, s2, s3] -> (s2, s1 + s3 + s0 * 2, 3)> : !torch.vtensor<[?,?,3],f32>
4343
# CHECK: return %[[CAT]] : !torch.vtensor<[?,?,3],f32>
4444
def test_tanh_sigmoid_cat():
4545
class TanhSigmoidCat(nn.Module):

third_party/patches/BUILD

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
# Licensed under the Apache License v2.0 with LLVM Exceptions.
2+
# See https://llvm.org/LICENSE.txt for license information.
3+
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
4+
# Also available under a BSD-style license. See LICENSE.
5+
6+
filegroup(
7+
name = "all_files",
8+
srcs = glob(["*"]),
9+
visibility = ["//visibility:public"],
10+
)
Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
diff --git lib/InitAll.cpp lib/InitAll.cpp
2+
index d9096929..2a9be6cc 100644
3+
--- lib/InitAll.cpp
4+
+++ lib/InitAll.cpp
5+
@@ -33,6 +33,7 @@
6+
#ifdef TORCH_MLIR_ENABLE_STABLEHLO
7+
#include "stablehlo/conversions/linalg/transforms/Passes.h"
8+
#include "stablehlo/transforms/Passes.h"
9+
+#include "stablehlo/transforms/optimization/Passes.h"
10+
#endif
11+
12+
#ifdef TORCH_MLIR_ENABLE_TOSA

0 commit comments

Comments (0)