Skip to content

Commit 5b292cd

Browse files
authored
[SYCL][E2E] Replace any-device-is- with target- features in %if markup (#16889)
Using `target-` features is preferable for `%if` statements because such a feature only evaluates to true if a device with the corresponding build target is selected, or if we are in `build-only` mode and the corresponding triple was selected to build for the test. `any-device-is`, on the other hand, evaluates to true any time the device is available on the system — even if it is marked as XFAIL/UNSUPPORTED — and in `build-only` mode it is never true because no devices exist in that mode.
1 parent 549e165 commit 5b292cd

27 files changed

+32
-32
lines changed

sycl/test-e2e/AtomicRef/atomic_memory_order_acq_rel.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// RUN: %{build} -O3 -o %t.out %if any-device-is-cuda %{ -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70 %}
1+
// RUN: %{build} -O3 -o %t.out %if target-nvidia %{ -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70 %}
22
// RUN: %{run} %t.out
33

44
// NOTE: Tests fetch_add for acquire and release memory ordering.

sycl/test-e2e/AtomicRef/atomic_memory_order_seq_cst.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// RUN: %{build} -O3 -o %t.out %if any-device-is-cuda %{ -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70 %}
1+
// RUN: %{build} -O3 -o %t.out %if target-nvidia %{ -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70 %}
22
// RUN: %{run} %t.out
33
// UNSUPPORTED: arch-intel_gpu_bmg_g21
44
// UNSUPPORTED-TRACKER: https://github.com/intel/llvm/issues/16924

sycl/test-e2e/BFloat16/bfloat16_builtins.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,11 +5,11 @@
55
// + below sm_80 always uses generic impls
66

77
// DEFINE: %{mathflags} = %if cl_options %{/clang:-fno-fast-math%} %else %{-fno-fast-math%}
8-
// RUN: %clangxx -fsycl %{sycl_target_opts} %if any-device-is-cuda %{ -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_80 %} %s -o %t.out %{mathflags}
8+
// RUN: %clangxx -fsycl %{sycl_target_opts} %if target-nvidia %{ -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_80 %} %s -o %t.out %{mathflags}
99
// RUN: %{run} %t.out
1010

1111
// Test "new" (ABI breaking) for all platforms ( sm_80/native if CUDA )
12-
// RUN: %if preview-breaking-changes-supported %{ %clangxx -fsycl -fpreview-breaking-changes %{sycl_target_opts} %if any-device-is-cuda %{ -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_80 %} %s -o %t2.out %{mathflags} %}
12+
// RUN: %if preview-breaking-changes-supported %{ %clangxx -fsycl -fpreview-breaking-changes %{sycl_target_opts} %if target-nvidia %{ -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_80 %} %s -o %t2.out %{mathflags} %}
1313
// RUN: %if preview-breaking-changes-supported %{ %{run} %t2.out %}
1414

1515
// Flaky timeout on CPU. Enable when fixed.

sycl/test-e2e/BFloat16/bfloat16_builtins_cuda_generic.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,8 @@
77
// DEFINE: %{mathflags} = %if cl_options %{/clang:-fno-fast-math%} %else %{-fno-fast-math%}
88

99
// If CUDA, test "new" again for sm_75/generic
10-
// RUN: %if any-device-is-cuda %{ %if preview-breaking-changes-supported %{ %clangxx -fsycl -fpreview-breaking-changes %{sycl_target_opts} -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_75 %s -o %t3.out %{mathflags} %} %}
11-
// RUN: %if any-device-is-cuda %{ %if preview-breaking-changes-supported %{ %{run} %t3.out %} %}
10+
// RUN: %if target-nvidia %{ %if preview-breaking-changes-supported %{ %clangxx -fsycl -fpreview-breaking-changes %{sycl_target_opts} -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_75 %s -o %t3.out %{mathflags} %} %}
11+
// RUN: %if target-nvidia %{ %if preview-breaking-changes-supported %{ %{run} %t3.out %} %}
1212

1313
#include "bfloat16_builtins.hpp"
1414

sycl/test-e2e/BFloat16/bfloat16_type.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// RUN: %if any-device-is-cuda %{ %{build} -DUSE_CUDA_SM80=1 -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_80 -o %t.cuda.out %}
1+
// RUN: %if target-nvidia %{ %{build} -DUSE_CUDA_SM80=1 -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_80 -o %t.cuda.out %}
22
// RUN: %if cuda %{ %{run} %t.cuda.out %}
33
// RUN: %{build} -o %t.out
44
// RUN: %{run} %t.out

sycl/test-e2e/Basic/interop/interop_all_backends.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
1-
// XFAIL: any-device-is-cuda
1+
// XFAIL: target-nvidia
22
// XFAIL-TRACKER: https://github.com/intel/llvm/issues/16070
3-
// RUN: %if any-device-is-opencl %{ %{build} -o %t-opencl.out %}
4-
// RUN: %if any-device-is-cuda %{ %{build} -isystem %sycl_include -DBUILD_FOR_CUDA -o %t-cuda.out %}
5-
// RUN: %if any-device-is-hip %{ %{build} -DBUILD_FOR_HIP -o %t-hip.out %}
3+
// RUN: %if target-spir %{ %{build} -o %t-opencl.out %}
4+
// RUN: %if target-nvidia %{ %{build} -isystem %sycl_include -DBUILD_FOR_CUDA -o %t-cuda.out %}
5+
// RUN: %if target-amd %{ %{build} -DBUILD_FOR_HIP -o %t-hip.out %}
66

77
#include <sycl/backend.hpp>
88
#include <sycl/detail/core.hpp>

sycl/test-e2e/DeviceLib/cmath_test.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
// RUN: %if !gpu %{ %{run} %t2.out %}
88
//
99
// // Check that --fast-math works with cmath funcs for CUDA
10-
// RUN: %if any-device-is-cuda %{ %{build} -Wno-nan-infinity-disabled -fno-builtin %{mathflags} -o %t3.out -ffast-math -DSYCL_E2E_FASTMATH %}
10+
// RUN: %if target-nvidia %{ %clangxx -fsycl -fsycl-targets=nvptx64-nvidia-cuda %s -Wno-nan-infinity-disabled -fno-builtin %{mathflags} -o %t3.out -ffast-math -DSYCL_E2E_FASTMATH %}
1111
// RUN: %if cuda %{ %{run} %t3.out %}
1212

1313
#include "math_utils.hpp"

sycl/test-e2e/GroupAlgorithm/root_group.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
// TODO: Currently using the -Wno-deprecated-declarations flag due to issue
66
// https://github.com/intel/llvm/issues/16451. Rewrite testRootGroup() and
77
// remove the flag once the issue is resolved.
8-
// RUN: %{build} -I . -o %t.out -Wno-deprecated-declarations %if any-device-is-cuda %{ -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70 %}
8+
// RUN: %{build} -I . -o %t.out -Wno-deprecated-declarations %if target-nvidia %{ -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70 %}
99
// RUN: %{run} %t.out
1010

1111
// Disabled temporarily while investigation into the failure is ongoing.

sycl/test-e2e/NewOffloadDriver/lit.local.cfg

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,5 +3,5 @@ import platform
33
config.unsupported_features += ['accelerator']
44

55
config.substitutions.append(
6-
("%{embed-ir}", "%if any-device-is-hip || any-device-is-cuda %{ -fsycl-embed-ir %}")
6+
("%{embed-ir}", "%if target-amd || target-nvidia %{ -fsycl-embed-ir %}")
77
)

sycl/test-e2e/Reduction/reduction_range_1d_dw.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// RUN: %{build} -DENABLE_64_BIT=false -o %t.out %if any-device-is-cuda %{ -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_60 %}
1+
// RUN: %{build} -DENABLE_64_BIT=false -o %t.out %if target-nvidia %{ -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_60 %}
22
// RUN: %{run} %t.out
33

44
// Windows doesn't yet have full shutdown().

0 commit comments

Comments (0)