Skip to content

Commit 1781089

Browse files

Commit message: s/backend::cuda/backend::ext_oneapi_cuda/g
1 parent: 94b96b4 — commit: 1781089

File tree

11 files changed: +29 additions, −29 deletions

examples/MPI/SYCL-MPI-Sample.cpp

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -78,9 +78,9 @@ int main(int argc, char *argv[]) {
7878
sycl::accessor local_acc{local_buffer, h, sycl::read_write};
7979
h.host_task([=](sycl::interop_handle ih) {
8080
auto cuda_ptr = reinterpret_cast<double *>(
81-
ih.get_native_mem<sycl::backend::cuda>(input_acc));
81+
ih.get_native_mem<sycl::backend::ext_oneapi_cuda>(input_acc));
8282
auto cuda_local_ptr = reinterpret_cast<double *>(
83-
ih.get_native_mem<sycl::backend::cuda>(local_acc));
83+
ih.get_native_mem<sycl::backend::ext_oneapi_cuda>(local_acc));
8484
MPI_Scatter(cuda_ptr, local_size, MPI_DOUBLE, cuda_local_ptr, local_size,
8585
MPI_DOUBLE, 0, MPI_COMM_WORLD);
8686
});
@@ -122,9 +122,9 @@ int main(int argc, char *argv[]) {
122122
sycl::accessor global_sum_acc{global_sum, h, sycl::read_write};
123123
h.host_task([=](sycl::interop_handle ih) {
124124
auto cuda_out_ptr = reinterpret_cast<double *>(
125-
ih.get_native_mem<sycl::backend::cuda>(out_acc));
125+
ih.get_native_mem<sycl::backend::ext_oneapi_cuda>(out_acc));
126126
auto cuda_global_sum_ptr = reinterpret_cast<double *>(
127-
ih.get_native_mem<sycl::backend::cuda>(global_sum_acc));
127+
ih.get_native_mem<sycl::backend::ext_oneapi_cuda>(global_sum_acc));
128128
MPI_Allreduce(cuda_out_ptr, cuda_global_sum_ptr, 1, MPI_DOUBLE, MPI_SUM,
129129
MPI_COMM_WORLD);
130130
});
@@ -153,9 +153,9 @@ int main(int argc, char *argv[]) {
153153
sycl::accessor local_acc{local_buffer, h, sycl::read_write};
154154
h.host_task([=](sycl::interop_handle ih) {
155155
auto cuda_local_ptr = reinterpret_cast<double *>(
156-
ih.get_native_mem<sycl::backend::cuda>(local_acc));
156+
ih.get_native_mem<sycl::backend::ext_oneapi_cuda>(local_acc));
157157
auto cuda_input_ptr = reinterpret_cast<double *>(
158-
ih.get_native_mem<sycl::backend::cuda>(input_acc));
158+
ih.get_native_mem<sycl::backend::ext_oneapi_cuda>(input_acc));
159159
MPI_Gather(cuda_local_ptr, local_size, MPI_DOUBLE, cuda_input_ptr,
160160
local_size, MPI_DOUBLE, 0, MPI_COMM_WORLD);
161161
});

examples/cuda_interop/vec_add.cu

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
class CUDASelector : public sycl::device_selector {
1212
public:
1313
int operator()(const sycl::device &device) const override {
14-
if(device.get_platform().get_backend() == sycl::backend::cuda){
14+
if(device.get_platform().get_backend() == sycl::backend::ext_oneapi_cuda){
1515
std::cout << " CUDA device found " << std::endl;
1616
return 1;
1717
} else{
@@ -63,9 +63,9 @@ int main(int argc, char *argv[]) {
6363
auto accC = bC.get_access<access::mode::write>(h);
6464

6565
h.host_task([=](interop_handle ih) {
66-
auto dA = reinterpret_cast<double*>(ih.get_native_mem<backend::cuda>(accA));
67-
auto dB = reinterpret_cast<double*>(ih.get_native_mem<backend::cuda>(accB));
68-
auto dC = reinterpret_cast<double*>(ih.get_native_mem<backend::cuda>(accC));
66+
auto dA = reinterpret_cast<double*>(ih.get_native_mem<backend::ext_oneapi_cuda>(accA));
67+
auto dB = reinterpret_cast<double*>(ih.get_native_mem<backend::ext_oneapi_cuda>(accB));
68+
auto dC = reinterpret_cast<double*>(ih.get_native_mem<backend::ext_oneapi_cuda>(accC));
6969

7070
int blockSize, gridSize;
7171
// Number of threads in each thread block

examples/cuda_interop/vec_add_usm.cu

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
class CUDASelector : public sycl::device_selector {
88
public:
99
int operator()(const sycl::device &device) const override {
10-
if(device.get_platform().get_backend() == sycl::backend::cuda){
10+
if(device.get_platform().get_backend() == sycl::backend::ext_oneapi_cuda){
1111
std::cout << " CUDA device found " << std::endl;
1212
return 1;
1313
} else{

examples/distrib_batch_gemm/distributed-batch-gemm.cpp

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -99,9 +99,9 @@ int main(int argc, char **argv) {
9999
h);
100100
h.interop_task([=](sycl::interop_handle ih) {
101101
auto global_a_ptr = reinterpret_cast<float *>(
102-
ih.get_native_mem<sycl::backend::cuda>(global_a_acc));
102+
ih.get_native_mem<sycl::backend::ext_oneapi_cuda>(global_a_acc));
103103
auto local_a_ptr = reinterpret_cast<float *>(
104-
ih.get_native_mem<sycl::backend::cuda>(local_a_acc));
104+
ih.get_native_mem<sycl::backend::ext_oneapi_cuda>(local_a_acc));
105105
MPI_Scatter(global_a_ptr, lda * k, MPI_FLOAT, local_a_ptr, lda * k,
106106
MPI_FLOAT, 0, MPI_COMM_WORLD);
107107
});
@@ -117,9 +117,9 @@ int main(int argc, char **argv) {
117117
h);
118118
h.interop_task([=](sycl::interop_handle ih) {
119119
auto global_b_ptr = reinterpret_cast<float *>(
120-
ih.get_native_mem<sycl::backend::cuda>(global_b_acc));
120+
ih.get_native_mem<sycl::backend::ext_oneapi_cuda>(global_b_acc));
121121
auto local_b_ptr = reinterpret_cast<float *>(
122-
ih.get_native_mem<sycl::backend::cuda>(local_b_acc));
122+
ih.get_native_mem<sycl::backend::ext_oneapi_cuda>(local_b_acc));
123123
MPI_Scatter(global_b_ptr, ldb * n, MPI_FLOAT, local_b_ptr, ldb * n,
124124
MPI_FLOAT, 0, MPI_COMM_WORLD);
125125
});
@@ -144,9 +144,9 @@ int main(int argc, char **argv) {
144144
h);
145145
h.interop_task([=](sycl::interop_handle ih) {
146146
auto local_c_ptr = reinterpret_cast<float *>(
147-
ih.get_native_mem<sycl::backend::cuda>(local_c_acc));
147+
ih.get_native_mem<sycl::backend::ext_oneapi_cuda>(local_c_acc));
148148
auto global_c_ptr = reinterpret_cast<float *>(
149-
ih.get_native_mem<sycl::backend::cuda>(global_c_acc));
149+
ih.get_native_mem<sycl::backend::ext_oneapi_cuda>(global_c_acc));
150150
MPI_Gather(local_c_ptr, ldc * n, MPI_FLOAT, global_c_ptr, ldc * n,
151151
MPI_FLOAT, 0, MPI_COMM_WORLD);
152152
});

examples/hashing/include/tools/sycl_queue_helpers.hpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ class cuda_selector : public sycl::device_selector {
1818
public:
1919
int operator()(const sycl::device &device) const override {
2020
#if defined(SYCL_IMPLEMENTATION_ONEAPI) || defined(SYCL_IMPLEMENTATION_INTEL)
21-
return device.get_platform().get_backend() == sycl::backend::cuda && device.get_info<sycl::info::device::is_available>() ? 1 : -1;
21+
return device.get_platform().get_backend() == sycl::backend::ext_oneapi_cuda && device.get_info<sycl::info::device::is_available>() ? 1 : -1;
2222
#else
2323
return device.is_gpu() && (device.get_info<sycl::info::device::name>().find("NVIDIA") != std::string::npos) ? 1 : -1;
2424
#endif

examples/sgemm_interop/sycl_sgemm.cpp

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ void inline checkCudaErrorMsg(CUresult status, const char *msg) {
3434
class CUDASelector : public sycl::device_selector {
3535
public:
3636
int operator()(const sycl::device &device) const override {
37-
if(device.get_platform().get_backend() == sycl::backend::cuda){
37+
if(device.get_platform().get_backend() == sycl::backend::ext_oneapi_cuda){
3838
std::cout << " CUDA device found " << std::endl;
3939
return 1;
4040
} else{
@@ -81,13 +81,12 @@ int main() {
8181
auto d_C = b_C.get_access<sycl::access::mode::write>(h);
8282

8383
h.host_task([=](sycl::interop_handle ih) {
84+
cuCtxSetCurrent(ih.get_native_context<backend::ext_oneapi_cuda>());
8485
auto cuStream = ih.get_native_queue<backend::ext_oneapi_cuda>();
8586
cublasSetStream(handle, cuStream);
86-
cuCtxSetCurrent(ih.get_native_context<backend::cuda>());
87-
cublasSetStream(handle, ih.get_native_queue<backend::cuda>());
88-
auto cuA = reinterpret_cast<float *>(ih.get_native_mem<backend::cuda>(d_A));
89-
auto cuB = reinterpret_cast<float *>(ih.get_native_mem<backend::cuda>(d_B));
90-
auto cuC = reinterpret_cast<float *>(ih.get_native_mem<backend::cuda>(d_C));
87+
auto cuA = reinterpret_cast<float *>(ih.get_native_mem<backend::ext_oneapi_cuda>(d_A));
88+
auto cuB = reinterpret_cast<float *>(ih.get_native_mem<backend::ext_oneapi_cuda>(d_B));
89+
auto cuC = reinterpret_cast<float *>(ih.get_native_mem<backend::ext_oneapi_cuda>(d_C));
9190

9291
CHECK_ERROR(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, WIDTH, HEIGHT,
9392
WIDTH, &ALPHA, cuA, WIDTH, cuB, WIDTH, &BETA,

examples/sgemm_interop/sycl_sgemm_usm.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ void inline checkCudaErrorMsg(CUresult status, const char *msg) {
3434
class CUDASelector : public sycl::device_selector {
3535
public:
3636
int operator()(const sycl::device &device) const override {
37-
if(device.get_platform().get_backend() == sycl::backend::cuda){
37+
if(device.get_platform().get_backend() == sycl::backend::ext_oneapi_cuda){
3838
std::cout << " CUDA device found " << std::endl;
3939
return 1;
4040
} else{
@@ -86,6 +86,7 @@ int main() {
8686
h.host_task([=](sycl::interop_handle ih) {
8787

8888
// Set the correct cuda context & stream
89+
cuCtxSetCurrent(ih.get_native_context<backend::ext_oneapi_cuda>());
8990
auto cuStream = ih.get_native_queue<backend::ext_oneapi_cuda>();
9091
cublasSetStream(handle, cuStream);
9192

examples/vector_addition/vector_addition.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@
2727
class CUDASelector : public sycl::device_selector {
2828
public:
2929
int operator()(const sycl::device &device) const override {
30-
if(device.get_platform().get_backend() == sycl::backend::cuda){
30+
if(device.get_platform().get_backend() == sycl::backend::ext_oneapi_cuda){
3131
std::cout << " CUDA device found " << std::endl;
3232
return 1;
3333
} else{

examples/vector_addition/vector_addition_usm.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@
2727
class CUDASelector : public sycl::device_selector {
2828
public:
2929
int operator()(const sycl::device &device) const override {
30-
if(device.get_platform().get_backend() == sycl::backend::cuda){
30+
if(device.get_platform().get_backend() == sycl::backend::ext_oneapi_cuda){
3131
std::cout << " CUDA device found " << std::endl;
3232
return 1;
3333
} else{

setup-script/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ config file.
2929
class CUDADeviceSelector : public sycl::device_selector {
3030
public:
3131
int operator()(const sycl::device &device) const override {
32-
   return device.get_platform().get_backend() == sycl::backend::cuda ? 1 : -1;
32+
   return device.get_platform().get_backend() == sycl::backend::ext_oneapi_cuda ? 1 : -1;
3333
}
3434
};
3535
```

Commit comments: 0