Skip to content

Commit 798741e

Browse files
authored
Merge pull request #2677 from JuliaGPU/ksh/cusparse_again
More tests, better errors, more exclusions for CUSPARSE
2 parents ee58829 + d675c68 commit 798741e

File tree

5 files changed

+33
-8
lines changed

5 files changed

+33
-8
lines changed

lib/cusparse/conversions.jl

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ function SparseArrays.sparse(x::DenseCuMatrix; fmt=:csc)
2323
elseif fmt == :coo
2424
return CuSparseMatrixCOO(x)
2525
else
26-
error("Format :$fmt not available, use :csc, :csr, :bsr or :coo.")
26+
throw(ArgumentError("Format :$fmt not available, use :csc, :csr, :bsr or :coo."))
2727
end
2828
end
2929

@@ -42,6 +42,7 @@ function SparseArrays.sparse(I::CuVector{Cint}, J::CuVector{Cint}, V::CuVector{T
4242
# otherwise, it will contain gaps of zeros that indicates duplicate values.
4343
coo = sort_coo(coo, 'R')
4444
groups = similar(I, Int)
45+
# COV_EXCL_START
4546
function find_groups(groups, I, J)
4647
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
4748
if i > length(groups)
@@ -54,6 +55,7 @@ function SparseArrays.sparse(I::CuVector{Cint}, J::CuVector{Cint}, V::CuVector{T
5455

5556
return
5657
end
58+
# COV_EXCL_STOP
5759
kernel = @cuda launch=false find_groups(groups, coo.rowInd, coo.colInd)
5860
config = launch_configuration(kernel.fun)
5961
threads = min(length(groups), config.threads)
@@ -83,6 +85,7 @@ function SparseArrays.sparse(I::CuVector{Cint}, J::CuVector{Cint}, V::CuVector{T
8385

8486
# use one thread per (old) value, and if it's at the start of a group,
8587
# combine (if needed) all values and update the output vectors.
88+
# COV_EXCL_START
8689
function combine_groups(groups, indices, oldI, oldJ, oldV, newI, newJ, newV, combine)
8790
i = threadIdx().x + (blockIdx().x - 1) * blockDim().x
8891
if i > length(groups)
@@ -107,6 +110,7 @@ function SparseArrays.sparse(I::CuVector{Cint}, J::CuVector{Cint}, V::CuVector{T
107110

108111
return
109112
end
113+
# COV_EXCL_STOP
110114
kernel = @cuda launch=false combine_groups(groups, indices, coo.rowInd, coo.colInd, coo.nzVal, I, J, V, combine)
111115
config = launch_configuration(kernel.fun)
112116
threads = min(length(groups), config.threads)
@@ -123,7 +127,7 @@ function SparseArrays.sparse(I::CuVector{Cint}, J::CuVector{Cint}, V::CuVector{T
123127
elseif fmt == :csr
124128
return CuSparseMatrixCSR(coo)
125129
else
126-
error("Format :$fmt not available, use :csc, :csr, or :coo.")
130+
throw(ArgumentError("Format :$fmt not available, use :csc, :csr, or :coo."))
127131
end
128132
end
129133

lib/cusparse/linalg.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ function LinearAlgebra.opnorm(A::CuSparseMatrixCSR, p::Real=2)
77
elseif p == 1
88
return maximum(sum(abs, A; dims=1))
99
else
10-
error("p=$p is not supported")
10+
throw(ArgumentError("p=$p is not supported"))
1111
end
1212
end
1313

test/libraries/cusparse.jl

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,12 +20,14 @@ blockdim = 5
2020
@test size(d_x,1) == m
2121
@test size(d_x,2) == 1
2222
@test ndims(d_x) == 1
23+
dense_d_x = CuVector(x)
2324
CUDA.@allowscalar begin
2425
@test Array(d_x[:]) == x[:]
2526
@test d_x[firstindex(d_x)] == x[firstindex(x)]
2627
@test d_x[div(end, 2)] == x[div(end, 2)]
2728
@test d_x[end] == x[end]
2829
@test Array(d_x[firstindex(d_x):end]) == x[firstindex(x):end]
30+
@test Array(dense_d_x[firstindex(d_x):end]) == x[firstindex(x):end]
2931
end
3032
@test_throws BoundsError d_x[firstindex(d_x) - 1]
3133
@test_throws BoundsError d_x[end + 1]
@@ -1232,3 +1234,17 @@ end
12321234
@test convert(CUSPARSE.cusparseSpSMUpdate_t, CUSPARSE.SparseChar('D')) == CUSPARSE.CUSPARSE_SPSV_UPDATE_DIAGONAL
12331235
@test_throws ArgumentError("Unknown update X") convert(CUSPARSE.cusparseSpSMUpdate_t, CUSPARSE.SparseChar('X'))
12341236
end
1237+
1238+
@testset "CuSparseArrayCSR" begin
1239+
x = sprand(n, m, 0.2)
1240+
d_x = CuSparseArrayCSR(CuArray(x.colptr), CuArray(x.rowval), CuArray(x.nzval), (m, n))
1241+
@test d_x isa CuSparseArrayCSR
1242+
@test length(d_x) == m*n
1243+
@test CuSparseArrayCSR(d_x) === d_x
1244+
@test size(similar(d_x)) == size(d_x)
1245+
@test size(d_x, 3) == 1
1246+
@test_throws ArgumentError("dimension must be ≥ 1, got 0") size(d_x, 0)
1247+
CUDA.@allowscalar begin
1248+
@test d_x[1, 2] == x[2, 1]
1249+
end
1250+
end

test/libraries/cusparse/conversions.jl

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,8 @@ using CUDA
3737
@test collect(x) == collect(dense)
3838
end
3939
end
40+
@test_throws ArgumentError("Format :bad not available, use :csc, :csr, :bsr or :coo.") sparse(dense; fmt=:bad)
41+
@test_throws ArgumentError("Format :bad not available, use :csc, :csr, or :coo.") sparse(I, J, V; fmt=:bad)
4042
end
4143

4244
@testset "unsorted sparse (CUDA.jl#1407)" begin
@@ -54,10 +56,12 @@ end
5456
end
5557

5658
@testset "CuSparseMatrix(::Adjoint/::Transpose)" begin
57-
A = sprand(5, 5, 0.2)
58-
for T in (CuSparseMatrixCSC, CuSparseMatrixCSR, CuSparseMatrixCOO), f in (transpose, adjoint)
59-
dA = T(f(A))
60-
@test Array(dA) == f(A)
59+
for typ in (Float32, ComplexF32, Float64, ComplexF64)
60+
A = sprand(typ, 5, 5, 0.2)
61+
for T in (CuSparseMatrixCSC{typ}, CuSparseMatrixCSR{typ}, CuSparseMatrixCOO{typ}), f in (transpose, adjoint)
62+
dA = T(f(A))
63+
@test Array(dA) == f(A)
64+
end
6165
end
6266
end
6367

test/libraries/cusparse/linalg.jl

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ using LinearAlgebra, SparseArrays
99
@test opnorm(S, Inf) ≈ opnorm(dS_csr, Inf)
1010
@test opnorm(S, 1) ≈ opnorm(dS_csc, 1)
1111
@test opnorm(S, 1) ≈ opnorm(dS_csr, 1)
12+
@test_throws ArgumentError("p=2 is not supported") opnorm(dS_csr, 2)
1213
@test norm(S, 0) ≈ norm(dS_csc, 0)
1314
@test norm(S, 0) ≈ norm(dS_csr, 0)
1415
@test norm(S, 1) ≈ norm(dS_csc, 1)
@@ -80,4 +81,4 @@ end
8081
A2 = typ(A)
8182

8283
@test dot(x, A, y) ≈ dot(x2, A2, y2)
83-
end
84+
end

0 commit comments

Comments
 (0)