Commit 30bd453

unbreak some data movement cuda tests (#2504)
1 parent: 93e1de7

File tree

2 files changed: +12 lines, -10 lines

Project.toml

Lines changed: 1 addition & 1 deletion
@@ -48,7 +48,7 @@ ChainRulesCore = "1.12"
 Compat = "4.10.0"
 Enzyme = "0.12, 0.13"
 Functors = "0.4"
-MLDataDevices = "1.4.0"
+MLDataDevices = "1.4.2"
 MLUtils = "0.4"
 MPI = "0.20.19"
 MacroTools = "0.5"

test/ext_cuda/cuda.jl

Lines changed: 11 additions & 9 deletions
@@ -1,8 +1,10 @@
-using Flux, Test
+using Flux, Test, Zygote
 using Flux: cpu, gpu
 using Statistics: mean
-using LinearAlgebra: I, cholesky, Cholesky
+using LinearAlgebra: I, cholesky, Cholesky, Adjoint
 using SparseArrays: sparse, SparseMatrixCSC, AbstractSparseArray
+using CUDA
+CUDA.allowscalar(false)
 
 @testset "CUDA" begin
   x = randn(5, 5)
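The new `CUDA.allowscalar(false)` line makes any scalar indexing of a `CuArray` throw instead of silently falling back to a slow host round trip, so the tests below fail loudly rather than crawl. A minimal sketch of the behaviour, assuming a working CUDA device (not part of this diff):

    using CUDA
    CUDA.allowscalar(false)       # scalar getindex/setindex! on a CuArray now errors
    x = CUDA.rand(4)
    # x[1]                        # would throw a scalar-indexing error
    CUDA.@allowscalar x[1]        # opts back in for a single expression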
@@ -48,11 +50,11 @@ end
   # construct from CuArray
   x = [1, 3, 2]
   y = Flux.onehotbatch(x, 0:3)
-  @test_skip begin # https://github.com/FluxML/OneHotArrays.jl/issues/16
+
+  # https://github.com/FluxML/OneHotArrays.jl/issues/16
   y2 = Flux.onehotbatch(x |> gpu, 0:3)
   @test y2.indices isa CuArray
   @test y2 |> cpu == y
-  end
 end
 
 @testset "onecold gpu" begin
@@ -104,19 +106,19 @@ end
   # Trivial functions
   @test gradient(x -> sum(abs, gpu(x)), a)[1] isa Matrix
   @test gradient(x -> sum(gpu(x)), a)[1] isa Matrix
-  @test_skip gradient(x -> sum(gpu(x)), a')[1] isa Matrix # sum(::Adjoint{T,CuArray}) makes a Fill
+  @test_broken gradient(x -> sum(gpu(x)), a')[1] isa Matrix # sum(::Adjoint{T,CuArray}) makes a Fill
   @test gradient(x -> sum(abs, cpu(x)), ca)[1] isa CuArray
   # This test should really not go through indirections and pull out Fills for efficiency
   # but we forcefully materialise. TODO: remove materialising CuArray here
   @test gradient(x -> sum(cpu(x)), ca)[1] isa CuArray # This involves FillArray, which should be GPU compatible
-  @test gradient(x -> sum(cpu(x)), ca')[1] isa Adjoint{Float32, <:CuArray}
+  @test gradient(x -> sum(cpu(x)), ca')[1] isa CuArray
 
   # Even more trivial: no movement
   @test gradient(x -> sum(abs, cpu(x)), a)[1] isa Matrix
-  @test_broken gradient(x -> sum(abs, cpu(x)), a')[1] isa Matrix
+  @test gradient(x -> sum(abs, cpu(x)), a')[1] isa Matrix
   @test gradient(x -> sum(cpu(x)), a)[1] isa typeof(gradient(sum, a)[1]) # FillArray
   @test gradient(x -> sum(abs, gpu(x)), ca)[1] isa CuArray
-  @test_broken gradient(x -> sum(abs, gpu(x)), ca')[1] isa CuArray
+  @test gradient(x -> sum(abs, gpu(x)), ca')[1] isa CuArray
 
   # More complicated, Array * CuArray is an error
   g0 = gradient(x -> sum(abs, (a * (a * x))), a)[1]
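The swap from `@test_skip` to `@test_broken` changes what CI actually verifies: `@test_skip` never evaluates the expression and always records it as Broken, while `@test_broken` runs the expression, records the expected failure as Broken, and errors the moment the expression starts passing, so a fixed upstream bug cannot go unnoticed. A self-contained illustration using only the Test stdlib:

    using Test

    @testset "skip vs broken" begin
        @test_skip   1 == 2    # not evaluated; always recorded as Broken
        @test_broken 1 == 2    # evaluated; the expected failure is recorded as Broken
        # @test_broken 1 == 1  # would error with "Unexpected Pass"
    end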
@@ -198,7 +200,7 @@ end
   post2 = Flux.DataLoader((x=X, y=Y); batchsize=7, shuffle=false) |> gpu
   for (p, q) in zip(pre2, post2)
     @test p.x == q.x
-    @test_skip p.y == q.y # https://github.com/FluxML/OneHotArrays.jl/issues/28 -- MethodError: getindex(::OneHotArrays.OneHotMatrix{UInt32, CuArray{UInt32, 1, CUDA.Mem.DeviceBuffer}}, ::Int64, ::Int64) is ambiguous
+    @test_broken p.y == q.y # https://github.com/FluxML/OneHotArrays.jl/issues/28 -- MethodError: getindex(::OneHotArrays.OneHotMatrix{UInt32, CuArray{UInt32, 1, CUDA.Mem.DeviceBuffer}}, ::Int64, ::Int64) is ambiguous
   end
 
   @test collect(pre2) isa Vector{<:NamedTuple{(:x, :y)}}
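Piping a whole `Flux.DataLoader` through `gpu`, as this test does, moves each batch to the device as it is produced rather than transferring the full dataset up front. A sketch of the pattern under test, with illustrative array sizes and assuming a CUDA device:

    using Flux, CUDA

    X = rand(Float32, 3, 20)
    Y = Flux.onehotbatch(rand(1:4, 20), 1:4)

    loader = Flux.DataLoader((x = X, y = Y); batchsize = 7, shuffle = false) |> gpu
    for batch in loader
        @assert batch.x isa CuArray   # batches arrive already on the GPU
    end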
