
Commit 7514dae

cuda 4.0 compat (#2177)
* cuda 4.0 compat
* add news item
1 parent 4d508bc commit 7514dae

7 files changed: 8 additions, 20 deletions


NEWS.md

Lines changed: 3 additions & 0 deletions

@@ -1,5 +1,8 @@
 # Flux Release Notes
 
+## v0.13.8
+* CUDA.jl 4.0 compatibility.
+
 ## v0.13.7
 * Added [`@autosize` macro](https://github.com/FluxML/Flux.jl/pull/2078)
 * New method of `train!` using Zygote's "explicit" mode. Part of a move away from "implicit" `Params`.

Project.toml

Lines changed: 1 addition & 1 deletion

@@ -25,7 +25,7 @@ Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
 
 [compat]
 Adapt = "3.0"
-CUDA = "3"
+CUDA = "3, 4"
 ChainRulesCore = "1.12"
 Functors = "0.3, 0.4"
 MLUtils = "0.2, 0.3.1, 0.4"
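For context, a `[compat]` entry like `CUDA = "3, 4"` is a comma-union of caret ranges, so it admits any 3.x or 4.x release while still excluding the next major. A minimal sketch, not part of this commit, using Pkg's internal compat parser `Pkg.Types.semver_spec` (internal API, so treat it as illustrative):

using Pkg

# semver_spec parses [compat] strings the way the resolver does.
spec = Pkg.Types.semver_spec("3, 4")
v"3.12.0" in spec   # true: any 3.x release is admitted
v"4.0.0"  in spec   # true: 4.x is now admitted as well
v"5.0.0"  in spec   # false: the next major is still excluded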

src/functor.jl

Lines changed: 0 additions & 3 deletions

@@ -206,9 +206,6 @@ end
 function check_use_cuda()
   if use_cuda[] === nothing
     use_cuda[] = CUDA.functional()
-    if use_cuda[] && !CUDA.has_cudnn()
-      @warn "CUDA.jl found cuda, but did not find libcudnn. Some functionality will not be available."
-    end
     if !(use_cuda[])
       @info """The GPU function is being called but the GPU is not accessible.
                Defaulting back to the CPU. (No action is required if you want to run on the CPU).""" maxlog=1
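For context, a minimal sketch of the behaviour this deletion leaves in place, assuming (as in CUDA.jl 4) that the cuDNN wrapper lives in its own subpackage and `CUDA.has_cudnn` no longer exists:

using CUDA

# CUDA.functional() is now the single gate for GPU availability;
# a separate libcudnn probe is no longer meaningful.
if CUDA.functional()
    @info "GPU path enabled"
else
    @info "No usable GPU; computations stay on the CPU"
end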

src/losses/functions.jl

Lines changed: 2 additions & 2 deletions

@@ -95,8 +95,8 @@ julia> Flux.huber_loss(ŷ, 1:3, δ=0.05) # changes behaviour as |ŷ - y| > δ
 function huber_loss(ŷ, y; agg = mean, δ = ofeltype(ŷ, 1))
   _check_sizes(ŷ, y)
   abs_error = abs.(ŷ .- y)
-  #TODO: remove dropgrad when Zygote can handle this function with CuArrays
-  temp = Zygote.dropgrad(abs_error .< δ)
+  #TODO: remove ignore_derivatives when Zygote can handle this function with CuArrays
+  temp = Zygote.ignore_derivatives(abs_error .< δ)
   x = ofeltype(ŷ, 0.5)
   agg(((abs_error .^ 2) .* temp) .* x .+ δ * (abs_error .- x * δ) .* (1 .- temp))
 end
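For context, `ignore_derivatives` is the current spelling of what the deprecated `dropgrad` did: treat a value as a constant for automatic differentiation. A minimal sketch, not part of this commit, of why that is exactly what the boolean mask in `huber_loss` wants:

using Zygote

# The mask is constant w.r.t. x, so no gradient flows through the comparison
# (which is non-differentiable anyway); gradient only flows through x .^ 2.
f(x) = sum(Zygote.ignore_derivatives(x .> 0) .* x .^ 2)
Zygote.gradient(f, [-1.0, 2.0])   # ([0.0, 4.0],)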

test/cuda/runtests.jl

Lines changed: 1 addition & 1 deletion

@@ -11,7 +11,7 @@ include("cuda.jl")
 include("losses.jl")
 include("layers.jl")
 
-if CUDA.has_cudnn()
+if CUDA.functional()
   @info "Testing Flux/CUDNN"
   include("cudnn.jl")
   include("curnn.jl")

test/runtests.jl

Lines changed: 0 additions & 1 deletion

@@ -1,5 +1,4 @@
 using Flux
-using Flux.Data
 using Flux: OneHotArray, OneHotMatrix, OneHotVector
 using Flux: params
 using Test

test/utils.jl

Lines changed: 1 addition & 12 deletions

@@ -2,7 +2,7 @@ using Flux
 using Flux: throttle, nfan, glorot_uniform, glorot_normal,
             kaiming_normal, kaiming_uniform, orthogonal, truncated_normal,
             sparse_init, identity_init, unstack, batch, unbatch,
-            unsqueeze, params, loadparams!, loadmodel!
+            unsqueeze, params, loadmodel!
 using MLUtils
 using StatsBase: var, std
 using Statistics, LinearAlgebra
@@ -385,17 +385,6 @@ end
   @test_skip typeof(l1.bias) === typeof(l2.bias)
 end
 
-@testset "loadparams!" begin
-  pars(w, b) = [w, b]
-  pars(l) = pars(l.weight, l.bias)
-  pararray(m) = mapreduce(pars, vcat, m)
-  weights(m) = mapreduce(l -> [l.weight], vcat, m)
-  @testset "Bias type $bt" for bt in (Flux.zeros32, nobias)
-    m = dm(bt)
-    Flux.loadparams!(m, params(m))
-    testdense(m, bt)
-  end
-end
 
 @testset "loadmodel!(dst, src)" begin
   m1 = Chain(Dense(10, 5), Dense(5, 2, relu))
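For context, `loadmodel!` is the structural replacement for the deleted `loadparams!` pattern, copying parameters from a source model into a destination model layer by layer. A minimal sketch, not part of this commit, reusing the model shape from the surrounding tests:

using Flux

m1 = Chain(Dense(10, 5), Dense(5, 2, relu))
m2 = Chain(Dense(10, 5), Dense(5, 2, relu))
Flux.loadmodel!(m1, m2)          # copy m2's parameters into m1
m1[1].weight == m2[1].weight     # true after loading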
