diff --git a/src/deprecations.jl b/src/deprecations.jl
index ab8f711f89..8b27256170 100644
--- a/src/deprecations.jl
+++ b/src/deprecations.jl
@@ -83,16 +83,48 @@ function params!(p::Zygote.Params, x, seen = IdSet())
   end
 end
 
+"""
+    params(model)
+
+Returns a `Zygote.Params` object containing all parameter arrays from the model.
+This is deprecated!
+
+This function was the cornerstone of how Flux used Zygote's implicit mode gradients,
+but since Flux 0.13 we use explicit mode `gradient(m -> loss(m, x, y), model)` instead.
+
+To collect all the parameter arrays for other purposes, use `Flux.trainables(model)`.
+"""
 function params(m...)
-  Base.depwarn("""
-    Flux.params(m...) is deprecated. Use `Flux.trainable(model)` for parameters' collection
-    and the explicit `gradient(m -> loss(m, x, y), model)` for gradient computation.
-    """, :params)
+  @warn """`Flux.params(m...)` is deprecated. Use `Flux.trainables(model)` for parameter collection,
+    and the explicit `gradient(m -> loss(m, x, y), model)` for gradient computation.""" maxlog=1
   ps = Params()
   params!(ps, m)
   return ps
 end
+
+"""
+    @functor MyLayer
+
+Flux used to require the use of `Functors.@functor` to mark any new layer-like struct.
+This allowed it to explore inside the struct, and update any trainable parameters within.
+Flux@0.15 removes this requirement. This is because Functors@0.5 changed its behaviour
+to be opt-out instead of opt-in. Arbitrary structs will now be explored without special marking.
+Hence calling `@functor` is no longer required.
+
+Calling `Flux.@layer MyLayer` is, however, still recommended. This adds various convenience methods
+for your layer type, such as pretty printing, and use with Adapt.jl.
+"""
+macro functor(ex)
+  @warn """The use of `Flux.@functor` is deprecated.
+    Most likely, you should write `Flux.@layer MyLayer`, which will add various convenience methods for your type,
+    such as pretty-printing, and use with Adapt.jl.
+    However, this is not required. Flux.jl v0.15 uses Functors.jl v0.5, which makes exploration of most nested `struct`s
+    opt-out instead of opt-in... so Flux will automatically see inside any custom struct definitions.
+    """ maxlog=1
+  _layer_macro(ex)
+end
+
 # Allows caching of the parameters when params is called within gradient() to fix #2040.
 # @non_differentiable params(m...)  # https://github.com/FluxML/Flux.jl/pull/2054
 # That speeds up implicit use, and silently breaks explicit use.
@@ -101,6 +133,14 @@ Zygote._pullback(::Zygote.Context{true}, ::typeof(params), m...) = params(m), _
 
 include("optimise/Optimise.jl") ## deprecated Module
 
+function Optimiser(rules...)
+  @warn "`Flux.Optimiser(...)` has been removed, please call `OptimiserChain(...)`, exported by Flux from Optimisers.jl" maxlog=1
+  OptimiserChain(rules...)
+end
+function ClipValue(val)
+  @warn "`Flux.ClipValue(...)` has been removed, please call `ClipGrad(...)`, exported by Flux from Optimisers.jl" maxlog=1
+  ClipGrad(val)
+end
 
 # TODO this friendly error should go in Optimisers.jl.
 # remove after https://github.com/FluxML/Optimisers.jl/pull/181
@@ -119,9 +159,6 @@ end
 ### v0.16 deprecations
 ####################
 
-# Enable these when 0.16 is released, and delete const ClipGrad = Optimise.ClipValue etc:
-# Base.@deprecate_binding Optimiser OptimiserChain
-# Base.@deprecate_binding ClipValue ClipGrad
 
 # train!(loss::Function, ps::Zygote.Params, data, opt) = throw(ArgumentError(
 #   """On Flux 0.16, `train!` no longer accepts implicit `Zygote.Params`.
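For reference, a minimal migration sketch of the replacements these deprecation messages point to. The model, loss, and layer names below are hypothetical and not part of the patch; it only shows the explicit-gradient, `trainables`, and `@layer` usage that the new docstrings describe.

```julia
using Flux

model = Chain(Dense(2 => 3, relu), Dense(3 => 1))
x, y = rand(Float32, 2, 8), rand(Float32, 1, 8)

# Old implicit style (now warns via the deprecated `params`):
#   ps = Flux.params(model)
#   gs = gradient(() -> Flux.mse(model(x), y), ps)

# New explicit style: differentiate with respect to the model itself.
grads = Flux.gradient(m -> Flux.mse(m(x), y), model)

# To collect all trainable parameter arrays for other purposes:
arrays = Flux.trainables(model)

# Custom structs no longer need `Flux.@functor`; `Flux.@layer` is recommended instead.
struct Affine{W,B}
  weight::W
  bias::B
end
Affine(in::Int, out::Int) = Affine(randn(Float32, out, in), zeros(Float32, out))
(a::Affine)(x) = a.weight * x .+ a.bias
Flux.@layer Affine   # adds pretty-printing etc.; replaces the old `Flux.@functor Affine`
```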
diff --git a/src/optimise/train.jl b/src/optimise/train.jl
index 52488107ef..d38cbe408d 100644
--- a/src/optimise/train.jl
+++ b/src/optimise/train.jl
@@ -5,6 +5,10 @@ function update!(opt::AbstractOptimiser, x::AbstractArray, x̄)
 end
 
 function update!(opt::AbstractOptimiser, xs::Params, gs)
+  @warn """The method `Flux.update!(optimiser, ps::Params, grads)` is deprecated,
+    as part of Flux's move away from Zygote's implicit mode.
+    Please use explicit-style `update!(opt_state, model, grad)` instead,
+    where `grad = Flux.gradient(m -> loss(m, x, y), model)` and `opt_state = Flux.setup(rule, model)`.""" maxlog=1
   for x in xs
     isnothing(gs[x]) && continue
     update!(opt, x, gs[x])
@@ -21,6 +25,10 @@ batchmemaybe(x) = tuple(x)
 batchmemaybe(x::Tuple) = x
 
 function train!(loss, ps::Params, data, opt::AbstractOptimiser; cb = () -> ())
+  @warn """The method `Flux.train!(loss2, ps::Params, data, optimiser)` is deprecated,
+    as part of Flux's move away from Zygote's implicit parameters.
+    Please use explicit-style `train!(loss, model, data, opt_state)` instead,
+    where `loss(m, xy...)` accepts the model, and `opt_state = Flux.setup(rule, model)`.""" maxlog=1
   cb = runall(cb)
   itrsz = Base.IteratorSize(typeof(data))
   n = (itrsz == Base.HasLength()) || (itrsz == Base.HasShape{1}()) ? length(data) : 0
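A corresponding sketch for the `train!`/`update!` deprecations above, again with hypothetical model and data names that are not part of the patch. It shows the explicit-style loop the warnings recommend: the loss takes the model as its first argument, and the optimiser state comes from `Flux.setup`.

```julia
using Flux

model = Dense(2 => 1)
data = [(rand(Float32, 2, 8), rand(Float32, 1, 8)) for _ in 1:10]

# Old implicit style (now warns):
#   opt = Descent(0.1)
#   Flux.train!((x, y) -> Flux.mse(model(x), y), Flux.params(model), data, opt)

# New explicit style:
opt_state = Flux.setup(Descent(0.1), model)
Flux.train!((m, x, y) -> Flux.mse(m(x), y), model, data, opt_state)

# A single manual step with `update!`:
x1, y1 = first(data)
grad = Flux.gradient(m -> Flux.mse(m(x1), y1), model)[1]
Flux.update!(opt_state, model, grad)
```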