diff --git a/src/Flux.jl b/src/Flux.jl
index 3b4b4b9237..afa47f6fc0 100644
--- a/src/Flux.jl
+++ b/src/Flux.jl
@@ -24,7 +24,7 @@ export Chain, Dense, Embedding, Maxout, SkipConnection, Parallel, PairwiseFusion
        AdaptiveMaxPool, AdaptiveMeanPool, GlobalMaxPool, GlobalMeanPool, MaxPool, MeanPool,
        Dropout, AlphaDropout, LayerNorm, BatchNorm, InstanceNorm, GroupNorm,
        Upsample, PixelShuffle,
-       fmap, cpu, gpu, f32, f64,
+       fmap, cpu, gpu, f32, f64, rand32, randn32, zeros32, ones32,
        testmode!, trainmode!
 
 include("optimise/Optimise.jl")
diff --git a/src/train.jl b/src/train.jl
index a18b1db59e..90bec2534b 100644
--- a/src/train.jl
+++ b/src/train.jl
@@ -27,7 +27,7 @@ It differs from `Optimisers.setup` in that it:
 # Example
 
 ```jldoctest
-julia> model = Dense(2=>1, leakyrelu; init=Flux.ones32);
+julia> model = Dense(2=>1, leakyrelu; init=ones32);
 
 julia> opt_state = Flux.setup(Momentum(0.1), model)  # this encodes the optimiser and its state
 (weight = Leaf(Momentum{Float64}(0.1, 0.9), Float32[0.0 0.0]), bias = Leaf(Momentum{Float64}(0.1, 0.9), Float32[0.0]), σ = ())
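
Since the first hunk newly exports the `Float32` convenience initialisers (`rand32`, `randn32`, `zeros32`, `ones32`), here is a minimal usage sketch for reviewers. It assumes only the exports added above and the existing behaviour of these helpers as `Float32` counterparts of `rand`/`randn`/`zeros`/`ones`; the variable names are illustrative:

```julia
using Flux

# With the new exports, no Flux. qualification is needed.
x = rand32(2, 3)    # 2×3 Matrix{Float32}, uniform in [0, 1)
n = randn32(4, 2)   # 4×2 Matrix{Float32}, standard normal
z = zeros32(4)      # 4-element Vector{Float32} of zeros

# They also serve as `init` keywords, as in the updated doctest:
model = Dense(2 => 1, leakyrelu; init = ones32)
```

The `src/train.jl` hunk makes the doctest consistent with this: `init=Flux.ones32` becomes plain `init=ones32`, exercising the new export.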