
Commit 31dccd1

some cleanup (#2503)
* cleanup
* fix doctest
1 parent: 0360155 · commit: 31dccd1

File tree: 4 files changed, +18 -23 lines

* docs/src/tutorials/logistic_regression.md
* test/runtests.jl
* test/train.jl
* test/utils.jl


docs/src/tutorials/logistic_regression.md

Lines changed: 2 additions & 2 deletions

@@ -140,8 +140,8 @@ Note, all the `flux_*` variables in this tutorial would be general, that is, the
 julia> flux_model = Chain(Dense(4 => 3), softmax)
 Chain(
   Dense(4 => 3),                        # 15 parameters
-  NNlib.softmax,
-)
+  softmax,
+)
 ```
 
 A [`Dense(4 => 3)`](@ref Dense) layer denotes a layer with four inputs (four features in every data point) and three outputs (three classes or labels). This layer is the same as the mathematical model defined by us above. Under the hood, Flux too calculates the output using the same expression, but we don't have to initialize the parameters ourselves this time, instead Flux does it for us.
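As background for the context line above: `Dense(4 => 3)` stores a 3×4 weight matrix and a length-3 bias, and `Chain(Dense(4 => 3), softmax)` computes `softmax(W * x .+ b)`. A minimal sketch of this (the input `x_sample` is illustrative, not taken from the tutorial):

```julia
using Flux

flux_model = Chain(Dense(4 => 3), softmax)

# Illustrative input: one data point with 4 features.
x_sample = rand(Float32, 4)

# The chain maps the 4 features to a length-3 probability vector.
y = flux_model(x_sample)
sum(y) ≈ 1   # true: softmax output sums to one

# The same result written out by hand, using the layer's own parameters.
W, b = flux_model[1].weight, flux_model[1].bias
y ≈ softmax(W * x_sample .+ b)   # true
```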

test/runtests.jl

Lines changed: 2 additions & 2 deletions

@@ -11,9 +11,9 @@ using Functors: fmapstructure_with_path
 
 ## Uncomment below to change the default test settings
 # ENV["FLUX_TEST_AMDGPU"] = "true"
-ENV["FLUX_TEST_CUDA"] = "true"
+# ENV["FLUX_TEST_CUDA"] = "true"
 # ENV["FLUX_TEST_METAL"] = "true"
-ENV["FLUX_TEST_CPU"] = "false"
+# ENV["FLUX_TEST_CPU"] = "false"
 # ENV["FLUX_TEST_DISTRIBUTED_MPI"] = "true"
 # ENV["FLUX_TEST_DISTRIBUTED_NCCL"] = "true"
 ENV["FLUX_TEST_ENZYME"] = "false" # We temporarily disable Enzyme tests since they are failing

test/train.jl

Lines changed: 9 additions & 12 deletions

@@ -155,18 +155,15 @@ for (trainfn!, name) in ((Flux.train!, "Zygote"), (train_enzyme!, "Enzyme"))
   pen2(x::AbstractArray) = sum(abs2, x)/2
   opt = Flux.setup(Adam(0.1), model)
 
-  @test begin
-    trainfn!(model, data, opt) do m, x, y
-      err = Flux.mse(m(x), y)
-      l2 = sum(pen2, Flux.params(m))
-      err + 0.33 * l2
-    end
-
-    diff2 = model.weight .- init_weight
-    @test diff1 ≈ diff2
-
-    true
-  end broken = VERSION >= v"1.11"
+  trainfn!(model, data, opt) do m, x, y
+    err = Flux.mse(m(x), y)
+    l2 = sum(pen2, Flux.params(m))
+    err + 0.33 * l2
+  end
+
+  diff2 = model.weight .- init_weight
+  @test diff1 ≈ diff2
+
 end
 
 # Take 3: using WeightDecay instead. Need the /2 above, to match exactly.
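On the "Need the /2 above" comment: the explicit penalty `λ * pen2(w)` with `pen2(w) = sum(abs2, w)/2` contributes `λ .* w` to the gradient, which is exactly the term `WeightDecay(λ)` adds (as in Optimisers.jl), hence the `/2`. A quick standalone check, assuming Flux's Zygote-based `gradient` as in the "Zygote" branch of this test; the weights `w` are illustrative:

```julia
using Flux   # for gradient (Zygote-based)

pen2(x::AbstractArray) = sum(abs2, x)/2   # the penalty from the test above
λ = 0.33
w = [1.0, -2.0, 3.0]                      # illustrative weights, not from the test

# Gradient of the explicit penalty is λ .* w, matching what WeightDecay(λ) adds.
g = gradient(x -> λ * pen2(x), w)[1]
g ≈ λ .* w   # true
```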

test/utils.jl

Lines changed: 5 additions & 7 deletions

@@ -273,13 +273,11 @@ end
 @testset "params gradient" begin
   m = (x=[1,2.0], y=[3.0]);
 
-  @test begin
-    # Explicit -- was broken by #2054 / then fixed / now broken again on julia v1.11
-    gnew = gradient(m -> (sum(norm, Flux.params(m))), m)[1]
-    @test gnew.x ≈ [0.4472135954999579, 0.8944271909999159]
-    @test gnew.y ≈ [1.0]
-    true
-  end broken = VERSION >= v"1.11"
+  # Explicit -- was broken by #2054 / then fixed / now broken again on julia v1.11
+  gnew = gradient(m -> (sum(norm, Flux.params(m))), m)[1]
+  @test gnew.x ≈ [0.4472135954999579, 0.8944271909999159]
+  @test gnew.y ≈ [1.0]
+
 
   # Implicit
   gold = gradient(() -> (sum(norm, Flux.params(m))), Flux.params(m))
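For reference, the expected values in this test follow from the identity d‖x‖/dx = x / ‖x‖, so each gradient is just the normalised array. A quick check using only LinearAlgebra, independent of Flux:

```julia
using LinearAlgebra   # for norm

[1, 2.0] ./ norm([1, 2.0])   # [0.4472135954999579, 0.8944271909999159], the expected gnew.x
[3.0] ./ norm([3.0])         # [1.0], the expected gnew.y
```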
