|
53 | 53 | # Test NaN / Inf early stop
|
54 | 54 | # Test that loss is returned
|
55 | 55 | end
|
56 |
| - |
57 |
import Tracker
Flux.@train_autodiff Tracker

@testset "Explicit Flux.train! with Tracker" begin
    Random.seed!(84)
    # Both matrices are drawn here, outside the inner @testset: Test re-seeds
    # the global RNG for each testset, so drawing `w2` inside would make it
    # exactly reproduce `w`.
    w = randn(10, 10)
    w2 = randn(10, 10)

    @testset for rule in [Descent(0.1), Adam(), AdamW()]
        # Loss: distance between a fixed linear map `w` and the model's
        # trainable affine map. The istraining() guard verifies that this
        # run really goes through the Tracker backend.
        function objective(m, x)
            if Flux.istraining()
                error("This test is not in fact using Tracker!")
            end
            return Flux.Losses.mse(w * x, m.weight * x .+ m.bias)
        end

        model = (weight = copy(w2), bias = zeros(10), ignore = nothing)
        @test objective(model, rand(10, 10)) > 1

        state = Flux.setup(rule, model)
        Flux.train!(objective, model, ((rand(10),) for _ in 1:10^5), state)
        @test objective(model, rand(10, 10)) < 0.01
    end

    # Test 3-arg `Flux.train!` method:
    @testset for rule in [Descent(0.1), Adam()]
        # Zero-data variant: each call draws its own input sample.
        function objective(m)
            x = rand(10)
            if Flux.istraining()
                error("This test is not in fact using Tracker!")
            end
            return Flux.Losses.mse(w * x, m.weight * x .+ m.bias)
        end

        model = (weight = copy(w2), bias = zeros(10), ignore = nothing)
        @test objective(model) > 1

        state = Flux.setup(rule, model)
        for _ in 1:10^5
            Flux.train!(objective, model, state)
        end
        @test objective(model) < 0.01
    end
end
95 |
| - |
96 |
import Yota
Flux.@train_autodiff Yota

@testset "Explicit Flux.train! with Yota" begin
    Random.seed!(84)
    # Both matrices are drawn here, outside the inner @testset: Test re-seeds
    # the global RNG for each testset, so drawing `w2` inside would make it
    # exactly reproduce `w`.
    w = randn(10, 10)
    w2 = randn(10, 10)

    @testset for rule in [Descent(0.1), Adam(), AdamW()]
        # Loss: distance between a fixed linear map `w` and the model's
        # trainable affine map.
        objective(m, x) = Flux.Losses.mse(w * x, m.weight * x .+ m.bias)

        model = (weight = copy(w2), bias = zeros(10), ignore = nothing)
        @test objective(model, rand(10, 10)) > 1

        state = Flux.setup(rule, model)
        Flux.train!(objective, model, ((rand(10),) for _ in 1:10^5), state)
        @test objective(model, rand(10, 10)) < 0.01
    end

    # Test 3-arg `Flux.train!` method:
    @testset for rule in [Descent(0.1), Adam()]
        # Zero-data variant: each call draws its own input sample.
        function objective(m)
            x = rand(10)
            return Flux.Losses.mse(w * x, m.weight * x .+ m.bias)
        end

        model = (weight = copy(w2), bias = zeros(10), ignore = nothing)
        @test objective(model) > 1

        state = Flux.setup(rule, model)
        for _ in 1:10^5
            Flux.train!(objective, model, state)
        end
        @test objective(model) < 0.01
    end
end
130 |
| - |
131 |
# Switch the training AD backend back to Zygote, so that test files run
# after this one are not affected by the Tracker/Yota selections above.
Flux.@train_autodiff Zygote
0 commit comments