From 29ab7f8d20f53fc06faa7ea0b8f8cbc60476028b Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Sun, 10 Dec 2023 22:44:09 -0800 Subject: [PATCH 01/21] Switch to (Re)TestItems --- .buildkite/pipeline.yml | 2 +- .github/workflows/CI.yml | 53 ++++++------ test/Project.toml | 13 ++- test/{convnets.jl => convnet_tests.jl} | 109 ++++++++++--------------- test/mixer_tests.jl | 26 ++++++ test/mixers.jl | 8 -- test/model_tests.jl | 78 ++++++++++++++++++ test/runtests.jl | 89 ++------------------ test/vit_tests.jl | 8 ++ test/vits.jl | 8 -- 10 files changed, 203 insertions(+), 191 deletions(-) rename test/{convnets.jl => convnet_tests.jl} (82%) create mode 100644 test/mixer_tests.jl delete mode 100644 test/mixers.jl create mode 100644 test/model_tests.jl create mode 100644 test/vit_tests.jl delete mode 100644 test/vits.jl diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index d24cb7637..54024e34d 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -13,7 +13,7 @@ steps: queue: "juliagpu" cuda: "*" env: - GROUP: "CUDA" + GROUP: "CUDA" # TODO there are zero tests under this group if: build.message !~ /\[skip tests\]/ timeout_in_minutes: 180 matrix: diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 53d936c62..a5ac46ac2 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -24,6 +24,8 @@ jobs: test: name: ${{ matrix.suite }} - Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} runs-on: ${{ matrix.os }} + env: + GROUP: ${{ matrix.suite }} strategy: fail-fast: false matrix: @@ -37,18 +39,19 @@ jobs: arch: - x64 suite: - - '["AlexNet", "VGG"]' - - '["GoogLeNet", "SqueezeNet", "MobileNets"]' - - '"EfficientNet"' - - 'r"/*/ResNet*"' - - 'r"/*/SEResNet*"' - - '[r"Res2Net", r"Res2NeXt"]' - - '"Inception"' - - '"DenseNet"' - - '"UNet"' - - '["ConvNeXt", "ConvMixer"]' - - 'r"Mixers"' - - 'r"ViTs"' + - 'AlexNet|VGG' + - 'GoogLeNet|SqueezeNet|MobileNet|MNASNet' + - 'EfficientNet' + - 'ResNet|WideResNet' + - 'ResNeXt' # split off from ResNet to reduce overall runtime + - 'SEResNet|SEResNeXt' + - 'Res2Net|Res2NeXt' + - 'Inception' + - 'DenseNet' + - 'UNet' + - 'ConvNeXt|ConvMixer' + - 'Mixers' + - 'ViTs' steps: - uses: actions/checkout@v3 - uses: julia-actions/setup-julia@v1 @@ -57,20 +60,14 @@ jobs: arch: ${{ matrix.arch }} - uses: julia-actions/cache@v1 - uses: julia-actions/julia-buildpkg@v1 - - name: "Setup environment" - run: | - julia --project=./test -e 'using Pkg; Pkg.develop(path = ".")' - - name: "Run tests + coverage" - if: matrix.version == '1' && matrix.os == 'ubuntu-latest' - run: | - julia --code-coverage=user --color=yes --depwarn=yes --project=./test -e 'include("test/retest.jl"); retest(${{ matrix.suite }})' - shell: bash - - name: "Run tests only" - if: ${{ !(matrix.version == '1' && matrix.os == 'ubuntu-latest') }} - run: | - julia --color=yes --depwarn=yes --project=./test -e 'include("test/retest.jl"); retest(${{ matrix.suite }})' - continue-on-error: ${{ matrix.version == 'nightly' }} - shell: bash + + - name: Run tests + uses: julia-actions/julia-runtest@v1 + continue-on-error: ${{ !(matrix.version == '1' && matrix.os == 'ubuntu-latest') && matrix.version == 'nightly' }} + with: + coverage: ${{ matrix.version == '1' && matrix.os == 'ubuntu-latest' }} + # run: | + # julia --color=yes --depwarn=yes --project=./test -e 'include("test/retest.jl"); retest(${{ matrix.suite }})' - uses: actions/upload-artifact@v3 with: name: coverage-${{ hashFiles('**/*.cov') }} @@ -78,7 +75,7 @@ jobs: if: matrix.version == '1' && matrix.os == 'ubuntu-latest' coverage: - name: "Coverage" + name: Coverage runs-on: ubuntu-latest needs: test steps: @@ -94,7 +91,7 @@ jobs: cp -r coverage-*/* . rm -rf coverage-* - uses: julia-actions/julia-processcoverage@v1 - - uses: codecov/codecov-action@v2 + - uses: codecov/codecov-action@v3 with: file: lcov.info diff --git a/test/Project.toml b/test/Project.toml index b121720e7..da890733e 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -2,5 +2,16 @@ CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c" Images = "916415d5-f1e6-5110-898d-aaa5f9f070e0" -ReTest = "e0db7c4e-2690-44b9-bad6-7687da720f89" +ReTestItems = "817f1d60-ba6b-4fd5-9520-3cf149f6a823" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +TestItems = "1c621080-faea-4a02-84b6-bbd5e436b8fe" +cuDNN = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd" + +[compat] +CUDA = "4, 5" +Flux = "0.13.16, 0.14" +Images = "0.26" +Test = "1" +TestItems = "0.1" +ReTestItems = "1" +cuDNN = "1" diff --git a/test/convnets.jl b/test/convnet_tests.jl similarity index 82% rename from test/convnets.jl rename to test/convnet_tests.jl index 774a44b25..6ea61d7ed 100644 --- a/test/convnets.jl +++ b/test/convnet_tests.jl @@ -1,4 +1,4 @@ -@testset "AlexNet" begin +@testitem "AlexNet" setup=[TestModels] begin model = AlexNet() @test size(model(x_256)) == (1000, 1) @test_throws ArgumentError AlexNet(pretrain = true) @@ -6,7 +6,7 @@ _gc() end -@testset "VGG" begin +@testitem "VGG" setup=[TestModels] begin @testset "VGG($sz, batchnorm=$bn)" for sz in [11, 13, 16, 19], bn in [true, false] m = VGG(sz, batchnorm = bn) @test size(m(x_224)) == (1000, 1) @@ -20,7 +20,7 @@ end end end -@testset "ResNet" begin +@testitem "ResNet" setup=[TestModels] begin # Tests for pretrained ResNets @testset "ResNet($sz)" for sz in [18, 34, 50, 101, 152] m = ResNet(sz) @@ -58,7 +58,7 @@ end end -@testset "WideResNet" begin +@testitem "WideResNet" setup=[TestModels] begin @testset "WideResNet($sz)" for sz in [50, 101] m = WideResNet(sz) @test size(m(x_224)) == (1000, 1) @@ -72,7 +72,7 @@ end end end -@testset "ResNeXt" begin +@testitem "ResNeXt" setup=[TestModels] begin @testset for depth in [50, 101, 152] @testset for cardinality in [32, 64] @testset for base_width in [4, 8] @@ -90,7 +90,7 @@ end end end -@testset "SEResNet" begin +@testitem "SEResNet" setup=[TestModels] begin @testset for depth in [18, 34, 50, 101, 152] m = SEResNet(depth) @test size(m(x_224)) == (1000, 1) @@ -104,7 +104,7 @@ end end end -@testset "SEResNeXt" begin +@testitem "SEResNeXt" setup=[TestModels] begin @testset for depth in [50, 101, 152] @testset for cardinality in [32, 64] @testset for base_width in [4, 8] @@ -122,7 +122,7 @@ end end end -@testset "Res2Net" begin +@testitem "Res2Net" setup=[TestModels] begin @testset for (base_width, scale) in [(26, 4), (48, 2), (14, 8), (26, 6), (26, 8)] m = Res2Net(50; base_width, scale) @test size(m(x_224)) == (1000, 1) @@ -147,7 +147,7 @@ end end end -@testset "Res2NeXt" begin +@testitem "Res2NeXt" setup=[TestModels] begin @testset for depth in [50, 101] m = Res2NeXt(depth) @test size(m(x_224)) == (1000, 1) @@ -161,7 +161,7 @@ end end end -@testset "EfficientNet" begin +@testitem "EfficientNet" setup=[TestModels] begin @testset "EfficientNet($config)" for config in [:b0, :b1, :b2, :b3, :b4, :b5,] #:b6, :b7, :b8] # preferred image resolution scaling r = Metalhead.EFFICIENTNET_GLOBAL_CONFIGS[config][1] @@ -178,7 +178,7 @@ end end end -@testset "EfficientNetv2" begin +@testitem "EfficientNetv2" setup=[TestModels] begin @testset for config in [:small, :medium, :large] # :xlarge] m = EfficientNetv2(config) @test size(m(x_224)) == (1000, 1) @@ -192,7 +192,7 @@ end end end -@testset "GoogLeNet" begin +@testitem "GoogLeNet" setup=[TestModels] begin @testset for bn in [true, false] m = GoogLeNet(batchnorm = bn) @test size(m(x_224)) == (1000, 1) @@ -206,55 +206,22 @@ end end end -@testset "Inception" begin +@testitem "Inception" setup=[TestModels] begin x_299 = rand(Float32, 299, 299, 3, 2) - @testset "Inceptionv3" begin - m = Inceptionv3() + @testset "$Model" for Model in [Inceptionv3, Inceptionv4, InceptionResNetv2, Xception] + m = Model() @test size(m(x_299)) == (1000, 2) - if Inceptionv3 in PRETRAINED_MODELS - @test acctest(Inceptionv3(pretrain = true)) + if Model in PRETRAINED_MODELS + @test acctest(Model(pretrain = true)) else - @test_throws ArgumentError Inceptionv3(pretrain = true) - end - @test gradtest(m, x_299) - end - _gc() - @testset "Inceptionv4" begin - m = Inceptionv4() - @test size(m(x_299)) == (1000, 2) - if Inceptionv4 in PRETRAINED_MODELS - @test acctest(Inceptionv4(pretrain = true)) - else - @test_throws ArgumentError Inceptionv4(pretrain = true) - end - @test gradtest(m, x_299) - end - _gc() - @testset "InceptionResNetv2" begin - m = InceptionResNetv2() - @test size(m(x_299)) == (1000, 2) - if InceptionResNetv2 in PRETRAINED_MODELS - @test acctest(InceptionResNetv2(pretrain = true)) - else - @test_throws ArgumentError InceptionResNetv2(pretrain = true) - end - @test gradtest(m, x_299) - end - _gc() - @testset "Xception" begin - m = Xception() - @test size(m(x_299)) == (1000, 2) - if Xception in PRETRAINED_MODELS - @test acctest(Xception(pretrain = true)) - else - @test_throws ArgumentError Xception(pretrain = true) + @test_throws ArgumentError Model(pretrain = true) end @test gradtest(m, x_299) + _gc() end - _gc() end -@testset "SqueezeNet" begin +@testitem "SqueezeNet" setup=[TestModels] begin m = SqueezeNet() @test size(m(x_224)) == (1000, 1) if SqueezeNet in PRETRAINED_MODELS @@ -266,7 +233,7 @@ end _gc() end -@testset "DenseNet" begin +@testitem "DenseNet" setup=[TestModels] begin @testset for sz in [121, 161, 169, 201] m = DenseNet(sz) @test size(m(x_224)) == (1000, 1) @@ -280,8 +247,13 @@ end end end -@testset "MobileNets (width = $width_mult)" for width_mult in [0.5, 0.75, 1, 1.3] - @testset "MobileNetv1" begin +@testsetup module TestMobileNets + export WIDTH_MULTS + const WIDTH_MULTS = [0.5, 0.75, 1.0, 1.3] +end + +@testitem "MobileNetsV1" setup=[TestModels, TestMobileNets] begin + @testset for width_mult in WIDTH_MULTS m = MobileNetv1(width_mult) @test size(m(x_224)) == (1000, 1) if (MobileNetv1, width_mult) in PRETRAINED_MODELS @@ -290,9 +262,12 @@ end @test_throws ArgumentError MobileNetv1(pretrain = true) end @test gradtest(m, x_224) + _gc() end - _gc() - @testset "MobileNetv2" begin +end + +@testitem "MobileNetv2" setup=[TestModels, TestMobileNets] begin + @testset for width_mult in WIDTH_MULTS m = MobileNetv2(width_mult) @test size(m(x_224)) == (1000, 1) if (MobileNetv2, width_mult) in PRETRAINED_MODELS @@ -302,8 +277,11 @@ end end @test gradtest(m, x_224) end - _gc() - @testset "MobileNetv3" verbose = true begin +end + + +@testitem "MobileNetv3" setup=[TestModels, TestMobileNets] begin + @testset for width_mult in WIDTH_MULTS @testset for config in [:small, :large] m = MobileNetv3(config; width_mult) @test size(m(x_224)) == (1000, 1) @@ -316,7 +294,10 @@ end _gc() end end - @testset "MNASNet" verbose = true begin +end + +@testitem "MNASNet" setup=[TestModels, TestMobileNets] begin + @testset for width in WIDTH_MULTS @testset for config in [:A1, :B1] m = MNASNet(config; width_mult) @test size(m(x_224)) == (1000, 1) @@ -331,7 +312,7 @@ end end end -@testset "ConvNeXt" verbose = true begin +@testitem "ConvNeXt" setup=[TestModels] begin @testset for config in [:small, :base, :large, :tiny, :xlarge] m = ConvNeXt(config) @test size(m(x_224)) == (1000, 1) @@ -340,7 +321,7 @@ end end end -@testset "ConvMixer" verbose = true begin +@testitem "ConvMixer" setup=[TestModels] begin @testset for config in [:small, :base, :large] m = ConvMixer(config) @test size(m(x_224)) == (1000, 1) @@ -349,7 +330,7 @@ end end end -@testset "UNet" begin +@testitem "UNet" setup=[TestModels] begin encoder = Metalhead.backbone(ResNet(18)) model = UNet((256, 256), 3, 10, encoder) @test size(model(x_256)) == (256, 256, 10, 1) diff --git a/test/mixer_tests.jl b/test/mixer_tests.jl new file mode 100644 index 000000000..dd52d5aa9 --- /dev/null +++ b/test/mixer_tests.jl @@ -0,0 +1,26 @@ +@testitem "MLP-Mixer" setup=[TestModels] begin + @testset for config in [:small, :base, :large] + m, x = MLPMixer(config), x_224 + @test size(m(x)) == (1000, 1) + @test gradtest(m, x) + _gc() + end +end + +@testitem "ResMLP" setup=[TestModels] begin + @testset for config in [:small, :base, :large] + m, x = ResMLP(config), x_224 + @test size(m(x)) == (1000, 1) + @test gradtest(m, x) + _gc() + end +end + +@testitem "gMLP" setup=[TestModels] begin + @testset for config in [:small, :base, :large] + m, x = gMLP(config), x_224 + @test size(m(x)) == (1000, 1) + @test gradtest(m, x) + _gc() + end +end \ No newline at end of file diff --git a/test/mixers.jl b/test/mixers.jl deleted file mode 100644 index 2a5d9af70..000000000 --- a/test/mixers.jl +++ /dev/null @@ -1,8 +0,0 @@ -@testset for model in [MLPMixer, ResMLP, gMLP] - @testset for config in [:small, :base, :large] - m = model(config) - @test size(m(x_224)) == (1000, 1) - @test gradtest(m, x_224) - _gc() - end -end diff --git a/test/model_tests.jl b/test/model_tests.jl new file mode 100644 index 000000000..886909559 --- /dev/null +++ b/test/model_tests.jl @@ -0,0 +1,78 @@ +@testsetup module TestModels +using Metalhead, Images +using Flux: Zygote + +export PRETRAINED_MODELS, + _gc, + gradtest, + normalize_imagenet, + TEST_PATH, + TEST_IMG, + TEST_X, + TEST_LBLS, + acctest, + x_224, + x_256 + +const PRETRAINED_MODELS = [ + # (DenseNet, 121), + # (DenseNet, 161), + # (DenseNet, 169), + # (DenseNet, 201), + (ResNet, 18), + (ResNet, 34), + (ResNet, 50), + (ResNet, 101), + (ResNet, 152), + (ResNeXt, 50, 32, 4), + (ResNeXt, 101, 64, 4), + (ResNeXt, 101, 32, 8), + SqueezeNet, + (WideResNet, 50), + (WideResNet, 101), + (ViT, :base, (16, 16)), + (ViT, :base, (32, 32)), + (ViT, :large, (16, 16)), + (ViT, :large, (32, 32)), + (VGG, 11, false), + (VGG, 13, false), + (VGG, 16, false), + (VGG, 19, false), +] + +function _gc() + GC.safepoint() + return GC.gc(true) +end + +function gradtest(model, input) + y, pb = Zygote.pullback(model, input) + pb(ones(Float32, size(y))) + # if we make it to here with no error, success! + return true +end + +function normalize_imagenet(data) + cmean = reshape(Float32[0.485, 0.456, 0.406], (1, 1, 3, 1)) + cstd = reshape(Float32[0.229, 0.224, 0.225], (1, 1, 3, 1)) + return (data .- cmean) ./ cstd +end + +# test image +const TEST_PATH = download("https://cdn.pixabay.com/photo/2015/05/07/11/02/guitar-756326_960_720.jpg") +const TEST_IMG = imresize(Images.load(TEST_PATH), (224, 224)) +# CHW -> WHC +const TEST_X = permutedims(convert(Array{Float32}, channelview(TEST_IMG)), (3, 2, 1)) |> normalize_imagenet + +# ImageNet labels +const TEST_LBLS = readlines(download("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt")) + +function acctest(model) + ypred = model(TEST_X) |> vec + top5 = TEST_LBLS[sortperm(ypred; rev = true)] + return "acoustic guitar" in top5 +end + +const x_224 = rand(Float32, 224, 224, 3, 1) +const x_256 = rand(Float32, 256, 256, 3, 1) +end \ No newline at end of file diff --git a/test/runtests.jl b/test/runtests.jl index 603dac280..890c10152 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,81 +1,8 @@ -using Test, Metalhead -using Flux -using Flux: Zygote -using Images - -const PRETRAINED_MODELS = [ - # (DenseNet, 121), - # (DenseNet, 161), - # (DenseNet, 169), - # (DenseNet, 201), - (ResNet, 18), - (ResNet, 34), - (ResNet, 50), - (ResNet, 101), - (ResNet, 152), - (ResNeXt, 50, 32, 4), - (ResNeXt, 101, 64, 4), - (ResNeXt, 101, 32, 8), - SqueezeNet, - (WideResNet, 50), - (WideResNet, 101), - (ViT, :base, (16, 16)), - (ViT, :base, (32, 32)), - (ViT, :large, (16, 16)), - (ViT, :large, (32, 32)), - (VGG, 11, false), - (VGG, 13, false), - (VGG, 16, false), - (VGG, 19, false), -] - -function _gc() - GC.safepoint() - GC.gc(true) -end - -function gradtest(model, input) - y, pb = Zygote.pullback(() -> model(input), Flux.params(model)) - gs = pb(ones(Float32, size(y))) - # if we make it to here with no error, success! - return true -end - -function normalize_imagenet(data) - cmean = reshape(Float32[0.485, 0.456, 0.406], (1, 1, 3, 1)) - cstd = reshape(Float32[0.229, 0.224, 0.225], (1, 1, 3, 1)) - return (data .- cmean) ./ cstd -end - -# test image -const TEST_PATH = download("https://cdn.pixabay.com/photo/2015/05/07/11/02/guitar-756326_960_720.jpg") -const TEST_IMG = imresize(Images.load(TEST_PATH), (224, 224)) -# CHW -> WHC -const TEST_X = permutedims(convert(Array{Float32}, channelview(TEST_IMG)), (3, 2, 1)) |> normalize_imagenet - -# ImageNet labels -const TEST_LBLS = readlines(download("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt")) - -function acctest(model) - ypred = model(TEST_X) |> vec - top5 = TEST_LBLS[sortperm(ypred; rev = true)] - return "acoustic guitar" in top5 -end - -x_224 = rand(Float32, 224, 224, 3, 1) -x_256 = rand(Float32, 256, 256, 3, 1) - -# CNN tests -@testset verbose = true "ConvNets" begin - include("convnets.jl") -end - -# Mixer tests -@testset verbose = true "Mixers" begin - include("mixers.jl") -end - -# ViT tests -@testset verbose = true "ViTs" begin - include("vits.jl") -end +using TestItems, ReTestItems +using Metalhead: Metalhead + +# TODO account for GPU tests using name or tag filter +# TODO write GPU tests! +test_group = get(ENV, "GROUP", "All") +name_filter = test_group == "All" ? nothing : Regex(test_group) +ReTestItems.runtests(Metalhead; name = name_filter) \ No newline at end of file diff --git a/test/vit_tests.jl b/test/vit_tests.jl new file mode 100644 index 000000000..e38d1fb8a --- /dev/null +++ b/test/vit_tests.jl @@ -0,0 +1,8 @@ +@testitem "ViT" setup=[TestModels] begin + @testset for config in [:tiny, :small, :base, :large, :huge] # :giant, :gigantic] + m = ViT(config) + @test size(m(x_224)) == (1000, 1) + @test gradtest(m, x_224) + _gc() + end +end diff --git a/test/vits.jl b/test/vits.jl deleted file mode 100644 index 76a606bc9..000000000 --- a/test/vits.jl +++ /dev/null @@ -1,8 +0,0 @@ -@testset "ViT" begin - for config in [:tiny, :small, :base, :large, :huge] # :giant, :gigantic] - m = ViT(config) - @test size(m(x_224)) == (1000, 1) - @test gradtest(m, x_224) - _gc() - end -end From 9904b41c0856567d8285bc6d282afc1f6679ac15 Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Mon, 11 Dec 2023 15:42:46 -0800 Subject: [PATCH 02/21] Fixup deps and use TestItemRunner on 1.6 --- test/Project.toml | 6 +++--- test/runtests.jl | 21 +++++++++++++++++---- 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/test/Project.toml b/test/Project.toml index da890733e..9775f9b88 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -4,14 +4,14 @@ Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c" Images = "916415d5-f1e6-5110-898d-aaa5f9f070e0" ReTestItems = "817f1d60-ba6b-4fd5-9520-3cf149f6a823" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" -TestItems = "1c621080-faea-4a02-84b6-bbd5e436b8fe" +TestItemRunner = "f8b46487-2199-4994-9208-9a1283c18c0a" cuDNN = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd" [compat] CUDA = "4, 5" Flux = "0.13.16, 0.14" Images = "0.26" -Test = "1" -TestItems = "0.1" ReTestItems = "1" +Test = "1" +TestItemRunner = "0.2" cuDNN = "1" diff --git a/test/runtests.jl b/test/runtests.jl index 890c10152..b4e3c626e 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,8 +1,21 @@ -using TestItems, ReTestItems using Metalhead: Metalhead # TODO account for GPU tests using name or tag filter # TODO write GPU tests! -test_group = get(ENV, "GROUP", "All") -name_filter = test_group == "All" ? nothing : Regex(test_group) -ReTestItems.runtests(Metalhead; name = name_filter) \ No newline at end of file +const test_group = get(ENV, "GROUP", "All") +const name_filter = test_group == "All" ? nothing : Regex(test_group) + +@static if VERSION >= v"1.7" + using ReTestItems + runtests(Metalhead; name = name_filter) +else + using TestItemRunner + function testitem_filter(ti) + return name_filter === nothing || match(name_filter, ti.name) !== nothing + end +end + +# Not sure why this needs to be split into a separate conditional... +@static if VERSION < v"1.7" + @run_package_tests filter=testitem_filter +end \ No newline at end of file From 61abd239f8557461ce93f2d30b7643450e32aace Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Mon, 11 Dec 2023 15:48:39 -0800 Subject: [PATCH 03/21] Remove incompatible dep on 1.6 CI --- .buildkite/pipeline.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 54024e34d..5471c98e7 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -12,6 +12,12 @@ steps: agents: queue: "juliagpu" cuda: "*" + commands: | + julia --project=test -e ' + if VERSION < v"1.7" + using Pkg + Pkg.rm("ReTestItems") # not compatible with 1.6 + end' env: GROUP: "CUDA" # TODO there are zero tests under this group if: build.message !~ /\[skip tests\]/ From 7b671240b5245f1cf5eebd569e3f147e9db54a2c Mon Sep 17 00:00:00 2001 From: Kyle Daruwalla Date: Mon, 11 Dec 2023 19:47:37 -0500 Subject: [PATCH 04/21] Update convnet_tests.jl --- test/convnet_tests.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/convnet_tests.jl b/test/convnet_tests.jl index 6ea61d7ed..fb7acb9dc 100644 --- a/test/convnet_tests.jl +++ b/test/convnet_tests.jl @@ -297,7 +297,7 @@ end end @testitem "MNASNet" setup=[TestModels, TestMobileNets] begin - @testset for width in WIDTH_MULTS + @testset for width_mult in WIDTH_MULTS @testset for config in [:A1, :B1] m = MNASNet(config; width_mult) @test size(m(x_224)) == (1000, 1) From 2eac7f48a28b8438140ab11da1225fa867a57b1f Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Mon, 11 Dec 2023 20:22:13 -0800 Subject: [PATCH 05/21] Add GPU path, fast path, parallelism and 1.6 --- .buildkite/pipeline.yml | 4 +- .github/workflows/CI.yml | 10 +- test/convnet_tests.jl | 225 ++++++++++++++++++++------------------- test/mixer_tests.jl | 27 ++--- test/model_tests.jl | 21 ++-- test/runtests.jl | 6 +- test/vit_tests.jl | 5 +- 7 files changed, 162 insertions(+), 136 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 5471c98e7..acae78525 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -19,7 +19,9 @@ steps: Pkg.rm("ReTestItems") # not compatible with 1.6 end' env: - GROUP: "CUDA" # TODO there are zero tests under this group + GROUP: "All" + TEST_FAST: true + TEST_WORKERS: 4 if: build.message !~ /\[skip tests\]/ timeout_in_minutes: 180 matrix: diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index a5ac46ac2..33105f524 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -26,11 +26,13 @@ jobs: runs-on: ${{ matrix.os }} env: GROUP: ${{ matrix.suite }} + TEST_FAST: ${{ matrix.version != '1' || matrix.os != 'ubuntu-latest' }} strategy: fail-fast: false matrix: version: - - '1' # Replace this with the minimum Julia version that your package supports. + - '1.6' # Replace this with the minimum Julia version that your package supports. + - '1' - 'nightly' os: - ubuntu-latest @@ -61,13 +63,15 @@ jobs: - uses: julia-actions/cache@v1 - uses: julia-actions/julia-buildpkg@v1 + - name: Setup test env for 1.6 + if: ${{ matrix.version == '1.6' }} + run: | + julia --color=yes --depwarn=yes --project=./test -e 'using Pkg; Pkg.rm("ReTestItems")' - name: Run tests uses: julia-actions/julia-runtest@v1 continue-on-error: ${{ !(matrix.version == '1' && matrix.os == 'ubuntu-latest') && matrix.version == 'nightly' }} with: coverage: ${{ matrix.version == '1' && matrix.os == 'ubuntu-latest' }} - # run: | - # julia --color=yes --depwarn=yes --project=./test -e 'include("test/retest.jl"); retest(${{ matrix.suite }})' - uses: actions/upload-artifact@v3 with: name: coverage-${{ hashFiles('**/*.cov') }} diff --git a/test/convnet_tests.jl b/test/convnet_tests.jl index fb7acb9dc..490691952 100644 --- a/test/convnet_tests.jl +++ b/test/convnet_tests.jl @@ -1,5 +1,5 @@ @testitem "AlexNet" setup=[TestModels] begin - model = AlexNet() + model = AlexNet() |> gpu @test size(model(x_256)) == (1000, 1) @test_throws ArgumentError AlexNet(pretrain = true) @test gradtest(model, x_256) @@ -7,11 +7,12 @@ end @testitem "VGG" setup=[TestModels] begin - @testset "VGG($sz, batchnorm=$bn)" for sz in [11, 13, 16, 19], bn in [true, false] - m = VGG(sz, batchnorm = bn) + sizes = TEST_FAST ? [11] : [11, 13, 16, 19] + @testset "VGG($sz, batchnorm=$bn)" for sz in sizes, bn in [true, false] + m = VGG(sz; batchnorm = bn) |> gpu @test size(m(x_224)) == (1000, 1) if (VGG, sz, bn) in PRETRAINED_MODELS - @test acctest(VGG(sz, batchnorm = bn, pretrain = true)) + @test acctest(VGG(sz; batchnorm = bn, pretrain = true)) else @test_throws ArgumentError VGG(sz, batchnorm = bn, pretrain = true) end @@ -22,11 +23,12 @@ end @testitem "ResNet" setup=[TestModels] begin # Tests for pretrained ResNets - @testset "ResNet($sz)" for sz in [18, 34, 50, 101, 152] - m = ResNet(sz) + sizes = TEST_FAST ? [18] : [18, 34, 50, 101, 152] + @testset "ResNet($sz)" for sz in sizes + m = ResNet(sz) |> gpu @test size(m(x_224)) == (1000, 1) if (ResNet, sz) in PRETRAINED_MODELS - @test acctest(ResNet(sz, pretrain = true)) + @test acctest(ResNet(sz; pretrain = true)) else @test_throws ArgumentError ResNet(sz, pretrain = true) end @@ -38,7 +40,7 @@ end [2, 2, 2, 2], [3, 4, 6, 3], [3, 4, 23, 3], - [3, 8, 36, 3] + [3, 8, 36, 3], ] @testset for layers in layer_list drop_list = [ @@ -57,15 +59,15 @@ end end end - @testitem "WideResNet" setup=[TestModels] begin - @testset "WideResNet($sz)" for sz in [50, 101] - m = WideResNet(sz) + sizes = TEST_FAST ? [50] : [50, 101] + @testset "WideResNet($sz)" for sz in sizes + m = WideResNet(sz) |> gpu @test size(m(x_224)) == (1000, 1) @test gradtest(m, x_224) _gc() if (WideResNet, sz) in PRETRAINED_MODELS - @test acctest(WideResNet(sz, pretrain = true)) + @test acctest(WideResNet(sz; pretrain = true)) else @test_throws ArgumentError WideResNet(sz, pretrain = true) end @@ -73,29 +75,32 @@ end end @testitem "ResNeXt" setup=[TestModels] begin - @testset for depth in [50, 101, 152] - @testset for cardinality in [32, 64] - @testset for base_width in [4, 8] - m = ResNeXt(depth; cardinality, base_width) - @test size(m(x_224)) == (1000, 1) - if (ResNeXt, depth, cardinality, base_width) in PRETRAINED_MODELS - @test acctest(ResNeXt(depth; cardinality, base_width, pretrain = true)) - else - @test_throws ArgumentError ResNeXt(depth; cardinality, base_width, pretrain = true) - end - @test gradtest(m, x_224) - _gc() - end + depths = TEST_FAST ? [50] : [50, 101, 152] + cardinalities = TEST_FAST ? [32] : [32, 64] + base_widths = TEST_FAST ? [4] : [4, 8] + @testset for depth in depths, cardinality in cardinalities, base_width in base_widths + m = ResNeXt(depth; cardinality, base_width) |> gpu + @test size(m(x_224)) == (1000, 1) + if (ResNeXt, depth, cardinality, base_width) in PRETRAINED_MODELS + @test acctest(ResNeXt(depth; cardinality, base_width, pretrain = true)) + else + @test_throws ArgumentError ResNeXt(depth; + cardinality, + base_width, + pretrain = true) end + @test gradtest(m, x_224) + _gc() end end @testitem "SEResNet" setup=[TestModels] begin - @testset for depth in [18, 34, 50, 101, 152] - m = SEResNet(depth) + depths = TEST_FAST ? [18] : [18, 34, 50, 101, 152] + @testset for depth in depths + m = SEResNet(depth) |> gpu @test size(m(x_224)) == (1000, 1) if (SEResNet, depth) in PRETRAINED_MODELS - @test acctest(SEResNet(depth, pretrain = true)) + @test acctest(SEResNet(depth; pretrain = true)) else @test_throws ArgumentError SEResNet(depth, pretrain = true) end @@ -105,26 +110,26 @@ end end @testitem "SEResNeXt" setup=[TestModels] begin - @testset for depth in [50, 101, 152] - @testset for cardinality in [32, 64] - @testset for base_width in [4, 8] - m = SEResNeXt(depth; cardinality, base_width) - @test size(m(x_224)) == (1000, 1) - if (SEResNeXt, depth, cardinality, base_width) in PRETRAINED_MODELS - @test acctest(SEResNeXt(depth, pretrain = true)) - else - @test_throws ArgumentError SEResNeXt(depth, pretrain = true) - end - @test gradtest(m, x_224) - _gc() - end + depths = TEST_FAST ? [50] : [50, 101, 152] + cardinalities = TEST_FAST ? [32] : [32, 64] + base_widths = TEST_FAST ? [4] : [4, 8] + @testset for depth in depths, cardinality in cardinalities, base_width in base_widths + m = SEResNeXt(depth; cardinality, base_width) |> gpu + @test size(m(x_224)) == (1000, 1) + if (SEResNeXt, depth, cardinality, base_width) in PRETRAINED_MODELS + @test acctest(SEResNeXt(depth; pretrain = true)) + else + @test_throws ArgumentError SEResNeXt(depth, pretrain = true) end + @test gradtest(m, x_224) + _gc() end end @testitem "Res2Net" setup=[TestModels] begin - @testset for (base_width, scale) in [(26, 4), (48, 2), (14, 8), (26, 6), (26, 8)] - m = Res2Net(50; base_width, scale) + configs = TEST_FAST ? [(26, 4)] : [(26, 4), (48, 2), (14, 8), (26, 6), (26, 8)] + @testset for (base_width, scale) in configs + m = Res2Net(50; base_width, scale) |> gpu @test size(m(x_224)) == (1000, 1) if (Res2Net, 50, base_width, scale) in PRETRAINED_MODELS @test acctest(Res2Net(50; base_width, scale, pretrain = true)) @@ -134,25 +139,28 @@ end @test gradtest(m, x_224) _gc() end - @testset for (base_width, scale) in [(26, 4)] - m = Res2Net(101; base_width, scale) - @test size(m(x_224)) == (1000, 1) - if (Res2Net, 101, base_width, scale) in PRETRAINED_MODELS - @test acctest(Res2Net(101; base_width, scale, pretrain = true)) - else - @test_throws ArgumentError Res2Net(101; base_width, scale, pretrain = true) + + if !TEST_FAST + @testset for (base_width, scale) in [(26, 4)] + m = Res2Net(101; base_width, scale) |> gpu + @test size(m(x_224)) == (1000, 1) + if (Res2Net, 101, base_width, scale) in PRETRAINED_MODELS + @test acctest(Res2Net(101; base_width, scale, pretrain = true)) + else + @test_throws ArgumentError Res2Net(101; base_width, scale, pretrain = true) + end + @test gradtest(m, x_224) + _gc() end - @test gradtest(m, x_224) - _gc() end end @testitem "Res2NeXt" setup=[TestModels] begin @testset for depth in [50, 101] - m = Res2NeXt(depth) + m = Res2NeXt(depth) |> gpu @test size(m(x_224)) == (1000, 1) if (Res2NeXt, depth) in PRETRAINED_MODELS - @test acctest(Res2NeXt(depth, pretrain = true)) + @test acctest(Res2NeXt(depth; pretrain = true)) else @test_throws ArgumentError Res2NeXt(depth, pretrain = true) end @@ -162,14 +170,15 @@ end end @testitem "EfficientNet" setup=[TestModels] begin - @testset "EfficientNet($config)" for config in [:b0, :b1, :b2, :b3, :b4, :b5,] #:b6, :b7, :b8] + config = TEST_FAST ? [:b0] : [:b0, :b1, :b2, :b3, :b4, :b5] #:b6, :b7, :b8] + @testset "EfficientNet($config)" for config in configs # preferred image resolution scaling r = Metalhead.EFFICIENTNET_GLOBAL_CONFIGS[config][1] - x = rand(Float32, r, r, 3, 1) - m = EfficientNet(config) + x = rand(Float32, r, r, 3, 1) |> gpu + m = EfficientNet(config) |> gpu @test size(m(x)) == (1000, 1) if (EfficientNet, config) in PRETRAINED_MODELS - @test acctest(EfficientNet(config, pretrain = true)) + @test acctest(EfficientNet(config; pretrain = true)) else @test_throws ArgumentError EfficientNet(config, pretrain = true) end @@ -179,11 +188,12 @@ end end @testitem "EfficientNetv2" setup=[TestModels] begin - @testset for config in [:small, :medium, :large] # :xlarge] - m = EfficientNetv2(config) + configs = TEST_FAST ? [:small] : [:small, :medium, :large] # :xlarge] + @testset for config in configs + m = EfficientNetv2(config) |> gpu @test size(m(x_224)) == (1000, 1) if (EfficientNetv2, config) in PRETRAINED_MODELS - @test acctest(EfficientNetv2(config, pretrain = true)) + @test acctest(EfficientNetv2(config; pretrain = true)) else @test_throws ArgumentError EfficientNetv2(config, pretrain = true) end @@ -194,10 +204,10 @@ end @testitem "GoogLeNet" setup=[TestModels] begin @testset for bn in [true, false] - m = GoogLeNet(batchnorm = bn) + m = GoogLeNet(; batchnorm = bn) |> gpu @test size(m(x_224)) == (1000, 1) if (GoogLeNet, bn) in PRETRAINED_MODELS - @test acctest(GoogLeNet(batchnorm = bn, pretrain = true)) + @test acctest(GoogLeNet(; batchnorm = bn, pretrain = true)) else @test_throws ArgumentError GoogLeNet(batchnorm = bn, pretrain = true) end @@ -207,12 +217,12 @@ end end @testitem "Inception" setup=[TestModels] begin - x_299 = rand(Float32, 299, 299, 3, 2) + x_299 = rand(Float32, 299, 299, 3, 2) |> gpu @testset "$Model" for Model in [Inceptionv3, Inceptionv4, InceptionResNetv2, Xception] - m = Model() + m = Model() |> gpu @test size(m(x_299)) == (1000, 2) if Model in PRETRAINED_MODELS - @test acctest(Model(pretrain = true)) + @test acctest(Model(; pretrain = true)) else @test_throws ArgumentError Model(pretrain = true) end @@ -222,10 +232,10 @@ end end @testitem "SqueezeNet" setup=[TestModels] begin - m = SqueezeNet() + m = SqueezeNet() |> gpu @test size(m(x_224)) == (1000, 1) if SqueezeNet in PRETRAINED_MODELS - @test acctest(SqueezeNet(pretrain = true)) + @test acctest(SqueezeNet(; pretrain = true)) else @test_throws ArgumentError SqueezeNet(pretrain = true) end @@ -234,11 +244,12 @@ end end @testitem "DenseNet" setup=[TestModels] begin - @testset for sz in [121, 161, 169, 201] - m = DenseNet(sz) + sizes = TEST_FAST ? [121] : [121, 161, 169, 201] + @testset for sz in sizes + m = DenseNet(sz) |> gpu @test size(m(x_224)) == (1000, 1) if (DenseNet, sz) in PRETRAINED_MODELS - @test acctest(DenseNet(sz, pretrain = true)) + @test acctest(DenseNet(sz; pretrain = true)) else @test_throws ArgumentError DenseNet(sz, pretrain = true) end @@ -248,16 +259,16 @@ end end @testsetup module TestMobileNets - export WIDTH_MULTS - const WIDTH_MULTS = [0.5, 0.75, 1.0, 1.3] +export WIDTH_MULTS +const WIDTH_MULTS = get(ENV, "FAST_TEST", "false") == "true" ? [0.5] : [0.5, 0.75, 1.0, 1.3] end -@testitem "MobileNetsV1" setup=[TestModels, TestMobileNets] begin +@testitem "MobileNetV1" setup=[TestModels, TestMobileNets] begin @testset for width_mult in WIDTH_MULTS - m = MobileNetv1(width_mult) + m = MobileNetv1(width_mult) |> gpu @test size(m(x_224)) == (1000, 1) if (MobileNetv1, width_mult) in PRETRAINED_MODELS - @test acctest(MobileNetv1(pretrain = true)) + @test acctest(MobileNetv1(; pretrain = true)) else @test_throws ArgumentError MobileNetv1(pretrain = true) end @@ -267,11 +278,11 @@ end end @testitem "MobileNetv2" setup=[TestModels, TestMobileNets] begin - @testset for width_mult in WIDTH_MULTS - m = MobileNetv2(width_mult) + @testset for width_mult in WIDTH_MULTS + m = MobileNetv2(width_mult) |> gpu @test size(m(x_224)) == (1000, 1) if (MobileNetv2, width_mult) in PRETRAINED_MODELS - @test acctest(MobileNetv2(pretrain = true)) + @test acctest(MobileNetv2(; pretrain = true)) else @test_throws ArgumentError MobileNetv2(pretrain = true) end @@ -279,42 +290,39 @@ end end end - @testitem "MobileNetv3" setup=[TestModels, TestMobileNets] begin - @testset for width_mult in WIDTH_MULTS - @testset for config in [:small, :large] - m = MobileNetv3(config; width_mult) - @test size(m(x_224)) == (1000, 1) - if (MobileNetv3, config, width_mult) in PRETRAINED_MODELS - @test acctest(MobileNetv3(config; pretrain = true)) - else - @test_throws ArgumentError MobileNetv3(config; pretrain = true) - end - @test gradtest(m, x_224) - _gc() + configs = TEST_FAST ? [:small] : [:small, :large] + @testset for width_mult in WIDTH_MULTS, config in configs + m = MobileNetv3(config; width_mult) |> gpu + @test size(m(x_224)) == (1000, 1) + if (MobileNetv3, config, width_mult) in PRETRAINED_MODELS + @test acctest(MobileNetv3(config; pretrain = true)) + else + @test_throws ArgumentError MobileNetv3(config; pretrain = true) end + @test gradtest(m, x_224) + _gc() end end @testitem "MNASNet" setup=[TestModels, TestMobileNets] begin - @testset for width_mult in WIDTH_MULTS - @testset for config in [:A1, :B1] - m = MNASNet(config; width_mult) - @test size(m(x_224)) == (1000, 1) - if (MNASNet, config, width_mult) in PRETRAINED_MODELS - @test acctest(MNASNet(config; pretrain = true)) - else - @test_throws ArgumentError MNASNet(config; pretrain = true) - end - @test gradtest(m, x_224) - _gc() + @testset for width_mult in WIDTH_MULTS, config in [:A1, :B1] + m = MNASNet(config; width_mult) |> gpu + @test size(m(x_224)) == (1000, 1) + if (MNASNet, config, width_mult) in PRETRAINED_MODELS + @test acctest(MNASNet(config; pretrain = true)) + else + @test_throws ArgumentError MNASNet(config; pretrain = true) end + @test gradtest(m, x_224) + _gc() end end @testitem "ConvNeXt" setup=[TestModels] begin - @testset for config in [:small, :base, :large, :tiny, :xlarge] - m = ConvNeXt(config) + configs = TEST_FAST ? [:small] : [:small, :base, :large, :tiny, :xlarge] + @testset for config in configs + m = ConvNeXt(config) |> gpu @test size(m(x_224)) == (1000, 1) @test gradtest(m, x_224) _gc() @@ -322,8 +330,9 @@ end end @testitem "ConvMixer" setup=[TestModels] begin - @testset for config in [:small, :base, :large] - m = ConvMixer(config) + configs = TEST_FAST ? [:small] : [:small, :base, :large] + @testset for config in configs + m = ConvMixer(config) |> gpu @test size(m(x_224)) == (1000, 1) @test gradtest(m, x_224) _gc() @@ -332,11 +341,11 @@ end @testitem "UNet" setup=[TestModels] begin encoder = Metalhead.backbone(ResNet(18)) - model = UNet((256, 256), 3, 10, encoder) + model = UNet((256, 256), 3, 10, encoder) |> gpu @test size(model(x_256)) == (256, 256, 10, 1) @test gradtest(model, x_256) - model = UNet() + model = UNet() |> gpu @test size(model(x_256)) == (256, 256, 3, 1) _gc() end diff --git a/test/mixer_tests.jl b/test/mixer_tests.jl index dd52d5aa9..1a03f33fc 100644 --- a/test/mixer_tests.jl +++ b/test/mixer_tests.jl @@ -1,26 +1,29 @@ @testitem "MLP-Mixer" setup=[TestModels] begin - @testset for config in [:small, :base, :large] - m, x = MLPMixer(config), x_224 - @test size(m(x)) == (1000, 1) - @test gradtest(m, x) + configs = TEST_FAST ? [:small] : [:small, :base, :large] + @testset for config in configs + m = MLPMixer(config) |> gpu + @test size(m(x_224)) == (1000, 1) + @test gradtest(m, x_224) _gc() end end @testitem "ResMLP" setup=[TestModels] begin - @testset for config in [:small, :base, :large] - m, x = ResMLP(config), x_224 - @test size(m(x)) == (1000, 1) - @test gradtest(m, x) + configs = TEST_FAST ? [:small] : [:small, :base, :large] + @testset for config in configs + m = ResMLP(config) |> gpu + @test size(m(x_224)) == (1000, 1) + @test gradtest(m, x_224) _gc() end end @testitem "gMLP" setup=[TestModels] begin - @testset for config in [:small, :base, :large] - m, x = gMLP(config), x_224 - @test size(m(x)) == (1000, 1) - @test gradtest(m, x) + configs = TEST_FAST ? [:small] : [:small, :base, :large] + @testset for config in configs + m = gMLP(config) |> gpu + @test size(m(x_224)) == (1000, 1) + @test gradtest(m, x_224) _gc() end end \ No newline at end of file diff --git a/test/model_tests.jl b/test/model_tests.jl index 886909559..c50e62797 100644 --- a/test/model_tests.jl +++ b/test/model_tests.jl @@ -1,8 +1,9 @@ @testsetup module TestModels using Metalhead, Images -using Flux: Zygote +using Flux: gradient, gpu export PRETRAINED_MODELS, + TEST_FAST, _gc, gradtest, normalize_imagenet, @@ -12,7 +13,8 @@ export PRETRAINED_MODELS, TEST_LBLS, acctest, x_224, - x_256 + x_256, + gpu const PRETRAINED_MODELS = [ # (DenseNet, 121), @@ -40,14 +42,15 @@ const PRETRAINED_MODELS = [ (VGG, 19, false), ] +const TEST_FAST = get(ENV, "FAST_TEST", "false") == "true" + function _gc() GC.safepoint() return GC.gc(true) end function gradtest(model, input) - y, pb = Zygote.pullback(model, input) - pb(ones(Float32, size(y))) + gradient(model, input) # if we make it to here with no error, success! return true end @@ -62,17 +65,19 @@ end const TEST_PATH = download("https://cdn.pixabay.com/photo/2015/05/07/11/02/guitar-756326_960_720.jpg") const TEST_IMG = imresize(Images.load(TEST_PATH), (224, 224)) # CHW -> WHC -const TEST_X = permutedims(convert(Array{Float32}, channelview(TEST_IMG)), (3, 2, 1)) |> normalize_imagenet +const TEST_X = let img_array = convert(Array{Float32}, channelview(TEST_IMG)) + permutedims(img_array, (3, 2, 1)) |> normalize_imagenet |> gpu +end # ImageNet labels const TEST_LBLS = readlines(download("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt")) function acctest(model) - ypred = model(TEST_X) |> vec + ypred = gpu(model)(TEST_X) |> vec top5 = TEST_LBLS[sortperm(ypred; rev = true)] return "acoustic guitar" in top5 end -const x_224 = rand(Float32, 224, 224, 3, 1) -const x_256 = rand(Float32, 256, 256, 3, 1) +const x_224 = rand(Float32, 224, 224, 3, 1) |> gpu +const x_256 = rand(Float32, 256, 256, 3, 1) |> gpu end \ No newline at end of file diff --git a/test/runtests.jl b/test/runtests.jl index b4e3c626e..6bd2e47b2 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -7,7 +7,9 @@ const name_filter = test_group == "All" ? nothing : Regex(test_group) @static if VERSION >= v"1.7" using ReTestItems - runtests(Metalhead; name = name_filter) + verbose_results = get(ENV, "CI", "false") == "true" + nworkers = parse(Int, get(ENV, "TEST_WORKERS", "0")) + runtests(Metalhead; name = name_filter, verbose_results, nworkers) else using TestItemRunner function testitem_filter(ti) @@ -17,5 +19,5 @@ end # Not sure why this needs to be split into a separate conditional... @static if VERSION < v"1.7" - @run_package_tests filter=testitem_filter + @run_package_tests filter = testitem_filter end \ No newline at end of file diff --git a/test/vit_tests.jl b/test/vit_tests.jl index e38d1fb8a..eb9969be1 100644 --- a/test/vit_tests.jl +++ b/test/vit_tests.jl @@ -1,6 +1,7 @@ @testitem "ViT" setup=[TestModels] begin - @testset for config in [:tiny, :small, :base, :large, :huge] # :giant, :gigantic] - m = ViT(config) + configs = TEST_FAST ? [:tiny] : [:tiny, :small, :base, :large, :huge] # :giant, :gigantic] + @testset for config in configs + m = ViT(config) |> gpu @test size(m(x_224)) == (1000, 1) @test gradtest(m, x_224) _gc() From 4dfdbd830d438416e27f98035153f8f73a177c3c Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Mon, 11 Dec 2023 20:48:20 -0800 Subject: [PATCH 06/21] fixup test item name groups --- .github/workflows/CI.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 33105f524..bf51f969e 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -52,8 +52,8 @@ jobs: - 'DenseNet' - 'UNet' - 'ConvNeXt|ConvMixer' - - 'Mixers' - - 'ViTs' + - 'MLP-Mixer|ResMLP|gMLP' + - 'ViT' steps: - uses: actions/checkout@v3 - uses: julia-actions/setup-julia@v1 From 0a57226a67f53eb2f076962e403bfa65b0307954 Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Tue, 12 Dec 2023 17:09:10 -0800 Subject: [PATCH 07/21] Use TestImages to avoid flaky download We can guarantee these test images will always be available, which is not the case for the current sample image. --- test/Project.toml | 2 ++ test/model_tests.jl | 7 +++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/test/Project.toml b/test/Project.toml index 9775f9b88..2a93eb04f 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -4,6 +4,7 @@ Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c" Images = "916415d5-f1e6-5110-898d-aaa5f9f070e0" ReTestItems = "817f1d60-ba6b-4fd5-9520-3cf149f6a823" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +TestImages = "5e47fb64-e119-507b-a336-dd2b206d9990" TestItemRunner = "f8b46487-2199-4994-9208-9a1283c18c0a" cuDNN = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd" @@ -13,5 +14,6 @@ Flux = "0.13.16, 0.14" Images = "0.26" ReTestItems = "1" Test = "1" +TestImages = "1.8" TestItemRunner = "0.2" cuDNN = "1" diff --git a/test/model_tests.jl b/test/model_tests.jl index c50e62797..623dff167 100644 --- a/test/model_tests.jl +++ b/test/model_tests.jl @@ -1,5 +1,5 @@ @testsetup module TestModels -using Metalhead, Images +using Metalhead, Images, TestImages using Flux: gradient, gpu export PRETRAINED_MODELS, @@ -62,8 +62,7 @@ function normalize_imagenet(data) end # test image -const TEST_PATH = download("https://cdn.pixabay.com/photo/2015/05/07/11/02/guitar-756326_960_720.jpg") -const TEST_IMG = imresize(Images.load(TEST_PATH), (224, 224)) +const TEST_IMG = imresize(testimage("monarch_color_256"), (224, 224)) # CHW -> WHC const TEST_X = let img_array = convert(Array{Float32}, channelview(TEST_IMG)) permutedims(img_array, (3, 2, 1)) |> normalize_imagenet |> gpu @@ -75,7 +74,7 @@ const TEST_LBLS = readlines(download("https://raw.githubusercontent.com/pytorch/ function acctest(model) ypred = gpu(model)(TEST_X) |> vec top5 = TEST_LBLS[sortperm(ypred; rev = true)] - return "acoustic guitar" in top5 + return "monarch" in top5 end const x_224 = rand(Float32, 224, 224, 3, 1) |> gpu From 046603de6a4bbc1eed77f78de5b7d7e472590bca Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Tue, 12 Dec 2023 17:21:31 -0800 Subject: [PATCH 08/21] fixup gradient call --- test/model_tests.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/model_tests.jl b/test/model_tests.jl index 623dff167..3cfa43d7a 100644 --- a/test/model_tests.jl +++ b/test/model_tests.jl @@ -50,7 +50,7 @@ function _gc() end function gradtest(model, input) - gradient(model, input) + gradient(sum ∘ model, input) # if we make it to here with no error, success! return true end From da5be3f8bab54899c9087c98646b31d85b9a95a2 Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Tue, 12 Dec 2023 18:37:49 -0800 Subject: [PATCH 09/21] Fixup tests and tweak env var handling --- .buildkite/pipeline.yml | 2 +- test/convnet_tests.jl | 8 ++++++-- test/model_tests.jl | 2 +- test/runtests.jl | 9 ++++++--- 4 files changed, 14 insertions(+), 7 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index acae78525..b967fbc54 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -21,7 +21,7 @@ steps: env: GROUP: "All" TEST_FAST: true - TEST_WORKERS: 4 + RETESTITEMS_NWORKERS: 4 if: build.message !~ /\[skip tests\]/ timeout_in_minutes: 180 matrix: diff --git a/test/convnet_tests.jl b/test/convnet_tests.jl index 490691952..05930709a 100644 --- a/test/convnet_tests.jl +++ b/test/convnet_tests.jl @@ -170,7 +170,7 @@ end end @testitem "EfficientNet" setup=[TestModels] begin - config = TEST_FAST ? [:b0] : [:b0, :b1, :b2, :b3, :b4, :b5] #:b6, :b7, :b8] + configs = TEST_FAST ? [:b0] : [:b0, :b1, :b2, :b3, :b4, :b5] #:b6, :b7, :b8] @testset "EfficientNet($config)" for config in configs # preferred image resolution scaling r = Metalhead.EFFICIENTNET_GLOBAL_CONFIGS[config][1] @@ -235,7 +235,11 @@ end m = SqueezeNet() |> gpu @test size(m(x_224)) == (1000, 1) if SqueezeNet in PRETRAINED_MODELS - @test acctest(SqueezeNet(; pretrain = true)) + if VERSION >= v"1.7" + @test acctest(SqueezeNet(; pretrain = true)) + else + @test_broken acctest(SqueezeNet(; pretrain = true)) + end else @test_throws ArgumentError SqueezeNet(pretrain = true) end diff --git a/test/model_tests.jl b/test/model_tests.jl index 3cfa43d7a..24888d76c 100644 --- a/test/model_tests.jl +++ b/test/model_tests.jl @@ -72,7 +72,7 @@ end const TEST_LBLS = readlines(download("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt")) function acctest(model) - ypred = gpu(model)(TEST_X) |> vec + ypred = gpu(model)(TEST_X) |> collect |> vec top5 = TEST_LBLS[sortperm(ypred; rev = true)] return "monarch" in top5 end diff --git a/test/runtests.jl b/test/runtests.jl index 6bd2e47b2..20fb55f1f 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -7,9 +7,12 @@ const name_filter = test_group == "All" ? nothing : Regex(test_group) @static if VERSION >= v"1.7" using ReTestItems - verbose_results = get(ENV, "CI", "false") == "true" - nworkers = parse(Int, get(ENV, "TEST_WORKERS", "0")) - runtests(Metalhead; name = name_filter, verbose_results, nworkers) + if parse(Bool, get(ENV, "CI", "false")) + runtests(Metalhead; name = name_filter, verbose_results = true) + else + # For running locally + runtests(Metalhead; name = name_filter) + end else using TestItemRunner function testitem_filter(ti) From 096025ce5e50e558918e0f61bd84246f18b7d886 Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Thu, 14 Dec 2023 15:56:08 -0800 Subject: [PATCH 10/21] Proper test group regexes --- .github/workflows/CI.yml | 4 ++-- test/runtests.jl | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index bf51f969e..8e5b37afb 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -44,8 +44,8 @@ jobs: - 'AlexNet|VGG' - 'GoogLeNet|SqueezeNet|MobileNet|MNASNet' - 'EfficientNet' - - 'ResNet|WideResNet' - - 'ResNeXt' # split off from ResNet to reduce overall runtime + - '^ResNet|WideResNet' + - '^ResNeXt' # split off from ResNet to reduce overall runtime - 'SEResNet|SEResNeXt' - 'Res2Net|Res2NeXt' - 'Inception' diff --git a/test/runtests.jl b/test/runtests.jl index 20fb55f1f..2ccdb719d 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,4 +1,5 @@ using Metalhead: Metalhead +using CUDA # TODO account for GPU tests using name or tag filter # TODO write GPU tests! From 13b0d241b5314938a2f7166b68f72cd373e4cf0e Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Thu, 14 Dec 2023 19:33:45 -0800 Subject: [PATCH 11/21] wrong env var name Co-authored-by: Kyle Daruwalla --- test/model_tests.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/model_tests.jl b/test/model_tests.jl index 24888d76c..00606023d 100644 --- a/test/model_tests.jl +++ b/test/model_tests.jl @@ -42,7 +42,7 @@ const PRETRAINED_MODELS = [ (VGG, 19, false), ] -const TEST_FAST = get(ENV, "FAST_TEST", "false") == "true" +const TEST_FAST = get(ENV, "TEST_FAST", "false") == "true" function _gc() GC.safepoint() From eee59a99cd7b5c20378a5f1251bc24d987766459 Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Thu, 14 Dec 2023 19:57:45 -0800 Subject: [PATCH 12/21] Mark tests broken on 1.6 + CUDA --- test/convnet_tests.jl | 23 ++++++++++++++++++----- test/mixer_tests.jl | 9 +++++++-- test/model_tests.jl | 4 +++- test/vit_tests.jl | 6 +++++- 4 files changed, 33 insertions(+), 9 deletions(-) diff --git a/test/convnet_tests.jl b/test/convnet_tests.jl index 05930709a..db2d431fa 100644 --- a/test/convnet_tests.jl +++ b/test/convnet_tests.jl @@ -49,7 +49,7 @@ end (dropout_prob = 0.8, stochastic_depth_prob = 0.8, dropblock_prob = 0.8), ] @testset for drop_probs in drop_list - m = Metalhead.resnet(block_fn, layers; drop_probs...) + m = Metalhead.resnet(block_fn, layers; drop_probs...) |> gpu @test size(m(x_224)) == (1000, 1) @test gradtest(m, x_224) _gc() @@ -132,9 +132,14 @@ end m = Res2Net(50; base_width, scale) |> gpu @test size(m(x_224)) == (1000, 1) if (Res2Net, 50, base_width, scale) in PRETRAINED_MODELS - @test acctest(Res2Net(50; base_width, scale, pretrain = true)) + if VERSION < v"1.7" && has_cuda() + @test_broken acctest(Res2Net(50; base_width, scale, pretrain = true)) + else + @test acctest(Res2Net(50; base_width, scale, pretrain = true)) + end else - @test_throws ArgumentError Res2Net(50; base_width, scale, pretrain = true) + err_type = VERSION < v"1.7" && has_cuda() ? Exception : ArgumentError + @test_throws err_type Res2Net(50; base_width, scale, pretrain = true) end @test gradtest(m, x_224) _gc() @@ -158,13 +163,21 @@ end @testitem "Res2NeXt" setup=[TestModels] begin @testset for depth in [50, 101] m = Res2NeXt(depth) |> gpu - @test size(m(x_224)) == (1000, 1) + if VERSION < v"1.7" && has_cuda() + @test_broken size(m(x_224)) == (1000, 1) + else + @test size(m(x_224)) == (1000, 1) + end if (Res2NeXt, depth) in PRETRAINED_MODELS @test acctest(Res2NeXt(depth; pretrain = true)) else @test_throws ArgumentError Res2NeXt(depth, pretrain = true) end - @test gradtest(m, x_224) + if VERSION < v"1.7" && has_cuda() + @test_broken gradtest(m, x_224) + else + @test gradtest(m, x_224) + end _gc() end end diff --git a/test/mixer_tests.jl b/test/mixer_tests.jl index 1a03f33fc..9f6d5e259 100644 --- a/test/mixer_tests.jl +++ b/test/mixer_tests.jl @@ -22,8 +22,13 @@ end configs = TEST_FAST ? [:small] : [:small, :base, :large] @testset for config in configs m = gMLP(config) |> gpu - @test size(m(x_224)) == (1000, 1) - @test gradtest(m, x_224) + if VERSION < v"1.7" && has_cuda() + @test_broken size(m(x_224)) == (1000, 1) + @test_broken gradtest(m, x_224) + else + @test size(m(x_224)) == (1000, 1) + @test gradtest(m, x_224) + end _gc() end end \ No newline at end of file diff --git a/test/model_tests.jl b/test/model_tests.jl index 00606023d..ecfb6f722 100644 --- a/test/model_tests.jl +++ b/test/model_tests.jl @@ -1,6 +1,7 @@ @testsetup module TestModels using Metalhead, Images, TestImages using Flux: gradient, gpu +using CUDA: has_cuda export PRETRAINED_MODELS, TEST_FAST, @@ -14,7 +15,8 @@ export PRETRAINED_MODELS, acctest, x_224, x_256, - gpu + gpu, + has_cuda const PRETRAINED_MODELS = [ # (DenseNet, 121), diff --git a/test/vit_tests.jl b/test/vit_tests.jl index eb9969be1..df9244f69 100644 --- a/test/vit_tests.jl +++ b/test/vit_tests.jl @@ -3,7 +3,11 @@ @testset for config in configs m = ViT(config) |> gpu @test size(m(x_224)) == (1000, 1) - @test gradtest(m, x_224) + if VERSION < v"1.7" && has_cuda() + @test_broken gradtest(m, x_224) + else + @test gradtest(m, x_224) + end _gc() end end From 96bbc70bd8fc271e876668252291d151e450a60b Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Thu, 21 Dec 2023 20:19:23 -0800 Subject: [PATCH 13/21] More broken GPU tests and better GPU memory cleanup --- test/convnet_tests.jl | 12 ++++++++---- test/mixer_tests.jl | 2 +- test/model_tests.jl | 5 +++-- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/test/convnet_tests.jl b/test/convnet_tests.jl index db2d431fa..9ef4a2927 100644 --- a/test/convnet_tests.jl +++ b/test/convnet_tests.jl @@ -132,7 +132,7 @@ end m = Res2Net(50; base_width, scale) |> gpu @test size(m(x_224)) == (1000, 1) if (Res2Net, 50, base_width, scale) in PRETRAINED_MODELS - if VERSION < v"1.7" && has_cuda() + if has_cuda() @test_broken acctest(Res2Net(50; base_width, scale, pretrain = true)) else @test acctest(Res2Net(50; base_width, scale, pretrain = true)) @@ -141,7 +141,11 @@ end err_type = VERSION < v"1.7" && has_cuda() ? Exception : ArgumentError @test_throws err_type Res2Net(50; base_width, scale, pretrain = true) end - @test gradtest(m, x_224) + if has_cuda() + @test_broken gradtest(m, x_224) + else + @test gradtest(m, x_224) + end _gc() end @@ -163,7 +167,7 @@ end @testitem "Res2NeXt" setup=[TestModels] begin @testset for depth in [50, 101] m = Res2NeXt(depth) |> gpu - if VERSION < v"1.7" && has_cuda() + if has_cuda() @test_broken size(m(x_224)) == (1000, 1) else @test size(m(x_224)) == (1000, 1) @@ -173,7 +177,7 @@ end else @test_throws ArgumentError Res2NeXt(depth, pretrain = true) end - if VERSION < v"1.7" && has_cuda() + if has_cuda() @test_broken gradtest(m, x_224) else @test gradtest(m, x_224) diff --git a/test/mixer_tests.jl b/test/mixer_tests.jl index 9f6d5e259..a6177d88c 100644 --- a/test/mixer_tests.jl +++ b/test/mixer_tests.jl @@ -22,7 +22,7 @@ end configs = TEST_FAST ? [:small] : [:small, :base, :large] @testset for config in configs m = gMLP(config) |> gpu - if VERSION < v"1.7" && has_cuda() + if has_cuda() @test_broken size(m(x_224)) == (1000, 1) @test_broken gradtest(m, x_224) else diff --git a/test/model_tests.jl b/test/model_tests.jl index ecfb6f722..81ad65ed3 100644 --- a/test/model_tests.jl +++ b/test/model_tests.jl @@ -1,7 +1,7 @@ @testsetup module TestModels using Metalhead, Images, TestImages using Flux: gradient, gpu -using CUDA: has_cuda +using CUDA: CUDA, has_cuda export PRETRAINED_MODELS, TEST_FAST, @@ -48,7 +48,8 @@ const TEST_FAST = get(ENV, "TEST_FAST", "false") == "true" function _gc() GC.safepoint() - return GC.gc(true) + GC.gc(true) + CUDA.reclaim() end function gradtest(model, input) From b1ed20ddb5d9782c27d84bd9f213946eca640b8a Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Thu, 21 Dec 2023 20:42:25 -0800 Subject: [PATCH 14/21] Don't reclaim in tests on non-GPU systems `reclaim` to load the CUDA driver and fails otherwise --- test/model_tests.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/model_tests.jl b/test/model_tests.jl index 81ad65ed3..729701bc4 100644 --- a/test/model_tests.jl +++ b/test/model_tests.jl @@ -49,7 +49,7 @@ const TEST_FAST = get(ENV, "TEST_FAST", "false") == "true" function _gc() GC.safepoint() GC.gc(true) - CUDA.reclaim() + has_cuda() && CUDA.reclaim() end function gradtest(model, input) From 99ca13a69b78800798f713557feb5ff43e35e5e6 Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Thu, 21 Dec 2023 20:46:02 -0800 Subject: [PATCH 15/21] missed broken test --- test/convnet_tests.jl | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/test/convnet_tests.jl b/test/convnet_tests.jl index 9ef4a2927..29da5ff2f 100644 --- a/test/convnet_tests.jl +++ b/test/convnet_tests.jl @@ -129,8 +129,12 @@ end @testitem "Res2Net" setup=[TestModels] begin configs = TEST_FAST ? [(26, 4)] : [(26, 4), (48, 2), (14, 8), (26, 6), (26, 8)] @testset for (base_width, scale) in configs - m = Res2Net(50; base_width, scale) |> gpu - @test size(m(x_224)) == (1000, 1) + m = Res2Net(50; base_width, scale) |> gpu # FIXME GPU + if has_cuda() + @test_broken size(m(x_224)) == (1000, 1) + else + @test size(m(x_224)) == (1000, 1) + end if (Res2Net, 50, base_width, scale) in PRETRAINED_MODELS if has_cuda() @test_broken acctest(Res2Net(50; base_width, scale, pretrain = true)) From 6112dc71448fca56425e58d684ef7b19255f86ba Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Thu, 21 Dec 2023 22:49:01 -0800 Subject: [PATCH 16/21] 1.6 GPU works better than 1.7+??? --- test/convnet_tests.jl | 14 +++++++++++--- test/vit_tests.jl | 6 +----- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/test/convnet_tests.jl b/test/convnet_tests.jl index 29da5ff2f..1e10c049a 100644 --- a/test/convnet_tests.jl +++ b/test/convnet_tests.jl @@ -64,7 +64,11 @@ end @testset "WideResNet($sz)" for sz in sizes m = WideResNet(sz) |> gpu @test size(m(x_224)) == (1000, 1) - @test gradtest(m, x_224) + if VERSION >= v"1.7" && has_cuda() + @test_broken gradtest(m, x_224) + else + @test gradtest(m, x_224) + end _gc() if (WideResNet, sz) in PRETRAINED_MODELS @test acctest(WideResNet(sz; pretrain = true)) @@ -104,7 +108,11 @@ end else @test_throws ArgumentError SEResNet(depth, pretrain = true) end - @test gradtest(m, x_224) + if VERSION >= v"1.7" && has_cuda() + @test_broken gradtest(m, x_224) + else + @test gradtest(m, x_224) + end _gc() end end @@ -131,7 +139,7 @@ end @testset for (base_width, scale) in configs m = Res2Net(50; base_width, scale) |> gpu # FIXME GPU if has_cuda() - @test_broken size(m(x_224)) == (1000, 1) + @test size(m(x_224)) == (1000, 1) else @test size(m(x_224)) == (1000, 1) end diff --git a/test/vit_tests.jl b/test/vit_tests.jl index df9244f69..eb9969be1 100644 --- a/test/vit_tests.jl +++ b/test/vit_tests.jl @@ -3,11 +3,7 @@ @testset for config in configs m = ViT(config) |> gpu @test size(m(x_224)) == (1000, 1) - if VERSION < v"1.7" && has_cuda() - @test_broken gradtest(m, x_224) - else - @test gradtest(m, x_224) - end + @test gradtest(m, x_224) _gc() end end From 574c55e28de83b480f80fbb4561278b3213e7c29 Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Fri, 22 Dec 2023 07:21:35 -0800 Subject: [PATCH 17/21] another missed test --- test/convnet_tests.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/convnet_tests.jl b/test/convnet_tests.jl index 1e10c049a..4dd192b7c 100644 --- a/test/convnet_tests.jl +++ b/test/convnet_tests.jl @@ -137,9 +137,9 @@ end @testitem "Res2Net" setup=[TestModels] begin configs = TEST_FAST ? [(26, 4)] : [(26, 4), (48, 2), (14, 8), (26, 6), (26, 8)] @testset for (base_width, scale) in configs - m = Res2Net(50; base_width, scale) |> gpu # FIXME GPU + m = Res2Net(50; base_width, scale) |> gpu if has_cuda() - @test size(m(x_224)) == (1000, 1) + @test_broken size(m(x_224)) == (1000, 1) else @test size(m(x_224)) == (1000, 1) end From 64418b2c8f42472ed88e7ee59eb0c926e702d0f7 Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Fri, 22 Dec 2023 07:59:40 -0800 Subject: [PATCH 18/21] reduce worker count --- .buildkite/pipeline.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index b967fbc54..6d27295b5 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -21,7 +21,7 @@ steps: env: GROUP: "All" TEST_FAST: true - RETESTITEMS_NWORKERS: 4 + RETESTITEMS_NWORKERS: 2 if: build.message !~ /\[skip tests\]/ timeout_in_minutes: 180 matrix: From 672d579a55275d6daeaa731478a1eba8b86757dd Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Fri, 22 Dec 2023 09:02:27 -0800 Subject: [PATCH 19/21] unbroken test --- test/convnet_tests.jl | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/test/convnet_tests.jl b/test/convnet_tests.jl index 4dd192b7c..f11d21cc0 100644 --- a/test/convnet_tests.jl +++ b/test/convnet_tests.jl @@ -108,11 +108,7 @@ end else @test_throws ArgumentError SEResNet(depth, pretrain = true) end - if VERSION >= v"1.7" && has_cuda() - @test_broken gradtest(m, x_224) - else - @test gradtest(m, x_224) - end + @test gradtest(m, x_224) _gc() end end From fc4e3a7a07f419d6fab20ec62e7b6d4412ea551d Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Fri, 22 Dec 2023 09:25:44 -0800 Subject: [PATCH 20/21] try memory limit 50% per worker so we avoid --- .buildkite/pipeline.yml | 1 + test/convnet_tests.jl | 6 +----- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 6d27295b5..05406b75a 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -22,6 +22,7 @@ steps: GROUP: "All" TEST_FAST: true RETESTITEMS_NWORKERS: 2 + JULIA_CUDA_HARD_MEMORY_LIMIT: "50%" if: build.message !~ /\[skip tests\]/ timeout_in_minutes: 180 matrix: diff --git a/test/convnet_tests.jl b/test/convnet_tests.jl index f11d21cc0..630de19e3 100644 --- a/test/convnet_tests.jl +++ b/test/convnet_tests.jl @@ -64,11 +64,7 @@ end @testset "WideResNet($sz)" for sz in sizes m = WideResNet(sz) |> gpu @test size(m(x_224)) == (1000, 1) - if VERSION >= v"1.7" && has_cuda() - @test_broken gradtest(m, x_224) - else - @test gradtest(m, x_224) - end + @test gradtest(m, x_224) _gc() if (WideResNet, sz) in PRETRAINED_MODELS @test acctest(WideResNet(sz; pretrain = true)) From 929aca0c9103b6e3f827c42d3f815a9e080544b4 Mon Sep 17 00:00:00 2001 From: Brian Chen Date: Fri, 22 Dec 2023 15:13:43 -0800 Subject: [PATCH 21/21] maybe the memory limit doesn't work --- .buildkite/pipeline.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 05406b75a..42206349b 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -22,7 +22,7 @@ steps: GROUP: "All" TEST_FAST: true RETESTITEMS_NWORKERS: 2 - JULIA_CUDA_HARD_MEMORY_LIMIT: "50%" + # JULIA_CUDA_HARD_MEMORY_LIMIT: "50%" if: build.message !~ /\[skip tests\]/ timeout_in_minutes: 180 matrix: