println("RNN")

-for n in [2, 20, 200, 2000], T in [1, 8, 16, 64]
+for n in [2, 20, 200, 1000], T in [1, 8, 16, 64]
    x = [randn(Float32, n, n) for t in 1:T]
    model = RNN(n, n)
    println("CPU n=$n, t=$T")
    run_benchmark(model, x, cuda=false)
    println("CUDA n=$n, t=$T")
-    run_benchmark(model, x, cuda=true)
+    try
+        run_benchmark(model, x, cuda=true)
+    catch ex
+        @show typeof(ex)
+        if ex isa OutOfGPUMemoryError
+            @warn "Not enough GPU memory to run test"
+        else
+            rethrow(ex)
+        end
+    end
end

println("RNN-3d")
-for n in [2, 20, 200, 2000], T in [1, 8, 16, 64]
+for n in [2, 20, 200, 1000], T in [1, 8, 16, 64]
    x = randn(Float32, n, n, T)
    model = RNN(n, n)
    println("CPU n=$n, t=$T")
    run_benchmark(model, x, cuda=false)
    println("CUDA n=$n, t=$T")
-    run_benchmark(model, x, cuda=true)
+    try
+        run_benchmark(model, x, cuda=true)
+    catch ex
+        @show typeof(ex)
+        if ex isa OutOfGPUMemoryError
+            @warn "Not enough GPU memory to run test"
+        else
+            rethrow(ex)
+        end
+    end
end
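For reference, run_benchmark itself is not part of this hunk; it is defined earlier in the benchmark script. A minimal sketch of the interface these loops assume (a Flux model, the input, and a cuda keyword), using Flux.jl, CUDA.jl and BenchmarkTools.jl, could look like the code below; the body is an illustrative assumption, not the script's actual helper. OutOfGPUMemoryError is the exception type exported by CUDA.jl, which is what the try/catch above matches against.

    # Illustrative sketch only -- the real run_benchmark is defined elsewhere
    # in this script; this just mirrors the assumed call signature.
    using Flux, CUDA, BenchmarkTools

    function run_benchmark(model, x; cuda::Bool=false)
        if cuda
            model = gpu(model)                 # move parameters to the GPU
            x = x isa AbstractArray{<:Number} ? gpu(x) : map(gpu, x)
        end
        # A 3d array (features x batch x time) is fed to the RNN directly;
        # a vector of matrices is fed one timestep at a time.
        fwd(m, a::AbstractArray{<:Number,3}) = m(a)
        fwd(m, xs::AbstractVector) = map(m, xs)
        Flux.reset!(model)                     # clear the recurrent state
        @btime $fwd($model, $x)
    end

The try/catch only swallows OutOfGPUMemoryError, so the largest problem sizes can fail gracefully on smaller GPUs while any other error still aborts the run.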
|
|