Skip to content

Commit 20f08cf

Browse files
committed
rename channels_in, channels_out; add ConvTranspose cuda tests
1 parent 7815469 commit 20f08cf

File tree

2 files changed

+12
-11
lines changed

2 files changed

+12
-11
lines changed

src/layers/conv.jl

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -166,12 +166,12 @@ function (c::Conv)(x::AbstractArray)
166166
σ.(conv(x, c.weight, cdims) .+ b)
167167
end
168168

169-
channels_in(l ::Conv) = size(l.weight, ndims(l.weight)-1) * l.groups
170-
channels_out(l::Conv) = size(l.weight, ndims(l.weight))
169+
_channels_in(l ::Conv) = size(l.weight, ndims(l.weight)-1) * l.groups
170+
_channels_out(l::Conv) = size(l.weight, ndims(l.weight))
171171

172172
function Base.show(io::IO, l::Conv)
173173
print(io, "Conv(", size(l.weight)[1:ndims(l.weight)-2])
174-
print(io, ", ", channels_in(l), " => ", channels_out(l))
174+
print(io, ", ", _channels_in(l), " => ", _channels_out(l))
175175
_print_conv_opt(io, l)
176176
print(io, ")")
177177
end
@@ -228,8 +228,8 @@ struct ConvTranspose{N,M,F,A,V}
228228
groups::Int
229229
end
230230

231-
channels_in(l::ConvTranspose) = size(l.weight)[end]
232-
channels_out(l::ConvTranspose) = size(l.weight)[end-1]*l.groups
231+
_channels_in(l::ConvTranspose) = size(l.weight)[end]
232+
_channels_out(l::ConvTranspose) = size(l.weight)[end-1]*l.groups
233233

234234
"""
235235
ConvTranspose(weight::AbstractArray, [bias, activation; stride, pad, dilation, groups])
@@ -285,7 +285,7 @@ end
285285

286286
function Base.show(io::IO, l::ConvTranspose)
287287
print(io, "ConvTranspose(", size(l.weight)[1:ndims(l.weight)-2])
288-
print(io, ", ", channels_in(l), " => ", channels_out(l))
288+
print(io, ", ", _channels_in(l), " => ", _channels_out(l))
289289
_print_conv_opt(io, l)
290290
print(io, ")")
291291
end

test/cuda/layers.jl

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ function gpu_gradtest(name::String, layers::Vector, x_cpu = nothing, args...; te
4848
xg_cpu = gradient(x -> sum(l_cpu(x)), x_cpu)[1]
4949
xg_gpu = gradient(x -> sum(l_gpu(x)), x_gpu)[1]
5050

51-
# test
51+
# test
5252
if test_cpu
5353
@test y_gpu ≈ y_cpu rtol=1f-3 atol=1f-3
5454
if isnothing(xg_cpu)
@@ -80,6 +80,7 @@ ConvTransposeNoBias(args...) = ConvTranspose(args...; bias = false)
8080
CrossCorNoBias(args...) = CrossCor(args...; bias = false)
8181
DepthwiseConvNoBias(args...) = DepthwiseConv(args...; bias = false)
8282
GroupedConv(args...) = Conv(args..., groups = 5)
83+
GroupedConvTranspose(args...) = ConvTranspose(args..., groups = 5)
8384

8485
for act in ACTIVATIONS
8586
r = rand(Float32, 28, 28, 1, 1)
@@ -89,16 +90,16 @@ for act in ACTIVATIONS
8990
DepthwiseConv, DepthwiseConvNoBias]
9091
gpu_gradtest("Convolution with $act", conv_layers, r, (2,2), 1=>3, act, test_cpu = false)
9192

92-
groupedconv = [GroupedConv]
93+
groupedconv = [GroupedConv, GroupedConvTranspose]
9394
gpu_gradtest("GroupedConvolution with $act", groupedconv, rand(Float32, 28, 28, 100, 2), (3,3), 100 => 25, act, test_cpu = true)
9495

9596
batch_norm = [BatchNorm]
9697
gpu_gradtest("BatchNorm 1 with $act", batch_norm, rand(Float32, 28,28,3,4), 3, act, test_cpu = false) #TODO fix errors
9798
gpu_gradtest("BatchNorm 2 with $act", batch_norm, rand(Float32, 5,4), 5, act, test_cpu = false)
98-
99+
99100
instancenorm = [InstanceNorm]
100101
gpu_gradtest("InstanceNorm with $act", instancenorm, r, 1, act, test_cpu = false)
101-
102+
102103
groupnorm = [GroupNorm]
103104
gpu_gradtest("GroupNorm with $act", groupnorm, rand(Float32, 28,28,3,1), 3, 1, act, test_cpu = false)
104105
end
@@ -151,7 +152,7 @@ end
151152
else
152153
@test sum(l(ip)) ≈ 0.f0
153154
gs = gradient(() -> sum(l(ip)), Flux.params(l))
154-
@test l.bias ∈ gs.params
155+
@test l.bias ∈ gs.params
155156
end
156157
end
157158

0 commit comments

Comments
 (0)