Skip to content

Commit af504d3

Browse files
committed
Final minor tweaks
1 parent 8e6f929 commit af504d3

File tree

2 files changed: +11 −12 lines changed

src/convnets/inception.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -524,7 +524,7 @@ function xception_block(inchannels, outchannels, nrepeats; stride = 1,
524524
push!(layers, x -> relu.(x))
525525
append!(layers,
526526
depthwise_sep_conv_bn((3, 3), inc, outc; pad = 1, bias = false,
527-
use_bn1 = false, use_bn2 = false))
527+
use_bn = (false, false)))
528528
push!(layers, BatchNorm(outc))
529529
end
530530
layers = start_with_relu ? layers : layers[2:end]

src/layers/conv.jl

Lines changed: 10 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
"""
22
conv_bn(kernelsize, inplanes, outplanes, activation = relu;
3-
rev = false, preact = false, use_bn = true,
4-
initβ = Flux.zeros32, initγ = Flux.ones32, ϵ = 1.0f-5, momentum = 1.0f-1,
5-
kwargs...)
3+
rev = false, preact = false, use_bn = true, stride = 1, pad = 0, dilation = 1,
4+
groups = 1, [bias, weight, init], initβ = Flux.zeros32, initγ = Flux.ones32,
5+
ϵ = 1.0f-5, momentum = 1.0f-1)
66
77
Create a convolution + batch normalization pair with activation.
88
@@ -55,10 +55,10 @@ end
5555

5656
"""
5757
depthwise_sep_conv_bn(kernelsize, inplanes, outplanes, activation = relu;
58-
rev = false, use_bn1 = true, use_bn2 = true,
58+
rev = false, use_bn = (true, true),
59+
stride = 1, pad = 0, dilation = 1, [bias, weight, init],
5960
initβ = Flux.zeros32, initγ = Flux.ones32,
60-
ϵ = 1.0f-5, momentum = 1.0f-1,
61-
stride = 1, kwargs...)
61+
ϵ = 1.0f-5, momentum = 1.0f-1)
6262
6363
Create a depthwise separable convolution chain as used in MobileNetv1.
6464
This is sequence of layers:
@@ -77,8 +77,7 @@ See Fig. 3 in [reference](https://arxiv.org/abs/1704.04861v1).
7777
- `outplanes`: number of output feature maps
7878
- `activation`: the activation function for the final layer
7979
- `rev`: set to `true` to place the batch norm before the convolution
80-
- `use_bn1`: set to `true` to use a batch norm after the depthwise convolution
81-
- `use_bn2`: set to `true` to use a batch norm after the pointwise convolution
80+
- `use_bn`: a tuple of two booleans to specify whether to use batch normalization for the first and second convolution
8281
- `stride`: stride of the first convolution kernel
8382
- `pad`: padding of the first convolution kernel
8483
- `dilation`: dilation of the first convolution kernel
@@ -87,16 +86,16 @@ See Fig. 3 in [reference](https://arxiv.org/abs/1704.04861v1).
8786
- `ϵ`, `momentum`: batch norm parameters (see [`Flux.BatchNorm`](#))
8887
"""
8988
function depthwise_sep_conv_bn(kernelsize, inplanes, outplanes, activation = relu;
90-
rev = false, use_bn1 = true, use_bn2 = true,
89+
rev = false, use_bn = (true, true),
9190
initβ = Flux.zeros32, initγ = Flux.ones32,
9291
ϵ = 1.0f-5, momentum = 1.0f-1,
9392
stride = 1, kwargs...)
9493
return vcat(conv_bn(kernelsize, inplanes, inplanes, activation;
9594
rev = rev, initβ = initβ, initγ = initγ,
96-
ϵ = ϵ, momentum = momentum, use_bn = use_bn1,
95+
ϵ = ϵ, momentum = momentum, use_bn = use_bn[1],
9796
stride = stride, groups = Int(inplanes), kwargs...),
9897
conv_bn((1, 1), inplanes, outplanes, activation;
99-
rev = rev, initβ = initβ, initγ = initγ, use_bn = use_bn2,
98+
rev = rev, initβ = initβ, initγ = initγ, use_bn = use_bn[2],
10099
ϵ = ϵ, momentum = momentum))
101100
end
102101

0 commit comments

Comments (0)