Commit b6730a8

Remove convfilter from manual + separate docstrings for some constructors
1 parent: 87fe70f

File tree: 2 files changed, +46 −24 lines

docs/src/models/layers.md
Lines changed: 2 additions & 1 deletion

@@ -13,6 +13,7 @@ These layers are used to build convolutional neural networks (CNNs).
 
 ```@docs
 Conv
+Conv(weight::AbstractArray)
 AdaptiveMaxPool
 MaxPool
 GlobalMaxPool
@@ -21,10 +22,10 @@ MeanPool
 GlobalMeanPool
 DepthwiseConv
 ConvTranspose
+ConvTranspose(weight::AbstractArray)
 CrossCor
 SamePad
 Flux.flatten
-Flux.convfilter
 ```
 
 ## Upsampling Layers

src/layers/conv.jl
Lines changed: 44 additions & 23 deletions
@@ -36,8 +36,8 @@ Conv((2, 2), 3 => 7) # 91 parameters
 julia> layer2(xs) |> size # the output dimension changes as the padding was not "same"
 (99, 99, 7, 50)
 
-julia> layer3 = Conv((2,2), 3 => 7, stride=2, pad=SamePad())
-Conv((2, 2), 3 => 7, pad=(1, 0, 1, 0), stride=2) # 91 parameters
+julia> layer3 = Conv((5, 5), 3 => 7, stride=2, pad=SamePad())
+Conv((5, 5), 3 => 7, pad=2, stride=2) # 532 parameters
 
 julia> layer3(xs) |> size # output size = `ceil(input_size/stride)` = 50
 (50, 50, 7, 50)
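The updated doctest numbers check out: 5×5×3×7 weights plus 7 biases gives 525 + 7 = 532 parameters, and `SamePad()` with stride 2 halves the spatial size. A quick sketch (illustrative, not part of the commit; `xs` is assumed to be the `rand(Float32, 100, 100, 3, 50)` array defined earlier in this docstring):

```julia
# Quick check of the updated doctest (illustrative; xs assumed from the docstring)
using Flux

xs = rand(Float32, 100, 100, 3, 50)
layer3 = Conv((5, 5), 3 => 7, stride=2, pad=SamePad())
size(layer3(xs))  # (50, 50, 7, 50), i.e. ceil(100 / 2) = 50 per spatial dim
# parameter count: 5*5*3*7 weights + 7 biases = 525 + 7 = 532
```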
@@ -60,7 +60,6 @@ end
 """
     Conv(filter, in => out, σ = identity;
          stride = 1, pad = 0, dilation = 1, groups = 1, [bias, init])
-    Conv(weight::AbstractArray, [bias, activation; stride, pad, dilation])
 
 Standard convolutional layer. `filter` is a tuple of integers
 specifying the size of the convolutional kernel;
@@ -93,10 +92,6 @@ Keywords to control initialization of the layer:
 * `bias` - The initial bias vector is all zero by default. Trainable bias can be disabled entirely
   by setting this to `false`, or another vector can be provided such as `bias = randn(Float32, out)`.
 
-Convolutional layer can also be manually constructed by passing in weights and
-biases. This constructor accepts the same keywords (and has the same defaults)
-as the `Conv((4,4), 3 => 7, relu)` method.
-
 See also [`ConvTranspose`](@ref), [`DepthwiseConv`](@ref), [`CrossCor`](@ref).
 
 # Examples
@@ -120,7 +115,26 @@ julia> Conv((1,1), 3 => 7; pad = (20,10,0,0))(xs) |> size
 
 julia> Conv((5,5), 3 => 7; stride = 2, dilation = 4)(xs) |> size
 (42, 42, 7, 50)
+```
+"""
+struct Conv{N,M,F,A,V}
+  σ::F
+  weight::A
+  bias::V
+  stride::NTuple{N,Int}
+  pad::NTuple{M,Int}
+  dilation::NTuple{N,Int}
+  groups::Int
+end
+
+"""
+    Conv(weight::AbstractArray, [bias, activation; stride, pad, dilation])
+
+Constructs a convolutional layer with the given weight and bias.
+Accepts the same keywords (and has the same defaults) as the `Conv((4,4), 3 => 7, relu)`
+method.
 
+```jldoctest
 julia> weight = rand(3, 4, 5);
 
 julia> bias = zeros(5);
@@ -135,16 +149,6 @@ julia> Flux.params(c1) |> length
 2
 ```
 """
-struct Conv{N,M,F,A,V}
-  σ::F
-  weight::A
-  bias::V
-  stride::NTuple{N,Int}
-  pad::NTuple{M,Int}
-  dilation::NTuple{N,Int}
-  groups::Int
-end
-
 function Conv(w::AbstractArray{T,N}, b = true, σ = identity;
               stride = 1, pad = 0, dilation = 1, groups = 1) where {T,N}
 
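With the struct definition and the weight-based constructor now documented separately, the manual constructor's docstring stands on its own. One typical use it serves, sketched here for context (illustrative only, not from this commit), is a layer with a fixed, hand-crafted kernel:

```julia
# Illustrative use of the weight/bias constructor (not from this commit):
# a fixed edge-detection kernel wrapped as a Conv layer
using Flux

kernel = Float32[-1 -1 -1; -1 8 -1; -1 -1 -1]
weight = reshape(kernel, 3, 3, 1, 1)   # the (filter..., in, out) layout Conv expects
edge = Conv(weight, false; pad=1)      # bias disabled, identity activation

img = rand(Float32, 28, 28, 1, 1)
size(edge(img))                        # (28, 28, 1, 1) -- "same" output size
```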
@@ -173,7 +177,7 @@ channels from `in` to `out`.
 Accepts the keyword `init` (default: `glorot_uniform`) to control the sampling
 distribution.
 
-This is internally used by the [`Conv`](@ref) layer but can also be used independently.
+This is internally used by the [`Conv`](@ref) layer.
 """
 function convfilter(filter::NTuple{N,Integer}, ch::Pair{<:Integer,<:Integer};
                     init = glorot_uniform, groups = 1) where N
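For reference, the relationship the reworded docstring describes, based only on the signatures visible in this diff: `convfilter` samples the weight array that the weight-based `Conv` constructor can then wrap (a sketch, not from the commit):

```julia
# convfilter samples a glorot_uniform weight of size (filter..., in ÷ groups, out);
# Conv(weight, ...) wraps it into a layer (illustrative sketch)
using Flux

w = Flux.convfilter((3, 3), 4 => 8)   # 3×3×4×8 Array{Float32}
layer = Conv(w, true, relu; pad=1)    # ≈ Conv((3, 3), 4 => 8, relu; pad=1)
size(w)                               # (3, 3, 4, 8)
```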
@@ -219,7 +223,6 @@ end
 
 """
     ConvTranspose(filter, in => out, σ=identity; stride=1, pad=0, dilation=1, [bias, init])
-    ConvTranspose(weight::AbstractArray, [bias, activation; stride, pad, dilation, groups])
 
 Standard convolutional transpose layer. `filter` is a tuple of integers
 specifying the size of the convolutional kernel, while
@@ -230,10 +233,6 @@ Note that `pad=SamePad()` here tries to ensure `size(output,d) == size(x,d) * st
 Parameters are controlled by additional keywords, with defaults
 `init=glorot_uniform` and `bias=true`.
 
-ConvTranspose layer can also be manually constructed by passing in weights and
-biases. This constructor accepts the same keywords (and has the same defaults) as the
-`ConvTranspose((4,4), 3 => 7, relu)` method.
-
 See also [`Conv`](@ref) for more detailed description of keywords.
 
 # Examples
@@ -266,6 +265,28 @@ end
 _channels_in(l::ConvTranspose) = size(l.weight)[end]
 _channels_out(l::ConvTranspose) = size(l.weight)[end-1]*l.groups
 
+"""
+    ConvTranspose(weight::AbstractArray, [bias, activation; stride, pad, dilation, groups])
+
+Constructs a ConvTranspose layer with the given weight and bias.
+Accepts the same keywords (and has the same defaults) as the `ConvTranspose((4,4), 3 => 7, relu)` method.
+
+# Examples
+```jldoctest
+julia> weight = rand(3, 4, 5);
+
+julia> bias = zeros(4);
+
+julia> c1 = ConvTranspose(weight, bias, sigmoid)
+ConvTranspose((3,), 5 => 4, σ) # 64 parameters
+
+julia> c1(randn(100, 5, 64)) |> size # transposed convolution will increase the dimension size (upsampling)
+(102, 4, 64)
+
+julia> Flux.params(c1) |> length
+2
+```
+"""
 function ConvTranspose(w::AbstractArray{T,N}, bias = true, σ = identity;
                        stride = 1, pad = 0, dilation = 1, groups=1) where {T,N}
   stride = expand(Val(N-2), stride)
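One use the weight-based `ConvTranspose` constructor enables, sketched here for context (illustrative, not from this commit): because `ConvTranspose` stores its kernel as `(k..., out, in)` per the `_channels_in`/`_channels_out` definitions above, an encoder's `Conv` weight can be reused directly for a tied-weight decoder:

```julia
# Tied-weight encoder/decoder pair sharing one kernel array (illustrative sketch)
using Flux

w = Flux.convfilter((3, 3), 4 => 8)           # (3, 3, 4, 8)
enc = Conv(w, false, relu; pad=1)             # maps 4 => 8 channels
dec = ConvTranspose(w, false, relu; pad=1)    # maps 8 => 4 channels, sharing w

x = rand(Float32, 32, 32, 4, 1)
size(dec(enc(x)))                             # (32, 32, 4, 1)
```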
