@@ -36,8 +36,8 @@ Conv((2, 2), 3 => 7)  # 91 parameters
 julia> layer2(xs) |> size  # the output dimension changes as the padding was not "same"
 (99, 99, 7, 50)
 
-julia> layer3 = Conv((2,2), 3 => 7, stride=2, pad=SamePad())
-Conv((2, 2), 3 => 7, pad=(1, 0, 1, 0), stride=2)  # 91 parameters
+julia> layer3 = Conv((5,5), 3 => 7, stride=2, pad=SamePad())
+Conv((5, 5), 3 => 7, pad=2, stride=2)  # 532 parameters
 
 julia> layer3(xs) |> size  # output size = `ceil(input_size/stride)` = 50
 (50, 50, 7, 50)
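As a side note on this hunk: the example switched from a 2×2 to a 5×5 kernel because `SamePad()` cannot split padding evenly for an even kernel, which is where the old `pad=(1, 0, 1, 0)` came from. A minimal sketch contrasting the two cases (reusing the `xs` from the surrounding docstring):

```julia
using Flux

xs = rand(Float32, 100, 100, 3, 50)

# Even kernel: "same" padding totals k - 1 = 1 per dimension, so it must
# be split asymmetrically, giving the pad=(1, 0, 1, 0) in the old output.
layer_even = Conv((2, 2), 3 => 7, pad=SamePad())

# Odd kernel: padding splits evenly, (k - 1) ÷ 2 = 2 on each side.
layer_odd = Conv((5, 5), 3 => 7, pad=SamePad())

size(layer_odd(xs))  # (100, 100, 7, 50): spatial size preserved at stride 1
```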
@@ -60,7 +60,6 @@
 """
     Conv(filter, in => out, σ = identity;
          stride = 1, pad = 0, dilation = 1, groups = 1, [bias, init])
-    Conv(weight::AbstractArray, [bias, activation; stride, pad, dilation])
 
 Standard convolutional layer. `filter` is a tuple of integers
 specifying the size of the convolutional kernel;
@@ -93,10 +92,6 @@ Keywords to control initialization of the layer:
 * `bias` - The initial bias vector is all zero by default. Trainable bias can be disabled entirely
   by setting this to `false`, or another vector can be provided such as `bias = randn(Float32, out)`.
 
-Convolutional layer can also be manually constructed by passing in weights and
-biases. This constructor accepts the same keywords (and has the same defaults)
-as the `Conv((4,4), 3 => 7, relu)` method.
-
 See also [`ConvTranspose`](@ref), [`DepthwiseConv`](@ref), [`CrossCor`](@ref).
 
 # Examples
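The `bias` bullet kept by this hunk describes several accepted forms; here is a minimal sketch of each (kernel size and channel counts are arbitrary illustration choices):

```julia
using Flux

c_default = Conv((3, 3), 3 => 7)                            # trainable bias, zero-initialised
c_nobias  = Conv((3, 3), 3 => 7; bias = false)              # bias disabled entirely
c_custom  = Conv((3, 3), 3 => 7; bias = randn(Float32, 7))  # user-supplied initial bias
```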
@@ -120,7 +115,26 @@ julia> Conv((1,1), 3 => 7; pad = (20,10,0,0))(xs) |> size
 
 julia> Conv((5,5), 3 => 7; stride = 2, dilation = 4)(xs) |> size
 (42, 42, 7, 50)
+```
+"""
+struct Conv{N,M,F,A,V}
+  σ::F
+  weight::A
+  bias::V
+  stride::NTuple{N,Int}
+  pad::NTuple{M,Int}
+  dilation::NTuple{N,Int}
+  groups::Int
+end
+
+"""
+    Conv(weight::AbstractArray, [bias, activation; stride, pad, dilation])
+
+Constructs a convolutional layer with the given weight and bias.
+Accepts the same keywords (and has the same defaults) as the `Conv((4,4), 3 => 7, relu)`
+method.
 
+```jldoctest
 julia> weight = rand(3, 4, 5);
 
 julia> bias = zeros(5);
@@ -135,16 +149,6 @@ julia> Flux.params(c1) |> length
 2
 ```
 """
-struct Conv{N,M,F,A,V}
-  σ::F
-  weight::A
-  bias::V
-  stride::NTuple{N,Int}
-  pad::NTuple{M,Int}
-  dilation::NTuple{N,Int}
-  groups::Int
-end
-
 function Conv(w::AbstractArray{T,N}, b = true, σ = identity;
               stride = 1, pad = 0, dilation = 1, groups = 1) where {T,N}
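For the weight-based constructor whose docstring was split out above, the keywords match the `filter`-based method; a hypothetical usage sketch (weight shape and values chosen purely for illustration):

```julia
using Flux

# For a 2-d Conv the weight layout is (kx, ky, in_channels, out_channels).
w = rand(Float32, 5, 5, 3, 7)

# `false` omits the bias entirely; stride/pad behave as in the filter-based method.
c = Conv(w, false, relu; stride = 2, pad = 2)

size(c(rand(Float32, 100, 100, 3, 1)))  # (50, 50, 7, 1)
```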
@@ -173,7 +177,7 @@ channels from `in` to `out`.
 Accepts the keyword `init` (default: `glorot_uniform`) to control the sampling
 distribution.
 
-This is internally used by the [`Conv`](@ref) layer but can also be used independently.
+This is internally used by the [`Conv`](@ref) layer.
 """
 function convfilter(filter::NTuple{N,Integer}, ch::Pair{<:Integer,<:Integer};
     init = glorot_uniform, groups = 1) where N
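Per the amended docstring, `convfilter` is the helper `Conv` uses to build its weight array; a short sketch of the expected behaviour (assuming the usual `(filter..., in ÷ groups, out)` layout):

```julia
using Flux

# Randomly initialised weight array for a 3×3 kernel mapping 3 => 7 channels.
w = Flux.convfilter((3, 3), 3 => 7)
size(w)  # (3, 3, 3, 7)

# The result can be passed straight to the weight-based Conv constructor.
c = Conv(w, true, relu)
```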
@@ -219,7 +223,6 @@
 
 """
     ConvTranspose(filter, in => out, σ=identity; stride=1, pad=0, dilation=1, [bias, init])
-    ConvTranspose(weight::AbstractArray, [bias, activation; stride, pad, dilation, groups])
 
 Standard convolutional transpose layer. `filter` is a tuple of integers
 specifying the size of the convolutional kernel, while
@@ -230,10 +233,6 @@ Note that `pad=SamePad()` here tries to ensure `size(output,d) == size(x,d) * stride`.
 Parameters are controlled by additional keywords, with defaults
 `init=glorot_uniform` and `bias=true`.
 
-ConvTranspose layer can also be manually constructed by passing in weights and
-biases. This constructor accepts the same keywords (and has the same defaults) as the
-`ConvTranspose((4,4), 3 => 7, relu)` method.
-
 See also [`Conv`](@ref) for more detailed description of keywords.
 
 # Examples
@@ -266,6 +265,28 @@
 _channels_in(l::ConvTranspose) = size(l.weight)[end]
 _channels_out(l::ConvTranspose) = size(l.weight)[end-1]*l.groups
 
+"""
+    ConvTranspose(weight::AbstractArray, [bias, activation; stride, pad, dilation, groups])
+
+Constructs a ConvTranspose layer with the given weight and bias.
+Accepts the same keywords (and has the same defaults) as the `ConvTranspose((4,4), 3 => 7, relu)` method.
+
+# Examples
+```jldoctest
+julia> weight = rand(3, 4, 5);
+
+julia> bias = zeros(4);
+
+julia> c1 = ConvTranspose(weight, bias, sigmoid)
+ConvTranspose((3,), 5 => 4, σ)  # 64 parameters
+
+julia> c1(randn(100, 5, 64)) |> size  # transposed convolution will increase the dimension size (upsampling)
+(102, 4, 64)
+
+julia> Flux.params(c1) |> length
+2
+```
+"""
 function ConvTranspose(w::AbstractArray{T,N}, bias = true, σ = identity;
                        stride = 1, pad = 0, dilation = 1, groups=1) where {T,N}
   stride = expand(Val(N-2), stride)