41
41
42
42
# Convolution

# Since CUDNN does not support 1D convolution, Conv in Flux will give a
# CUDNNError if the size is 1-dimensional. We have to reshape the
# CuArray/PoolDims/DenseConvDims to 4D before feeding to CUDNN.

# Fallback: anything already >= 2D spatially passes through unchanged.
fix1d(x) = x

# Promote a 1D convolution array (W, C, N) to the 4D layout (W, 1, C, N)
# that CUDNN expects, by inserting a singleton second spatial dimension.
# `reshape` shares data with `x` — no copy is made.
fix1d(x::CuArray{T,3}) where T = reshape(x, size(x, 1), 1, size(x, 2), size(x, 3))

# Promote 1D DenseConvDims to 2D: kernel/stride/dilation each get an extra 1,
# padding gets an extra (0, 0) pair, and the input size gains a trailing 1.
fix1d(cdims::DenseConvDims{1,K,C_in,C_out,S,P,D,F}) where {K,C_in,C_out,S,P,D,F} =
    DenseConvDims{2,(K...,1),C_in,C_out,(S...,1),(P...,0,0),(D...,1),F}((cdims.I..., 1))

# Promote 1D PoolDims to 2D in the same way.
# NOTE: the original declared an unused type variable `F` in the `where`
# clause; it is not part of PoolDims' parameters and has been removed.
fix1d(pdims::PoolDims{1,K,S,P,D}) where {K,S,P,D} =
    PoolDims{2,(K...,1),(S...,1),(P...,0,0),(D...,1)}((pdims.I..., 1), pdims.C_in)
44
56
"""
    conv!(y, x, w, cdims::DenseConvDims; alpha=1, algo=0)

Compute the convolution of `x` with filters `w` into the preallocated
output `y` via CUDNN and return `y`. Arguments are passed through `fix1d`
so 1D inputs are promoted to the 4D layout CUDNN requires. `algo` selects
the CUDNN convolution algorithm (0 is the default choice).
"""
function conv!(y::CuArray{T}, x::CuArray{T}, w::CuArray{T}, cdims::DenseConvDims;
               alpha=1, algo=0) where T<:CUDNNFloat
    # Dilated convolution only exists in cuDNN >= 6.
    if version() < v"6"
        all(x -> x == 1, dilation(cdims)) || error("Only dilation = 1 is supported in cuDNN version < 6")
    end

    cudnnConvolutionForward(fix1d(y), fix1d(x), fix1d(w), fix1d(cdims), alpha=alpha, algo=algo)
    return y
end
52
64
53
65
"""
    ∇conv_filter!(dw, x, dy, cdims::DenseConvDims; alpha=1, algo=0)

Compute the gradient of the convolution with respect to the filters into
the preallocated `dw` via CUDNN and return `dw`. Arguments are promoted
to 4D with `fix1d` since CUDNN has no native 1D support.
"""
function ∇conv_filter!(dw::CuArray{T}, x::CuArray{T}, dy::CuArray{T},
                       cdims::DenseConvDims; alpha=1, algo=0) where T<:CUDNNFloat
    # Dilated convolution only exists in cuDNN >= 6.
    if version() < v"6"
        all(x -> x == 1, dilation(cdims)) || error("Only dilation = 1 is supported in cuDNN version < 6")
    end

    cudnnConvolutionBackwardFilter(fix1d(dw), fix1d(x), fix1d(dy), fix1d(cdims), alpha=alpha, algo=algo)
    return dw
end
61
74
62
75
"""
    ∇conv_data!(dx, dy, w, cdims::DenseConvDims; alpha=1, algo=0)

Compute the gradient of the convolution with respect to the input into
the preallocated `dx` via CUDNN and return `dx`. Arguments are promoted
to 4D with `fix1d` since CUDNN has no native 1D support.
"""
function ∇conv_data!(dx::CuArray{T}, dy::CuArray{T}, w::CuArray{T},
                     cdims::DenseConvDims; alpha=1, algo=0) where T<:CUDNNFloat
    # Dilated convolution only exists in cuDNN >= 6.
    if version() < v"6"
        all(x -> x == 1, dilation(cdims)) || error("Only dilation = 1 is supported in cuDNN version < 6")
    end

    cudnnConvolutionBackwardData(fix1d(dx), fix1d(w), fix1d(dy), fix1d(cdims), alpha=alpha, algo=algo)
    return dx
end
70
84
71
85
# Gradient of the convolution with respect to the bias: reduce `dy` into the
# preallocated `db` via CUDNN and return `db`.
∇conv_bias!(db::CuArray{T}, dy::CuArray{T}; alpha=1, beta=0) where T<:CUDNNFloat =
    (cudnnConvolutionBackwardBias(fix1d(db), fix1d(dy), alpha=alpha, beta=beta); return db)
73
87
74
88
# Max pooling of `x` into the preallocated `y` via CUDNN; returns `y`.
# mode=0 selects CUDNN_POOLING_MAX; `fix1d` promotes 1D inputs to 4D.
maxpool!(y::CuArray{T}, x::CuArray{T}, pdims::PoolDims) where T<:CUDNNFloat =
    (cudnnPoolingForward(fix1d(y), fix1d(x), fix1d(pdims); mode=0); return y)
76
90
77
91
# Backward pass of max pooling: compute the input gradient into the
# preallocated `dx` via CUDNN (mode=0, CUDNN_POOLING_MAX) and return `dx`.
∇maxpool!(dx::CuArray{T}, dy::CuArray{T}, y::CuArray{T}, x::CuArray{T},
          pdims::PoolDims) where T<:CUDNNFloat =
    (cudnnPoolingBackward(fix1d(dx), fix1d(dy), fix1d(x), fix1d(y), fix1d(pdims), mode=0); return dx)
80
94
81
95
# Mean pooling of `x` into the preallocated `y` via CUDNN; returns `y`.
# mode=1 selects average pooling; `fix1d` promotes 1D inputs to 4D.
meanpool!(y::CuArray{T}, x::CuArray{T}, pdims::PoolDims) where T<:CUDNNFloat =
    (cudnnPoolingForward(fix1d(y), fix1d(x), fix1d(pdims), mode=1); return y)
83
97
84
98
# Backward pass of mean pooling: compute the input gradient into the
# preallocated `dx` via CUDNN (mode=1, average pooling) and return `dx`.
∇meanpool!(dx::CuArray{T}, dy::CuArray{T}, y::CuArray{T}, x::CuArray{T},
           pdims::PoolDims) where T<:CUDNNFloat =
    (cudnnPoolingBackward(fix1d(dx), fix1d(dy), fix1d(x), fix1d(y), fix1d(pdims), mode=1); return dx)
0 commit comments