using System;
using System.Linq;
using JetBrains.Annotations;
using NeuralNetworkNET.APIs.Enums;
using NeuralNetworkNET.APIs.Structs;
using NeuralNetworkNET.Extensions;
using NeuralNetworkNET.Networks.Activations;
using NeuralNetworkNET.Networks.Layers.Cuda;

namespace NeuralNetworkNET.APIs
{
    /// <summary>
    /// A static class that exposes the available cuDNN network layer types
    /// </summary>
    public static class CuDnnNetworkLayers
    {
        /// <summary>
        /// Gets whether or not CUDA acceleration is supported on the current system
        /// </summary>
        public static bool IsCudaSupportAvailable
        {
            [Pure]
            get
            {
                try
                {
                    // Calling this directly could cause a crash in the <Module> loader due to the missing .dll files
                    return CuDnnSupportHelper.IsGpuAccelerationSupported();
                }
                catch (TypeInitializationException)
                {
                    // Missing .dll file
                    return false;
                }
            }
        }
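
        // Usage sketch (illustrative, not part of this file): this property lets callers
        // fall back to the CPU layer factories when no CUDA device is available. Here
        // NetworkLayers is assumed to be the CPU counterpart exposed by the same library.
        //
        //   LayerFactory dense = CuDnnNetworkLayers.IsCudaSupportAvailable
        //       ? CuDnnNetworkLayers.FullyConnected(100, ActivationFunctionType.Sigmoid)
        //       : NetworkLayers.FullyConnected(100, ActivationFunctionType.Sigmoid);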

        /// <summary>
        /// Creates a new fully connected layer with the specified number of output neurons and the given activation function
        /// </summary>
        /// <param name="neurons">The number of output neurons</param>
        /// <param name="activation">The desired activation function to use in the network layer</param>
        /// <param name="weightsMode">The desired initialization mode for the weights in the network layer</param>
        /// <param name="biasMode">The desired initialization mode to use for the layer bias values</param>
        [PublicAPI]
        [Pure, NotNull]
        public static LayerFactory FullyConnected(
            int neurons, ActivationFunctionType activation,
            WeightsInitializationMode weightsMode = WeightsInitializationMode.GlorotUniform, BiasInitializationMode biasMode = BiasInitializationMode.Zero)
            => input => new CuDnnFullyConnectedLayer(input, neurons, activation, weightsMode, biasMode);

        /// <summary>
        /// Creates a fully connected softmax output layer (used for classification problems with mutually-exclusive classes)
        /// </summary>
        /// <param name="outputs">The number of output neurons</param>
        /// <param name="weightsMode">The desired initialization mode for the weights in the network layer</param>
        /// <param name="biasMode">The desired initialization mode to use for the layer bias values</param>
        [PublicAPI]
        [Pure, NotNull]
        public static LayerFactory Softmax(
            int outputs,
            WeightsInitializationMode weightsMode = WeightsInitializationMode.GlorotUniform, BiasInitializationMode biasMode = BiasInitializationMode.Zero)
            => input => new CuDnnSoftmaxLayer(input, outputs, weightsMode, biasMode);
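
        // Usage sketch (illustrative, not part of this file): the factories above can be
        // composed into a sequential model. NetworkManager.NewSequential and
        // TensorInfo.Linear are assumed from the same library; adjust to the actual API.
        //
        //   INeuralNetwork network = NetworkManager.NewSequential(TensorInfo.Linear(784),
        //       CuDnnNetworkLayers.FullyConnected(100, ActivationFunctionType.Sigmoid),
        //       CuDnnNetworkLayers.Softmax(10));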

        /// <summary>
        /// Creates a convolutional layer with the desired number of kernels
        /// </summary>
        /// <param name="info">The info on the convolution operation to perform</param>
        /// <param name="kernel">The volume information of the kernels used in the layer</param>
        /// <param name="kernels">The number of convolution kernels to apply to the input volume</param>
        /// <param name="activation">The desired activation function to use in the network layer</param>
        /// <param name="biasMode">Indicates the desired initialization mode to use for the layer bias values</param>
        [PublicAPI]
        [Pure, NotNull]
        public static LayerFactory Convolutional(
            ConvolutionInfo info, (int X, int Y) kernel, int kernels, ActivationFunctionType activation,
            BiasInitializationMode biasMode = BiasInitializationMode.Zero)
            => input => new CuDnnConvolutionalLayer(input, info, kernel, kernels, activation, biasMode);

        /// <summary>
        /// Creates a pooling layer with the given pooling settings and activation function
        /// </summary>
        /// <param name="info">The info on the pooling operation to perform</param>
        /// <param name="activation">The desired activation function to use in the network layer</param>
        [PublicAPI]
        [Pure, NotNull]
        public static LayerFactory Pooling(PoolingInfo info, ActivationFunctionType activation) => input => new CuDnnPoolingLayer(input, info, activation);
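
        // Usage sketch (illustrative, not part of this file): a convolution + pooling
        // stack. ConvolutionInfo.Default and PoolingInfo.Default are assumed to expose
        // the standard operation settings; adjust to the actual API.
        //
        //   LayerFactory conv = CuDnnNetworkLayers.Convolutional(ConvolutionInfo.Default, (5, 5), 20, ActivationFunctionType.Identity);
        //   LayerFactory pool = CuDnnNetworkLayers.Pooling(PoolingInfo.Default, ActivationFunctionType.LeakyReLU);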

        /// <summary>
        /// Creates a new inception layer with the given features
        /// </summary>
        /// <param name="info">The info on the operations to execute inside the layer</param>
        /// <param name="biasMode">Indicates the desired initialization mode to use for the layer bias values</param>
        [PublicAPI]
        [Pure, NotNull]
        public static LayerFactory Inception(InceptionInfo info, BiasInitializationMode biasMode = BiasInitializationMode.Zero)
            => input => new CuDnnInceptionLayer(input, info, biasMode);

        #region Feature helper

        /// <summary>
        /// A private class that is used to create a new standalone type that contains the actual test method (the decoupling is needed to avoid &lt;Module&gt; loading crashes)
        /// </summary>
        private static class CuDnnSupportHelper
        {
            /// <summary>
            /// Checks whether or not the CUDA features are currently supported
            /// </summary>
            public static bool IsGpuAccelerationSupported()
            {
                try
                {
                    // CUDA test
                    Alea.Gpu gpu = Alea.Gpu.Default;
                    if (gpu == null) return false;
                    if (!Alea.cuDNN.Dnn.IsAvailable) return false; // cuDNN test
                    using (Alea.DeviceMemory<float> sample_gpu = gpu.AllocateDevice<float>(1024))
                    {
                        Alea.deviceptr<float> ptr = sample_gpu.Ptr;
                        void Kernel(int i) => ptr[i] = i;
                        Alea.Parallel.GpuExtension.For(gpu, 0, 1024, Kernel); // JIT test
                        float[] sample = Alea.Gpu.CopyToHost(sample_gpu);
                        return Enumerable.Range(0, 1024).Select<int, float>(i => i).ToArray().ContentEquals(sample);
                    }
                }
                catch
                {
                    // Missing .dll or other errors
                    return false;
                }
            }
        }

        #endregion
    }
}