-using JetBrains.Annotations;
+using System;
+using System.Linq;
+using JetBrains.Annotations;
 using NeuralNetworkNET.APIs.Enums;
 using NeuralNetworkNET.APIs.Structs;
 using NeuralNetworkNET.Cuda.Layers;
+using NeuralNetworkNET.Extensions;
 using NeuralNetworkNET.Networks.Activations;

 namespace NeuralNetworkNET.APIs
@@ -12,7 +15,28 @@ namespace NeuralNetworkNET.APIs
     public static class CuDnnNetworkLayers
     {
         /// <summary>
-        /// Creates a new fully connected layer with the specified number of output neurons, and the given activation function
+        /// Gets whether or not the Cuda acceleration is supported on the current system
+        /// </summary>
+        public static bool IsCudaSupportAvailable
+        {
+            [Pure]
+            get
+            {
+                try
+                {
+                    // Calling this directly would cause a crash in the <Module> loader due to the missing .dll files
+                    return CuDnnSupportHelper.IsGpuAccelerationSupported();
+                }
+                catch (TypeInitializationException)
+                {
+                    // Missing .dll file
+                    return false;
+                }
+            }
+        }
+
+        /// <summary>
+        /// Creates a new fully connected layer with the specified number of input and output neurons, and the given activation function
         /// </summary>
         /// <param name="neurons">The number of output neurons</param>
         /// <param name="activation">The desired activation function to use in the network layer</param>
@@ -71,5 +95,44 @@ public static LayerFactory Convolutional(
         [Pure, NotNull]
         public static LayerFactory Inception(InceptionInfo info, BiasInitializationMode biasMode = BiasInitializationMode.Zero)
             => input => new CuDnnInceptionLayer(input, info, biasMode);
+
+        #region Feature helper
+
+        /// <summary>
+        /// A private class that is used to create a standalone type containing the actual test method (decoupling is needed to avoid <Module> loading crashes)
+        /// </summary>
+        private static class CuDnnSupportHelper
+        {
+            /// <summary>
+            /// Checks whether or not the Cuda features are currently supported
+            /// </summary>
+            public static bool IsGpuAccelerationSupported()
+            {
+                try
+                {
+                    // CUDA test
+                    using (Alea.Gpu gpu = Alea.Gpu.Default)
+                    {
+                        if (gpu == null) return false;
+                        if (!Alea.cuDNN.Dnn.IsAvailable) return false; // cuDNN
+                        using (Alea.DeviceMemory<float> sample_gpu = gpu.AllocateDevice<float>(1024))
+                        {
+                            Alea.deviceptr<float> ptr = sample_gpu.Ptr;
+                            void Kernel(int i) => ptr[i] = i;
+                            Alea.Parallel.GpuExtension.For(gpu, 0, 1024, Kernel); // JIT test
+                            float[] sample = Alea.Gpu.CopyToHost(sample_gpu);
+                            return Enumerable.Range(0, 1024).Select<int, float>(i => i).ToArray().ContentEquals(sample);
+                        }
+                    }
+                }
+                catch
+                {
+                    // Missing .dll or other errors
+                    return false;
+                }
+            }
+        }
+
+        #endregion
     }
 }