1
- using JetBrains . Annotations ;
1
+ using System ;
2
+ using System . Linq ;
3
+ using JetBrains . Annotations ;
2
4
using NeuralNetworkNET . APIs . Enums ;
3
5
using NeuralNetworkNET . APIs . Interfaces ;
4
6
using NeuralNetworkNET . APIs . Structs ;
5
7
using NeuralNetworkNET . Cuda . Layers ;
8
+ using NeuralNetworkNET . Extensions ;
6
9
using NeuralNetworkNET . Networks . Activations ;
7
10
8
11
namespace NeuralNetworkNET . APIs
@@ -12,6 +15,27 @@ namespace NeuralNetworkNET.APIs
12
15
/// </summary>
13
16
public static class CuDnnNetworkLayers
14
17
{
18
+ /// <summary>
19
+ /// Gets whether or not the Cuda acceleration is supported on the current system
20
+ /// </summary>
21
+ public static bool IsCudaSupportAvailable
22
+ {
23
+ [ Pure ]
24
+ get
25
+ {
26
+ try
27
+ {
28
+ // Calling this directly would could a crash in the <Module> loader due to the missing .dll files
29
+ return CuDnnSupportHelper . IsGpuAccelerationSupported ( ) ;
30
+ }
31
+ catch ( TypeInitializationException )
32
+ {
33
+ // Missing .dll file
34
+ return false ;
35
+ }
36
+ }
37
+ }
38
+
15
39
/// <summary>
16
40
/// Creates a new fully connected layer with the specified number of input and output neurons, and the given activation function
17
41
/// </summary>
@@ -80,5 +104,44 @@ public static INetworkLayer Inception(
80
104
in TensorInfo input , in InceptionInfo info ,
81
105
BiasInitializationMode biasMode = BiasInitializationMode . Zero )
82
106
=> new CuDnnInceptionLayer ( input , info , biasMode ) ;
107
+
108
+ #region Feature helper
109
+
110
+ /// <summary>
111
+ /// A private class that is used to create a new standalone type that contains the actual test method (decoupling is needed to <Module> loading crashes)
112
+ /// </summary>
113
+ private static class CuDnnSupportHelper
114
+ {
115
+ /// <summary>
116
+ /// Checks whether or not the Cuda features are currently supported
117
+ /// </summary>
118
+ public static bool IsGpuAccelerationSupported ( )
119
+ {
120
+ try
121
+ {
122
+ // CUDA test
123
+ using ( Alea . Gpu gpu = Alea . Gpu . Default )
124
+ {
125
+ if ( gpu == null ) return false ;
126
+ if ( ! Alea . cuDNN . Dnn . IsAvailable ) return false ; // cuDNN
127
+ using ( Alea . DeviceMemory < float > sample_gpu = gpu . AllocateDevice < float > ( 1024 ) )
128
+ {
129
+ Alea . deviceptr < float > ptr = sample_gpu . Ptr ;
130
+ void Kernel ( int i ) => ptr [ i ] = i ;
131
+ Alea . Parallel . GpuExtension . For ( gpu , 0 , 1024 , Kernel ) ; // JIT test
132
+ float [ ] sample = Alea . Gpu . CopyToHost ( sample_gpu ) ;
133
+ return Enumerable . Range ( 0 , 1024 ) . Select < int , float > ( i => i ) . ToArray ( ) . ContentEquals ( sample ) ;
134
+ }
135
+ }
136
+ }
137
+ catch
138
+ {
139
+ // Missing .dll or other errors
140
+ return false ;
141
+ }
142
+ }
143
+ }
144
+
145
+ #endregion
83
146
}
84
147
}
0 commit comments