Skip to content

Commit 786f1bb

Browse files
authored
Merge pull request #51 from Sergio0694/feature_unified-library
Feature unified library
2 parents b786ab5 + e6889d1 commit 786f1bb

26 files changed

+94
-23
lines changed

NeuralNetwork.NET.Cuda/APIs/CuDnnNetworkLayers.cs renamed to NeuralNetwork.NET/APIs/CuDnnNetworkLayers.cs

Lines changed: 65 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,10 @@
1-
using JetBrains.Annotations;
1+
using System;
2+
using System.Linq;
3+
using JetBrains.Annotations;
24
using NeuralNetworkNET.APIs.Enums;
35
using NeuralNetworkNET.APIs.Structs;
46
using NeuralNetworkNET.Cuda.Layers;
7+
using NeuralNetworkNET.Extensions;
58
using NeuralNetworkNET.Networks.Activations;
69

710
namespace NeuralNetworkNET.APIs
@@ -12,7 +15,28 @@ namespace NeuralNetworkNET.APIs
1215
public static class CuDnnNetworkLayers
1316
{
1417
/// <summary>
15-
/// Creates a new fully connected layer with the specified number of output neurons, and the given activation function
18+
/// Gets whether or not the Cuda acceleration is supported on the current system
19+
/// </summary>
20+
public static bool IsCudaSupportAvailable
21+
{
22+
[Pure]
23+
get
24+
{
25+
try
26+
{
27+
// Calling this directly could cause a crash in the <Module> loader due to the missing .dll files
28+
return CuDnnSupportHelper.IsGpuAccelerationSupported();
29+
}
30+
catch (TypeInitializationException)
31+
{
32+
// Missing .dll file
33+
return false;
34+
}
35+
}
36+
}
37+
38+
/// <summary>
39+
/// Creates a new fully connected layer with the specified number of input and output neurons, and the given activation function
1640
/// </summary>
1741
/// <param name="neurons">The number of output neurons</param>
1842
/// <param name="activation">The desired activation function to use in the network layer</param>
@@ -71,5 +95,44 @@ public static LayerFactory Convolutional(
7195
[Pure, NotNull]
7296
public static LayerFactory Inception(InceptionInfo info, BiasInitializationMode biasMode = BiasInitializationMode.Zero)
7397
=> input => new CuDnnInceptionLayer(input, info, biasMode);
98+
99+
#region Feature helper
100+
101+
/// <summary>
102+
/// A private class that is used to create a new standalone type that contains the actual test method (decoupling is needed to avoid &lt;Module&gt; loading crashes)
103+
/// </summary>
104+
private static class CuDnnSupportHelper
105+
{
106+
/// <summary>
107+
/// Checks whether or not the Cuda features are currently supported
108+
/// </summary>
109+
public static bool IsGpuAccelerationSupported()
110+
{
111+
try
112+
{
113+
// CUDA test
114+
using (Alea.Gpu gpu = Alea.Gpu.Default)
115+
{
116+
if (gpu == null) return false;
117+
if (!Alea.cuDNN.Dnn.IsAvailable) return false; // cuDNN
118+
using (Alea.DeviceMemory<float> sample_gpu = gpu.AllocateDevice<float>(1024))
119+
{
120+
Alea.deviceptr<float> ptr = sample_gpu.Ptr;
121+
void Kernel(int i) => ptr[i] = i;
122+
Alea.Parallel.GpuExtension.For(gpu, 0, 1024, Kernel); // JIT test
123+
float[] sample = Alea.Gpu.CopyToHost(sample_gpu);
124+
return Enumerable.Range(0, 1024).Select<int, float>(i => i).ToArray().ContentEquals(sample);
125+
}
126+
}
127+
}
128+
catch
129+
{
130+
// Missing .dll or other errors
131+
return false;
132+
}
133+
}
134+
}
135+
136+
#endregion
74137
}
75138
}

0 commit comments

Comments
 (0)