Skip to content

Commit 369b1c8

Browse files
authored
Merge pull request #53 from Sergio0694/dev
Dev
2 parents 2d56d11 + 8f636c0 commit 369b1c8

File tree

65 files changed

+1766
-1777
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

65 files changed

+1766
-1777
lines changed

NeuralNetwork.NET.Cuda/APIs/CuDnnNetworkLayers.cs

-84
This file was deleted.

NeuralNetwork.NET.Cuda/APIs/CuDnnNetworkLayersDeserializer.cs

-39
This file was deleted.

NeuralNetwork.NET.Cuda/AssemblyInfo.cs

-4
This file was deleted.

NeuralNetwork.NET.Cuda/NeuralNetwork.NET.Cuda.csproj

-21
This file was deleted.
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,136 @@
1+
using System;
using System.Linq;
using JetBrains.Annotations;
using NeuralNetworkNET.APIs.Enums;
using NeuralNetworkNET.APIs.Structs;
using NeuralNetworkNET.Extensions;
using NeuralNetworkNET.Networks.Activations;
using NeuralNetworkNET.Networks.Layers.Cuda;

namespace NeuralNetworkNET.APIs
{
    /// <summary>
    /// A static class that exposes the available cuDNN network layer types
    /// </summary>
    public static class CuDnnNetworkLayers
    {
        /// <summary>
        /// Gets whether or not the Cuda acceleration is supported on the current system
        /// </summary>
        public static bool IsCudaSupportAvailable
        {
            [Pure]
            get
            {
                try
                {
                    // Calling the check directly from here could cause a crash in the <Module>
                    // loader due to the missing .dll files, so the test is delegated to a
                    // separate nested type that is only initialized on demand
                    return CuDnnSupportHelper.IsGpuAccelerationSupported();
                }
                catch (TypeInitializationException)
                {
                    // Missing .dll file
                    return false;
                }
            }
        }

        /// <summary>
        /// Creates a new fully connected layer with the specified number of input and output neurons, and the given activation function
        /// </summary>
        /// <param name="neurons">The number of output neurons</param>
        /// <param name="activation">The desired activation function to use in the network layer</param>
        /// <param name="weightsMode">The desired initialization mode for the weights in the network layer</param>
        /// <param name="biasMode">The desired initialization mode to use for the layer bias values</param>
        [PublicAPI]
        [Pure, NotNull]
        public static LayerFactory FullyConnected(
            int neurons, ActivationFunctionType activation,
            WeightsInitializationMode weightsMode = WeightsInitializationMode.GlorotUniform, BiasInitializationMode biasMode = BiasInitializationMode.Zero)
            => input => new CuDnnFullyConnectedLayer(input, neurons, activation, weightsMode, biasMode);

        /// <summary>
        /// Creates a fully connected softmax output layer (used for classification problems with mutually-exclusive classes)
        /// </summary>
        /// <param name="outputs">The number of output neurons</param>
        /// <param name="weightsMode">The desired initialization mode for the weights in the network layer</param>
        /// <param name="biasMode">The desired initialization mode to use for the layer bias values</param>
        [PublicAPI]
        [Pure, NotNull]
        public static LayerFactory Softmax(
            int outputs,
            WeightsInitializationMode weightsMode = WeightsInitializationMode.GlorotUniform, BiasInitializationMode biasMode = BiasInitializationMode.Zero)
            => input => new CuDnnSoftmaxLayer(input, outputs, weightsMode, biasMode);

        /// <summary>
        /// Creates a convolutional layer with the desired number of kernels
        /// </summary>
        /// <param name="info">The info on the convolution operation to perform</param>
        /// <param name="kernel">The volume information of the kernels used in the layer</param>
        /// <param name="kernels">The number of convolution kernels to apply to the input volume</param>
        /// <param name="activation">The desired activation function to use in the network layer</param>
        /// <param name="biasMode">Indicates the desired initialization mode to use for the layer bias values</param>
        [PublicAPI]
        [Pure, NotNull]
        public static LayerFactory Convolutional(
            ConvolutionInfo info, (int X, int Y) kernel, int kernels, ActivationFunctionType activation,
            BiasInitializationMode biasMode = BiasInitializationMode.Zero)
            => input => new CuDnnConvolutionalLayer(input, info, kernel, kernels, activation, biasMode);

        /// <summary>
        /// Creates a pooling layer with a window of size 2 and a stride of 2
        /// </summary>
        /// <param name="info">The info on the pooling operation to perform</param>
        /// <param name="activation">The desired activation function to use in the network layer</param>
        [PublicAPI]
        [Pure, NotNull]
        public static LayerFactory Pooling(PoolingInfo info, ActivationFunctionType activation) => input => new CuDnnPoolingLayer(input, info, activation);

        /// <summary>
        /// Creates a new inception layer with the given features
        /// </summary>
        /// <param name="info">The info on the operations to execute inside the layer</param>
        /// <param name="biasMode">Indicates the desired initialization mode to use for the layer bias values</param>
        [PublicAPI]
        [Pure, NotNull]
        public static LayerFactory Inception(InceptionInfo info, BiasInitializationMode biasMode = BiasInitializationMode.Zero)
            => input => new CuDnnInceptionLayer(input, info, biasMode);

        #region Feature helper

        /// <summary>
        /// A private class that is used to create a new standalone type that contains the actual test method
        /// (decoupling is needed to avoid &lt;Module&gt; loading crashes)
        /// </summary>
        private static class CuDnnSupportHelper
        {
            /// <summary>
            /// Checks whether or not the Cuda features are currently supported
            /// </summary>
            public static bool IsGpuAccelerationSupported()
            {
                try
                {
                    // CUDA test
                    Alea.Gpu gpu = Alea.Gpu.Default;
                    if (gpu == null) return false;
                    if (!Alea.cuDNN.Dnn.IsAvailable) return false; // cuDNN
                    using (Alea.DeviceMemory<float> sample_gpu = gpu.AllocateDevice<float>(1024))
                    {
                        Alea.deviceptr<float> ptr = sample_gpu.Ptr;
                        void Kernel(int i) => ptr[i] = i;
                        Alea.Parallel.GpuExtension.For(gpu, 0, 1024, Kernel); // JIT test
                        float[] sample = Alea.Gpu.CopyToHost(sample_gpu);
                        return Enumerable.Range(0, 1024).Select<int, float>(i => i).ToArray().ContentEquals(sample);
                    }
                }
                catch
                {
                    // Missing .dll or other errors: best-effort probe, so any failure
                    // just reports Cuda as unavailable instead of crashing the caller
                    return false;
                }
            }
        }

        #endregion
    }
}

NeuralNetwork.NET/APIs/Delegates/LayerDeserializer.cs

-15
This file was deleted.
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
namespace NeuralNetworkNET.APIs.Enums
{
    /// <summary>
    /// Indicates the preferred type of network layers to serialize, whenever possible
    /// </summary>
    public enum LayersLoadingPreference
    {
        /// <summary>
        /// Prefer the CPU-based implementation of each network layer
        /// </summary>
        Cpu,

        /// <summary>
        /// Prefer the Cuda (GPU-based) implementation of each network layer, when available
        /// </summary>
        Cuda
    }
}

NeuralNetwork.NET/APIs/Interfaces/INeuralNetwork.cs

+5
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,11 @@ public interface INeuralNetwork : IEquatable<INeuralNetwork>, IClonable<INeuralN
2929
[NotNull, ItemNotNull]
3030
IReadOnlyList<INetworkLayer> Layers { get; }
3131

32+
/// <summary>
33+
/// Gets the total number of parameters in the current network
34+
/// </summary>
35+
int Parameters { get; }
36+
3237
#endregion
3338

3439
#region Methods

0 commit comments

Comments
 (0)