
Commit 6a7f6e8

Removed unnecessary code, Unit tests fixed

1 parent: 4e0d60c

File tree

9 files changed: +126 / -1233 lines


NeuralNetwork.NET/Extensions/ConvolutionExtensions.cs

Lines changed: 0 additions & 400 deletions
This file was deleted.

NeuralNetwork.NET/Extensions/MatrixExtensions.cs

Lines changed: 3 additions & 392 deletions
Large diffs are not rendered by default.

NeuralNetwork.NET/Extensions/PoolingExtensions.cs

Lines changed: 0 additions & 239 deletions
This file was deleted.

NeuralNetwork.NET/Networks/Cost/CostFunctions.cs

Lines changed: 3 additions & 2 deletions
@@ -2,6 +2,7 @@
 using System.Threading.Tasks;
 using JetBrains.Annotations;
 using NeuralNetworkNET.APIs.Structs;
+using NeuralNetworkNET.cpuDNN;
 using NeuralNetworkNET.Extensions;
 using NeuralNetworkNET.Networks.Activations.Delegates;

@@ -161,7 +162,7 @@ public static unsafe void QuadraticCostPrime(in Tensor yHat, in Tensor y, in Ten

             // Calculate (yHat - y) * activation'(z)
             float* pyHat = yHat, py = y, pz = z;
-            unsafe void Kernel(int i)
+            void Kernel(int i)
             {
                 // Save the index and iterate for each column
                 int offset = i * w;

@@ -192,7 +193,7 @@ public static void CrossEntropyCostPrime(in Tensor yHat, in Tensor y, in Tensor
             if (h != y.Entities || w != y.Length) throw new ArgumentException("The two matrices must have the same size");

             // Calculate (yHat - y)
-            yHat.Subtract(y);
+            CpuBlas.Subtract(yHat, y, yHat);
         }

         #endregion
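
Two things change in this file. First, the redundant unsafe modifier is dropped from the Kernel local function: QuadraticCostPrime is itself declared unsafe, so the local function already sits inside an unsafe context and can use the captured pointers without restating the modifier. Second, the in-place yHat.Subtract(y) extension call becomes CpuBlas.Subtract(yHat, y, yHat), where passing yHat as both an input and the output preserves the in-place behavior. The body of CpuBlas.Subtract is not part of this diff; the sketch below is only a guess at its shape, mirroring the MultiplyElementwise kernel added to CpuBlas.cs further down.

// Hypothetical sketch only — CpuBlas.Subtract is not shown in this commit.
// Assuming it follows the same per-row parallel kernel pattern as the
// MultiplyElementwise method added below, computing y = x1 - x2:
public static unsafe void Subtract(in Tensor x1, in Tensor x2, in Tensor y)
{
    int n = x1.Entities, l = x1.Length;
    float* px1 = x1, px2 = x2, py = y;

    void Kernel(int i)
    {
        int offset = i * l; // start of row i
        for (int j = 0; j < l; j++)
        {
            int position = offset + j;
            py[position] = px1[position] - px2[position];
        }
    }
    Parallel.For(0, n, Kernel).AssertCompleted();
}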

NeuralNetwork.NET/Networks/Implementations/NeuralNetwork.cs

Lines changed: 3 additions & 2 deletions
@@ -10,6 +10,7 @@
 using NeuralNetworkNET.APIs.Interfaces;
 using NeuralNetworkNET.APIs.Enums;
 using NeuralNetworkNET.APIs.Structs;
+using NeuralNetworkNET.cpuDNN;
 using NeuralNetworkNET.Extensions;
 using NeuralNetworkNET.Helpers;
 using NeuralNetworkNET.Networks.Activations;

@@ -229,7 +230,7 @@ internal unsafe void Backpropagate(in TrainingBatch batch, float dropout, [NotNu
                 if (_Layers[i].LayerType == LayerType.FullyConnected && dropout > 0)
                 {
                     ThreadSafeRandom.NextDropoutMask(aList[i].Entities, aList[i].Length, dropout, out dropoutMasks[i]);
-                    aList[i].InPlaceHadamardProduct(dropoutMasks[i]);
+                    CpuBlas.MultiplyElementwise(aList[i], dropoutMasks[i], aList[i]);
                 }
             }

@@ -252,7 +253,7 @@ internal unsafe void Backpropagate(in TrainingBatch batch, float dropout, [NotNu
                 * Multiply the previous delta with the transposed weights of the following layer
                 * Compute d(l), the Hadamard product of z'(l) and delta(l + 1) * W(l + 1)T */
                _Layers[l + 1].Backpropagate(*deltas[l + 1], zList[l], _Layers[l].ActivationFunctions.ActivationPrime);
-               if (!dropoutMasks[l].IsNull) zList[l].InPlaceHadamardProduct(dropoutMasks[l]);
+               if (!dropoutMasks[l].IsNull) CpuBlas.MultiplyElementwise(zList[l], dropoutMasks[l], zList[l]);
                deltas[l] = zList + l;
            }
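
Both call sites swap the removed InPlaceHadamardProduct extension for the new CpuBlas.MultiplyElementwise, passing the same tensor as the first operand and the output so the update stays in place. In the dropout branch, the effect is to zero out (and, in the usual inverted-dropout formulation, rescale) a random subset of activations; whether ThreadSafeRandom.NextDropoutMask scales the kept units by 1 / (1 - dropout) is an assumption here, not something this diff shows. A standalone sketch with plain arrays:

// Standalone illustration with plain float[] arrays, not the repository's
// Tensor type. Assumes an inverted-dropout mask: kept entries are scaled by
// 1 / (1 - dropout) so the expected activation is unchanged at inference time.
using System;

static class DropoutDemo
{
    static void ApplyDropout(float[] a, float dropout, Random rng)
    {
        float scale = 1f / (1f - dropout);
        for (int i = 0; i < a.Length; i++)
        {
            // Build the mask entry and apply the Hadamard product in place,
            // the same update shape as CpuBlas.MultiplyElementwise(a, mask, a)
            float mask = rng.NextDouble() < dropout ? 0f : scale;
            a[i] *= mask;
        }
    }
}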

NeuralNetwork.NET/Networks/Layers/Cuda/CuDnnFullyConnectedLayer.cs

Lines changed: 3 additions & 1 deletion
@@ -4,6 +4,7 @@
 using NeuralNetworkNET.APIs.Enums;
 using NeuralNetworkNET.APIs.Interfaces;
 using NeuralNetworkNET.APIs.Structs;
+using NeuralNetworkNET.cpuDNN;
 using NeuralNetworkNET.Extensions;
 using NeuralNetworkNET.Networks.Activations;
 using NeuralNetworkNET.Networks.Activations.Delegates;

@@ -68,7 +69,8 @@ public override void ComputeGradient(in Tensor a, in Tensor delta, out Tensor dJ
                 DnnInstance.FullyConnectedBackwardFilter(a.Entities, a.Length, delta.Length, a_gpu.Ptr, delta_gpu.Ptr, w_gpu.Ptr);
                 w_gpu.CopyToHost(1, Weights.Length, out dJdw);
             }
-            delta.CompressVertically(out dJdb); // Doing this on CPU is generally faster than launching the kernels
+            Tensor.New(1, Biases.Length, out dJdb);
+            CpuDnn.FullyConnectedBackwardBias(delta, dJdb); // Doing this on CPU is generally faster than launching the kernels
         }

         #endregion
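
The deleted CompressVertically extension summed the delta matrix into a single row; the replacement allocates dJdb explicitly and hands the reduction to CpuDnn.FullyConnectedBackwardBias, whose body is not part of this diff. Mathematically, the bias gradient of a fully connected layer is the column-wise sum of the deltas over the batch, dJdb[j] = Σᵢ delta[i, j]; the sketch below illustrates that reduction with plain arrays and is only an assumption about the actual implementation.

// Hypothetical sketch only — CpuDnn.FullyConnectedBackwardBias is not shown
// in this commit. The bias gradient is the column-wise sum of delta, with
// 'entities' rows (batch samples) and 'length' columns (output units).
static void FullyConnectedBackwardBias(float[] delta, int entities, int length, float[] dJdb)
{
    for (int j = 0; j < length; j++) dJdb[j] = 0f;
    for (int i = 0; i < entities; i++)
    {
        int offset = i * length;
        for (int j = 0; j < length; j++)
            dJdb[j] += delta[offset + j];
    }
}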

NeuralNetwork.NET/cpuDNN/CpuBlas.cs

Lines changed: 29 additions & 1 deletion
@@ -65,6 +65,35 @@ void Kernel(int i)
             Parallel.For(0, n, Kernel).AssertCompleted();
         }

+        /// <summary>
+        /// Performs the element-wise multiplication (Hadamard product) between two <see cref="Tensor"/> instances
+        /// </summary>
+        /// <param name="x1">The first <see cref="Tensor"/></param>
+        /// <param name="x2">The second <see cref="Tensor"/></param>
+        /// <param name="y">The resulting <see cref="Tensor"/></param>
+        public static unsafe void MultiplyElementwise(in Tensor x1, in Tensor x2, in Tensor y)
+        {
+            // Check
+            int
+                n = x1.Entities,
+                l = x1.Length;
+            if (!x1.MatchShape(x2)) throw new ArgumentException("The two input tensors must be of equal shape");
+            if (!x1.MatchShape(y)) throw new ArgumentException("The output tensor must have the same shape as the input tensors", nameof(y));
+            float* px1 = x1, px2 = x2, py = y;
+
+            // Loop in parallel
+            void Kernel(int i)
+            {
+                int offset = i * l;
+                for (int j = 0; j < l; j++)
+                {
+                    int position = offset + j;
+                    py[position] = px1[position] * px2[position];
+                }
+            }
+            Parallel.For(0, n, Kernel).AssertCompleted();
+        }
+
         /// <summary>
         /// Subtracts two <see cref="Tensor"/> instances, element wise
         /// </summary>

@@ -93,7 +122,6 @@ void Kernel(int i)
             Parallel.For(0, n, Kernel).AssertCompleted();
         }

-
         /// <summary>
         /// Compresses a <see cref="Tensor"/> into a row by summing the components column by column
         /// </summary>
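
Nothing in the new method prevents y from aliasing one of the inputs, which is exactly how the call sites above recover the old in-place Hadamard behavior. A short usage sketch, borrowing the Tensor.New(entities, length, out tensor) allocator seen in the CuDnnFullyConnectedLayer.cs hunk:

// Usage sketch: two equally shaped tensors, multiplied element-wise in place
// by passing the same tensor as input x1 and output y.
Tensor.New(128, 64, out Tensor a);    // e.g. a batch of activations
Tensor.New(128, 64, out Tensor mask); // e.g. a dropout mask of the same shape
// ... fill a and mask ...
CpuBlas.MultiplyElementwise(a, mask, a); // a now holds a ∘ mask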
