@@ -15,6 +15,7 @@ Implementation of Scale layer.
 #include "../op_inf_engine.hpp"
 #include "../ie_ngraph.hpp"
 
+#include <opencv2/imgproc.hpp>
 #include <opencv2/dnn/shape_utils.hpp>
 
 namespace cv
@@ -324,7 +325,7 @@ class DataAugmentationLayerImpl CV_FINAL : public DataAugmentationLayer
                          std::vector<MatShape> &internals) const CV_OVERRIDE
     {
         CV_Assert_N(inputs.size() == 1, blobs.size() == 3);
-        CV_Assert_N(blobs[0].total() == 1, blobs[1].total() == total(inputs[0], 1),
+        CV_Assert_N(blobs[0].total() == 1,
                     blobs[2].total() == inputs[0][1]);
 
         outputs.assign(1, inputs[0]);
@@ -347,15 +348,20 @@ class DataAugmentationLayerImpl CV_FINAL : public DataAugmentationLayer
         float* outData = outputs[0].ptr<float>();
 
         Mat data_mean_cpu = blobs[1].clone();
+        Mat mean_resize = Mat(inputs[0].size[3], inputs[0].size[2], CV_32FC3);
+        Mat mean_3d = Mat(data_mean_cpu.size[3], data_mean_cpu.size[2], CV_32FC3, data_mean_cpu.ptr<float>(0));
+        resize(mean_3d, mean_resize, Size(inputs[0].size[3], inputs[0].size[2]));
+        int new_size[] = {1, mean_resize.channels(), mean_resize.cols, mean_resize.rows};
+        Mat data_mean_cpu_resize = mean_resize.reshape(1, *new_size);
         Mat data_mean_per_channel_cpu = blobs[2].clone();
 
-        const int numWeights = data_mean_cpu.total();
+        const int numWeights = data_mean_cpu_resize.total();
         CV_Assert(numWeights != 0);
 
         ++num_iter;
         if (num_iter <= recompute_mean)
         {
-            data_mean_cpu *= (num_iter - 1);
+            data_mean_cpu_resize *= (num_iter - 1);
             const int batch = inputs[0].size[0];
             float alpha = 1.0 / batch;
 
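The hunk above resizes the stored mean (blobs[1]) to the input's spatial size before it is used: the 1x3xHxW blob data is viewed as a 3-channel image, passed through cv::resize, and reshaped back into a blob header. As a point of comparison, a minimal standalone sketch that resizes a planar NCHW mean blob one channel plane at a time could look like the following (the helper name resizeMeanPlanar and its parameters are hypothetical, not part of the patch):

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// Hypothetical helper, not part of the patch: resize a planar 1xCxHxW CV_32F
// mean blob to dstH x dstW one channel plane at a time, so the output keeps
// the planar NCHW layout.
static cv::Mat resizeMeanPlanar(const cv::Mat& meanBlob, int dstH, int dstW)
{
    CV_Assert(meanBlob.dims == 4 && meanBlob.size[0] == 1 && meanBlob.type() == CV_32F);
    const int C = meanBlob.size[1], H = meanBlob.size[2], W = meanBlob.size[3];

    int dstShape[] = {1, C, dstH, dstW};
    cv::Mat dst(4, dstShape, CV_32F);

    for (int c = 0; c < C; ++c)
    {
        // 2-D single-channel views over the c-th plane of source and destination.
        cv::Mat srcPlane(H, W, CV_32F, const_cast<float*>(meanBlob.ptr<float>()) + (size_t)c * H * W);
        cv::Mat dstPlane(dstH, dstW, CV_32F, dst.ptr<float>() + (size_t)c * dstH * dstW);
        cv::resize(srcPlane, dstPlane, cv::Size(dstW, dstH));  // writes into dstPlane's buffer
    }
    return dst;
}

Resizing each plane separately keeps the output in the same planar NCHW order as the source blob.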
@@ -364,15 +370,15 @@ class DataAugmentationLayerImpl CV_FINAL : public DataAugmentationLayer
                 Mat inpSlice(1, numWeights, CV_32F, inpData);
                 inpSlice = alpha * inpSlice;
 
-                add(data_mean_cpu.reshape(1, 1), inpSlice, data_mean_cpu.reshape(1, 1));
+                add(data_mean_cpu_resize.reshape(1, 1), inpSlice, data_mean_cpu_resize.reshape(1, 1));
                 inpData += numWeights;
             }
-            data_mean_cpu *= (1.0 / num_iter);
+            data_mean_cpu_resize *= (1.0 / num_iter);
 
-            int newsize[] = {blobs[1].size[1], (int)blobs[1].total(2)};
-            reduce(data_mean_cpu.reshape(1, 2, &newsize[0]), data_mean_per_channel_cpu, 1, REDUCE_SUM, CV_32F);
+            int newsize[] = {inputs[0].size[1], (int)inputs[0].total(2)};
+            reduce(data_mean_cpu_resize.reshape(1, 2, &newsize[0]), data_mean_per_channel_cpu, 1, REDUCE_SUM, CV_32F);
 
-            int area = blobs[1].total(2);
+            int area = inputs[0].total(2);
             data_mean_per_channel_cpu *= (1.0 / area);
         }
 
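Once the running mean has been updated, the hunk above derives the per-channel mean by reshaping the resized mean to C x (H*W), summing each row with REDUCE_SUM, and dividing by the spatial area. A self-contained sketch of that reduction under the same assumptions (continuous CV_32F blob; the helper name perChannelMean is hypothetical):

#include <opencv2/core.hpp>

// Hypothetical helper, not part of the patch: per-channel mean of a continuous
// 1xCxHxW CV_32F blob, computed as a row-wise REDUCE_SUM over a C x (H*W) view
// followed by division by the spatial area.
static cv::Mat perChannelMean(const cv::Mat& blob)
{
    CV_Assert(blob.dims == 4 && blob.size[0] == 1 && blob.type() == CV_32F && blob.isContinuous());
    const int C = blob.size[1];
    const int area = blob.size[2] * blob.size[3];

    int shape[] = {C, area};
    cv::Mat rows2d = blob.reshape(1, 2, shape);                  // C x (H*W) view, no copy

    cv::Mat channelSum;
    cv::reduce(rows2d, channelSum, 1, cv::REDUCE_SUM, CV_32F);   // C x 1 column of sums
    return channelSum / area;                                    // mean per channel
}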
@@ -387,7 +393,7 @@ class DataAugmentationLayerImpl CV_FINAL : public DataAugmentationLayer
             Mat inpSlice(1, numWeights, CV_32F, inpData);
             Mat outSlice(1, numWeights, CV_32F, outData);
 
-            add(inpSlice, (-1) * data_mean_cpu, outSlice);
+            add(inpSlice, (-1) * data_mean_cpu_resize, outSlice);
             inpData += numWeights;
             outData += numWeights;
         }