Commit a2bbfa1 (1 parent: dad2247)

Enable some tests for Inference Engine 2019R1

File tree: 9 files changed, +70 -98 lines

    modules/dnn/src/dnn.cpp
    modules/dnn/src/layers/elementwise_layers.cpp
    modules/dnn/src/layers/flatten_layer.cpp
    modules/dnn/src/layers/padding_layer.cpp
    modules/dnn/src/layers/pooling_layer.cpp
    modules/dnn/test/test_backends.cpp
    modules/dnn/test/test_halide_layers.cpp
    modules/dnn/test/test_onnx_importer.cpp
    modules/dnn/test/test_tf_importer.cpp

modules/dnn/src/dnn.cpp

Lines changed: 1 addition & 26 deletions

@@ -1160,12 +1160,6 @@ struct Net::Impl
             continue;
 
         currLayer->unsetAttached();
-
-        Ptr<PoolingLayer> poolingLayer = currLayer.dynamicCast<PoolingLayer>();
-        if( !poolingLayer.empty() )
-        {
-            poolingLayer->computeMaxIdx = true;
-        }
     }
 
     layersTimings.clear();
@@ -2082,30 +2076,11 @@ struct Net::Impl
                 }
             }
         }
-        // the optimization #2. if there is no layer that takes max pooling layer's computed
-        // max indices (and only some semantical segmentation networks might need this;
-        // many others only take the maximum values), then we switch the max pooling
-        // layer to the faster operating mode.
-        Ptr<PoolingLayer> poolingLayer = ld.layerInstance.dynamicCast<PoolingLayer>();
-        if( !poolingLayer.empty() && !ld.consumers.empty() )
-        {
-            size_t i = 0, nconsumers = ld.consumers.size();
-            for( ; i < nconsumers; i++ )
-                if( ld.consumers[i].oid > 0 )
-                    break;
-            // if there is no layer that takes the second output pin of the pooling layer
-            // on input then we don't need to compute the indices
-            if( i >= nconsumers )
-            {
-                poolingLayer->computeMaxIdx = false;
-                printf_(("\tsimplified pooling layer %s\n", poolingLayer->name.c_str()));
-            }
-        }
 
         if (preferableBackend != DNN_BACKEND_OPENCV)
             continue;  // Go to the next layer.
 
-        // the optimization #3. if there is concat layer that concatenates channels
+        // the optimization #2. if there is concat layer that concatenates channels
         // from the inputs together (i.e. axis == 1) then we make the inputs of
         // the concat layer to write to the concatenation output buffer
         // (and so we eliminate the concatenation layer, because the channels

modules/dnn/src/layers/elementwise_layers.cpp

Lines changed: 10 additions & 4 deletions

@@ -256,8 +256,11 @@ struct ReLUFunctor
 
     bool supportBackend(int backendId, int)
     {
-        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
-               backendId == DNN_BACKEND_INFERENCE_ENGINE;
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+            return slope >= 0 || !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1);
+#endif
+        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
     }
 
     void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
@@ -741,8 +744,11 @@ struct AbsValFunctor
 
     bool supportBackend(int backendId, int)
    {
-        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
-               backendId == DNN_BACKEND_INFERENCE_ENGINE;
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+            return !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1);
+#endif
+        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
     }
 
     void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
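
Both functors now reject the Inference Engine backend only for the 2019R1 release (ReLU additionally only when slope < 0, i.e. leaky ReLU). The INF_ENGINE_VER_MAJOR_EQ check compares release numbers; below is a sketch of how such macros can be defined, assuming the YYYYRR0000 integer encoding used by the version constants (e.g. 2019010000 for 2019 R1). The real definitions live in op_inf_engine.hpp:

    // Assumed encoding: 2018050000 = 2018 R5, 2019010000 = 2019 R1, ...
    #define INF_ENGINE_RELEASE_2019R1 2019010000

    #ifndef INF_ENGINE_RELEASE              // the release OpenCV was built against
    #define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2019R1
    #endif

    // Compare only the "major" YYYYRR part, ignoring the trailing build digits.
    #define INF_ENGINE_VER_MAJOR_EQ(ver) (((INF_ENGINE_RELEASE) / 10000) == ((ver) / 10000))
    #define INF_ENGINE_VER_MAJOR_GE(ver) (((INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))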

modules/dnn/src/layers/flatten_layer.cpp

Lines changed: 2 additions & 2 deletions

@@ -159,8 +159,8 @@ class FlattenLayerImpl CV_FINAL : public FlattenLayer
         InferenceEngine::Builder::Layer ieLayer(name);
         ieLayer.setName(name);
         ieLayer.setType("Flatten");
-        ieLayer.getParameters()["axis"] = _startAxis;
-        ieLayer.getParameters()["end_axis"] = _endAxis;
+        ieLayer.getParameters()["axis"] = (size_t)_startAxis;
+        ieLayer.getParameters()["end_axis"] = _endAxis;  // Do not cast to size_t because it might be negative.
         ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
         ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
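
The cast matters because the IE builder stores parameters as typed values: "axis" is always non-negative, but "end_axis" may legitimately be -1 ("flatten up to the last axis"), and forcing a negative int through size_t silently produces a huge positive value. A two-line demonstration of the pitfall:

    #include <cstddef>
    #include <cstdio>

    int main()
    {
        int endAxis = -1;  // "flatten up to the last axis"
        // Prints 18446744073709551615 on a 64-bit platform - not a valid axis.
        std::printf("%zu\n", (std::size_t)endAxis);
    }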

modules/dnn/src/layers/padding_layer.cpp

Lines changed: 42 additions & 7 deletions

@@ -12,6 +12,7 @@ Implementation of padding layer, which adds paddings to input blob.
 #include "../precomp.hpp"
 #include "layers_common.hpp"
 #include "../op_halide.hpp"
+#include "../op_inf_engine.hpp"
 #include <vector>
 
 namespace cv
@@ -68,28 +69,36 @@ class PaddingLayerImpl CV_FINAL : public PaddingLayer
 
         // Compute dstRanges.
         const MatSize& inpShape = inputs[0].size;
-        dstRanges.resize(paddings.size());
 
-        int offset = 0;
         if (inputDims != -1 && inputs[0].dims != inputDims)
         {
-            dstRanges.insert(dstRanges.begin(), Range::all());
-            offset = 1;
+            paddings.insert(paddings.begin(), std::make_pair(0, 0));
        }
 
+        dstRanges.resize(paddings.size());
         for (int i = 0; i < paddings.size(); ++i)
         {
-            dstRanges[offset + i].start = paddings[i].first;
-            dstRanges[offset + i].end = paddings[i].first + inpShape[offset + i];
+            dstRanges[i].start = paddings[i].first;
+            dstRanges[i].end = paddings[i].first + inpShape[i];
         }
 
         // Add the rest of dimensions.
         for (int i = dstRanges.size(); i < inputs[0].dims; ++i)
+        {
             dstRanges.push_back(Range::all());
+            paddings.push_back(std::make_pair(0, 0));
+        }
+        inputDims = -1;  // Next time paddings are filled for all the dimensions.
     }
 
     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+            return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
+                   (preferableTarget != DNN_TARGET_MYRIAD ||
+                    (dstRanges.size() == 4 && paddings[0].first == 0 && paddings[0].second == 0));
+#endif
         return backendId == DNN_BACKEND_OPENCV ||
                (backendId == DNN_BACKEND_HALIDE && haveHalide() && dstRanges.size() == 4);
     }
@@ -109,7 +118,7 @@ class PaddingLayerImpl CV_FINAL : public PaddingLayer
         {
             std::vector<float> paddingValue_fp32(1, paddingValue);
             std::vector<int16_t> paddingValue_fp16(1);
-            convertFp16(paddingValue_fp32, paddingValue_fp16);
+            cv::convertFp16(paddingValue_fp32, paddingValue_fp16);
             outputs[0].setTo(paddingValue_fp16[0]);
         }
         else
@@ -173,6 +182,32 @@ class PaddingLayerImpl CV_FINAL : public PaddingLayer
         return Ptr<BackendNode>();
     }
 
+    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
+    {
+#ifdef HAVE_INF_ENGINE
+        InferenceEngine::Builder::Layer ieLayer(name);
+        ieLayer.setName(name);
+        ieLayer.setType("Pad");
+
+        std::vector<int> begins(paddings.size(), 0), ends(paddings.size(), 0);
+        for (int i = 0; i < paddings.size(); ++i)
+        {
+            begins[i] = paddings[i].first;
+            ends[i] = paddings[i].second;
+        }
+        ieLayer.getParameters()["pads_begin"] = begins;
+        ieLayer.getParameters()["pads_end"] = ends;
+        ieLayer.getParameters()["pad_mode"] = paddingType;
+        if (paddingType == "constant")
+            ieLayer.getParameters()["pad_value"] = paddingValue;
+
+        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
+        ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
+        return Ptr<BackendNode>();
+    }
+
 private:
     std::vector<std::pair<int, int> > paddings;  // Pairs pad before, pad after.
     std::vector<Range> dstRanges;
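
For a 4-D NCHW blob, pads_begin and pads_end list the per-axis pad counts in axis order; the MYRIAD condition in supportBackend above (dstRanges.size() == 4 with a zero batch pad) describes exactly this layout. A small worked example of the conversion performed by initInfEngine, written outside the layer class:

    #include <cstddef>
    #include <utility>
    #include <vector>

    int main()
    {
        // Pad one pixel on each side of H and W of an NCHW blob.
        std::vector<std::pair<int, int> > paddings = {
            {0, 0}, {0, 0}, {1, 1}, {1, 1}
        };

        std::vector<int> begins(paddings.size()), ends(paddings.size());
        for (std::size_t i = 0; i < paddings.size(); ++i)
        {
            begins[i] = paddings[i].first;   // pads_begin = {0, 0, 1, 1}
            ends[i]   = paddings[i].second;  // pads_end   = {0, 0, 1, 1}
        }
    }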

modules/dnn/src/layers/pooling_layer.cpp

Lines changed: 1 addition & 1 deletion

@@ -140,7 +140,7 @@ class PoolingLayerImpl CV_FINAL : public PoolingLayer
 #ifdef HAVE_OPENCL
         poolOp.release();
 #endif
-        computeMaxIdx = type == MAX;
+        computeMaxIdx = type == MAX && outputs.size() == 2;
     }
 
     virtual bool supportBackend(int backendId) CV_OVERRIDE

modules/dnn/test/test_backends.cpp

Lines changed: 1 addition & 6 deletions

@@ -289,12 +289,7 @@ TEST_P(DNNTestNetwork, OpenFace)
 #if INF_ENGINE_VER_MAJOR_EQ(2018050000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
         throw SkipTestException("Test is disabled for Myriad targets");
-#elif INF_ENGINE_VER_MAJOR_GE(2019010000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
-        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
-    )
-        throw SkipTestException("Test is disabled for MyriadX target");
-#else
+#elif INF_ENGINE_VER_MAJOR_EQ(2018030000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
         throw SkipTestException("Test has been fixed in OpenVINO 2018R4");
 #endif

modules/dnn/test/test_halide_layers.cpp

Lines changed: 0 additions & 13 deletions

@@ -561,12 +561,6 @@ TEST_P(ReLU, Accuracy)
     float negativeSlope = get<0>(GetParam());
     Backend backendId = get<0>(get<1>(GetParam()));
     Target targetId = get<1>(get<1>(GetParam()));
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE
-        && negativeSlope < 0
-    )
-        throw SkipTestException("Test is disabled");
-#endif
 
     LayerParams lp;
     lp.set("negative_slope", negativeSlope);
@@ -589,13 +583,6 @@ TEST_P(NoParamActivation, Accuracy)
     LayerParams lp;
     lp.type = get<0>(GetParam());
     lp.name = "testLayer";
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE
-        && lp.type == "AbsVal"
-    )
-        throw SkipTestException("Test is disabled");
-#endif
-
     testInPlaceActivation(lp, backendId, targetId);
 }
 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, NoParamActivation, Combine(

modules/dnn/test/test_onnx_importer.cpp

Lines changed: 1 addition & 1 deletion

@@ -379,7 +379,7 @@ TEST_P(Test_ONNX_nets, LResNet100E_IR)
         lInf = 0.035;
     }
     else if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_CPU) {
-        l1 = 4.5e-5;
+        l1 = 4.6e-5;
         lInf = 1.9e-4;
     }
     testONNXModels("LResNet100E_IR", pb, l1, lInf);
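
In these tests l1 bounds the mean absolute element-wise difference and lInf the largest single-element difference, so the bump from 4.5e-5 to 4.6e-5 only slightly loosens the average-error budget for the IE CPU target. A sketch of how such metrics can be computed with cv::norm (the test suite uses its own normAssert helper):

    #include <opencv2/core.hpp>

    // Mean absolute and max absolute difference between two equally-shaped blobs.
    static void diffMetrics(const cv::Mat& ref, const cv::Mat& out,
                            double& l1, double& lInf)
    {
        l1   = cv::norm(ref, out, cv::NORM_L1) / ref.total();
        lInf = cv::norm(ref, out, cv::NORM_INF);
    }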

modules/dnn/test/test_tf_importer.cpp

Lines changed: 12 additions & 38 deletions

@@ -140,10 +140,6 @@ TEST_P(Test_TensorFlow_layers, padding)
 
 TEST_P(Test_TensorFlow_layers, padding_same)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        throw SkipTestException("Test is disabled for DLIE");
-#endif
 #if defined(INF_ENGINE_RELEASE)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
         && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
@@ -251,10 +247,6 @@ TEST_P(Test_TensorFlow_layers, reshape)
 
 TEST_P(Test_TensorFlow_layers, flatten)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        throw SkipTestException("Test is disabled for DLIE");
-#endif
 #if defined(INF_ENGINE_RELEASE)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
         && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2
@@ -267,11 +259,6 @@ TEST_P(Test_TensorFlow_layers, flatten)
 
 TEST_P(Test_TensorFlow_layers, unfused_flatten)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        throw SkipTestException("Test is disabled for DLIE");
-#endif
-
     runTensorFlowNet("unfused_flatten");
     runTensorFlowNet("unfused_flatten_unknown_batch");
 }
@@ -320,11 +307,14 @@ class Test_TensorFlow_nets : public DNNTestLayer {};
 
 TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
 {
-    checkBackend();
-    if ((backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU) ||
-        (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
-        throw SkipTestException("");
+#if defined(INF_ENGINE_RELEASE)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
+        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
+    )
+        throw SkipTestException("Test is disabled for MyriadX");
+#endif
 
+    checkBackend();
     std::string netPath = findDataFile("dnn/ssd_mobilenet_v1_coco.pb", false);
     std::string netConfig = findDataFile("dnn/ssd_mobilenet_v1_coco.pbtxt", false);
     std::string imgPath = findDataFile("dnn/street.png", false);
@@ -333,30 +323,18 @@ TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
     resize(imread(imgPath), inp, Size(300, 300));
     inp = blobFromImage(inp, 1.0f / 127.5, Size(), Scalar(127.5, 127.5, 127.5), true);
 
-    std::vector<String> outNames(3);
-    outNames[0] = "concat";
-    outNames[1] = "concat_1";
-    outNames[2] = "detection_out";
-
-    std::vector<Mat> refs(outNames.size());
-    for (int i = 0; i < outNames.size(); ++i)
-    {
-        std::string path = findDataFile("dnn/tensorflow/ssd_mobilenet_v1_coco." + outNames[i] + ".npy", false);
-        refs[i] = blobFromNPY(path);
-    }
+    Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/ssd_mobilenet_v1_coco.detection_out.npy", false));
 
     Net net = readNetFromTensorflow(netPath, netConfig);
     net.setPreferableBackend(backend);
     net.setPreferableTarget(target);
 
     net.setInput(inp);
+    Mat out = net.forward();
 
-    std::vector<Mat> output;
-    net.forward(output, outNames);
-
-    normAssert(refs[0].reshape(1, 1), output[0].reshape(1, 1), "", 1e-5, 1.5e-4);
-    normAssert(refs[1].reshape(1, 1), output[1].reshape(1, 1), "", 1e-5, 3e-4);
-    normAssertDetections(refs[2], output[2], "", 0.2);
+    double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.0043 : default_l1;
+    double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.037 : default_lInf;
+    normAssertDetections(ref, out, "", 0.2, scoreDiff, iouDiff);
 }
 
 TEST_P(Test_TensorFlow_nets, Inception_v2_SSD)
@@ -597,10 +575,6 @@ TEST_P(Test_TensorFlow_layers, fp16_weights)
 
 TEST_P(Test_TensorFlow_layers, fp16_padding_same)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        throw SkipTestException("Test is disabled for DLIE");
-#endif
 #if defined(INF_ENGINE_RELEASE)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
         && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
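
The MobileNet_SSD test now validates only the final detection_out blob: normAssertDetections matches predicted boxes against reference boxes, tolerating score deviations up to scoreDiff and box-overlap deviations up to iouDiff, which is more robust across backends than bitwise-comparing the intermediate concat tensors. A sketch of the intersection-over-union measure such a comparison rests on (an illustration, not the test suite's implementation):

    #include <opencv2/core.hpp>

    // IoU of two axis-aligned boxes: intersection area over union area.
    static double boxIoU(const cv::Rect2d& a, const cv::Rect2d& b)
    {
        double inter = (a & b).area();
        double uni = a.area() + b.area() - inter;
        return uni > 0 ? inter / uni : 0.0;
    }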
