Skip to content

Commit 250bcd9

Browse files
committed
Merge remote-tracking branch 'upstream/3.4' into merge-3.4
2 parents a1d8730 + 4446ef5 commit 250bcd9

File tree

10 files changed

+128
-52
lines changed

10 files changed

+128
-52
lines changed

modules/aruco/include/opencv2/aruco.hpp

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -144,6 +144,8 @@ enum CornerRefineMethod{
144144
* done at full resolution.
145145
* - aprilTagQuadSigma: What Gaussian blur should be applied to the segmented image (used for quad detection?)
146146
* Parameter is the standard deviation in pixels. Very noisy images benefit from non-zero values (e.g. 0.8).
147+
* - detectInvertedMarker: to check if there is a white marker. In order to generate a "white" marker just
148+
* invert a normal marker by using a tilde, ~markerImage. (default false)
147149
*/
148150
struct CV_EXPORTS_W DetectorParameters {
149151

@@ -183,6 +185,9 @@ struct CV_EXPORTS_W DetectorParameters {
183185
CV_PROP_RW float aprilTagMaxLineFitMse;
184186
CV_PROP_RW int aprilTagMinWhiteBlackDiff;
185187
CV_PROP_RW int aprilTagDeglitch;
188+
189+
// to detect white (inverted) markers
190+
CV_PROP_RW bool detectInvertedMarker;
186191
};
187192

188193

modules/aruco/src/aruco.cpp

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,8 @@ DetectorParameters::DetectorParameters()
8686
aprilTagCriticalRad( (float)(10* CV_PI /180) ),
8787
aprilTagMaxLineFitMse(10.0),
8888
aprilTagMinWhiteBlackDiff(5),
89-
aprilTagDeglitch(0){}
89+
aprilTagDeglitch(0),
90+
detectInvertedMarker(false){}
9091

9192

9293
/**
@@ -512,7 +513,19 @@ static bool _identifyOneCandidate(const Ptr<Dictionary>& dictionary, InputArray
512513
int(dictionary->markerSize * dictionary->markerSize * params->maxErroneousBitsInBorderRate);
513514
int borderErrors =
514515
_getBorderErrors(candidateBits, dictionary->markerSize, params->markerBorderBits);
515-
if(borderErrors > maximumErrorsInBorder) return false; // border is wrong
516+
517+
// check if it is a white marker
518+
if(params->detectInvertedMarker){
519+
// to get from 255 to 1
520+
Mat invertedImg = ~candidateBits-254;
521+
int invBError = _getBorderErrors(invertedImg, dictionary->markerSize, params->markerBorderBits);
522+
// white marker
523+
if(invBError<borderErrors){
524+
borderErrors = invBError;
525+
invertedImg.copyTo(candidateBits);
526+
}
527+
}
528+
if(borderErrors > maximumErrorsInBorder) return false;
516529

517530
// take only inner bits
518531
Mat onlyBits =

modules/aruco/test/test_arucodetection.cpp

Lines changed: 79 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -182,12 +182,29 @@ static void getSyntheticRT(double yaw, double pitch, double distance, Mat &rvec,
182182
*/
183183
static Mat projectMarker(Ptr<aruco::Dictionary> &dictionary, int id, Mat cameraMatrix, double yaw,
184184
double pitch, double distance, Size imageSize, int markerBorder,
185-
vector< Point2f > &corners) {
185+
vector< Point2f > &corners, int encloseMarker=0) {
186186

187187
// canonical image
188-
Mat markerImg;
188+
Mat marker, markerImg;
189189
const int markerSizePixels = 100;
190-
aruco::drawMarker(dictionary, id, markerSizePixels, markerImg, markerBorder);
190+
191+
aruco::drawMarker(dictionary, id, markerSizePixels, marker, markerBorder);
192+
marker.copyTo(markerImg);
193+
194+
if(encloseMarker){ //to enclose the marker
195+
int enclose = int(marker.rows/4);
196+
markerImg = Mat::zeros(marker.rows+(2*enclose), marker.cols+(enclose*2), CV_8UC1);
197+
198+
Mat field= markerImg.rowRange(int(enclose), int(markerImg.rows-enclose))
199+
.colRange(int(0), int(markerImg.cols));
200+
field.setTo(255);
201+
field= markerImg.rowRange(int(0), int(markerImg.rows))
202+
.colRange(int(enclose), int(markerImg.cols-enclose));
203+
field.setTo(255);
204+
205+
field = markerImg(Rect(enclose,enclose,marker.rows,marker.cols));
206+
marker.copyTo(field);
207+
}
191208

192209
// get rvec and tvec for the perspective
193210
Mat rvec, tvec;
@@ -205,10 +222,10 @@ static Mat projectMarker(Ptr<aruco::Dictionary> &dictionary, int id, Mat cameraM
205222
projectPoints(markerObjPoints, rvec, tvec, cameraMatrix, distCoeffs, corners);
206223

207224
vector< Point2f > originalCorners;
208-
originalCorners.push_back(Point2f(0, 0));
209-
originalCorners.push_back(Point2f((float)markerSizePixels, 0));
210-
originalCorners.push_back(Point2f((float)markerSizePixels, (float)markerSizePixels));
211-
originalCorners.push_back(Point2f(0, (float)markerSizePixels));
225+
originalCorners.push_back(Point2f(0+float(encloseMarker*markerSizePixels/4), 0+float(encloseMarker*markerSizePixels/4)));
226+
originalCorners.push_back(originalCorners[0]+Point2f((float)markerSizePixels, 0));
227+
originalCorners.push_back(originalCorners[0]+Point2f((float)markerSizePixels, (float)markerSizePixels));
228+
originalCorners.push_back(originalCorners[0]+Point2f(0, (float)markerSizePixels));
212229

213230
Mat transformation = getPerspectiveTransform(originalCorners, corners);
214231

@@ -238,6 +255,11 @@ class CV_ArucoDetectionPerspective : public cvtest::BaseTest {
238255
public:
239256
CV_ArucoDetectionPerspective();
240257

258+
enum checkWithParameter{
259+
USE_APRILTAG=1, /// Detect marker candidates :: using AprilTag
260+
DETECT_INVERTED_MARKER, /// Check if there is a white marker
261+
};
262+
241263
protected:
242264
void run(int);
243265
};
@@ -246,9 +268,10 @@ class CV_ArucoDetectionPerspective : public cvtest::BaseTest {
246268
CV_ArucoDetectionPerspective::CV_ArucoDetectionPerspective() {}
247269

248270

249-
void CV_ArucoDetectionPerspective::run(int aprilDecimate) {
271+
void CV_ArucoDetectionPerspective::run(int tryWith) {
250272

251273
int iter = 0;
274+
int szEnclosed = 0;
252275
Mat cameraMatrix = Mat::eye(3, 3, CV_64FC1);
253276
Size imgSize(500, 500);
254277
cameraMatrix.at< double >(0, 0) = cameraMatrix.at< double >(1, 1) = 650;
@@ -257,29 +280,35 @@ void CV_ArucoDetectionPerspective::run(int aprilDecimate) {
257280
Ptr<aruco::Dictionary> dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250);
258281

259282
// detect from different positions
260-
for(double distance = 0.1; distance <= 0.5; distance += 0.2) {
261-
for(int pitch = 0; pitch < 360; pitch += 60) {
262-
for(int yaw = 30; yaw <= 90; yaw += 50) {
283+
for(double distance = 0.1; distance < 0.7; distance += 0.2) {
284+
for(int pitch = 0; pitch < 360; pitch += (distance == 0.1? 60:180)) {
285+
for(int yaw = 70; yaw <= 120; yaw += 40){
263286
int currentId = iter % 250;
264287
int markerBorder = iter % 2 + 1;
265288
iter++;
266289
vector< Point2f > groundTruthCorners;
267-
// create synthetic image
268-
Mat img =
269-
projectMarker(dictionary, currentId, cameraMatrix, deg2rad(yaw), deg2rad(pitch),
270-
distance, imgSize, markerBorder, groundTruthCorners);
271290

272-
// detect markers
273-
vector< vector< Point2f > > corners;
274-
vector< int > ids;
275291
Ptr<aruco::DetectorParameters> params = aruco::DetectorParameters::create();
276292
params->minDistanceToBorder = 1;
277293
params->markerBorderBits = markerBorder;
278294

279-
if(aprilDecimate == 1){
280-
params->cornerRefinementMethod = cv::aruco::CORNER_REFINE_APRILTAG;
295+
/// create synthetic image
296+
Mat img=
297+
projectMarker(dictionary, currentId, cameraMatrix, deg2rad(yaw), deg2rad(pitch),
298+
distance, imgSize, markerBorder, groundTruthCorners, szEnclosed);
299+
// marker :: Inverted
300+
if(CV_ArucoDetectionPerspective::DETECT_INVERTED_MARKER == tryWith){
301+
img = ~img;
302+
params->detectInvertedMarker = true;
303+
}
304+
305+
if(CV_ArucoDetectionPerspective::USE_APRILTAG == tryWith){
306+
params->cornerRefinementMethod = cv::aruco::CORNER_REFINE_APRILTAG;
281307
}
282308

309+
// detect markers
310+
vector< vector< Point2f > > corners;
311+
vector< int > ids;
283312
aruco::detectMarkers(img, dictionary, corners, ids, params);
284313

285314
// check results
@@ -293,14 +322,33 @@ void CV_ArucoDetectionPerspective::run(int aprilDecimate) {
293322
}
294323
for(int c = 0; c < 4; c++) {
295324
double dist = cv::norm(groundTruthCorners[c] - corners[0][c]); // TODO cvtest
296-
if(dist > 5) {
297-
ts->printf(cvtest::TS::LOG, "Incorrect marker corners position");
298-
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
299-
return;
325+
if(CV_ArucoDetectionPerspective::DETECT_INVERTED_MARKER == tryWith){
326+
if(szEnclosed && dist > 3){
327+
ts->printf(cvtest::TS::LOG, "Incorrect marker corners position");
328+
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
329+
return;
330+
}
331+
if(!szEnclosed && dist >15){
332+
ts->printf(cvtest::TS::LOG, "Incorrect marker corners position");
333+
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
334+
return;
335+
}
336+
}
337+
else{
338+
if(dist > 5) {
339+
ts->printf(cvtest::TS::LOG, "Incorrect marker corners position");
340+
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
341+
return;
342+
}
300343
}
301344
}
302345
}
303346
}
347+
// change the state :: to detect an enclosed inverted marker
348+
if( CV_ArucoDetectionPerspective::DETECT_INVERTED_MARKER == tryWith && distance == 0.1 ){
349+
distance -= 0.1;
350+
szEnclosed++;
351+
}
304352
}
305353
}
306354

@@ -488,10 +536,16 @@ void CV_ArucoBitCorrection::run(int) {
488536
}
489537

490538
typedef CV_ArucoDetectionPerspective CV_AprilTagDetectionPerspective;
539+
typedef CV_ArucoDetectionPerspective CV_InvertedArucoDetectionPerspective;
540+
541+
TEST(CV_InvertedArucoDetectionPerspective, algorithmic) {
542+
CV_InvertedArucoDetectionPerspective test;
543+
test.safe_run(CV_ArucoDetectionPerspective::DETECT_INVERTED_MARKER);
544+
}
491545

492546
TEST(CV_AprilTagDetectionPerspective, algorithmic) {
493547
CV_AprilTagDetectionPerspective test;
494-
test.safe_run(1);
548+
test.safe_run(CV_ArucoDetectionPerspective::USE_APRILTAG);
495549
}
496550

497551
TEST(CV_ArucoDetectionSimple, algorithmic) {

modules/face/include/opencv2/face/facemark.hpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -74,8 +74,8 @@ class CV_EXPORTS_W Facemark : public virtual Algorithm
7474
@endcode
7575
*/
7676
CV_WRAP virtual bool fit( InputArray image,
77-
InputArray faces,
78-
OutputArrayOfArrays landmarks ) = 0;
77+
const std::vector<Rect>& faces,
78+
CV_OUT std::vector<std::vector<Point2f> >& landmarks ) = 0;
7979
}; /* Facemark*/
8080

8181

modules/face/include/opencv2/face/facemarkAAM.hpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -146,7 +146,7 @@ class CV_EXPORTS_W FacemarkAAM : public FacemarkTrain
146146
};
147147

148148
//! overload with additional Config structures
149-
virtual bool fitConfig( InputArray image, InputArray roi, OutputArrayOfArrays _landmarks, const std::vector<Config> &runtime_params ) = 0;
149+
virtual bool fitConfig( InputArray image, const std::vector<Rect>& roi, std::vector<std::vector<Point2f> >& _landmarks, const std::vector<Config> &runtime_params ) = 0;
150150

151151

152152
//! initializer

modules/face/src/face_alignmentimpl.hpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,7 @@ class FacemarkKazemiImpl : public FacemarkKazemi{
7373
void loadModel(String fs) CV_OVERRIDE;
7474
bool setFaceDetector(FN_FaceDetector f, void* userdata) CV_OVERRIDE;
7575
bool getFaces(InputArray image, OutputArray faces) CV_OVERRIDE;
76-
bool fit(InputArray image, InputArray faces, OutputArrayOfArrays landmarks ) CV_OVERRIDE;
76+
bool fit(InputArray image, const std::vector<Rect>& faces, CV_OUT std::vector<std::vector<Point2f> >& landmarks ) CV_OVERRIDE;
7777
void training(String imageList, String groundTruth);
7878
bool training(vector<Mat>& images, vector< vector<Point2f> >& landmarks,string filename,Size scale,string modelFilename) CV_OVERRIDE;
7979
// Destructor for the class.

modules/face/src/facemarkAAM.cpp

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -103,12 +103,11 @@ class FacemarkAAMImpl : public FacemarkAAM {
103103

104104
bool getData(void * items) CV_OVERRIDE;
105105

106-
bool fitConfig( InputArray image, InputArray roi, OutputArrayOfArrays _landmarks, const std::vector<Config> &runtime_params ) CV_OVERRIDE;
106+
bool fitConfig( InputArray image, const std::vector<Rect>& roi, std::vector<std::vector<Point2f> >& _landmarks, const std::vector<Config> &runtime_params ) CV_OVERRIDE;
107107

108108
protected:
109109

110-
bool fit( InputArray image, InputArray faces, OutputArrayOfArrays landmarks ) CV_OVERRIDE;
111-
//bool fit( InputArray image, InputArray faces, InputOutputArray landmarks, void * runtime_params);//!< from many ROIs
110+
bool fit( InputArray image, const std::vector<Rect>& faces, CV_OUT std::vector<std::vector<Point2f> >& landmarks ) CV_OVERRIDE;
112111
bool fitImpl( const Mat image, std::vector<Point2f>& landmarks,const Mat R,const Point2f T,const float scale, const int sclIdx=0 );
113112

114113
bool addTrainingSample(InputArray image, InputArray landmarks) CV_OVERRIDE;
@@ -323,19 +322,18 @@ void FacemarkAAMImpl::training(void* parameters){
323322
if(params.verbose) printf("Training is completed\n");
324323
}
325324

326-
bool FacemarkAAMImpl::fit( InputArray image, InputArray roi, OutputArrayOfArrays _landmarks )
325+
bool FacemarkAAMImpl::fit( InputArray image, const std::vector<Rect>& roi, CV_OUT std::vector<std::vector<Point2f> >& _landmarks )
327326
{
328327
std::vector<Config> config; // empty
329328
return fitConfig(image, roi, _landmarks, config);
330329
}
331330

332-
bool FacemarkAAMImpl::fitConfig( InputArray image, InputArray roi, OutputArrayOfArrays _landmarks, const std::vector<Config> &configs )
331+
bool FacemarkAAMImpl::fitConfig( InputArray image, const std::vector<Rect>& roi, std::vector<std::vector<Point2f> >& _landmarks, const std::vector<Config> &configs )
333332
{
334-
std::vector<Rect> & faces = *(std::vector<Rect> *)roi.getObj();
333+
const std::vector<Rect> & faces = roi;
335334
if(faces.size()<1) return false;
336335

337-
std::vector<std::vector<Point2f> > & landmarks =
338-
*(std::vector<std::vector<Point2f> >*) _landmarks.getObj();
336+
std::vector<std::vector<Point2f> > & landmarks = _landmarks;
339337
landmarks.resize(faces.size());
340338

341339
Mat img = image.getMat();

modules/face/src/facemarkLBF.cpp

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,7 @@ class FacemarkLBFImpl : public FacemarkLBF {
115115

116116
protected:
117117

118-
bool fit( InputArray image, InputArray faces, OutputArrayOfArrays landmarks ) CV_OVERRIDE;//!< from many ROIs
118+
bool fit( InputArray image, const std::vector<Rect> & faces, std::vector<std::vector<Point2f> > & landmarks ) CV_OVERRIDE;//!< from many ROIs
119119
bool fitImpl( const Mat image, std::vector<Point2f> & landmarks );//!< from a face
120120

121121
bool addTrainingSample(InputArray image, InputArray landmarks) CV_OVERRIDE;
@@ -370,14 +370,12 @@ void FacemarkLBFImpl::training(void* parameters){
370370
isModelTrained = true;
371371
}
372372

373-
bool FacemarkLBFImpl::fit( InputArray image, InputArray roi, OutputArrayOfArrays _landmarks )
373+
bool FacemarkLBFImpl::fit( InputArray image, const std::vector<Rect> & roi, CV_OUT std::vector<std::vector<Point2f> > & _landmarks )
374374
{
375-
// FIXIT
376-
std::vector<Rect> & faces = *(std::vector<Rect> *)roi.getObj();
375+
const std::vector<Rect> & faces = roi;
377376
if (faces.empty()) return false;
378377

379-
std::vector<std::vector<Point2f> > & landmarks =
380-
*(std::vector<std::vector<Point2f> >*) _landmarks.getObj();
378+
std::vector<std::vector<Point2f> > & landmarks = _landmarks;
381379

382380
landmarks.resize(faces.size());
383381

modules/face/src/getlandmarks.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -168,15 +168,15 @@ void FacemarkKazemiImpl :: loadModel(String filename){
168168
f.close();
169169
isModelLoaded = true;
170170
}
171-
bool FacemarkKazemiImpl::fit(InputArray img, InputArray roi, OutputArrayOfArrays landmarks){
171+
bool FacemarkKazemiImpl::fit(InputArray img, const std::vector<Rect>& roi, CV_OUT std::vector<std::vector<Point2f> >& landmarks){
172172
if(!isModelLoaded){
173173
String error_message = "No model loaded. Aborting....";
174174
CV_Error(Error::StsBadArg, error_message);
175175
return false;
176176
}
177177
Mat image = img.getMat();
178-
std::vector<Rect> & faces = *(std::vector<Rect>*)roi.getObj();
179-
std::vector<std::vector<Point2f> > & shapes = *(std::vector<std::vector<Point2f> >*) landmarks.getObj();
178+
const std::vector<Rect> & faces = roi;
179+
std::vector<std::vector<Point2f> > & shapes = landmarks;
180180
shapes.resize(faces.size());
181181

182182
if(image.empty()){

modules/ximgproc/src/selectivesearchsegmentation.cpp

Lines changed: 12 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -558,9 +558,14 @@ namespace cv {
558558

559559
center = Point((int)(img_plane_rotated.cols / 2.0), (int)(img_plane_rotated.rows / 2.0));
560560
rot = cv::getRotationMatrix2D(center, -45.0, 1.0);
561-
warpAffine(tmp_gradiant, tmp_rot, rot, bbox.size());
561+
// Using this bigger box avoids clipping the ends of narrow images
562+
Rect bbox2 = cv::RotatedRect(center, img_plane_rotated.size(), -45.0).boundingRect();
563+
warpAffine(tmp_gradiant, tmp_rot, rot, bbox2.size());
562564

563-
tmp_gradiant = tmp_rot(Rect((bbox.width - img.cols) / 2, (bbox.height - img.rows) / 2, img.cols, img.rows));
565+
// for narrow images, bbox might be less tall or wide than img
566+
int start_x = std::max(0, (bbox.width - img.cols) / 2);
567+
int start_y = std::max(0, (bbox.height - img.rows) / 2);
568+
tmp_gradiant = tmp_rot(Rect(start_x, start_y, img.cols, img.rows));
564569

565570
threshold(tmp_gradiant, tmp_gradiant_pos, 0, 0, THRESH_TOZERO);
566571
threshold(tmp_gradiant, tmp_gradiant_neg, 0, 0, THRESH_TOZERO_INV);
@@ -573,9 +578,12 @@ namespace cv {
573578

574579
center = Point((int)(img_plane_rotated.cols / 2.0), (int)(img_plane_rotated.rows / 2.0));
575580
rot = cv::getRotationMatrix2D(center, -45.0, 1.0);
576-
warpAffine(tmp_gradiant, tmp_rot, rot, bbox.size());
581+
bbox2 = cv::RotatedRect(center, img_plane_rotated.size(), -45.0).boundingRect();
582+
warpAffine(tmp_gradiant, tmp_rot, rot, bbox2.size());
577583

578-
tmp_gradiant = tmp_rot(Rect((bbox.width - img.cols) / 2, (bbox.height - img.rows) / 2, img.cols, img.rows));
584+
start_x = std::max(0, (bbox.width - img.cols) / 2);
585+
start_y = std::max(0, (bbox.height - img.rows) / 2);
586+
tmp_gradiant = tmp_rot(Rect(start_x, start_y, img.cols, img.rows));
579587

580588
threshold(tmp_gradiant, tmp_gradiant_pos, 0, 0, THRESH_TOZERO);
581589
threshold(tmp_gradiant, tmp_gradiant_neg, 0, 0, THRESH_TOZERO_INV);

0 commit comments

Comments (0)