Skip to content

Commit 514b376

Browse files
Merge pull request #181 from justadudewhohacks/typescript-definitions
Typescript definitions
2 parents: 502b4a2 + 3c12905 · commit: 514b376

File tree

111 files changed: +3899 −10 lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

111 files changed: +3899 −10 lines changed

.gitattributes

Lines changed: 3 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1 +1,3 @@
1-
*.sh* text eol=lf
1+
*.sh* text eol=lf
2+
*.d.ts linguist-vendored=false
3+
/examples/* linguist-documentation=false

.gitignore

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -9,4 +9,5 @@ coverage
99
coverage-report
1010
tmpdata
1111
data/dnn
12-
.idea/
12+
.idea/
13+
dist

.npmignore

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -14,4 +14,5 @@ coverage
1414
coverage-report
1515
tmpdata
1616
ci
17-
.dockerignore
17+
.dockerignore
18+
dist

cc/core/Point.cc

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -29,6 +29,8 @@ NAN_MODULE_INIT(Point::Init) {
2929
ctor->InstanceTemplate()->SetInternalFieldCount(1);
3030
ctor->SetClassName(Nan::New("Point").ToLocalChecked());
3131
target->Set(Nan::New("Point").ToLocalChecked(), ctor->GetFunction());
32+
target->Set(Nan::New("Point2").ToLocalChecked(), ctor->GetFunction());
33+
target->Set(Nan::New("Point3").ToLocalChecked(), ctor->GetFunction());
3234
};
3335

3436
NAN_METHOD(Point::New) {

cc/core/Vec.cc

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -43,6 +43,9 @@ NAN_MODULE_INIT(Vec::Init) {
4343
ctor->InstanceTemplate()->SetInternalFieldCount(1);
4444
ctor->SetClassName(Nan::New("Vec").ToLocalChecked());
4545
target->Set(Nan::New("Vec").ToLocalChecked(), ctor->GetFunction());
46+
target->Set(Nan::New("Vec2").ToLocalChecked(), ctor->GetFunction());
47+
target->Set(Nan::New("Vec3").ToLocalChecked(), ctor->GetFunction());
48+
target->Set(Nan::New("Vec4").ToLocalChecked(), ctor->GetFunction());
4649
};
4750

4851
NAN_METHOD(Vec::New) {
File renamed without changes.
File renamed without changes.

examples/dnn/loadFacenet.js

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -5,7 +5,7 @@ const {
55
} = require('../utils');
66

77
module.exports = function () {
8-
const modelPath = '../../data/dnn/facenet';
8+
const modelPath = path.resolve(__dirname, '../../data/dnn/facenet');
99

1010
const prototxt = path.resolve(modelPath, 'facenet.prototxt');
1111
const modelFile = path.resolve(modelPath, 'res10_300x300_ssd_iter_140000.caffemodel');

examples/dnnSSDCoco.js

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -46,7 +46,7 @@ function classifyImg(img) {
4646

4747
const makeDrawClassDetections = predictions => (drawImg, className, getColor, thickness = 2) => {
4848
predictions
49-
.filter(p => p.className === className)
49+
.filter(p => classNames[p.classLabel] === className)
5050
.forEach(p => drawRect(drawImg, p.rect, getColor(), { thickness }));
5151
return drawImg;
5252
};

examples/faceRecognition1.js

Lines changed: 6 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -41,9 +41,13 @@ const lbph = new cv.LBPHFaceRecognizer();
4141
lbph.train(trainImgs, labels);
4242

4343
const twoFacesImg = cv.imread(path.resolve(basePath, 'daryl-rick.jpg'));
44-
const faces = classifier.detectMultiScale(twoFacesImg.bgrToGray()).objects;
44+
const result = classifier.detectMultiScale(twoFacesImg.bgrToGray());
4545

46-
faces.forEach((faceRect) => {
46+
const minDetections = 10;
47+
result.objects.forEach((faceRect, i) => {
48+
if (result.numDetections[i] < minDetections) {
49+
return;
50+
}
4751
const faceImg = twoFacesImg.getRegion(faceRect).bgrToGray();
4852
const who = nameMappings[lbph.predict(faceImg).label];
4953

examples/package.json

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -2,7 +2,7 @@
22
"name": "opencv4nodejs_examples",
33
"version": "1.1.0",
44
"author": "justadudewhohacks",
5-
"license": "ISC",
5+
"license": "MIT",
66
"dependencies": {},
77
"devDependencies": {
88
"eslint": "^3.18.0",

examples/templateMatching.js

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -3,8 +3,8 @@ const cv = require('../');
33

44
const findWaldo = async () => {
55
// Load images
6-
const originalMat = await cv.imreadAsync(`${__dirname}/templateMatching/original.jpg`);
7-
const waldoMat = await cv.imreadAsync(`${__dirname}/templateMatching/waldo.jpg`);
6+
const originalMat = await cv.imreadAsync(`${__dirname}/../data/findwaldo.jpg`);
7+
const waldoMat = await cv.imreadAsync(`${__dirname}/../data/waldo.jpg`);
88

99
// Match template (the brightest locations indicate the highest match)
1010
const matched = originalMat.matchTemplate(waldoMat, 5);

examples/typed/OCRTools.ts

Lines changed: 96 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,96 @@
1+
import * as fs from 'fs';
2+
import * as cv from '../../';
3+
4+
// a - z
5+
export const lccs = Array(26).fill(97).map((v, i) => v + i).map(ascii => String.fromCharCode(ascii));
6+
7+
const invert = (img: cv.Mat) => img.threshold(254, 255, cv.THRESH_BINARY_INV);
8+
9+
const getBoundingRect = (component: number[]) => new cv.Rect(
10+
component[cv.CC_STAT_LEFT],
11+
component[cv.CC_STAT_TOP],
12+
component[cv.CC_STAT_WIDTH],
13+
component[cv.CC_STAT_HEIGHT]
14+
);
15+
16+
const getLetterBoundingRect = (img: cv.Mat, isIorJ: boolean) => {
17+
const { stats } = invert(img).bgrToGray().connectedComponentsWithStats();
18+
const componentsOrderedBySize =
19+
stats.getDataAsArray().sort((s0, s1) => s1[cv.CC_STAT_AREA] - s0[cv.CC_STAT_AREA]);
20+
21+
if (componentsOrderedBySize.length < 2) {
22+
return null;
23+
}
24+
25+
// background actually is largest component so we take the next largest
26+
let largestComponent = componentsOrderedBySize[1];
27+
let letterRect = getBoundingRect(largestComponent);
28+
29+
if (isIorJ && componentsOrderedBySize.length > 2) {
30+
let dotComponent = componentsOrderedBySize[2];
31+
32+
if (largestComponent[cv.CC_STAT_TOP] < dotComponent[cv.CC_STAT_TOP]) {
33+
largestComponent = componentsOrderedBySize[2];
34+
dotComponent = componentsOrderedBySize[1];
35+
letterRect = getBoundingRect(largestComponent);
36+
}
37+
38+
const dotRectXRight = dotComponent[cv.CC_STAT_LEFT] + dotComponent[cv.CC_STAT_WIDTH];
39+
const xLeft = Math.min(letterRect.x, dotComponent[cv.CC_STAT_LEFT]);
40+
const letterRectYBottom = letterRect.y + letterRect.height;
41+
42+
letterRect = new cv.Rect(
43+
xLeft,
44+
dotComponent[cv.CC_STAT_TOP],
45+
Math.max(letterRect.width, dotRectXRight - xLeft),
46+
(letterRectYBottom - dotComponent[cv.CC_STAT_TOP])
47+
);
48+
}
49+
50+
return letterRect;
51+
};
52+
53+
export function centerLetterInImage (img: cv.Mat, isIorJ: boolean): cv.Mat {
54+
const rect = getLetterBoundingRect(img, isIorJ);
55+
if (!rect) {
56+
return null;
57+
}
58+
59+
const offX = (img.cols - rect.width) / 2;
60+
const offY = (img.rows - rect.height) / 2;
61+
const centeredRect = new cv.Rect(
62+
offX,
63+
offY,
64+
rect.width,
65+
rect.height
66+
);
67+
68+
const centered = new cv.Mat(img.rows, img.cols, img.type, [255, 255, 255]);
69+
img.getRegion(rect).copyTo(centered.getRegion(centeredRect));
70+
71+
return centered;
72+
};
73+
74+
export function saveConfusionMatrix (
75+
testDataFiles: any[],
76+
predict: (mat: cv.Mat, isIorJ: boolean) => number,
77+
numTestImagesPerClass: number,
78+
outputFile: string
79+
): void {
80+
const confusionMat = new cv.Mat(26, 26, cv.CV_64F, 0);
81+
testDataFiles.forEach((files, label) => {
82+
files.forEach((file: string) => {
83+
const img = cv.imread(file);
84+
const predictedLabel = predict(img, label === 8 || label === 9);
85+
confusionMat.set(label, predictedLabel, confusionMat.at(label, predictedLabel) + 1);
86+
});
87+
});
88+
89+
const confusionMatMatrix = [[''].concat(lccs)].concat(
90+
confusionMat.div(numTestImagesPerClass)
91+
.getDataAsArray().map((col, l) => [lccs[l]].concat(`${col.map(v => Math.round(v * 100) / 100)}`))
92+
);
93+
94+
const csvRows = confusionMatMatrix.map(cols => cols.join(';'));
95+
fs.writeFileSync(outputFile, csvRows.join('\n'));
96+
};

examples/typed/README.md

Lines changed: 11 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,11 @@
1+
# opencv4nodejs TypeScript examples
2+
3+
### Install
4+
``` bash
5+
npm install
6+
```
7+
8+
### Run
9+
``` bash
10+
npm run ts-node <example>.ts
11+
```

examples/typed/asyncMatchFeatures.ts

Lines changed: 54 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,54 @@
1+
import * as cv from '../../';
2+
3+
const detectAndComputeAsync = (det: cv.FeatureDetector, img: cv.Mat) =>
4+
det.detectAsync(img)
5+
.then(kps => det.computeAsync(img, kps)
6+
.then(desc => ({ kps, desc }))
7+
);
8+
9+
const img1 = cv.imread('../../data/s0.jpg');
10+
const img2 = cv.imread('../../data/s1.jpg');
11+
12+
const detectorNames = [
13+
'AKAZE',
14+
'BRISK',
15+
'KAZE',
16+
'ORB'
17+
];
18+
19+
const createDetectorFromName = (name: string) => new cv[`${name}Detector`]();
20+
21+
// create 4 promises -> each detector detects and computes descriptors for img1 and img2
22+
const promises = detectorNames
23+
.map(createDetectorFromName)
24+
.map(det =>
25+
// also detect and compute descriptors for img1 and img2 async
26+
Promise.all([detectAndComputeAsync(det, img1), detectAndComputeAsync(det, img2)])
27+
.then(allResults =>
28+
cv.matchBruteForceAsync(
29+
allResults[0].desc,
30+
allResults[1].desc
31+
)
32+
.then(matches => ({
33+
matches,
34+
kps1: allResults[0].kps,
35+
kps2: allResults[1].kps
36+
}))
37+
)
38+
);
39+
40+
Promise.all(promises)
41+
.then((allResults) => {
42+
allResults.forEach((result, i) => {
43+
const drawMatchesImg = cv.drawMatches(
44+
img1,
45+
img2,
46+
result.kps1,
47+
result.kps2,
48+
result.matches
49+
);
50+
cv.imshowWait(detectorNames[i], drawMatchesImg);
51+
cv.destroyAllWindows();
52+
});
53+
})
54+
.catch(err => console.error(err));

examples/typed/dnn/loadFacenet.ts

Lines changed: 18 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,18 @@
1+
import * as fs from 'fs';
2+
import * as path from 'path';
3+
import * as cv from '../../../';
4+
5+
export function loadFacenet (): cv.Net {
6+
const modelPath = path.resolve(__dirname, '../../../data/dnn/facenet');
7+
8+
const prototxt = path.resolve(modelPath, 'facenet.prototxt');
9+
const modelFile = path.resolve(modelPath, 'res10_300x300_ssd_iter_140000.caffemodel');
10+
11+
if (!fs.existsSync(prototxt) || !fs.existsSync(modelFile)) {
12+
console.log('could not find facenet model');
13+
console.log('download the prototxt from: https://raw.githubusercontent.com/opencv/opencv/master/samples/dnn/face_detector/deploy.prototxt');
14+
console.log('download the model from: https://raw.githubusercontent.com/opencv/opencv_3rdparty/dnn_samples_face_detector_20170830/res10_300x300_ssd_iter_140000.caffemodel');
15+
throw new Error('exiting');
16+
}
17+
return cv.readNetFromCaffe(prototxt, modelFile);
18+
};

examples/typed/dnn/ssdUtils.ts

Lines changed: 38 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,38 @@
1+
import * as cv from '../../../';
2+
3+
export type Prediction = {
4+
classLabel: number
5+
confidence: number
6+
rect: cv.Rect
7+
}
8+
9+
export function extractResults (
10+
outputBlob: cv.Mat,
11+
imgDimensions: { rows: number, cols: number }
12+
): Prediction[] {
13+
return Array(outputBlob.rows).fill(0)
14+
.map((res, i) => {
15+
const classLabel = outputBlob.at(i, 1);
16+
const confidence = outputBlob.at(i, 2);
17+
const bottomLeft = new cv.Point2(
18+
outputBlob.at(i, 3) * imgDimensions.cols,
19+
outputBlob.at(i, 6) * imgDimensions.rows
20+
);
21+
const topRight = new cv.Point2(
22+
outputBlob.at(i, 5) * imgDimensions.cols,
23+
outputBlob.at(i, 4) * imgDimensions.rows
24+
);
25+
const rect = new cv.Rect(
26+
bottomLeft.x,
27+
topRight.y,
28+
topRight.x - bottomLeft.x,
29+
bottomLeft.y - topRight.y
30+
);
31+
32+
return ({
33+
classLabel,
34+
confidence,
35+
rect
36+
});
37+
});
38+
};

0 commit comments

Comments (0)