Unverified Commit 8d5a777b by justadudewhohacks Committed by GitHub

Merge pull request #54 from justadudewhohacks/align-from-5-point-landmarks

Align from 5 point landmarks + allFacesMtcnn
parents 049997bc 5f68495a
import { Point } from './Point';
import { FaceDetection } from './FaceDetection';
import { IPoint, Point } from './Point';
import { Rect } from './Rect';
import { Dimensions } from './types';
export declare class FaceLandmarks {
protected _imageWidth: number;
@@ -11,4 +13,20 @@ export declare class FaceLandmarks {
getImageHeight(): number;
getPositions(): Point[];
getRelativePositions(): Point[];
forSize<T extends FaceLandmarks>(width: number, height: number): T;
shift<T extends FaceLandmarks>(x: number, y: number): T;
shiftByPoint<T extends FaceLandmarks>(pt: IPoint): T;
/**
* Aligns the face landmarks after face detection from the relative positions of the face's
* bounding box, or from its current shift. This function should be used to align the face images
* after face detection has been performed, before they are passed to the face recognition net.
* This will make the computed face descriptor more accurate.
*
* @param detection (optional) The bounding box of the face or the face detection result. If
* no argument is passed, the positions of the face landmarks are assumed to be relative to
* its current shift.
* @returns The bounding box of the aligned face.
*/
align(detection?: FaceDetection | Rect): Rect;
protected getRefPointsForAlignment(): Point[];
}
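For orientation, here is a minimal usage sketch of this API, adapted from the locateAndAlignFacesWithSSD helper in the example page changed further down in this PR. It assumes the browser global faceapi exposed by face-api.js; all calls mirror code that appears elsewhere in this diff.

declare const faceapi: any

// Sketch: detect faces, detect landmarks per face, and compute the
// aligned bounding boxes to be fed to the face recognition net.
async function alignedFaceBoxesFor(imgEl: HTMLImageElement, minConfidence: number = 0.7) {
  const input = await faceapi.toNetInput(imgEl)
  const detections = await faceapi.locateFaces(input, minConfidence)
  const faceImages = await faceapi.extractFaces(input.inputs[0], detections)
  const alignedFaceBoxes = await Promise.all(faceImages.map(
    async (faceCanvas: HTMLCanvasElement, i: number) => {
      const faceLandmarks = await faceapi.detectLandmarks(faceCanvas)
      // passing the detection shifts the landmarks by the detection box
      // before the aligned box is computed
      return faceLandmarks.align(detections[i])
    }
  ))
  input.dispose() // free memory for input tensors
  return alignedFaceBoxes
}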
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var getCenterPoint_1 = require("./commons/getCenterPoint");
var FaceDetection_1 = require("./FaceDetection");
var Point_1 = require("./Point");
var Rect_1 = require("./Rect");
// face alignment constants
var relX = 0.5;
var relY = 0.43;
var relScale = 0.45;
var FaceLandmarks = /** @class */ (function () {
function FaceLandmarks(relativeFaceLandmarkPositions, imageDims, shift) {
if (shift === void 0) { shift = new Point_1.Point(0, 0); }
@@ -26,6 +33,47 @@ var FaceLandmarks = /** @class */ (function () {
var _this = this;
return this._faceLandmarks.map(function (pt) { return pt.sub(_this._shift).div(new Point_1.Point(_this._imageWidth, _this._imageHeight)); });
};
FaceLandmarks.prototype.forSize = function (width, height) {
return new this.constructor(this.getRelativePositions(), { width: width, height: height });
};
FaceLandmarks.prototype.shift = function (x, y) {
return new this.constructor(this.getRelativePositions(), { width: this._imageWidth, height: this._imageHeight }, new Point_1.Point(x, y));
};
FaceLandmarks.prototype.shiftByPoint = function (pt) {
return this.shift(pt.x, pt.y);
};
/**
* Aligns the face landmarks after face detection from the relative positions of the face's
* bounding box, or from its current shift. This function should be used to align the face images
* after face detection has been performed, before they are passed to the face recognition net.
* This will make the computed face descriptor more accurate.
*
* @param detection (optional) The bounding box of the face or the face detection result. If
* no argument is passed, the positions of the face landmarks are assumed to be relative to
* its current shift.
* @returns The bounding box of the aligned face.
*/
FaceLandmarks.prototype.align = function (detection) {
if (detection) {
var box = detection instanceof FaceDetection_1.FaceDetection
? detection.getBox().floor()
: detection;
return this.shift(box.x, box.y).align();
}
var centers = this.getRefPointsForAlignment();
var leftEyeCenter = centers[0], rightEyeCenter = centers[1], mouthCenter = centers[2];
var distToMouth = function (pt) { return mouthCenter.sub(pt).magnitude(); };
var eyeToMouthDist = (distToMouth(leftEyeCenter) + distToMouth(rightEyeCenter)) / 2;
var size = Math.floor(eyeToMouthDist / relScale);
var refPoint = getCenterPoint_1.getCenterPoint(centers);
// TODO: pad in case rectangle is out of image bounds
var x = Math.floor(Math.max(0, refPoint.x - (relX * size)));
var y = Math.floor(Math.max(0, refPoint.y - (relY * size)));
return new Rect_1.Rect(x, y, Math.min(size, this._imageWidth - x), Math.min(size, this._imageHeight - y));
};
FaceLandmarks.prototype.getRefPointsForAlignment = function () {
throw new Error('getRefPointsForAlignment not implemented by base class');
};
return FaceLandmarks;
}());
exports.FaceLandmarks = FaceLandmarks;
[source map diff omitted: FaceLandmarks.js.map (generated build artifact)]
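The box geometry in align() can be checked by hand: the side length is the average eye-to-mouth distance divided by relScale, and the box is placed so the center of the reference points sits at (relX, relY) of the box, clamped to the image. Below is a standalone re-derivation with illustrative numbers, not library code, assuming getCenterPoint averages its input points (as its usage here suggests).

// Self-contained restatement of the align() box math.
const relX = 0.5
const relY = 0.43
const relScale = 0.45

type Pt = { x: number, y: number }

function alignBox(leftEye: Pt, rightEye: Pt, mouth: Pt) {
  const dist = (a: Pt, b: Pt) => Math.hypot(a.x - b.x, a.y - b.y)
  const eyeToMouthDist = (dist(leftEye, mouth) + dist(rightEye, mouth)) / 2
  const size = Math.floor(eyeToMouthDist / relScale)
  // assumed behavior of getCenterPoint: average of the reference points
  const refX = (leftEye.x + rightEye.x + mouth.x) / 3
  const refY = (leftEye.y + rightEye.y + mouth.y) / 3
  const x = Math.floor(Math.max(0, refX - relX * size))
  const y = Math.floor(Math.max(0, refY - relY * size))
  return { x, y, size }
}

// eyes at (30, 40) and (70, 40), mouth center at (50, 80):
// eyeToMouthDist ≈ 44.72, size = floor(44.72 / 0.45) = 99,
// ref point = (50, 53.33), so the box is { x: 0, y: 10, size: 99 }
console.log(alignBox({ x: 30, y: 40 }, { x: 70, y: 40 }, { x: 50, y: 80 }))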
import { FaceDetection } from './FaceDetection';
import { FaceLandmarks68 } from './faceLandmarkNet/FaceLandmarks68';
import { FaceLandmarks } from './FaceLandmarks';
export declare class FullFaceDescription {
private _detection;
private _landmarks;
private _descriptor;
constructor(_detection: FaceDetection, _landmarks: FaceLandmarks68, _descriptor: Float32Array);
constructor(_detection: FaceDetection, _landmarks: FaceLandmarks, _descriptor: Float32Array);
readonly detection: FaceDetection;
readonly landmarks: FaceLandmarks68;
readonly landmarks: FaceLandmarks;
readonly descriptor: Float32Array;
forSize(width: number, height: number): FullFaceDescription;
}
{"version":3,"file":"FullFaceDescription.js","sourceRoot":"","sources":["../src/FullFaceDescription.ts"],"names":[],"mappings":";;AAGA;IACE,6BACU,UAAyB,EACzB,UAA2B,EAC3B,WAAyB;QAFzB,eAAU,GAAV,UAAU,CAAe;QACzB,eAAU,GAAV,UAAU,CAAiB;QAC3B,gBAAW,GAAX,WAAW,CAAc;IAChC,CAAC;IAEJ,sBAAW,0CAAS;aAApB;YACE,OAAO,IAAI,CAAC,UAAU,CAAA;QACxB,CAAC;;;OAAA;IAED,sBAAW,0CAAS;aAApB;YACE,OAAO,IAAI,CAAC,UAAU,CAAA;QACxB,CAAC;;;OAAA;IAED,sBAAW,2CAAU;aAArB;YACE,OAAO,IAAI,CAAC,WAAW,CAAA;QACzB,CAAC;;;OAAA;IAEM,qCAAO,GAAd,UAAe,KAAa,EAAE,MAAc;QAC1C,OAAO,IAAI,mBAAmB,CAC5B,IAAI,CAAC,UAAU,CAAC,OAAO,CAAC,KAAK,EAAE,MAAM,CAAC,EACtC,IAAI,CAAC,UAAU,CAAC,OAAO,CAAC,KAAK,EAAE,MAAM,CAAC,EACtC,IAAI,CAAC,WAAW,CACjB,CAAA;IACH,CAAC;IACH,0BAAC;AAAD,CAAC,AA1BD,IA0BC;AA1BY,kDAAmB"}
\ No newline at end of file
{"version":3,"file":"FullFaceDescription.js","sourceRoot":"","sources":["../src/FullFaceDescription.ts"],"names":[],"mappings":";;AAGA;IACE,6BACU,UAAyB,EACzB,UAAyB,EACzB,WAAyB;QAFzB,eAAU,GAAV,UAAU,CAAe;QACzB,eAAU,GAAV,UAAU,CAAe;QACzB,gBAAW,GAAX,WAAW,CAAc;IAChC,CAAC;IAEJ,sBAAW,0CAAS;aAApB;YACE,OAAO,IAAI,CAAC,UAAU,CAAA;QACxB,CAAC;;;OAAA;IAED,sBAAW,0CAAS;aAApB;YACE,OAAO,IAAI,CAAC,UAAU,CAAA;QACxB,CAAC;;;OAAA;IAED,sBAAW,2CAAU;aAArB;YACE,OAAO,IAAI,CAAC,WAAW,CAAA;QACzB,CAAC;;;OAAA;IAEM,qCAAO,GAAd,UAAe,KAAa,EAAE,MAAc;QAC1C,OAAO,IAAI,mBAAmB,CAC5B,IAAI,CAAC,UAAU,CAAC,OAAO,CAAC,KAAK,EAAE,MAAM,CAAC,EACtC,IAAI,CAAC,UAAU,CAAC,OAAO,CAAC,KAAK,EAAE,MAAM,CAAC,EACtC,IAAI,CAAC,WAAW,CACjB,CAAA;IACH,CAAC;IACH,0BAAC;AAAD,CAAC,AA1BD,IA0BC;AA1BY,kDAAmB"}
\ No newline at end of file
import { FaceDetectionNet } from './faceDetectionNet/FaceDetectionNet';
import { FaceLandmarkNet } from './faceLandmarkNet/FaceLandmarkNet';
import { FaceRecognitionNet } from './faceRecognitionNet/FaceRecognitionNet';
import { FullFaceDescription } from './FullFaceDescription';
import { Mtcnn } from './mtcnn/Mtcnn';
import { MtcnnForwardParams } from './mtcnn/types';
import { Rect } from './Rect';
import { TNetInput } from './types';
export declare function allFacesFactory(detectionNet: FaceDetectionNet, landmarkNet: FaceLandmarkNet, recognitionNet: FaceRecognitionNet): (input: TNetInput, minConfidence: number, useBatchProcessing?: boolean) => Promise<FullFaceDescription[]>;
export declare function allFacesFactory(detectionNet: FaceDetectionNet, landmarkNet: FaceLandmarkNet, computeDescriptors: (input: TNetInput, alignedFaceBoxes: Rect[], useBatchProcessing: boolean) => Promise<Float32Array[]>): (input: TNetInput, minConfidence: number, useBatchProcessing?: boolean) => Promise<FullFaceDescription[]>;
export declare function allFacesMtcnnFactory(mtcnn: Mtcnn, computeDescriptors: (input: TNetInput, alignedFaceBoxes: Rect[], useBatchProcessing: boolean) => Promise<Float32Array[]>): (input: TNetInput, mtcnnForwardParams: MtcnnForwardParams, useBatchProcessing?: boolean) => Promise<FullFaceDescription[]>;
@@ -3,47 +3,35 @@ Object.defineProperty(exports, "__esModule", { value: true });
var tslib_1 = require("tslib");
var extractFaceTensors_1 = require("./extractFaceTensors");
var FullFaceDescription_1 = require("./FullFaceDescription");
function allFacesFactory(detectionNet, landmarkNet, recognitionNet) {
function allFacesFactory(detectionNet, landmarkNet, computeDescriptors) {
return function (input, minConfidence, useBatchProcessing) {
if (useBatchProcessing === void 0) { useBatchProcessing = false; }
return tslib_1.__awaiter(this, void 0, void 0, function () {
var detections, faceTensors, faceLandmarksByFace, _a, alignedFaceBoxes, alignedFaceTensors, descriptors, _b;
return tslib_1.__generator(this, function (_c) {
switch (_c.label) {
var detections, faceTensors, faceLandmarksByFace, _a, alignedFaceBoxes, descriptors;
return tslib_1.__generator(this, function (_b) {
switch (_b.label) {
case 0: return [4 /*yield*/, detectionNet.locateFaces(input, minConfidence)];
case 1:
detections = _c.sent();
detections = _b.sent();
return [4 /*yield*/, extractFaceTensors_1.extractFaceTensors(input, detections)];
case 2:
faceTensors = _c.sent();
faceTensors = _b.sent();
if (!useBatchProcessing) return [3 /*break*/, 4];
return [4 /*yield*/, landmarkNet.detectLandmarks(faceTensors)];
case 3:
_a = _c.sent();
_a = _b.sent();
return [3 /*break*/, 6];
case 4: return [4 /*yield*/, Promise.all(faceTensors.map(function (faceTensor) { return landmarkNet.detectLandmarks(faceTensor); }))];
case 5:
_a = _c.sent();
_c.label = 6;
_a = _b.sent();
_b.label = 6;
case 6:
faceLandmarksByFace = _a;
faceTensors.forEach(function (t) { return t.dispose(); });
alignedFaceBoxes = faceLandmarksByFace.map(function (landmarks, i) { return landmarks.align(detections[i].getBox()); });
return [4 /*yield*/, extractFaceTensors_1.extractFaceTensors(input, alignedFaceBoxes)];
return [4 /*yield*/, computeDescriptors(input, alignedFaceBoxes, useBatchProcessing)];
case 7:
alignedFaceTensors = _c.sent();
if (!useBatchProcessing) return [3 /*break*/, 9];
return [4 /*yield*/, recognitionNet.computeFaceDescriptor(alignedFaceTensors)];
case 8:
_b = _c.sent();
return [3 /*break*/, 11];
case 9: return [4 /*yield*/, Promise.all(alignedFaceTensors.map(function (faceTensor) { return recognitionNet.computeFaceDescriptor(faceTensor); }))];
case 10:
_b = _c.sent();
_c.label = 11;
case 11:
descriptors = _b;
alignedFaceTensors.forEach(function (t) { return t.dispose(); });
descriptors = _b.sent();
return [2 /*return*/, detections.map(function (detection, i) {
return new FullFaceDescription_1.FullFaceDescription(detection, faceLandmarksByFace[i].shiftByPoint(detection.getBox()), descriptors[i]);
})];
@@ -53,4 +41,31 @@ function allFacesFactory(detectionNet, landmarkNet, recognitionNet) {
};
}
exports.allFacesFactory = allFacesFactory;
function allFacesMtcnnFactory(mtcnn, computeDescriptors) {
return function (input, mtcnnForwardParams, useBatchProcessing) {
if (useBatchProcessing === void 0) { useBatchProcessing = false; }
return tslib_1.__awaiter(this, void 0, void 0, function () {
var results, alignedFaceBoxes, descriptors;
return tslib_1.__generator(this, function (_a) {
switch (_a.label) {
case 0: return [4 /*yield*/, mtcnn.forward(input, mtcnnForwardParams)];
case 1:
results = _a.sent();
alignedFaceBoxes = results.map(function (_a) {
var faceLandmarks = _a.faceLandmarks;
return faceLandmarks.align();
});
return [4 /*yield*/, computeDescriptors(input, alignedFaceBoxes, useBatchProcessing)];
case 2:
descriptors = _a.sent();
return [2 /*return*/, results.map(function (_a, i) {
var faceDetection = _a.faceDetection, faceLandmarks = _a.faceLandmarks;
return new FullFaceDescription_1.FullFaceDescription(faceDetection, faceLandmarks, descriptors[i]);
})];
}
});
});
};
}
exports.allFacesMtcnnFactory = allFacesMtcnnFactory;
//# sourceMappingURL=allFacesFactory.js.map
\ No newline at end of file
{"version":3,"file":"allFacesFactory.js","sourceRoot":"","sources":["../src/allFacesFactory.ts"],"names":[],"mappings":";;;AAAA,2DAA0D;AAK1D,6DAA4D;AAG5D,yBACE,YAA8B,EAC9B,WAA4B,EAC5B,cAAkC;IAElC,OAAO,UACL,KAAgB,EAChB,aAAqB,EACrB,kBAAmC;QAAnC,mCAAA,EAAA,0BAAmC;;;;;4BAGhB,qBAAM,YAAY,CAAC,WAAW,CAAC,KAAK,EAAE,aAAa,CAAC,EAAA;;wBAAjE,UAAU,GAAG,SAAoD;wBAEnD,qBAAM,uCAAkB,CAAC,KAAK,EAAE,UAAU,CAAC,EAAA;;wBAAzD,WAAW,GAAG,SAA2C;6BAEnC,kBAAkB,EAAlB,wBAAkB;wBAC1C,qBAAM,WAAW,CAAC,eAAe,CAAC,WAAW,CAAC,EAAA;;wBAA9C,KAAA,SAAmE,CAAA;;4BACnE,qBAAM,OAAO,CAAC,GAAG,CAAC,WAAW,CAAC,GAAG,CACjC,UAAA,UAAU,IAAI,OAAA,WAAW,CAAC,eAAe,CAAC,UAAU,CAAC,EAAvC,CAAuC,CACtD,CAAC,EAAA;;wBAFA,KAAA,SAEqB,CAAA;;;wBAJnB,mBAAmB,KAIA;wBAEzB,WAAW,CAAC,OAAO,CAAC,UAAA,CAAC,IAAI,OAAA,CAAC,CAAC,OAAO,EAAE,EAAX,CAAW,CAAC,CAAA;wBAE/B,gBAAgB,GAAG,mBAAmB,CAAC,GAAG,CAC9C,UAAC,SAAS,EAAE,CAAC,IAAK,OAAA,SAAS,CAAC,KAAK,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,MAAM,EAAE,CAAC,EAAvC,CAAuC,CAC1D,CAAA;wBAC0B,qBAAM,uCAAkB,CAAC,KAAK,EAAE,gBAAgB,CAAC,EAAA;;wBAAtE,kBAAkB,GAAG,SAAiD;6BAExD,kBAAkB,EAAlB,wBAAkB;wBAClC,qBAAM,cAAc,CAAC,qBAAqB,CAAC,kBAAkB,CAAC,EAAA;;wBAA9D,KAAA,SAAgF,CAAA;;4BAChF,qBAAM,OAAO,CAAC,GAAG,CAAC,kBAAkB,CAAC,GAAG,CACxC,UAAA,UAAU,IAAI,OAAA,cAAc,CAAC,qBAAqB,CAAC,UAAU,CAAC,EAAhD,CAAgD,CAC/D,CAAC,EAAA;;wBAFA,KAAA,SAEkB,CAAA;;;wBAJhB,WAAW,KAIK;wBAEtB,kBAAkB,CAAC,OAAO,CAAC,UAAA,CAAC,IAAI,OAAA,CAAC,CAAC,OAAO,EAAE,EAAX,CAAW,CAAC,CAAA;wBAE5C,sBAAO,UAAU,CAAC,GAAG,CAAC,UAAC,SAAS,EAAE,CAAC;gCACjC,OAAA,IAAI,yCAAmB,CACrB,SAAS,EACT,mBAAmB,CAAC,CAAC,CAAC,CAAC,YAAY,CAAC,SAAS,CAAC,MAAM,EAAE,CAAC,EACvD,WAAW,CAAC,CAAC,CAAC,CACf;4BAJD,CAIC,CACF,EAAA;;;;KAEF,CAAA;AACH,CAAC;AA7CD,0CA6CC"}
\ No newline at end of file
{"version":3,"file":"allFacesFactory.js","sourceRoot":"","sources":["../src/allFacesFactory.ts"],"names":[],"mappings":";;;AAAA,2DAA0D;AAI1D,6DAA4D;AAM5D,yBACE,YAA8B,EAC9B,WAA4B,EAC5B,kBAAwH;IAExH,OAAO,UACL,KAAgB,EAChB,aAAqB,EACrB,kBAAmC;QAAnC,mCAAA,EAAA,0BAAmC;;;;;4BAGhB,qBAAM,YAAY,CAAC,WAAW,CAAC,KAAK,EAAE,aAAa,CAAC,EAAA;;wBAAjE,UAAU,GAAG,SAAoD;wBAEnD,qBAAM,uCAAkB,CAAC,KAAK,EAAE,UAAU,CAAC,EAAA;;wBAAzD,WAAW,GAAG,SAA2C;6BAEnC,kBAAkB,EAAlB,wBAAkB;wBAC1C,qBAAM,WAAW,CAAC,eAAe,CAAC,WAAW,CAAC,EAAA;;wBAA9C,KAAA,SAAmE,CAAA;;4BACnE,qBAAM,OAAO,CAAC,GAAG,CAAC,WAAW,CAAC,GAAG,CACjC,UAAA,UAAU,IAAI,OAAA,WAAW,CAAC,eAAe,CAAC,UAAU,CAAC,EAAvC,CAAuC,CACtD,CAAC,EAAA;;wBAFA,KAAA,SAEqB,CAAA;;;wBAJnB,mBAAmB,KAIA;wBAEzB,WAAW,CAAC,OAAO,CAAC,UAAA,CAAC,IAAI,OAAA,CAAC,CAAC,OAAO,EAAE,EAAX,CAAW,CAAC,CAAA;wBAE/B,gBAAgB,GAAG,mBAAmB,CAAC,GAAG,CAC9C,UAAC,SAAS,EAAE,CAAC,IAAK,OAAA,SAAS,CAAC,KAAK,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,MAAM,EAAE,CAAC,EAAvC,CAAuC,CAC1D,CAAA;wBAEmB,qBAAM,kBAAkB,CAAC,KAAK,EAAE,gBAAgB,EAAE,kBAAkB,CAAC,EAAA;;wBAAnF,WAAW,GAAG,SAAqE;wBAEzF,sBAAO,UAAU,CAAC,GAAG,CAAC,UAAC,SAAS,EAAE,CAAC;gCACjC,OAAA,IAAI,yCAAmB,CACrB,SAAS,EACT,mBAAmB,CAAC,CAAC,CAAC,CAAC,YAAY,CAAkB,SAAS,CAAC,MAAM,EAAE,CAAC,EACxE,WAAW,CAAC,CAAC,CAAC,CACf;4BAJD,CAIC,CACF,EAAA;;;;KAEF,CAAA;AACH,CAAC;AAtCD,0CAsCC;AAED,8BACE,KAAY,EACZ,kBAAwH;IAExH,OAAO,UACL,KAAgB,EAChB,kBAAsC,EACtC,kBAAmC;QAAnC,mCAAA,EAAA,0BAAmC;;;;;4BAGnB,qBAAM,KAAK,CAAC,OAAO,CAAC,KAAK,EAAE,kBAAkB,CAAC,EAAA;;wBAAxD,OAAO,GAAG,SAA8C;wBAExD,gBAAgB,GAAG,OAAO,CAAC,GAAG,CAClC,UAAC,EAAiB;gCAAf,gCAAa;4BAAO,OAAA,aAAa,CAAC,KAAK,EAAE;wBAArB,CAAqB,CAC7C,CAAA;wBAEmB,qBAAM,kBAAkB,CAAC,KAAK,EAAE,gBAAgB,EAAE,kBAAkB,CAAC,EAAA;;wBAAnF,WAAW,GAAG,SAAqE;wBAEzF,sBAAO,OAAO,CAAC,GAAG,CAAC,UAAC,EAAgC,EAAE,CAAC;oCAAjC,gCAAa,EAAE,gCAAa;gCAChD,OAAA,IAAI,yCAAmB,CACrB,aAAa,EACb,aAAa,EACb,WAAW,CAAC,CAAC,CAAC,CACf;4BAJD,CAIC,CACF,EAAA;;;;KAEF,CAAA;AACH,CAAC;AA3BD,oDA2BC"}
\ No newline at end of file
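Note the changed third parameter: instead of the recognition net itself, the factories now take a descriptor-computation callback. Any function of the following shape can be injected. The stub below is a hypothetical stand-in (not part of the library) that could exercise the factories without a loaded net; the 128-element descriptor length is an assumption about the recognition net's output size.

import { Rect } from './Rect'
import { TNetInput } from './types'

// The callback contract, copied from the factory signatures above.
type ComputeDescriptors = (
  input: TNetInput,
  alignedFaceBoxes: Rect[],
  useBatchProcessing: boolean
) => Promise<Float32Array[]>

// Hypothetical stub: one zeroed descriptor per aligned face box.
const fakeComputeDescriptors: ComputeDescriptors = async (_input, alignedFaceBoxes) =>
  alignedFaceBoxes.map(() => new Float32Array(128))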
import { FaceDetection } from '../FaceDetection';
import { FaceLandmarks } from '../FaceLandmarks';
import { IPoint, Point } from '../Point';
import { Rect } from '../Rect';
import { Point } from '../Point';
export declare class FaceLandmarks68 extends FaceLandmarks {
getJawOutline(): Point[];
getLeftEyeBrow(): Point[];
@@ -10,19 +8,5 @@ export declare class FaceLandmarks68 extends FaceLandmarks {
getLeftEye(): Point[];
getRightEye(): Point[];
getMouth(): Point[];
forSize(width: number, height: number): FaceLandmarks68;
shift(x: number, y: number): FaceLandmarks68;
shiftByPoint(pt: IPoint): FaceLandmarks68;
/**
* Aligns the face landmarks after face detection from the relative positions of the face's
* bounding box, or from its current shift. This function should be used to align the face images
* after face detection has been performed, before they are passed to the face recognition net.
* This will make the computed face descriptor more accurate.
*
* @param detection (optional) The bounding box of the face or the face detection result. If
* no argument is passed, the positions of the face landmarks are assumed to be relative to
* its current shift.
* @returns The bounding box of the aligned face.
*/
align(detection?: FaceDetection | Rect): Rect;
protected getRefPointsForAlignment(): Point[];
}
@@ -2,14 +2,7 @@
Object.defineProperty(exports, "__esModule", { value: true });
var tslib_1 = require("tslib");
var getCenterPoint_1 = require("../commons/getCenterPoint");
var FaceDetection_1 = require("../FaceDetection");
var FaceLandmarks_1 = require("../FaceLandmarks");
var Point_1 = require("../Point");
var Rect_1 = require("../Rect");
// face alignment constants
var relX = 0.5;
var relY = 0.43;
var relScale = 0.45;
var FaceLandmarks68 = /** @class */ (function (_super) {
tslib_1.__extends(FaceLandmarks68, _super);
function FaceLandmarks68() {
@@ -36,47 +29,12 @@ var FaceLandmarks68 = /** @class */ (function (_super) {
FaceLandmarks68.prototype.getMouth = function () {
return this._faceLandmarks.slice(48, 68);
};
FaceLandmarks68.prototype.forSize = function (width, height) {
return new FaceLandmarks68(this.getRelativePositions(), { width: width, height: height });
};
FaceLandmarks68.prototype.shift = function (x, y) {
return new FaceLandmarks68(this.getRelativePositions(), { width: this._imageWidth, height: this._imageHeight }, new Point_1.Point(x, y));
};
FaceLandmarks68.prototype.shiftByPoint = function (pt) {
return this.shift(pt.x, pt.y);
};
/**
* Aligns the face landmarks after face detection from the relative positions of the face's
* bounding box, or from its current shift. This function should be used to align the face images
* after face detection has been performed, before they are passed to the face recognition net.
* This will make the computed face descriptor more accurate.
*
* @param detection (optional) The bounding box of the face or the face detection result. If
* no argument is passed, the positions of the face landmarks are assumed to be relative to
* its current shift.
* @returns The bounding box of the aligned face.
*/
FaceLandmarks68.prototype.align = function (detection) {
if (detection) {
var box = detection instanceof FaceDetection_1.FaceDetection
? detection.getBox().floor()
: detection;
return this.shift(box.x, box.y).align();
}
var centers = [
FaceLandmarks68.prototype.getRefPointsForAlignment = function () {
return [
this.getLeftEye(),
this.getRightEye(),
this.getMouth()
].map(getCenterPoint_1.getCenterPoint);
var leftEyeCenter = centers[0], rightEyeCenter = centers[1], mouthCenter = centers[2];
var distToMouth = function (pt) { return mouthCenter.sub(pt).magnitude(); };
var eyeToMouthDist = (distToMouth(leftEyeCenter) + distToMouth(rightEyeCenter)) / 2;
var size = Math.floor(eyeToMouthDist / relScale);
var refPoint = getCenterPoint_1.getCenterPoint(centers);
// TODO: pad in case rectangle is out of image bounds
var x = Math.floor(Math.max(0, refPoint.x - (relX * size)));
var y = Math.floor(Math.max(0, refPoint.y - (relY * size)));
return new Rect_1.Rect(x, y, size, size);
};
return FaceLandmarks68;
}(FaceLandmarks_1.FaceLandmarks));
[source map diff omitted: FaceLandmarks68.js.map (generated build artifact)]
@@ -15,7 +15,7 @@ export declare const recognitionNet: FaceRecognitionNet;
export declare const nets: {
ssdMobilenet: FaceDetectionNet;
faceLandmark68Net: FaceLandmarkNet;
faceNet: FaceRecognitionNet;
faceRecognitionNet: FaceRecognitionNet;
mtcnn: Mtcnn;
};
export declare function loadFaceDetectionModel(url: string): Promise<void>;
@@ -28,3 +28,4 @@ export declare function detectLandmarks(input: TNetInput): Promise<FaceLandmarks
export declare function computeFaceDescriptor(input: TNetInput): Promise<Float32Array | Float32Array[]>;
export declare function mtcnn(input: TNetInput, forwardParams: MtcnnForwardParams): Promise<MtcnnResult[]>;
export declare const allFaces: (input: tf.Tensor | NetInput | TNetInput, minConfidence: number, useBatchProcessing?: boolean) => Promise<FullFaceDescription[]>;
export declare const allFacesMtcnn: (input: tf.Tensor | NetInput | TNetInput, mtcnnForwardParams: MtcnnForwardParams, useBatchProcessing?: boolean) => Promise<FullFaceDescription[]>;
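A minimal usage sketch of the new allFacesMtcnn export, mirroring the mtcnnFaceRecognition example added later in this PR. It assumes the browser global faceapi and models served from the site root; every call appears elsewhere in this diff.

declare const faceapi: any

async function describeAllFaces(imgEl: HTMLImageElement) {
  await faceapi.loadMtcnnModel('/')
  await faceapi.loadFaceRecognitionModel('/')
  // one FullFaceDescription (detection + landmarks + descriptor) per face
  const fullFaceDescriptions = await faceapi.allFacesMtcnn(imgEl, { minFaceSize: 40 })
  fullFaceDescriptions.forEach(({ detection, landmarks, descriptor }: any) => {
    console.log(detection.getBox(), landmarks.getPositions(), descriptor.length)
  })
}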
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var tslib_1 = require("tslib");
var allFacesFactory_1 = require("./allFacesFactory");
var extractFaceTensors_1 = require("./extractFaceTensors");
var FaceDetectionNet_1 = require("./faceDetectionNet/FaceDetectionNet");
var FaceLandmarkNet_1 = require("./faceLandmarkNet/FaceLandmarkNet");
var FaceRecognitionNet_1 = require("./faceRecognitionNet/FaceRecognitionNet");
@@ -13,7 +15,7 @@ exports.recognitionNet = new FaceRecognitionNet_1.FaceRecognitionNet();
exports.nets = {
ssdMobilenet: exports.detectionNet,
faceLandmark68Net: exports.landmarkNet,
faceNet: exports.recognitionNet,
faceRecognitionNet: exports.recognitionNet,
mtcnn: new Mtcnn_1.Mtcnn()
};
function loadFaceDetectionModel(url) {
@@ -25,7 +27,7 @@ function loadFaceLandmarkModel(url) {
}
exports.loadFaceLandmarkModel = loadFaceLandmarkModel;
function loadFaceRecognitionModel(url) {
return exports.nets.faceNet.load(url);
return exports.nets.faceRecognitionNet.load(url);
}
exports.loadFaceRecognitionModel = loadFaceRecognitionModel;
function loadMtcnnModel(url) {
@@ -50,12 +52,40 @@ function detectLandmarks(input) {
}
exports.detectLandmarks = detectLandmarks;
function computeFaceDescriptor(input) {
return exports.nets.faceNet.computeFaceDescriptor(input);
return exports.nets.faceRecognitionNet.computeFaceDescriptor(input);
}
exports.computeFaceDescriptor = computeFaceDescriptor;
function mtcnn(input, forwardParams) {
return exports.nets.mtcnn.forward(input, forwardParams);
}
exports.mtcnn = mtcnn;
exports.allFaces = allFacesFactory_1.allFacesFactory(exports.detectionNet, exports.landmarkNet, exports.recognitionNet);
exports.allFaces = allFacesFactory_1.allFacesFactory(exports.detectionNet, exports.landmarkNet, computeDescriptorsFactory(exports.nets.faceRecognitionNet));
exports.allFacesMtcnn = allFacesFactory_1.allFacesMtcnnFactory(exports.nets.mtcnn, computeDescriptorsFactory(exports.nets.faceRecognitionNet));
function computeDescriptorsFactory(recognitionNet) {
return function (input, alignedFaceBoxes, useBatchProcessing) {
return tslib_1.__awaiter(this, void 0, void 0, function () {
var alignedFaceTensors, descriptors, _a;
return tslib_1.__generator(this, function (_b) {
switch (_b.label) {
case 0: return [4 /*yield*/, extractFaceTensors_1.extractFaceTensors(input, alignedFaceBoxes)];
case 1:
alignedFaceTensors = _b.sent();
if (!useBatchProcessing) return [3 /*break*/, 3];
return [4 /*yield*/, recognitionNet.computeFaceDescriptor(alignedFaceTensors)];
case 2:
_a = _b.sent();
return [3 /*break*/, 5];
case 3: return [4 /*yield*/, Promise.all(alignedFaceTensors.map(function (faceTensor) { return recognitionNet.computeFaceDescriptor(faceTensor); }))];
case 4:
_a = _b.sent();
_b.label = 5;
case 5:
descriptors = _a;
alignedFaceTensors.forEach(function (t) { return t.dispose(); });
return [2 /*return*/, descriptors];
}
});
});
};
}
//# sourceMappingURL=globalApi.js.map
\ No newline at end of file
{"version":3,"file":"globalApi.js","sourceRoot":"","sources":["../src/globalApi.ts"],"names":[],"mappings":";;AAEA,qDAAoD;AAEpD,wEAAuE;AACvE,qEAAoE;AAEpE,8EAA6E;AAG7E,uCAAsC;AAKzB,QAAA,YAAY,GAAG,IAAI,mCAAgB,EAAE,CAAA;AACrC,QAAA,WAAW,GAAG,IAAI,iCAAe,EAAE,CAAA;AACnC,QAAA,cAAc,GAAG,IAAI,uCAAkB,EAAE,CAAA;AAEtD,8DAA8D;AAC9D,oDAAoD;AACvC,QAAA,IAAI,GAAG;IAClB,YAAY,EAAE,oBAAY;IAC1B,iBAAiB,EAAE,mBAAW;IAC9B,OAAO,EAAE,sBAAc;IACvB,KAAK,EAAE,IAAI,aAAK,EAAE;CACnB,CAAA;AAED,gCAAuC,GAAW;IAChD,OAAO,YAAI,CAAC,YAAY,CAAC,IAAI,CAAC,GAAG,CAAC,CAAA;AACpC,CAAC;AAFD,wDAEC;AAED,+BAAsC,GAAW;IAC/C,OAAO,YAAI,CAAC,iBAAiB,CAAC,IAAI,CAAC,GAAG,CAAC,CAAA;AACzC,CAAC;AAFD,sDAEC;AAED,kCAAyC,GAAW;IAClD,OAAO,YAAI,CAAC,OAAO,CAAC,IAAI,CAAC,GAAG,CAAC,CAAA;AAC/B,CAAC;AAFD,4DAEC;AAED,wBAA+B,GAAW;IACxC,OAAO,YAAI,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAA;AAC7B,CAAC;AAFD,wCAEC;AAED,oBAA2B,GAAW;IACpC,OAAO,OAAO,CAAC,GAAG,CAAC;QACjB,sBAAsB,CAAC,GAAG,CAAC;QAC3B,qBAAqB,CAAC,GAAG,CAAC;QAC1B,wBAAwB,CAAC,GAAG,CAAC;QAC7B,cAAc,CAAC,GAAG,CAAC;KACpB,CAAC,CAAA;AACJ,CAAC;AAPD,gCAOC;AAED,qBACE,KAAgB,EAChB,aAAsB,EACtB,UAAmB;IAEnB,OAAO,YAAI,CAAC,YAAY,CAAC,WAAW,CAAC,KAAK,EAAE,aAAa,EAAE,UAAU,CAAC,CAAA;AACxE,CAAC;AAND,kCAMC;AAED,yBACE,KAAgB;IAEhB,OAAO,YAAI,CAAC,iBAAiB,CAAC,eAAe,CAAC,KAAK,CAAC,CAAA;AACtD,CAAC;AAJD,0CAIC;AAED,+BACE,KAAgB;IAEhB,OAAO,YAAI,CAAC,OAAO,CAAC,qBAAqB,CAAC,KAAK,CAAC,CAAA;AAClD,CAAC;AAJD,sDAIC;AAED,eACE,KAAgB,EAChB,aAAiC;IAEjC,OAAO,YAAI,CAAC,KAAK,CAAC,OAAO,CAAC,KAAK,EAAE,aAAa,CAAC,CAAA;AACjD,CAAC;AALD,sBAKC;AAEY,QAAA,QAAQ,GAIiB,iCAAe,CACnD,oBAAY,EACZ,mBAAW,EACX,sBAAc,CACf,CAAA"}
\ No newline at end of file
{"version":3,"file":"globalApi.js","sourceRoot":"","sources":["../src/globalApi.ts"],"names":[],"mappings":";;;AAEA,qDAA0E;AAC1E,2DAA0D;AAE1D,wEAAuE;AACvE,qEAAoE;AAEpE,8EAA6E;AAE7E,uCAAsC;AAMzB,QAAA,YAAY,GAAG,IAAI,mCAAgB,EAAE,CAAA;AACrC,QAAA,WAAW,GAAG,IAAI,iCAAe,EAAE,CAAA;AACnC,QAAA,cAAc,GAAG,IAAI,uCAAkB,EAAE,CAAA;AAEtD,8DAA8D;AAC9D,oDAAoD;AACvC,QAAA,IAAI,GAAG;IAClB,YAAY,EAAE,oBAAY;IAC1B,iBAAiB,EAAE,mBAAW;IAC9B,kBAAkB,EAAE,sBAAc;IAClC,KAAK,EAAE,IAAI,aAAK,EAAE;CACnB,CAAA;AAED,gCAAuC,GAAW;IAChD,OAAO,YAAI,CAAC,YAAY,CAAC,IAAI,CAAC,GAAG,CAAC,CAAA;AACpC,CAAC;AAFD,wDAEC;AAED,+BAAsC,GAAW;IAC/C,OAAO,YAAI,CAAC,iBAAiB,CAAC,IAAI,CAAC,GAAG,CAAC,CAAA;AACzC,CAAC;AAFD,sDAEC;AAED,kCAAyC,GAAW;IAClD,OAAO,YAAI,CAAC,kBAAkB,CAAC,IAAI,CAAC,GAAG,CAAC,CAAA;AAC1C,CAAC;AAFD,4DAEC;AAED,wBAA+B,GAAW;IACxC,OAAO,YAAI,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAA;AAC7B,CAAC;AAFD,wCAEC;AAED,oBAA2B,GAAW;IACpC,OAAO,OAAO,CAAC,GAAG,CAAC;QACjB,sBAAsB,CAAC,GAAG,CAAC;QAC3B,qBAAqB,CAAC,GAAG,CAAC;QAC1B,wBAAwB,CAAC,GAAG,CAAC;QAC7B,cAAc,CAAC,GAAG,CAAC;KACpB,CAAC,CAAA;AACJ,CAAC;AAPD,gCAOC;AAED,qBACE,KAAgB,EAChB,aAAsB,EACtB,UAAmB;IAEnB,OAAO,YAAI,CAAC,YAAY,CAAC,WAAW,CAAC,KAAK,EAAE,aAAa,EAAE,UAAU,CAAC,CAAA;AACxE,CAAC;AAND,kCAMC;AAED,yBACE,KAAgB;IAEhB,OAAO,YAAI,CAAC,iBAAiB,CAAC,eAAe,CAAC,KAAK,CAAC,CAAA;AACtD,CAAC;AAJD,0CAIC;AAED,+BACE,KAAgB;IAEhB,OAAO,YAAI,CAAC,kBAAkB,CAAC,qBAAqB,CAAC,KAAK,CAAC,CAAA;AAC7D,CAAC;AAJD,sDAIC;AAED,eACE,KAAgB,EAChB,aAAiC;IAEjC,OAAO,YAAI,CAAC,KAAK,CAAC,OAAO,CAAC,KAAK,EAAE,aAAa,CAAC,CAAA;AACjD,CAAC;AALD,sBAKC;AAEY,QAAA,QAAQ,GAIiB,iCAAe,CACnD,oBAAY,EACZ,mBAAW,EACX,yBAAyB,CAAC,YAAI,CAAC,kBAAkB,CAAC,CACnD,CAAA;AAEY,QAAA,aAAa,GAIY,sCAAoB,CACxD,YAAI,CAAC,KAAK,EACV,yBAAyB,CAAC,YAAI,CAAC,kBAAkB,CAAC,CACnD,CAAA;AAED,mCACE,cAAkC;IAElC,OAAO,UAAe,KAAgB,EAAE,gBAAwB,EAAE,kBAA2B;;;;;4BAChE,qBAAM,uCAAkB,CAAC,KAAK,EAAE,gBAAgB,CAAC,EAAA;;wBAAtE,kBAAkB,GAAG,SAAiD;6BAExD,kBAAkB,EAAlB,wBAAkB;wBAClC,qBAAM,cAAc,CAAC,qBAAqB,CAAC,kBAAkB,CAAC,EAAA;;wBAA9D,KAAA,SAAgF,CAAA;;4BAChF,qBAAM,OAAO,CAAC,GAAG,CAAC,kBAAkB,CAAC,GAAG,CACxC,UAAA,UAAU,IAAI,OAAA,cAAc,CAAC,qBAAqB,CAAC,UAAU,CAAC,EAAhD,CAAgD,CAC/D,CAAC,EAAA;;wBAFA,KAAA,SAEkB,CAAA;;;wBAJhB,WAAW,KAIK;wBAEtB,kBAAkB,CAAC,OAAO,CAAC,UAAA,CAAC,IAAI,OAAA,CAAC,CAAC,OAAO,EAAE,EAAX,CAAW,CAAC,CAAA;wBAE5C,sBAAO,WAAW,EAAA;;;;KACnB,CAAA;AACH,CAAC"}
\ No newline at end of file
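The TypeScript source of computeDescriptorsFactory is not part of the visible diff. The following is a reconstruction inferred from the compiled output above and from the code this PR removed from allFacesFactory.ts; it is a sketch, not the verbatim source.

import { extractFaceTensors } from './extractFaceTensors'
import { FaceRecognitionNet } from './faceRecognitionNet/FaceRecognitionNet'
import { Rect } from './Rect'
import { TNetInput } from './types'

function computeDescriptorsFactory(recognitionNet: FaceRecognitionNet) {
  return async function(input: TNetInput, alignedFaceBoxes: Rect[], useBatchProcessing: boolean) {
    // crop the aligned face regions out of the input
    const alignedFaceTensors = await extractFaceTensors(input, alignedFaceBoxes)
    const descriptors = useBatchProcessing
      ? await recognitionNet.computeFaceDescriptor(alignedFaceTensors) as Float32Array[]
      : await Promise.all(alignedFaceTensors.map(
          faceTensor => recognitionNet.computeFaceDescriptor(faceTensor)
        )) as Float32Array[]
    // free memory for the extracted face tensors
    alignedFaceTensors.forEach(t => t.dispose())
    return descriptors
  }
}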
import { FaceLandmarks } from '../FaceLandmarks';
import { IPoint } from '../Point';
import { Point } from '../Point';
export declare class FaceLandmarks5 extends FaceLandmarks {
forSize(width: number, height: number): FaceLandmarks5;
shift(x: number, y: number): FaceLandmarks5;
shiftByPoint(pt: IPoint): FaceLandmarks5;
protected getRefPointsForAlignment(): Point[];
}
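The 5-point implementation below returns the two eye points directly and the center of the two mouth corner points, skipping index 2. This matches the conventional MTCNN landmark order; the index comments here are an assumption based on that convention, restated for clarity.

import { getCenterPoint } from '../commons/getCenterPoint'
import { Point } from '../Point'

// Assumed MTCNN landmark order:
// 0 left eye, 1 right eye, 2 nose, 3 left mouth corner, 4 right mouth corner
function refPointsFromFiveLandmarks(pts: Point[]): Point[] {
  return [
    pts[0],                           // left eye center
    pts[1],                           // right eye center
    getCenterPoint([pts[3], pts[4]])  // mouth center (nose at pts[2] unused)
  ]
}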
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var tslib_1 = require("tslib");
var getCenterPoint_1 = require("../commons/getCenterPoint");
var FaceLandmarks_1 = require("../FaceLandmarks");
var Point_1 = require("../Point");
var FaceLandmarks5 = /** @class */ (function (_super) {
tslib_1.__extends(FaceLandmarks5, _super);
function FaceLandmarks5() {
return _super !== null && _super.apply(this, arguments) || this;
}
FaceLandmarks5.prototype.forSize = function (width, height) {
return new FaceLandmarks5(this.getRelativePositions(), { width: width, height: height });
};
FaceLandmarks5.prototype.shift = function (x, y) {
return new FaceLandmarks5(this.getRelativePositions(), { width: this._imageWidth, height: this._imageHeight }, new Point_1.Point(x, y));
};
FaceLandmarks5.prototype.shiftByPoint = function (pt) {
return this.shift(pt.x, pt.y);
FaceLandmarks5.prototype.getRefPointsForAlignment = function () {
var pts = this.getPositions();
return [
pts[0],
pts[1],
getCenterPoint_1.getCenterPoint([pts[3], pts[4]])
];
};
return FaceLandmarks5;
}(FaceLandmarks_1.FaceLandmarks));
[source map diff omitted: FaceLandmarks5.js.map (generated build artifact)]
Two source diffs could not be displayed because they are too large. You can view the blobs instead.
@@ -127,6 +127,14 @@ function renderNavBar(navbarId, exampleUri) {
name: 'MTCNN Face Detection Webcam'
},
{
uri: 'mtcnn_face_recognition',
name: 'MTCNN Face Recognition'
},
{
uri: 'mtcnn_face_recognition_webcam',
name: 'MTCNN Face Recognition Webcam'
},
{
uri: 'batch_face_landmarks',
name: 'Batch Face Landmarks'
},
@@ -27,6 +27,8 @@ app.get('/detect_and_recognize_faces', (req, res) => res.sendFile(path.join(view
app.get('/mtcnn_face_detection', (req, res) => res.sendFile(path.join(viewsDir, 'mtcnnFaceDetection.html')))
app.get('/mtcnn_face_detection_video', (req, res) => res.sendFile(path.join(viewsDir, 'mtcnnFaceDetectionVideo.html')))
app.get('/mtcnn_face_detection_webcam', (req, res) => res.sendFile(path.join(viewsDir, 'mtcnnFaceDetectionWebcam.html')))
app.get('/mtcnn_face_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'mtcnnFaceRecognition.html')))
app.get('/mtcnn_face_recognition_webcam', (req, res) => res.sendFile(path.join(viewsDir, 'mtcnnFaceRecognitionWebcam.html')))
app.get('/batch_face_landmarks', (req, res) => res.sendFile(path.join(viewsDir, 'batchFaceLandmarks.html')))
app.get('/batch_face_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'batchFaceRecognition.html')))
@@ -50,11 +50,17 @@
<i class="material-icons left">+</i>
</button>
</div>
<div class="row">
<p>
<input type="checkbox" id="drawLinesCheckbox" onchange="onChangeUseMtcnn(event)" />
<label for="drawLinesCheckbox">Use Mtcnn</label>
</p>
</div>
</div>
<script>
let minConfidence = 0.7
let drawLines = true
let useMtcnn = false
function onIncreaseMinConfidence() {
minConfidence = Math.min(faceapi.round(minConfidence + 0.1), 1.0)
@@ -68,26 +74,54 @@
updateResults()
}
function onChangeUseMtcnn(e) {
useMtcnn = $(e.target).prop('checked')
updateResults()
}
async function loadImageFromUrl(url) {
const img = await requestExternalImage($('#imgUrlInput').val())
$('#inputImg').get(0).src = img.src
updateResults()
}
async function updateResults() {
const inputImgEl = $('#inputImg').get(0)
const { width, height } = inputImgEl
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
async function locateAndAlignFacesWithMtcnn(inputImgEl) {
const input = await faceapi.toNetInput(
inputImgEl,
// dispose input manually
false,
// keep canvases (required for mtcnn)
true
)
const results = await faceapi.mtcnn(input, { minFaceSize: 100 })
const unalignedFaceImages = await faceapi.extractFaces(input.inputs[0], results.map(res => res.faceDetection))
const alignedFaceBoxes = results
.filter(res => res.faceDetection.score > minConfidence)
.map(res => res.faceLandmarks.align())
const alignedFaceImages = await faceapi.extractFaces(input.inputs[0], alignedFaceBoxes)
// free memory for input tensors
input.dispose()
return {
unalignedFaceImages,
alignedFaceImages
}
}
async function locateAndAlignFacesWithSSD(inputImgEl) {
const input = await faceapi.toNetInput(inputImgEl)
const locations = await faceapi.locateFaces(input, minConfidence)
const faceImages = await faceapi.extractFaces(input.inputs[0], locations)
const unalignedFaceImages = await faceapi.extractFaces(input.inputs[0], locations)
// detect landmarks and get the aligned face image bounding boxes
const alignedFaceBoxes = await Promise.all(faceImages.map(
const alignedFaceBoxes = await Promise.all(unalignedFaceImages.map(
async (faceCanvas, i) => {
const faceLandmarks = await faceapi.detectLandmarks(faceCanvas)
return faceLandmarks.align(locations[i])
@@ -98,8 +132,28 @@
// free memory for input tensors
input.dispose()
return {
unalignedFaceImages,
alignedFaceImages
}
}
async function updateResults() {
const inputImgEl = $('#inputImg').get(0)
const { width, height } = inputImgEl
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const {
unalignedFaceImages,
alignedFaceImages
} = useMtcnn
? await locateAndAlignFacesWithMtcnn(inputImgEl)
: await locateAndAlignFacesWithSSD(inputImgEl)
$('#facesContainer').empty()
faceImages.forEach(async (faceCanvas, i) => {
unalignedFaceImages.forEach(async (faceCanvas, i) => {
$('#facesContainer').append(faceCanvas)
$('#facesContainer').append(alignedFaceImages[i])
})
@@ -114,6 +168,7 @@
async function run() {
await faceapi.loadFaceDetectionModel('/')
await faceapi.loadFaceLandmarkModel('/')
await faceapi.loadMtcnnModel('/')
$('#loader').hide()
onSelectionChanged($('#selectList select').val())
}
@@ -84,10 +84,7 @@
minFaceSize
}
const c = faceapi.createCanvas({ width: width , height: height })
c.getContext('2d').drawImage(videoEl, 0, 0)
const { results, stats } = await faceapi.nets.mtcnn.forwardWithStats(c, mtcnnParams)
const { results, stats } = await faceapi.nets.mtcnn.forwardWithStats(videoEl, mtcnnParams)
updateTimeStats(stats.total)
if (results) {
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
</div>
<div class="row">
<div class="row side-by-side">
<div class="row">
<label for="minFaceSize">Minimum Face Size:</label>
<input disabled value="40" id="minFaceSize" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinFaceSize()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinFaceSize()"
>
<i class="material-icons left">+</i>
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.7" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn button-sm"
onclick="onDecreaseMinConfidence()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn button-sm"
onclick="onIncreaseMinConfidence()"
>
<i class="material-icons left">+</i>
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="maxDistance">Max Descriptor Distance:</label>
<input disabled value="0.6" id="maxDistance" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn button-sm"
onclick="onDecreaseMaxDistance()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn button-sm"
onclick="onIncreaseMaxDistance()"
>
<i class="material-icons left">+</i>
</button>
</div>
</div>
</div>
<script>
let maxDistance = 0.6
let minConfidence = 0.7
let minFaceSize = 40
let trainDescriptorsByClass = []
function onIncreaseMinFaceSize() {
minFaceSize = Math.min(faceapi.round(minFaceSize + 20), 200)
$('#minFaceSize').val(minFaceSize)
}
function onDecreaseMinFaceSize() {
minFaceSize = Math.max(faceapi.round(minFaceSize - 20), 20)
$('#minFaceSize').val(minFaceSize)
}
function onIncreaseMinConfidence() {
minConfidence = Math.min(faceapi.round(minConfidence + 0.1), 1.0)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onDecreaseMinConfidence() {
minConfidence = Math.max(faceapi.round(minConfidence - 0.1), 0.1)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onIncreaseMaxDistance() {
maxDistance = Math.min(faceapi.round(maxDistance + 0.1), 1.0)
$('#maxDistance').val(maxDistance)
updateResults()
}
function onDecreaseMaxDistance() {
maxDistance = Math.max(faceapi.round(maxDistance - 0.1), 0.1)
$('#maxDistance').val(maxDistance)
updateResults()
}
async function loadImageFromUrl(url) {
const img = await requestExternalImage($('#imgUrlInput').val())
$('#inputImg').get(0).src = img.src
updateResults()
}
async function updateResults() {
const inputImgEl = $('#inputImg').get(0)
const { width, height } = inputImgEl
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const mtcnnParams = {
minFaceSize
}
const fullFaceDescriptions = (await faceapi.allFacesMtcnn(inputImgEl, mtcnnParams))
.map(fd => fd.forSize(width, height))
fullFaceDescriptions.forEach(({ detection, landmarks, descriptor }) => {
faceapi.drawDetection('overlay', [detection], { withScore: false })
faceapi.drawLandmarks('overlay', landmarks, { lineWidth: 4, color: 'red' })
const bestMatch = getBestMatch(trainDescriptorsByClass, descriptor)
const text = `${bestMatch.distance < maxDistance ? bestMatch.className : 'unknown'} (${bestMatch.distance})`
const { x, y, height: boxHeight } = detection.getBox()
faceapi.drawText(
canvas.getContext('2d'),
x,
y + boxHeight,
text,
Object.assign(faceapi.getDefaultDrawOptions(), { color: 'red', fontSize: 16 })
)
})
}
async function onSelectionChanged(uri) {
const imgBuf = await fetchImage(uri)
$(`#inputImg`).get(0).src = (await faceapi.bufferToImage(imgBuf)).src
updateResults()
}
async function run() {
await faceapi.loadMtcnnModel('/')
await faceapi.loadFaceRecognitionModel('/')
trainDescriptorsByClass = await initTrainDescriptorsByClass(faceapi.recognitionNet, 1)
$('#loader').hide()
onSelectionChanged($('#selectList select').val())
}
$(document).ready(function() {
renderNavBar('#navbar', 'mtcnn_face_recognition')
renderImageSelectList(
'#selectList',
async (uri) => {
await onSelectionChanged(uri)
},
'bbt1.jpg'
)
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<video onplay="onPlay(this)" id="inputVideo" autoplay muted></video>
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div class="row">
<label for="minFaceSize">Minimum Face Size:</label>
<input disabled value="200" id="minFaceSize" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinFaceSize()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinFaceSize()"
>
<i class="material-icons left">+</i>
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="time">Time:</label>
<input disabled value="-" id="time" type="text" class="bold">
</div>
<div class="row">
<label for="fps">Estimated Fps:</label>
<input disabled value="-" id="fps" type="text" class="bold">
</div>
</div>
</div>
<script>
let modelLoaded = false
let minFaceSize = 200
let maxDistance = 0.6
let minConfidence = 0.9
let forwardTimes = []
let trainDescriptorsByClass = []
function onIncreaseMinFaceSize() {
minFaceSize = Math.min(faceapi.round(minFaceSize + 50), 300)
$('#minFaceSize').val(minFaceSize)
}
function onDecreaseMinFaceSize() {
minFaceSize = Math.max(faceapi.round(minFaceSize - 50), 50)
$('#minFaceSize').val(minFaceSize)
}
function updateTimeStats(timeInMs) {
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
}
async function onPlay(videoEl) {
if(videoEl.paused || videoEl.ended || !modelLoaded)
return false
const { width, height } = faceapi.getMediaDimensions(videoEl)
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const mtcnnParams = {
minFaceSize
}
const ts = Date.now()
const fullFaceDescriptions = (await faceapi.allFacesMtcnn(videoEl, mtcnnParams))
.map(fd => fd.forSize(width, height))
updateTimeStats(Date.now() - ts)
fullFaceDescriptions.forEach(({ detection, landmarks, descriptor }) => {
faceapi.drawDetection('overlay', [detection], { withScore: false })
faceapi.drawLandmarks('overlay', landmarks.forSize(width, height), { lineWidth: 4, color: 'red' })
const bestMatch = getBestMatch(trainDescriptorsByClass, descriptor)
const text = `${bestMatch.distance < maxDistance ? bestMatch.className : 'unknown'} (${bestMatch.distance})`
const { x, y, height: boxHeight } = detection.getBox()
faceapi.drawText(
canvas.getContext('2d'),
x,
y + boxHeight,
text,
Object.assign(faceapi.getDefaultDrawOptions(), { color: 'red', fontSize: 16 })
)
})
setTimeout(() => onPlay(videoEl))
}
async function run() {
await faceapi.loadMtcnnModel('/')
await faceapi.loadFaceRecognitionModel('/')
// init reference data, e.g. compute a face descriptor for each class
trainDescriptorsByClass = await initTrainDescriptorsByClass(faceapi.recognitionNet)
modelLoaded = true
// try to access users webcam and stream the images
// to the video element
const videoEl = $('#inputVideo').get(0)
navigator.getUserMedia(
{ video: {} },
stream => videoEl.srcObject = stream,
err => console.error(err)
)
$('#loader').hide()
}
$(document).ready(function() {
renderNavBar('#navbar', 'mtcnn_face_recognition_webcam')
run()
})
</script>
</body>
</html>
\ No newline at end of file
import { Point } from './Point';
import { getCenterPoint } from './commons/getCenterPoint';
import { FaceDetection } from './FaceDetection';
import { IPoint, Point } from './Point';
import { Rect } from './Rect';
import { Dimensions } from './types';
// face alignment constants
const relX = 0.5
const relY = 0.43
const relScale = 0.45
export class FaceLandmarks {
protected _imageWidth: number
protected _imageHeight: number
@@ -42,4 +50,65 @@ export class FaceLandmarks {
pt => pt.sub(this._shift).div(new Point(this._imageWidth, this._imageHeight))
)
}
public forSize<T extends FaceLandmarks>(width: number, height: number): T {
return new (this.constructor as any)(
this.getRelativePositions(),
{ width, height }
)
}
public shift<T extends FaceLandmarks>(x: number, y: number): T {
return new (this.constructor as any)(
this.getRelativePositions(),
{ width: this._imageWidth, height: this._imageHeight },
new Point(x, y)
)
}
public shiftByPoint<T extends FaceLandmarks>(pt: IPoint): T {
return this.shift(pt.x, pt.y)
}
/**
* Aligns the face landmarks after face detection from the relative positions of the face's
* bounding box, or from its current shift. This function should be used to align the face images
* after face detection has been performed, before they are passed to the face recognition net.
* This will make the computed face descriptor more accurate.
*
* @param detection (optional) The bounding box of the face or the face detection result. If
* no argument is passed, the positions of the face landmarks are assumed to be relative to
* its current shift.
* @returns The bounding box of the aligned face.
*/
public align(
detection?: FaceDetection | Rect
): Rect {
if (detection) {
const box = detection instanceof FaceDetection
? detection.getBox().floor()
: detection
return this.shift(box.x, box.y).align()
}
const centers = this.getRefPointsForAlignment()
const [leftEyeCenter, rightEyeCenter, mouthCenter] = centers
const distToMouth = (pt: Point) => mouthCenter.sub(pt).magnitude()
const eyeToMouthDist = (distToMouth(leftEyeCenter) + distToMouth(rightEyeCenter)) / 2
const size = Math.floor(eyeToMouthDist / relScale)
const refPoint = getCenterPoint(centers)
// TODO: pad in case rectangle is out of image bounds
const x = Math.floor(Math.max(0, refPoint.x - (relX * size)))
const y = Math.floor(Math.max(0, refPoint.y - (relY * size)))
return new Rect(x, y, Math.min(size, this._imageWidth - x), Math.min(size, this._imageHeight - y))
}
protected getRefPointsForAlignment(): Point[] {
throw new Error('getRefPointsForAlignment not implemented by base class')
}
}
\ No newline at end of file
import { FaceDetection } from './FaceDetection';
import { FaceLandmarks68 } from './faceLandmarkNet/FaceLandmarks68';
import { FaceLandmarks } from './FaceLandmarks';
export class FullFaceDescription {
constructor(
private _detection: FaceDetection,
private _landmarks: FaceLandmarks68,
private _landmarks: FaceLandmarks,
private _descriptor: Float32Array
) {}
@@ -12,7 +12,7 @@ export class FullFaceDescription {
return this._detection
}
public get landmarks(): FaceLandmarks68 {
public get landmarks(): FaceLandmarks {
return this._landmarks
}
@@ -2,14 +2,16 @@ import { extractFaceTensors } from './extractFaceTensors';
import { FaceDetectionNet } from './faceDetectionNet/FaceDetectionNet';
import { FaceLandmarkNet } from './faceLandmarkNet/FaceLandmarkNet';
import { FaceLandmarks68 } from './faceLandmarkNet/FaceLandmarks68';
import { FaceRecognitionNet } from './faceRecognitionNet/FaceRecognitionNet';
import { FullFaceDescription } from './FullFaceDescription';
import { Mtcnn } from './mtcnn/Mtcnn';
import { MtcnnForwardParams } from './mtcnn/types';
import { Rect } from './Rect';
import { TNetInput } from './types';
export function allFacesFactory(
detectionNet: FaceDetectionNet,
landmarkNet: FaceLandmarkNet,
recognitionNet: FaceRecognitionNet
computeDescriptors: (input: TNetInput, alignedFaceBoxes: Rect[], useBatchProcessing: boolean) => Promise<Float32Array[]>
) {
return async function(
input: TNetInput,
@@ -32,20 +34,42 @@ export function allFacesFactory(
const alignedFaceBoxes = faceLandmarksByFace.map(
(landmarks, i) => landmarks.align(detections[i].getBox())
)
const alignedFaceTensors = await extractFaceTensors(input, alignedFaceBoxes)
const descriptors = useBatchProcessing
? await recognitionNet.computeFaceDescriptor(alignedFaceTensors) as Float32Array[]
: await Promise.all(alignedFaceTensors.map(
faceTensor => recognitionNet.computeFaceDescriptor(faceTensor)
)) as Float32Array[]
alignedFaceTensors.forEach(t => t.dispose())
const descriptors = await computeDescriptors(input, alignedFaceBoxes, useBatchProcessing)
return detections.map((detection, i) =>
new FullFaceDescription(
detection,
faceLandmarksByFace[i].shiftByPoint(detection.getBox()),
faceLandmarksByFace[i].shiftByPoint<FaceLandmarks68>(detection.getBox()),
descriptors[i]
)
)
}
}
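Assuming the weights have been loaded, the allFaces function produced by this factory could be used roughly as follows; the model path, the minConfidence argument and the input element are illustrative assumptions, not part of this patch:

// Hypothetical usage sketch of the exported allFaces pipeline; '/models'
// and the second (minConfidence) argument are assumptions.
import * as faceapi from 'face-api.js'

async function describeAllFaces(input: HTMLCanvasElement) {
  await faceapi.loadFaceDetectionModel('/models')
  await faceapi.loadFaceLandmarkModel('/models')
  await faceapi.loadFaceRecognitionModel('/models')

  // detect, align and compute descriptors for all faces in one call
  const fullFaceDescriptions = await faceapi.allFaces(input, 0.8, true)
  fullFaceDescriptions.forEach(fd => {
    // each FullFaceDescription bundles detection, landmarks and descriptor
    console.log(fd.detection.getBox(), fd.descriptor.length)
  })
}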
export function allFacesMtcnnFactory(
mtcnn: Mtcnn,
computeDescriptors: (input: TNetInput, alignedFaceBoxes: Rect[], useBatchProcessing: boolean) => Promise<Float32Array[]>
) {
return async function(
input: TNetInput,
mtcnnForwardParams: MtcnnForwardParams,
useBatchProcessing: boolean = false
): Promise<FullFaceDescription[]> {
const results = await mtcnn.forward(input, mtcnnForwardParams)
const alignedFaceBoxes = results.map(
({ faceLandmarks }) => faceLandmarks.align()
)
const descriptors = await computeDescriptors(input, alignedFaceBoxes, useBatchProcessing)
return results.map(({ faceDetection, faceLandmarks }, i) =>
new FullFaceDescription(
faceDetection,
faceLandmarks,
descriptors[i]
)
)
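The MTCNN variant skips the explicit shift into a detection box: faceLandmarks.align() is called without arguments, since the landmarks returned by mtcnn.forward already carry their absolute shift. A hypothetical usage sketch of the resulting allFacesMtcnn function, with placeholder model path and forward parameters:

// Hypothetical usage sketch of allFacesMtcnn; '/models' and the forward
// parameters below are placeholder values.
import * as faceapi from 'face-api.js'

async function describeAllFacesMtcnn(input: HTMLCanvasElement) {
  await faceapi.loadMtcnnModel('/models')
  await faceapi.loadFaceRecognitionModel('/models')

  const fullFaceDescriptions = await faceapi.allFacesMtcnn(input, { minFaceSize: 100 })
  fullFaceDescriptions.forEach(fd => console.log(fd.descriptor))
}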
import { getCenterPoint } from '../commons/getCenterPoint';
import { FaceDetection } from '../FaceDetection';
import { FaceLandmarks } from '../FaceLandmarks';
import { IPoint, Point } from '../Point';
import { Point } from '../Point';
import { Rect } from '../Rect';
// face alignment constants
const relX = 0.5
const relY = 0.43
const relScale = 0.45
export class FaceLandmarks68 extends FaceLandmarks {
public getJawOutline(): Point[] {
return this._faceLandmarks.slice(0, 17)
@@ -38,64 +33,11 @@ export class FaceLandmarks68 extends FaceLandmarks {
return this._faceLandmarks.slice(48, 68)
}
public forSize(width: number, height: number): FaceLandmarks68 {
return new FaceLandmarks68(
this.getRelativePositions(),
{ width, height }
)
}
public shift(x: number, y: number): FaceLandmarks68 {
return new FaceLandmarks68(
this.getRelativePositions(),
{ width: this._imageWidth, height: this._imageHeight },
new Point(x, y)
)
}
public shiftByPoint(pt: IPoint): FaceLandmarks68 {
return this.shift(pt.x, pt.y)
}
/**
 * Computes the bounding box of the aligned face from the positions of the face
 * landmarks, relative to the given bounding box or to their current shift. This
 * function should be used to align the face images after face detection has been
 * performed, before they are passed to the face recognition net. This will make
 * the computed face descriptor more accurate.
 *
 * @param detection (optional) The bounding box of the face or the face detection
 * result. If no argument is passed, the positions of the face landmarks are
 * assumed to be relative to their current shift.
 * @returns The bounding box of the aligned face.
 */
public align(
detection?: FaceDetection | Rect
): Rect {
if (detection) {
const box = detection instanceof FaceDetection
? detection.getBox().floor()
: detection
return this.shift(box.x, box.y).align()
}
const centers = [
protected getRefPointsForAlignment(): Point[] {
return [
this.getLeftEye(),
this.getRightEye(),
this.getMouth()
].map(getCenterPoint)
const [leftEyeCenter, rightEyeCenter, mouthCenter] = centers
const distToMouth = (pt: Point) => mouthCenter.sub(pt).magnitude()
const eyeToMouthDist = (distToMouth(leftEyeCenter) + distToMouth(rightEyeCenter)) / 2
const size = Math.floor(eyeToMouthDist / relScale)
const refPoint = getCenterPoint(centers)
// TODO: pad in case rectangle is out of image bounds
const x = Math.floor(Math.max(0, refPoint.x - (relX * size)))
const y = Math.floor(Math.max(0, refPoint.y - (relY * size)))
return new Rect(x, y, size, size)
}
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { allFacesFactory } from './allFacesFactory';
import { allFacesFactory, allFacesMtcnnFactory } from './allFacesFactory';
import { extractFaceTensors } from './extractFaceTensors';
import { FaceDetection } from './FaceDetection';
import { FaceDetectionNet } from './faceDetectionNet/FaceDetectionNet';
import { FaceLandmarkNet } from './faceLandmarkNet/FaceLandmarkNet';
import { FaceLandmarks68 } from './faceLandmarkNet/FaceLandmarks68';
import { FaceRecognitionNet } from './faceRecognitionNet/FaceRecognitionNet';
import { FullFaceDescription } from './FullFaceDescription';
import { getDefaultMtcnnForwardParams } from './mtcnn/getDefaultMtcnnForwardParams';
import { Mtcnn } from './mtcnn/Mtcnn';
import { MtcnnForwardParams, MtcnnResult } from './mtcnn/types';
import { NetInput } from './NetInput';
import { Rect } from './Rect';
import { TNetInput } from './types';
export const detectionNet = new FaceDetectionNet()
@@ -22,7 +23,7 @@ export const recognitionNet = new FaceRecognitionNet()
export const nets = {
ssdMobilenet: detectionNet,
faceLandmark68Net: landmarkNet,
faceNet: recognitionNet,
faceRecognitionNet: recognitionNet,
mtcnn: new Mtcnn()
}
@@ -35,7 +36,7 @@ export function loadFaceLandmarkModel(url: string) {
}
export function loadFaceRecognitionModel(url: string) {
return nets.faceNet.load(url)
return nets.faceRecognitionNet.load(url)
}
export function loadMtcnnModel(url: string) {
@@ -68,7 +69,7 @@ export function detectLandmarks(
export function computeFaceDescriptor(
input: TNetInput
): Promise<Float32Array | Float32Array[]> {
return nets.faceNet.computeFaceDescriptor(input)
return nets.faceRecognitionNet.computeFaceDescriptor(input)
}
export function mtcnn(
@@ -85,5 +86,32 @@ export const allFaces: (
) => Promise<FullFaceDescription[]> = allFacesFactory(
detectionNet,
landmarkNet,
recognitionNet
)
\ No newline at end of file
computeDescriptorsFactory(nets.faceRecognitionNet)
)
export const allFacesMtcnn: (
input: tf.Tensor | NetInput | TNetInput,
mtcnnForwardParams: MtcnnForwardParams,
useBatchProcessing?: boolean
) => Promise<FullFaceDescription[]> = allFacesMtcnnFactory(
nets.mtcnn,
computeDescriptorsFactory(nets.faceRecognitionNet)
)
function computeDescriptorsFactory(
recognitionNet: FaceRecognitionNet
) {
return async function(input: TNetInput, alignedFaceBoxes: Rect[], useBatchProcessing: boolean) {
const alignedFaceTensors = await extractFaceTensors(input, alignedFaceBoxes)
const descriptors = useBatchProcessing
? await recognitionNet.computeFaceDescriptor(alignedFaceTensors) as Float32Array[]
: await Promise.all(alignedFaceTensors.map(
faceTensor => recognitionNet.computeFaceDescriptor(faceTensor)
)) as Float32Array[]
alignedFaceTensors.forEach(t => t.dispose())
return descriptors
}
}
\ No newline at end of file
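A note on the forEach(t => t.dispose()) call above: tfjs tensors are backed by memory that is not garbage collected, so every tensor returned by extractFaceTensors must be freed explicitly once its descriptor has been computed. The same pattern in isolation (shapes and values are illustrative):

import * as tf from '@tensorflow/tfjs-core'

// Illustrative only: allocate tensors, consume them, then release their
// backing memory explicitly, mirroring computeDescriptorsFactory above.
const faceTensors = [tf.zeros([150, 150, 3]), tf.ones([150, 150, 3])]
const sums = faceTensors.map(t => {
  const s = t.sum()
  const value = s.dataSync()[0]
  s.dispose() // intermediate scalar must be freed as well
  return value
})
faceTensors.forEach(t => t.dispose())
console.log(sums, tf.memory().numTensors)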
import { getCenterPoint } from '../commons/getCenterPoint';
import { FaceLandmarks } from '../FaceLandmarks';
import { IPoint, Point } from '../Point';
import { Point } from '../Point';
export class FaceLandmarks5 extends FaceLandmarks {
public forSize(width: number, height: number): FaceLandmarks5 {
return new FaceLandmarks5(
this.getRelativePositions(),
{ width, height }
)
}
public shift(x: number, y: number): FaceLandmarks5 {
return new FaceLandmarks5(
this.getRelativePositions(),
{ width: this._imageWidth, height: this._imageHeight },
new Point(x, y)
)
}
public shiftByPoint(pt: IPoint): FaceLandmarks5 {
return this.shift(pt.x, pt.y)
protected getRefPointsForAlignment(): Point[] {
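// the 5 point landmark order is assumed to be
// [leftEye, rightEye, nose, leftMouthCorner, rightMouthCorner]:
// the eye points are used directly, and the mouth center is taken
// as the midpoint of the two mouth corners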
const pts = this.getPositions()
return [
pts[0],
pts[1],
getCenterPoint([pts[3], pts[4]])
]
}
}
\ No newline at end of file