Unverified commit 43ec90b4 by justadudewhohacks, committed by GitHub

Merge pull request #12 from justadudewhohacks/face-alignment

Face alignment
Parents: 5f46b72d, aa200d18
@@ -10,4 +10,7 @@ export declare class Point implements IPoint {
sub(pt: IPoint): Point;
mul(pt: IPoint): Point;
div(pt: IPoint): Point;
abs(): Point;
magnitude(): number;
floor(): Point;
}
@@ -15,6 +15,15 @@ var Point = /** @class */ (function () {
Point.prototype.div = function (pt) {
return new Point(this.x / pt.x, this.y / pt.y);
};
Point.prototype.abs = function () {
return new Point(Math.abs(this.x), Math.abs(this.y));
};
Point.prototype.magnitude = function () {
return Math.sqrt(Math.pow(this.x, 2) + Math.pow(this.y, 2));
};
Point.prototype.floor = function () {
return new Point(Math.floor(this.x), Math.floor(this.y));
};
return Point;
}());
export { Point };
...
{"version":3,"file":"Point.js","sourceRoot":"","sources":["../src/Point.ts"],"names":[],"mappings":"AAKA;IAIE,eAAY,CAAS,EAAE,CAAS;QAC9B,IAAI,CAAC,CAAC,GAAG,CAAC,CAAA;QACV,IAAI,CAAC,CAAC,GAAG,CAAC,CAAA;IACZ,CAAC;IAEM,mBAAG,GAAV,UAAW,EAAU;QACnB,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,CAAA;IAChD,CAAC;IAEM,mBAAG,GAAV,UAAW,EAAU;QACnB,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,CAAA;IAChD,CAAC;IAEM,mBAAG,GAAV,UAAW,EAAU;QACnB,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,CAAA;IAChD,CAAC;IAEM,mBAAG,GAAV,UAAW,EAAU;QACnB,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,CAAA;IAChD,CAAC;IACH,YAAC;AAAD,CAAC,AAxBD,IAwBC"} {"version":3,"file":"Point.js","sourceRoot":"","sources":["../src/Point.ts"],"names":[],"mappings":"AAKA;IAIE,eAAY,CAAS,EAAE,CAAS;QAC9B,IAAI,CAAC,CAAC,GAAG,CAAC,CAAA;QACV,IAAI,CAAC,CAAC,GAAG,CAAC,CAAA;IACZ,CAAC;IAEM,mBAAG,GAAV,UAAW,EAAU;QACnB,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,CAAA;IAChD,CAAC;IAEM,mBAAG,GAAV,UAAW,EAAU;QACnB,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,CAAA;IAChD,CAAC;IAEM,mBAAG,GAAV,UAAW,EAAU;QACnB,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,CAAA;IAChD,CAAC;IAEM,mBAAG,GAAV,UAAW,EAAU;QACnB,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,CAAA;IAChD,CAAC;IAEM,mBAAG,GAAV;QACE,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAA;IACtD,CAAC;IAEM,yBAAS,GAAhB;QACE,OAAO,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,CAAC,GAAG,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAA;IAC7D,CAAC;IAEM,qBAAK,GAAZ;QACE,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAA;IAC1D,CAAC;IACH,YAAC;AAAD,CAAC,AApCD,IAoCC"}
\ No newline at end of file \ No newline at end of file
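A quick sketch of how the new Point helpers compose (editor's illustration; the values are made up):

import { Point } from './Point';

// distance between two landmark points, using the new helpers
const a = new Point(12.7, 4.2);
const b = new Point(3.1, 9.9);
const dist = b.sub(a).magnitude(); // sqrt(dx^2 + dy^2)
const snapped = b.sub(a).abs().floor(); // component-wise |.|, then floor to integer coordinates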
import { FaceLandmarks } from './faceLandmarkNet/FaceLandmarks';
import { Rect } from './Rect';
/**
 * Aligns the face landmarks after face detection, based on the relative position of the face's
 * bounding box or the landmarks' current shift. This function should be used to align the face
 * images after face detection has been performed, before they are passed to the face recognition
 * net. This will make the computed face descriptor more accurate.
 *
 * @param detection (optional) The bounding box of the face or the face detection result. If
 * no argument is passed, the positions of the face landmarks are assumed to be relative to
 * their current shift.
* @returns The bounding box of the aligned face.
*/
export declare function alignFace(faceLandmarks: FaceLandmarks, detection?: Rect): Rect;
import { getCenterPoint } from './commons/getCenterPoint';
import { FaceDetection } from './faceDetectionNet/FaceDetection';
import { Rect } from './Rect';
var relX = 0.5;
var relY = 0.43;
var relScale = 0.45;
/**
 * Aligns the face landmarks after face detection, based on the relative position of the face's
 * bounding box or the landmarks' current shift. This function should be used to align the face
 * images after face detection has been performed, before they are passed to the face recognition
 * net. This will make the computed face descriptor more accurate.
 *
 * @param detection (optional) The bounding box of the face or the face detection result. If
 * no argument is passed, the positions of the face landmarks are assumed to be relative to
 * their current shift.
* @returns The bounding box of the aligned face.
*/
export function alignFace(faceLandmarks, detection) {
if (detection) {
var box = detection instanceof FaceDetection
? detection.getBox().floor()
: detection;
return alignFace(faceLandmarks.shift(box.x, box.y));
}
var centers = [
faceLandmarks.getLeftEye(),
faceLandmarks.getRightEye(),
faceLandmarks.getMouth()
].map(getCenterPoint);
var leftEyeCenter = centers[0], rightEyeCenter = centers[1], mouthCenter = centers[2];
var distToMouth = function (pt) { return mouthCenter.sub(pt).magnitude(); };
var eyeToMouthDist = (distToMouth(leftEyeCenter) + distToMouth(rightEyeCenter)) / 2;
var size = Math.floor(eyeToMouthDist / relScale);
var refPoint = getCenterPoint(centers);
// TODO: pad in case rectangle is out of image bounds
var x = Math.floor(Math.max(0, refPoint.x - (relX * size)));
var y = Math.floor(Math.max(0, refPoint.y - (relY * size)));
return new Rect(x, y, size, size);
}
//# sourceMappingURL=alignFace.js.map
\ No newline at end of file
{"version":3,"file":"alignFace.js","sourceRoot":"","sources":["../src/alignFace.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,cAAc,EAAE,MAAM,0BAA0B,CAAC;AAC1D,OAAO,EAAE,aAAa,EAAE,MAAM,kCAAkC,CAAC;AAGjE,OAAO,EAAE,IAAI,EAAE,MAAM,QAAQ,CAAC;AAE9B,IAAM,IAAI,GAAG,GAAG,CAAA;AAChB,IAAM,IAAI,GAAG,IAAI,CAAA;AACjB,IAAM,QAAQ,GAAG,IAAI,CAAA;AAErB;;;;;;;;;;GAUG;AACH,MAAM,oBACJ,aAA4B,EAC5B,SAAgB;IAEhB,IAAI,SAAS,EAAE;QACb,IAAM,GAAG,GAAG,SAAS,YAAY,aAAa;YAC5C,CAAC,CAAC,SAAS,CAAC,MAAM,EAAE,CAAC,KAAK,EAAE;YAC5B,CAAC,CAAC,SAAS,CAAA;QAEb,OAAO,SAAS,CAAC,aAAa,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,EAAE,GAAG,CAAC,CAAC,CAAC,CAAC,CAAA;KACpD;IAED,IAAM,OAAO,GAAG;QACd,aAAa,CAAC,UAAU,EAAE;QAC1B,aAAa,CAAC,WAAW,EAAE;QAC3B,aAAa,CAAC,QAAQ,EAAE;KACzB,CAAC,GAAG,CAAC,cAAc,CAAC,CAAA;IAEd,IAAA,0BAAa,EAAE,2BAAc,EAAE,wBAAW,CAAW;IAC5D,IAAM,WAAW,GAAG,UAAC,EAAS,IAAK,OAAA,WAAW,CAAC,GAAG,CAAC,EAAE,CAAC,CAAC,SAAS,EAAE,EAA/B,CAA+B,CAAA;IAClE,IAAM,cAAc,GAAG,CAAC,WAAW,CAAC,aAAa,CAAC,GAAG,WAAW,CAAC,cAAc,CAAC,CAAC,GAAG,CAAC,CAAA;IAErF,IAAM,IAAI,GAAG,IAAI,CAAC,KAAK,CAAC,cAAc,GAAG,QAAQ,CAAC,CAAA;IAElD,IAAM,QAAQ,GAAG,cAAc,CAAC,OAAO,CAAC,CAAA;IACxC,qDAAqD;IACrD,IAAM,CAAC,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,GAAG,CAAC,IAAI,GAAG,IAAI,CAAC,CAAC,CAAC,CAAA;IAC7D,IAAM,CAAC,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,GAAG,CAAC,IAAI,GAAG,IAAI,CAAC,CAAC,CAAC,CAAA;IAE7D,OAAO,IAAI,IAAI,CAAC,CAAC,EAAE,CAAC,EAAE,IAAI,EAAE,IAAI,CAAC,CAAA;AACnC,CAAC"}
\ No newline at end of file
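For intuition, the sizing arithmetic inside alignFace, worked through with made-up numbers (only relX, relY and relScale come from the source above):

const relX = 0.5, relY = 0.43, relScale = 0.45;
const eyeToMouthDist = 45; // px, hypothetical mean distance from the eye centers to the mouth center
const size = Math.floor(eyeToMouthDist / relScale); // 100
const refPoint = { x: 160, y: 120 }; // hypothetical center of the three landmark centers
const x = Math.floor(Math.max(0, refPoint.x - relX * size)); // 160 - 50 = 110
const y = Math.floor(Math.max(0, refPoint.y - relY * size)); // 120 - 43 = 77
// => the aligned face box is Rect(110, 77, 100, 100)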
import { Point } from '../Point';
export declare function getCenterPoint(pts: Point[]): Point;
import { Point } from '../Point';
export function getCenterPoint(pts) {
return pts.reduce(function (sum, pt) { return sum.add(pt); }, new Point(0, 0))
.div(new Point(pts.length, pts.length));
}
//# sourceMappingURL=getCenterPoint.js.map
\ No newline at end of file
{"version":3,"file":"getCenterPoint.js","sourceRoot":"","sources":["../../src/commons/getCenterPoint.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,KAAK,EAAE,MAAM,UAAU,CAAC;AAEjC,MAAM,yBAAyB,GAAY;IACzC,OAAO,GAAG,CAAC,MAAM,CAAC,UAAC,GAAG,EAAE,EAAE,IAAK,OAAA,GAAG,CAAC,GAAG,CAAC,EAAE,CAAC,EAAX,CAAW,EAAE,IAAI,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;SACzD,GAAG,CAAC,IAAI,KAAK,CAAC,GAAG,CAAC,MAAM,EAAE,GAAG,CAAC,MAAM,CAAC,CAAC,CAAA;AAC3C,CAAC"}
\ No newline at end of file
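A minimal sketch of getCenterPoint in use (points and import paths are assumed):

import { Point } from './Point';
import { getCenterPoint } from './commons/getCenterPoint';

// centroid of three points: ((0 + 4 + 8) / 3, (0 + 2 + 4) / 3) = (4, 2)
const center = getCenterPoint([new Point(0, 0), new Point(4, 2), new Point(8, 4)]);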
import * as tf from '@tensorflow/tfjs-core';
import { FaceDetection } from './faceDetectionNet/FaceDetection';
import { NetInput } from './NetInput';
import { Rect } from './Rect';
import { TNetInput } from './types';
/**
 * Extracts the tensors of the image regions containing the detected faces.
 * Useful if you want to compute the face descriptors for the face images.
 * Using this method is faster than extracting a canvas for each face and
 * converting them to tensors individually.
 *
 * @param input The image that face detection has been performed on.
 * @param detections The face detection results or face bounding boxes for that image.
 * @returns Tensors of the corresponding image region for each detected face.
 */
export declare function extractFaceTensors(image: tf.Tensor | NetInput | TNetInput, detections: Array<FaceDetection | Rect>): tf.Tensor4D[];
import * as tf from '@tensorflow/tfjs-core';
import { FaceDetection } from './faceDetectionNet/FaceDetection';
import { getImageTensor } from './getImageTensor';
/**
 * Extracts the tensors of the image regions containing the detected faces.
 * Useful if you want to compute the face descriptors for the face images.
 * Using this method is faster than extracting a canvas for each face and
 * converting them to tensors individually.
 *
 * @param input The image that face detection has been performed on.
 * @param detections The face detection results or face bounding boxes for that image.
 * @returns Tensors of the corresponding image region for each detected face.
 */
export function extractFaceTensors(image, detections) {
@@ -16,8 +16,11 @@ export function extractFaceTensors(image, detections) {
var imgTensor = getImageTensor(image);
// TODO handle batches
var _a = imgTensor.shape, batchSize = _a[0], imgHeight = _a[1], imgWidth = _a[2], numChannels = _a[3];
var boxes = detections.map(function (det) { return det instanceof FaceDetection
? det.forSize(imgWidth, imgHeight).getBox().floor()
: det; });
var faceTensors = boxes.map(function (_a) {
var x = _a.x, y = _a.y, width = _a.width, height = _a.height;
return tf.slice(imgTensor, [0, y, x, 0], [1, height, width, numChannels]);
});
return faceTensors;
...
{"version":3,"file":"extractFaceTensors.js","sourceRoot":"","sources":["../src/extractFaceTensors.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,MAAM,uBAAuB,CAAC;AAG5C,OAAO,EAAE,cAAc,EAAE,MAAM,kBAAkB,CAAC;AAIlD;;;;;;;;;;GAUG;AACH,MAAM,6BACJ,KAAuC,EACvC,UAA2B;IAE3B,OAAO,EAAE,CAAC,IAAI,CAAC;QACb,IAAM,SAAS,GAAG,cAAc,CAAC,KAAK,CAAC,CAAA;QAEvC,sBAAsB;QAChB,IAAA,oBAA+D,EAA9D,iBAAS,EAAE,iBAAS,EAAE,gBAAQ,EAAE,mBAAW,CAAmB;QAErE,IAAM,WAAW,GAAG,UAAU,CAAC,GAAG,CAAC,UAAA,GAAG;YAC9B,IAAA,sDAA2E,EAAzE,QAAC,EAAE,QAAC,EAAE,gBAAK,EAAE,kBAAM,CAAsD;YACjF,OAAO,EAAE,CAAC,KAAK,CAAC,SAAS,EAAE,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,MAAM,EAAE,KAAK,EAAE,WAAW,CAAC,CAAC,CAAA;QAC3E,CAAC,CAAC,CAAA;QAEF,OAAO,WAAW,CAAA;IACpB,CAAC,CAAC,CAAA;AACJ,CAAC"} {"version":3,"file":"extractFaceTensors.js","sourceRoot":"","sources":["../src/extractFaceTensors.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,MAAM,uBAAuB,CAAC;AAE5C,OAAO,EAAE,aAAa,EAAE,MAAM,kCAAkC,CAAC;AACjE,OAAO,EAAE,cAAc,EAAE,MAAM,kBAAkB,CAAC;AAKlD;;;;;;;;;GASG;AACH,MAAM,6BACJ,KAAuC,EACvC,UAAqC;IAErC,OAAO,EAAE,CAAC,IAAI,CAAC;QACb,IAAM,SAAS,GAAG,cAAc,CAAC,KAAK,CAAC,CAAA;QAEvC,sBAAsB;QAChB,IAAA,oBAA+D,EAA9D,iBAAS,EAAE,iBAAS,EAAE,gBAAQ,EAAE,mBAAW,CAAmB;QAErE,IAAM,KAAK,GAAG,UAAU,CAAC,GAAG,CAC1B,UAAA,GAAG,IAAI,OAAA,GAAG,YAAY,aAAa;YACjC,CAAC,CAAC,GAAG,CAAC,OAAO,CAAC,QAAQ,EAAE,SAAS,CAAC,CAAC,MAAM,EAAE,CAAC,KAAK,EAAE;YACnD,CAAC,CAAC,GAAG,EAFA,CAEA,CACR,CAAA;QACD,IAAM,WAAW,GAAG,KAAK,CAAC,GAAG,CAAC,UAAC,EAAuB;gBAArB,QAAC,EAAE,QAAC,EAAE,gBAAK,EAAE,kBAAM;YAClD,OAAA,EAAE,CAAC,KAAK,CAAC,SAAS,EAAE,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,MAAM,EAAE,KAAK,EAAE,WAAW,CAAC,CAAC;QAAlE,CAAkE,CACnE,CAAA;QAED,OAAO,WAAW,CAAA;IACpB,CAAC,CAAC,CAAA;AACJ,CAAC"}
\ No newline at end of file \ No newline at end of file
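A hedged usage sketch: the note about manual disposal was dropped from the doc comment, but the returned tensors still need to be disposed by the caller. Here, input and detections are assumed to come from a prior locateFaces call:

const faceTensors = extractFaceTensors(input, detections);
// ... use the tensors, e.g. to compute face descriptors ...
faceTensors.forEach(t => t.dispose()); // free the tensor memory once done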
import { FaceDetection } from './faceDetectionNet/FaceDetection';
import { Rect } from './Rect';
/**
 * Extracts the image regions containing the detected faces.
 *
 * @param input The image that face detection has been performed on.
 * @param detections The face detection results or face bounding boxes for that image.
 * @returns The Canvases of the corresponding image region for each detected face.
 */
export declare function extractFaces(image: HTMLCanvasElement, detections: Array<FaceDetection | Rect>): HTMLCanvasElement[];
import { FaceDetection } from './faceDetectionNet/FaceDetection';
import { createCanvas, getContext2dOrThrow } from './utils';
/**
 * Extracts the image regions containing the detected faces.
 *
 * @param input The image that face detection has been performed on.
 * @param detections The face detection results or face bounding boxes for that image.
 * @returns The Canvases of the corresponding image region for each detected face.
 */
export function extractFaces(image, detections) {
var ctx = getContext2dOrThrow(image);
var boxes = detections.map(function (det) { return det instanceof FaceDetection
? det.forSize(image.width, image.height).getBox().floor()
: det; });
return boxes.map(function (_a) {
var x = _a.x, y = _a.y, width = _a.width, height = _a.height;
var faceImg = createCanvas({ width: width, height: height });
getContext2dOrThrow(faceImg)
.putImageData(ctx.getImageData(x, y, width, height), 0, 0);
...
{"version":3,"file":"extractFaces.js","sourceRoot":"","sources":["../src/extractFaces.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,YAAY,EAAE,mBAAmB,EAAE,MAAM,SAAS,CAAC;AAE5D;;;;;;GAMG;AACH,MAAM,uBACJ,KAAwB,EACxB,UAA2B;IAE3B,IAAM,GAAG,GAAG,mBAAmB,CAAC,KAAK,CAAC,CAAA;IAEtC,OAAO,UAAU,CAAC,GAAG,CAAC,UAAA,GAAG;QACjB,IAAA,4DAAiF,EAA/E,QAAC,EAAE,QAAC,EAAE,gBAAK,EAAE,kBAAM,CAA4D;QACvF,IAAM,OAAO,GAAG,YAAY,CAAC,EAAE,KAAK,OAAA,EAAE,MAAM,QAAA,EAAE,CAAC,CAAA;QAC/C,mBAAmB,CAAC,OAAO,CAAC;aACzB,YAAY,CAAC,GAAG,CAAC,YAAY,CAAC,CAAC,EAAE,CAAC,EAAE,KAAK,EAAE,MAAM,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAA;QAC5D,OAAO,OAAO,CAAA;IAChB,CAAC,CAAC,CAAA;AACJ,CAAC"} {"version":3,"file":"extractFaces.js","sourceRoot":"","sources":["../src/extractFaces.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,aAAa,EAAE,MAAM,kCAAkC,CAAC;AAEjE,OAAO,EAAE,YAAY,EAAE,mBAAmB,EAAE,MAAM,SAAS,CAAC;AAE5D;;;;;;GAMG;AACH,MAAM,uBACJ,KAAwB,EACxB,UAAqC;IAErC,IAAM,GAAG,GAAG,mBAAmB,CAAC,KAAK,CAAC,CAAA;IAEtC,IAAM,KAAK,GAAG,UAAU,CAAC,GAAG,CAC1B,UAAA,GAAG,IAAI,OAAA,GAAG,YAAY,aAAa;QACjC,CAAC,CAAC,GAAG,CAAC,OAAO,CAAC,KAAK,CAAC,KAAK,EAAE,KAAK,CAAC,MAAM,CAAC,CAAC,MAAM,EAAE,CAAC,KAAK,EAAE;QACzD,CAAC,CAAC,GAAG,EAFA,CAEA,CACR,CAAA;IACD,OAAO,KAAK,CAAC,GAAG,CAAC,UAAC,EAAuB;YAArB,QAAC,EAAE,QAAC,EAAE,gBAAK,EAAE,kBAAM;QACrC,IAAM,OAAO,GAAG,YAAY,CAAC,EAAE,KAAK,OAAA,EAAE,MAAM,QAAA,EAAE,CAAC,CAAA;QAC/C,mBAAmB,CAAC,OAAO,CAAC;aACzB,YAAY,CAAC,GAAG,CAAC,YAAY,CAAC,CAAC,EAAE,CAAC,EAAE,KAAK,EAAE,MAAM,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAA;QAC5D,OAAO,OAAO,CAAA;IAChB,CAAC,CAAC,CAAA;AACJ,CAAC"}
\ No newline at end of file \ No newline at end of file
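And the canvas counterpart, sketched under the same assumptions (inputCanvas and detections are hypothetical):

const faceCanvases = extractFaces(inputCanvas, detections);
faceCanvases.forEach(c => document.body.appendChild(c)); // e.g. show each cropped face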
@@ -351,6 +351,22 @@
reader.readAsDataURL(buf);
});
}
function imageTensorToCanvas(imgTensor, canvas) {
return __awaiter$1(this, void 0, void 0, function () {
var targetCanvas, _a, _, height, width, numChannels;
return __generator$1(this, function (_b) {
switch (_b.label) {
case 0:
targetCanvas = canvas || document.createElement('canvas');
_a = imgTensor.shape, _ = _a[0], height = _a[1], width = _a[2], numChannels = _a[3];
return [4 /*yield*/, toPixels(imgTensor.as3D(height, width, numChannels).toInt(), targetCanvas)];
case 1:
_b.sent();
return [2 /*return*/, targetCanvas];
}
});
});
}
function getDefaultDrawOptions() {
return {
color: 'blue',
@@ -521,7 +537,7 @@
 * Pads the smaller dimension of an image tensor with zeros, such that width === height.
 *
 * @param imgTensor The image tensor.
 * @param isCenterImage (optional, default: false) If true, add padding on both sides of the image, such that the image is centered.
 * @returns The padded tensor with width === height.
 */
function padToSquare(imgTensor, isCenterImage) {
@@ -1045,6 +1061,15 @@
Point.prototype.div = function (pt) {
return new Point(this.x / pt.x, this.y / pt.y);
};
Point.prototype.abs = function () {
return new Point(Math.abs(this.x), Math.abs(this.y));
};
Point.prototype.magnitude = function () {
return Math.sqrt(Math.pow(this.x, 2) + Math.pow(this.y, 2));
};
Point.prototype.floor = function () {
return new Point(Math.floor(this.x), Math.floor(this.y));
};
return Point;
}());
@@ -1097,6 +1122,15 @@
};
}
function getCenterPoint(pts) {
return pts.reduce(function (sum, pt) { return sum.add(pt); }, new Point(0, 0))
.div(new Point(pts.length, pts.length));
}
// face alignment constants
var relX = 0.5;
var relY = 0.43;
var relScale = 0.45;
var FaceLandmarks = /** @class */ (function () {
function FaceLandmarks(relativeFaceLandmarkPositions, imageDims, shift) {
if (shift === void 0) { shift = new Point(0, 0); }
@@ -1106,6 +1140,15 @@
this._shift = shift;
this._faceLandmarks = relativeFaceLandmarkPositions.map(function (pt) { return pt.mul(new Point(width, height)).add(shift); });
}
FaceLandmarks.prototype.getShift = function () {
return new Point(this._shift.x, this._shift.y);
};
FaceLandmarks.prototype.getImageWidth = function () {
return this._imageWidth;
};
FaceLandmarks.prototype.getImageHeight = function () {
return this._imageHeight;
};
FaceLandmarks.prototype.getPositions = function () {
return this._faceLandmarks;
};
@@ -1140,6 +1183,39 @@
FaceLandmarks.prototype.shift = function (x, y) {
return new FaceLandmarks(this.getRelativePositions(), { width: this._imageWidth, height: this._imageHeight }, new Point(x, y));
};
/**
 * Aligns the face landmarks after face detection, based on the relative position of the face's
 * bounding box or the landmarks' current shift. This function should be used to align the face
 * images after face detection has been performed, before they are passed to the face recognition
 * net. This will make the computed face descriptor more accurate.
 *
 * @param detection (optional) The bounding box of the face or the face detection result. If
 * no argument is passed, the positions of the face landmarks are assumed to be relative to
 * their current shift.
* @returns The bounding box of the aligned face.
*/
FaceLandmarks.prototype.align = function (detection) {
if (detection) {
var box = detection instanceof FaceDetection
? detection.getBox().floor()
: detection;
return this.shift(box.x, box.y).align();
}
var centers = [
this.getLeftEye(),
this.getRightEye(),
this.getMouth()
].map(getCenterPoint);
var leftEyeCenter = centers[0], rightEyeCenter = centers[1], mouthCenter = centers[2];
var distToMouth = function (pt) { return mouthCenter.sub(pt).magnitude(); };
var eyeToMouthDist = (distToMouth(leftEyeCenter) + distToMouth(rightEyeCenter)) / 2;
var size = Math.floor(eyeToMouthDist / relScale);
var refPoint = getCenterPoint(centers);
// TODO: pad in case rectangle is out of image bounds
var x = Math.floor(Math.max(0, refPoint.x - (relX * size)));
var y = Math.floor(Math.max(0, refPoint.y - (relY * size)));
return new Rect(x, y, size, size);
};
return FaceLandmarks;
}());
@@ -1415,13 +1491,16 @@
 * Extracts the image regions containing the detected faces.
 *
 * @param input The image that face detection has been performed on.
 * @param detections The face detection results or face bounding boxes for that image.
 * @returns The Canvases of the corresponding image region for each detected face.
 */
function extractFaces(image, detections) {
var ctx = getContext2dOrThrow(image);
var boxes = detections.map(function (det) { return det instanceof FaceDetection
? det.forSize(image.width, image.height).getBox().floor()
: det; });
return boxes.map(function (_a) {
var x = _a.x, y = _a.y, width = _a.width, height = _a.height;
var faceImg = createCanvas({ width: width, height: height });
getContext2dOrThrow(faceImg)
.putImageData(ctx.getImageData(x, y, width, height), 0, 0);
@@ -1431,13 +1510,12 @@
/**
 * Extracts the tensors of the image regions containing the detected faces.
 * Useful if you want to compute the face descriptors for the face images.
 * Using this method is faster than extracting a canvas for each face and
 * converting them to tensors individually.
 *
 * @param input The image that face detection has been performed on.
 * @param detections The face detection results or face bounding boxes for that image.
 * @returns Tensors of the corresponding image region for each detected face.
 */
function extractFaceTensors(image$$1, detections) {
@@ -1445,8 +1523,11 @@
var imgTensor = getImageTensor(image$$1);
// TODO handle batches
var _a = imgTensor.shape, batchSize = _a[0], imgHeight = _a[1], imgWidth = _a[2], numChannels = _a[3];
var boxes = detections.map(function (det) { return det instanceof FaceDetection
? det.forSize(imgWidth, imgHeight).getBox().floor()
: det; });
var faceTensors = boxes.map(function (_a) {
var x = _a.x, y = _a.y, width = _a.width, height = _a.height;
return slice(imgTensor, [0, y, x, 0], [1, height, width, numChannels]);
});
return faceTensors;
@@ -1470,6 +1551,7 @@
exports.createCanvasFromMedia = createCanvasFromMedia;
exports.getMediaDimensions = getMediaDimensions;
exports.bufferToImage = bufferToImage;
exports.imageTensorToCanvas = imageTensorToCanvas;
exports.getDefaultDrawOptions = getDefaultDrawOptions;
exports.drawBox = drawBox;
exports.drawText = drawText;
...
[diff omitted: the bundled dist file is too large to display]
@@ -2,9 +2,9 @@ import * as tf from '@tensorflow/tfjs-core';
import { NetInput } from '../NetInput';
import { FaceDetection } from './FaceDetection';
export declare function faceDetectionNet(weights: Float32Array): {
forward: (input: string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement | (string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement)[] | tf.Tensor<tf.Rank> | NetInput) => {
boxes: tf.Tensor<tf.Rank.R2>[];
scores: tf.Tensor<tf.Rank.R1>[];
};
locateFaces: (input: tf.Tensor<tf.Rank> | NetInput, minConfidence?: number, maxResults?: number) => Promise<FaceDetection[]>;
};
import { Point } from '../Point';
import { Rect } from '../Rect';
import { Dimensions } from '../types';
export declare class FaceLandmarks {
private _imageWidth;
private _imageHeight;
private _shift;
private _faceLandmarks;
constructor(relativeFaceLandmarkPositions: Point[], imageDims: Dimensions, shift?: Point);
getShift(): Point;
getImageWidth(): number;
getImageHeight(): number;
getPositions(): Point[];
getRelativePositions(): Point[];
getJawOutline(): Point[];
@@ -17,4 +21,16 @@ export declare class FaceLandmarks {
getMouth(): Point[];
forSize(width: number, height: number): FaceLandmarks;
shift(x: number, y: number): FaceLandmarks;
/**
 * Aligns the face landmarks after face detection, based on the relative position of the face's
 * bounding box or the landmarks' current shift. This function should be used to align the face
 * images after face detection has been performed, before they are passed to the face recognition
 * net. This will make the computed face descriptor more accurate.
 *
 * @param detection (optional) The bounding box of the face or the face detection result. If
 * no argument is passed, the positions of the face landmarks are assumed to be relative to
 * their current shift.
* @returns The bounding box of the aligned face.
*/
align(detection?: Rect): Rect;
}
import { getCenterPoint } from '../commons/getCenterPoint';
import { FaceDetection } from '../faceDetectionNet/FaceDetection';
import { Point } from '../Point';
import { Rect } from '../Rect';
// face alignment constants
var relX = 0.5;
var relY = 0.43;
var relScale = 0.45;
var FaceLandmarks = /** @class */ (function () {
function FaceLandmarks(relativeFaceLandmarkPositions, imageDims, shift) {
if (shift === void 0) { shift = new Point(0, 0); }
@@ -8,6 +15,15 @@ var FaceLandmarks = /** @class */ (function () {
this._shift = shift;
this._faceLandmarks = relativeFaceLandmarkPositions.map(function (pt) { return pt.mul(new Point(width, height)).add(shift); });
}
FaceLandmarks.prototype.getShift = function () {
return new Point(this._shift.x, this._shift.y);
};
FaceLandmarks.prototype.getImageWidth = function () {
return this._imageWidth;
};
FaceLandmarks.prototype.getImageHeight = function () {
return this._imageHeight;
};
FaceLandmarks.prototype.getPositions = function () {
return this._faceLandmarks;
};
@@ -42,6 +58,39 @@ var FaceLandmarks = /** @class */ (function () {
FaceLandmarks.prototype.shift = function (x, y) {
return new FaceLandmarks(this.getRelativePositions(), { width: this._imageWidth, height: this._imageHeight }, new Point(x, y));
};
/**
 * Aligns the face landmarks after face detection, based on the relative position of the face's
 * bounding box or the landmarks' current shift. This function should be used to align the face
 * images after face detection has been performed, before they are passed to the face recognition
 * net. This will make the computed face descriptor more accurate.
 *
 * @param detection (optional) The bounding box of the face or the face detection result. If
 * no argument is passed, the positions of the face landmarks are assumed to be relative to
 * their current shift.
* @returns The bounding box of the aligned face.
*/
FaceLandmarks.prototype.align = function (detection) {
if (detection) {
var box = detection instanceof FaceDetection
? detection.getBox().floor()
: detection;
return this.shift(box.x, box.y).align();
}
var centers = [
this.getLeftEye(),
this.getRightEye(),
this.getMouth()
].map(getCenterPoint);
var leftEyeCenter = centers[0], rightEyeCenter = centers[1], mouthCenter = centers[2];
var distToMouth = function (pt) { return mouthCenter.sub(pt).magnitude(); };
var eyeToMouthDist = (distToMouth(leftEyeCenter) + distToMouth(rightEyeCenter)) / 2;
var size = Math.floor(eyeToMouthDist / relScale);
var refPoint = getCenterPoint(centers);
// TODO: pad in case rectangle is out of image bounds
var x = Math.floor(Math.max(0, refPoint.x - (relX * size)));
var y = Math.floor(Math.max(0, refPoint.y - (relY * size)));
return new Rect(x, y, size, size);
};
return FaceLandmarks;
}());
export { FaceLandmarks };
...
{"version":3,"file":"FaceLandmarks.js","sourceRoot":"","sources":["../../src/faceLandmarkNet/FaceLandmarks.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,KAAK,EAAU,MAAM,UAAU,CAAC;AAGzC;IAME,uBACE,6BAAsC,EACtC,SAAqB,EACrB,KAA8B;QAA9B,sBAAA,EAAA,YAAmB,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC;QAEtB,IAAA,uBAAK,EAAE,yBAAM,CAAc;QACnC,IAAI,CAAC,WAAW,GAAG,KAAK,CAAA;QACxB,IAAI,CAAC,YAAY,GAAG,MAAM,CAAA;QAC1B,IAAI,CAAC,MAAM,GAAG,KAAK,CAAA;QACnB,IAAI,CAAC,cAAc,GAAG,6BAA6B,CAAC,GAAG,CACrD,UAAA,EAAE,IAAI,OAAA,EAAE,CAAC,GAAG,CAAC,IAAI,KAAK,CAAC,KAAK,EAAE,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,EAA3C,CAA2C,CAClD,CAAA;IACH,CAAC;IAEM,oCAAY,GAAnB;QACE,OAAO,IAAI,CAAC,cAAc,CAAA;IAC5B,CAAC;IAEM,4CAAoB,GAA3B;QAAA,iBAIC;QAHC,OAAO,IAAI,CAAC,cAAc,CAAC,GAAG,CAC5B,UAAA,EAAE,IAAI,OAAA,EAAE,CAAC,GAAG,CAAC,KAAI,CAAC,MAAM,CAAC,CAAC,GAAG,CAAC,IAAI,KAAK,CAAC,KAAI,CAAC,WAAW,EAAE,KAAI,CAAC,YAAY,CAAC,CAAC,EAAvE,CAAuE,CAC9E,CAAA;IACH,CAAC;IAEM,qCAAa,GAApB;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,EAAE,EAAE,CAAC,CAAA;IACzC,CAAC;IAEM,sCAAc,GAArB;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,uCAAe,GAAtB;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,+BAAO,GAAd;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,kCAAU,GAAjB;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,mCAAW,GAAlB;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,gCAAQ,GAAf;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,+BAAO,GAAd,UAAe,KAAa,EAAE,MAAc;QAC1C,OAAO,IAAI,aAAa,CACtB,IAAI,CAAC,oBAAoB,EAAE,EAC3B,EAAE,KAAK,OAAA,EAAE,MAAM,QAAA,EAAE,CAClB,CAAA;IACH,CAAC;IAEM,6BAAK,GAAZ,UAAa,CAAS,EAAE,CAAS;QAC/B,OAAO,IAAI,aAAa,CACtB,IAAI,CAAC,oBAAoB,EAAE,EAC3B,EAAE,KAAK,EAAE,IAAI,CAAC,WAAW,EAAE,MAAM,EAAE,IAAI,CAAC,YAAY,EAAE,EACtD,IAAI,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAChB,CAAA;IACH,CAAC;IACH,oBAAC;AAAD,CAAC,AAxED,IAwEC"} 
{"version":3,"file":"FaceLandmarks.js","sourceRoot":"","sources":["../../src/faceLandmarkNet/FaceLandmarks.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,cAAc,EAAE,MAAM,2BAA2B,CAAC;AAC3D,OAAO,EAAE,aAAa,EAAE,MAAM,mCAAmC,CAAC;AAClE,OAAO,EAAE,KAAK,EAAE,MAAM,UAAU,CAAC;AACjC,OAAO,EAAE,IAAI,EAAE,MAAM,SAAS,CAAC;AAG/B,2BAA2B;AAC3B,IAAM,IAAI,GAAG,GAAG,CAAA;AAChB,IAAM,IAAI,GAAG,IAAI,CAAA;AACjB,IAAM,QAAQ,GAAG,IAAI,CAAA;AAErB;IAME,uBACE,6BAAsC,EACtC,SAAqB,EACrB,KAA8B;QAA9B,sBAAA,EAAA,YAAmB,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC;QAEtB,IAAA,uBAAK,EAAE,yBAAM,CAAc;QACnC,IAAI,CAAC,WAAW,GAAG,KAAK,CAAA;QACxB,IAAI,CAAC,YAAY,GAAG,MAAM,CAAA;QAC1B,IAAI,CAAC,MAAM,GAAG,KAAK,CAAA;QACnB,IAAI,CAAC,cAAc,GAAG,6BAA6B,CAAC,GAAG,CACrD,UAAA,EAAE,IAAI,OAAA,EAAE,CAAC,GAAG,CAAC,IAAI,KAAK,CAAC,KAAK,EAAE,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,EAA3C,CAA2C,CAClD,CAAA;IACH,CAAC;IAEM,gCAAQ,GAAf;QACE,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,EAAE,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,CAAA;IAChD,CAAC;IAEM,qCAAa,GAApB;QACE,OAAO,IAAI,CAAC,WAAW,CAAA;IACzB,CAAC;IAEM,sCAAc,GAArB;QACE,OAAO,IAAI,CAAC,YAAY,CAAA;IAC1B,CAAC;IAEM,oCAAY,GAAnB;QACE,OAAO,IAAI,CAAC,cAAc,CAAA;IAC5B,CAAC;IAEM,4CAAoB,GAA3B;QAAA,iBAIC;QAHC,OAAO,IAAI,CAAC,cAAc,CAAC,GAAG,CAC5B,UAAA,EAAE,IAAI,OAAA,EAAE,CAAC,GAAG,CAAC,KAAI,CAAC,MAAM,CAAC,CAAC,GAAG,CAAC,IAAI,KAAK,CAAC,KAAI,CAAC,WAAW,EAAE,KAAI,CAAC,YAAY,CAAC,CAAC,EAAvE,CAAuE,CAC9E,CAAA;IACH,CAAC;IAEM,qCAAa,GAApB;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,EAAE,EAAE,CAAC,CAAA;IACzC,CAAC;IAEM,sCAAc,GAArB;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,uCAAe,GAAtB;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,+BAAO,GAAd;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,kCAAU,GAAjB;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,mCAAW,GAAlB;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,gCAAQ,GAAf;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,+BAAO,GAAd,UAAe,KAAa,EAAE,MAAc;QAC1C,OAAO,IAAI,aAAa,CACtB,IAAI,CAAC,oBAAoB,EAAE,EAC3B,EAAE,KAAK,OAAA,EAAE,MAAM,QAAA,EAAE,CAClB,CAAA;IACH,CAAC;IAEM,6BAAK,GAAZ,UAAa,CAAS,EAAE,CAAS;QAC/B,OAAO,IAAI,aAAa,CACtB,IAAI,CAAC,oBAAoB,EAAE,EAC3B,EAAE,KAAK,EAAE,IAAI,CAAC,WAAW,EAAE,MAAM,EAAE,IAAI,CAAC,YAAY,EAAE,EACtD,IAAI,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAChB,CAAA;IACH,CAAC;IAED;;;;;;;;;;OAUG;IACI,6BAAK,GAAZ,UACE,SAAgB;QAEhB,IAAI,SAAS,EAAE;YACb,IAAM,GAAG,GAAG,SAAS,YAAY,aAAa;gBAC5C,CAAC,CAAC,SAAS,CAAC,MAAM,EAAE,CAAC,KAAK,EAAE;gBAC5B,CAAC,CAAC,SAAS,CAAA;YAEb,OAAO,IAAI,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,EAAE,GAAG,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,CAAA;SACxC;QAED,IAAM,OAAO,GAAG;YACd,IAAI,CAAC,UAAU,EAAE;YACjB,IAAI,CAAC,WAAW,EAAE;YAClB,IAAI,CAAC,QAAQ,EAAE;SAChB,CAAC,GAAG,CAAC,cAAc,CAAC,CAAA;QAEd,IAAA,0BAAa,EAAE,2BAAc,EAAE,wBAAW,CAAW;QAC5D,IAAM,WAAW,GAAG,UAAC,EAAS,IAAK,OAAA,WAAW,CAAC,GAAG,CAAC,EAAE,CAAC,CAAC,SAAS,EAAE,EAA/B,CAA+B,CAAA;QAClE,IAAM,cAAc,GAAG,CAAC,WAAW,CAAC,aAAa,CAAC,GAAG,WAAW,CAAC,cAAc,CAAC,CAAC,GAAG,CAAC,CAAA;QAErF,IAAM,IAAI,GAAG,IAAI,CAAC,KAAK,CAAC,cAAc,GAAG,QAAQ,CAAC,CAAA;QAElD,IAAM,QAAQ,GAAG,cAAc,CAAC,OAAO,CAAC,CAAA;QACxC,qDAAqD;QACrD,IAAM,CAAC,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,GAAG,CAAC,IAAI,GAAG,IAAI,CAAC,CAAC,CAAC,CAAA;QAC7D,IAAM,CAAC,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,GAAG,CAAC,IAAI,GAAG,IAAI,CAAC,CAAC,CAAC,CAAA;QAE7D,OAAO,IAAI,IAAI,CAAC,CAAC,EAAE,CAAC,EAAE,IAAI,EAAE,IAAI,CAAC,CAAA;IACnC,CAAC;IACH,oBAAC;AAAD,CAAC,AA9HD,IA8HC"}
\ No newline at end of file \ No newline at end of file
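To tie the pieces together, a hedged sketch of the new alignment flow (initialized nets and input are assumed; the names mirror the example pages further below):

const detections = await detectionNet.locateFaces(input, minConfidence);
const faceTensors = await faceapi.extractFaceTensors(input, detections);
const landmarks = await landmarkNet.detectLandmarks(faceTensors[0]);
const alignedBox = landmarks.align(detections[0]); // Rect of the aligned face region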
@@ -2,5 +2,5 @@ import * as tf from '@tensorflow/tfjs-core';
import { NetInput } from '../NetInput';
import { FaceLandmarks } from './FaceLandmarks';
export declare function faceLandmarkNet(weights: Float32Array): {
detectLandmarks: (input: string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement | (string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement)[] | tf.Tensor<tf.Rank> | NetInput) => Promise<FaceLandmarks>;
};
import * as tf from '@tensorflow/tfjs-core';
import { NetInput } from '../NetInput';
export declare function faceRecognitionNet(weights: Float32Array): {
computeFaceDescriptor: (input: string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement | (string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement)[] | tf.Tensor<tf.Rank> | NetInput) => Promise<Int32Array | Uint8Array | Float32Array>;
computeFaceDescriptorSync: (input: string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement | (string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement)[] | tf.Tensor<tf.Rank> | NetInput) => Int32Array | Uint8Array | Float32Array;
forward: (input: string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement | (string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement)[] | tf.Tensor<tf.Rank> | NetInput) => tf.Tensor<tf.Rank.R2>;
};
import * as tf from '@tensorflow/tfjs-core';
export declare function imageTensorToCanvas(imgTensor: tf.Tensor4D, canvas?: HTMLCanvasElement): Promise<HTMLCanvasElement>;
import * as tslib_1 from "tslib";
import * as tf from '@tensorflow/tfjs-core';
export function imageTensorToCanvas(imgTensor, canvas) {
return tslib_1.__awaiter(this, void 0, void 0, function () {
var targetCanvas, _a, _, height, width, numChannels;
return tslib_1.__generator(this, function (_b) {
switch (_b.label) {
case 0:
targetCanvas = canvas || document.createElement('canvas');
_a = imgTensor.shape, _ = _a[0], height = _a[1], width = _a[2], numChannels = _a[3];
return [4 /*yield*/, tf.toPixels(imgTensor.as3D(height, width, numChannels).toInt(), targetCanvas)];
case 1:
_b.sent();
return [2 /*return*/, targetCanvas];
}
});
});
}
//# sourceMappingURL=imageTensorToCanvas.js.map
\ No newline at end of file
{"version":3,"file":"imageTensorToCanvas.js","sourceRoot":"","sources":["../src/imageTensorToCanvas.ts"],"names":[],"mappings":";AAAA,OAAO,KAAK,EAAE,MAAM,uBAAuB,CAAC;AAK5C,MAAM,8BACJ,SAAsB,EACtB,MAA0B;;;;;;oBAEpB,YAAY,GAAG,MAAM,IAAK,QAAQ,CAAC,aAAa,CAAC,QAAQ,CAAC,CAAA;oBAE1D,KAAkC,SAAS,CAAC,KAAK,EAAhD,CAAC,QAAA,EAAE,MAAM,QAAA,EAAE,KAAK,QAAA,EAAE,WAAW,QAAA,CAAmB;oBACvD,qBAAM,EAAE,CAAC,QAAQ,CAAC,SAAS,CAAC,IAAI,CAAC,MAAM,EAAE,KAAK,EAAE,WAAW,CAAC,CAAC,KAAK,EAAE,EAAE,YAAY,CAAC,EAAA;;oBAAnF,SAAmF,CAAA;oBAEnF,sBAAO,YAAY,EAAA;;;;CACpB"}
\ No newline at end of file
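A minimal sketch of imageTensorToCanvas in use, assuming a 1x64x64x3 image tensor:

import * as tf from '@tensorflow/tfjs-core';
import { imageTensorToCanvas } from './imageTensorToCanvas';

const imgTensor = tf.zeros([1, 64, 64, 3]) as tf.Tensor4D; // placeholder pixel data
imageTensorToCanvas(imgTensor)
  .then(canvas => document.body.appendChild(canvas)); // the caller decides where to attach the canvas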
@@ -3,7 +3,7 @@ import * as tf from '@tensorflow/tfjs-core';
 * Pads the smaller dimension of an image tensor with zeros, such that width === height.
 *
 * @param imgTensor The image tensor.
 * @param isCenterImage (optional, default: false) If true, add padding on both sides of the image, such that the image is centered.
 * @returns The padded tensor with width === height.
 */
export declare function padToSquare(imgTensor: tf.Tensor4D, isCenterImage?: boolean): tf.Tensor4D;
@@ -3,7 +3,7 @@ import * as tf from '@tensorflow/tfjs-core';
 * Pads the smaller dimension of an image tensor with zeros, such that width === height.
 *
 * @param imgTensor The image tensor.
 * @param isCenterImage (optional, default: false) If true, add padding on both sides of the image, such that the image is centered.
 * @returns The padded tensor with width === height.
 */
export function padToSquare(imgTensor, isCenterImage) {
...
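For example (shapes are assumed, not taken from the diff): a 1x200x300x3 tensor gets zero-padded along its height to 1x300x300x3, and with isCenterImage the pad rows are split between top and bottom:

import * as tf from '@tensorflow/tfjs-core';
import { padToSquare } from './padToSquare';

const img = tf.zeros([1, 200, 300, 3]) as tf.Tensor4D;
const squared = padToSquare(img, true); // 100 pad rows, roughly 50 above and 50 below
console.log(squared.shape); // [1, 300, 300, 3]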
import * as tf from '@tensorflow/tfjs-core';
import { FaceDetection } from './faceDetectionNet/FaceDetection';
import { FaceLandmarks } from './faceLandmarkNet/FaceLandmarks';
import { Dimensions, DrawBoxOptions, DrawLandmarksOptions, DrawOptions, DrawTextOptions } from './types';
@@ -12,6 +13,7 @@ export declare function getMediaDimensions(media: HTMLImageElement | HTMLVideoEl
height: number;
};
export declare function bufferToImage(buf: Blob): Promise<HTMLImageElement>;
export declare function imageTensorToCanvas(imgTensor: tf.Tensor4D, canvas?: HTMLCanvasElement): Promise<HTMLCanvasElement>;
export declare function getDefaultDrawOptions(): DrawOptions;
export declare function drawBox(ctx: CanvasRenderingContext2D, x: number, y: number, w: number, h: number, options: DrawBoxOptions): void;
export declare function drawText(ctx: CanvasRenderingContext2D, x: number, y: number, text: string, options: DrawTextOptions): void;
...
import * as tslib_1 from "tslib";
import * as tf from '@tensorflow/tfjs-core';
export function isFloat(num) {
return num % 1 !== 0;
}
@@ -55,6 +57,22 @@ export function bufferToImage(buf) {
reader.readAsDataURL(buf);
});
}
export function imageTensorToCanvas(imgTensor, canvas) {
return tslib_1.__awaiter(this, void 0, void 0, function () {
var targetCanvas, _a, _, height, width, numChannels;
return tslib_1.__generator(this, function (_b) {
switch (_b.label) {
case 0:
targetCanvas = canvas || document.createElement('canvas');
_a = imgTensor.shape, _ = _a[0], height = _a[1], width = _a[2], numChannels = _a[3];
return [4 /*yield*/, tf.toPixels(imgTensor.as3D(height, width, numChannels).toInt(), targetCanvas)];
case 1:
_b.sent();
return [2 /*return*/, targetCanvas];
}
});
});
}
export function getDefaultDrawOptions() {
return {
color: 'blue',
...
@@ -101,6 +101,10 @@ function renderNavBar(navbarId, exampleUri) {
name: 'Detect and Draw Faces'
},
{
uri: 'face_alignment',
name: 'Face Alignment'
},
{
uri: 'detect_and_recognize_faces',
name: 'Detect and Recognize Faces'
}
...
@@ -18,6 +18,7 @@ app.get('/face_similarity', (req, res) => res.sendFile(path.join(viewsDir, 'face
app.get('/face_landmarks', (req, res) => res.sendFile(path.join(viewsDir, 'faceLandmarks.html')))
app.get('/detect_and_draw_faces', (req, res) => res.sendFile(path.join(viewsDir, 'detectAndDrawFaces.html')))
app.get('/detect_and_draw_landmarks', (req, res) => res.sendFile(path.join(viewsDir, 'detectAndDrawLandmarks.html')))
app.get('/face_alignment', (req, res) => res.sendFile(path.join(viewsDir, 'faceAlignment.html')))
app.get('/detect_and_recognize_faces', (req, res) => res.sendFile(path.join(viewsDir, 'detectAndRecognizeFaces.html')))
app.listen(3000, () => console.log('Listening on port 3000!'))
\ No newline at end of file
@@ -59,7 +59,7 @@
<script>
let maxDistance = 0.8
let minConfidence = 0.7
let detectionNet, recognitionNet, landmarkNet
let trainDescriptorsByClass = []
function onIncreaseMinConfidence() {
@@ -95,16 +95,33 @@
const input = new faceapi.NetInput(inputImgEl)
const detections = await detectionNet.locateFaces(input, minConfidence)
const faceTensors = (await faceapi.extractFaceTensors(input, detections))
// detect landmarks and get the aligned face image bounding boxes
const alignedFaceBoxes = await Promise.all(faceTensors.map(
async (faceTensor, i) => {
const faceLandmarks = await landmarkNet.detectLandmarks(faceTensor)
return faceLandmarks.align(detections[i])
}
))
// free memory for the face image tensors after detecting their landmarks
faceTensors.forEach(t => t.dispose())
const alignedFaceTensors = (await faceapi.extractFaceTensors(input, alignedFaceBoxes))
const descriptors = await Promise.all(alignedFaceTensors.map(
faceTensor => recognitionNet.computeFaceDescriptor(faceTensor)
))
// free memory for face image tensors after we computed their descriptors
alignedFaceTensors.forEach(t => t.dispose())
// draw detections
const detectionsForSize = detections.map(det => det.forSize(width, height))
faceapi.drawDetection('overlay', detectionsForSize, { withScore: false })
// draw the recognition results
descriptors.forEach((descriptor, i) => {
const bestMatch = getBestMatch(trainDescriptorsByClass, descriptor)
const text = `${bestMatch.distance < maxDistance ? bestMatch.className : 'unknown'} (${bestMatch.distance})`
@@ -128,6 +145,7 @@
async function run() {
detectionNet = await initFaceDetectionNet()
recognitionNet = await initFaceRecognitionNet()
landmarkNet = await initFaceLandmarkNet()
trainDescriptorsByClass = await initTrainDescriptorsByClass(recognitionNet, 1)
$('#loader').hide()
onSelectionChanged($('#selectList select').val())
...
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="axios.min.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div class="center-content page-container">
<div id="navbar"></div>
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div id="facesContainer"></div>
<div class="row side-by-side">
<div id="selectList"></div>
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.7" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinConfidence()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinConfidence()"
>
<i class="material-icons left">+</i>
</button>
</div>
</div>
<script>
let minConfidence = 0.7
let drawLines = true
let detectionNet, landmarkNet
function onIncreaseMinConfidence() {
minConfidence = Math.min(faceapi.round(minConfidence + 0.1), 1.0)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onDecreaseMinConfidence() {
minConfidence = Math.max(faceapi.round(minConfidence - 0.1), 0.1)
$('#minConfidence').val(minConfidence)
updateResults()
}
async function updateResults() {
const inputImgEl = $('#inputImg').get(0)
const { width, height } = inputImgEl
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const input = new faceapi.NetInput(inputImgEl)
const locations = await detectionNet.locateFaces(input, minConfidence)
const faceImages = await faceapi.extractFaces(input.canvases[0], locations)
// detect landmarks and get the aligned face image bounding boxes
const alignedFaceBoxes = await Promise.all(faceImages.map(
async (faceCanvas, i) => {
const faceLandmarks = await landmarkNet.detectLandmarks(faceCanvas)
return faceLandmarks.align(locations[i])
}
))
const alignedFaceImages = await faceapi.extractFaces(input.canvases[0], alignedFaceBoxes)
$('#facesContainer').empty()
faceImages.forEach(async (faceCanvas, i) => {
$('#facesContainer').append(faceCanvas)
$('#facesContainer').append(alignedFaceImages[i])
})
}
async function onSelectionChanged(uri) {
const imgBuf = await fetchImage(uri)
$(`#inputImg`).get(0).src = (await faceapi.bufferToImage(imgBuf)).src
updateResults()
}
async function run() {
detectionNet = await initFaceDetectionNet()
landmarkNet = await initFaceLandmarkNet()
$('#loader').hide()
onSelectionChanged($('#selectList select').val())
}
$(document).ready(function() {
renderNavBar('#navbar', 'face_alignment')
renderImageSelectList(
'#selectList',
async (uri) => {
await onSelectionChanged(uri)
},
'bbt1.jpg'
)
run()
})
</script>
</body>
</html>
\ No newline at end of file
...@@ -27,4 +27,16 @@ export class Point implements IPoint { ...@@ -27,4 +27,16 @@ export class Point implements IPoint {
public div(pt: IPoint): Point { public div(pt: IPoint): Point {
return new Point(this.x / pt.x, this.y / pt.y) return new Point(this.x / pt.x, this.y / pt.y)
} }
public abs(): Point {
return new Point(Math.abs(this.x), Math.abs(this.y))
}
public magnitude(): number {
return Math.sqrt(Math.pow(this.x, 2) + Math.pow(this.y, 2))
}
public floor(): Point {
return new Point(Math.floor(this.x), Math.floor(this.y))
}
} }
\ No newline at end of file
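A quick sketch of what the three new Point helpers compute, with illustrative values (sub is from the unchanged part of the class):

import { Point } from './Point'

// a.sub(b).magnitude() is the Euclidean distance sqrt((ax - bx)^2 + (ay - by)^2),
// which is how FaceLandmarks.align measures the eye-to-mouth distance below
const dist = new Point(3, 4).sub(new Point(0, 0)).magnitude() // 5
const snapped = new Point(1.7, -2.3).floor() // Point(1, -3)
const positive = new Point(-1.5, 2).abs() // Point(1.5, 2)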
import { Point } from '../Point';
export function getCenterPoint(pts: Point[]): Point {
return pts.reduce((sum, pt) => sum.add(pt), new Point(0, 0))
.div(new Point(pts.length, pts.length))
}
\ No newline at end of file
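getCenterPoint is the arithmetic mean of a set of points; align below uses it to locate the eye and mouth centers. A worked check with hypothetical values:

import { Point } from '../Point'
import { getCenterPoint } from './getCenterPoint'

// centroid of a triangle: ((0 + 6 + 3) / 3, (0 + 0 + 6) / 3) = (3, 2)
const center = getCenterPoint([new Point(0, 0), new Point(6, 0), new Point(3, 6)])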
...@@ -3,22 +3,22 @@ import * as tf from '@tensorflow/tfjs-core'; ...@@ -3,22 +3,22 @@ import * as tf from '@tensorflow/tfjs-core';
import { FaceDetection } from './faceDetectionNet/FaceDetection'; import { FaceDetection } from './faceDetectionNet/FaceDetection';
import { getImageTensor } from './getImageTensor'; import { getImageTensor } from './getImageTensor';
import { NetInput } from './NetInput'; import { NetInput } from './NetInput';
import { Rect } from './Rect';
import { TNetInput } from './types'; import { TNetInput } from './types';
/** /**
* Extracts the tensors of the image regions containing the detected faces. * Extracts the tensors of the image regions containing the detected faces.
* Returned tensors have to be disposed manually once you don't need them anymore! * Useful if you want to compute the face descriptors for the face images.
* Useful if you want to compute the face descriptors for the face * Using this method is faster than extracting a canvas for each face and
* images. Using this method is faster than extracting a canvas for each face and * converting them to tensors individually.
* converting them to tensors individually. * converting them to tensors individually.
* *
* @param input The image that face detection has been performed on. * @param input The image that face detection has been performed on.
* @param detections The face detection results for that image. * @param detections The face detection results or face bounding boxes for that image.
* @returns Tensors of the corresponding image region for each detected face. * @returns Tensors of the corresponding image region for each detected face.
*/ */
export function extractFaceTensors( export function extractFaceTensors(
image: tf.Tensor | NetInput | TNetInput, image: tf.Tensor | NetInput | TNetInput,
detections: FaceDetection[] detections: Array<FaceDetection|Rect>
): tf.Tensor4D[] { ): tf.Tensor4D[] {
return tf.tidy(() => { return tf.tidy(() => {
const imgTensor = getImageTensor(image) const imgTensor = getImageTensor(image)
...@@ -26,10 +26,14 @@ export function extractFaceTensors( ...@@ -26,10 +26,14 @@ export function extractFaceTensors(
// TODO handle batches // TODO handle batches
const [batchSize, imgHeight, imgWidth, numChannels] = imgTensor.shape const [batchSize, imgHeight, imgWidth, numChannels] = imgTensor.shape
const faceTensors = detections.map(det => { const boxes = detections.map(
const { x, y, width, height } = det.forSize(imgWidth, imgHeight).getBox().floor() det => det instanceof FaceDetection
return tf.slice(imgTensor, [0, y, x, 0], [1, height, width, numChannels]) ? det.forSize(imgWidth, imgHeight).getBox().floor()
}) : det
)
const faceTensors = boxes.map(({ x, y, width, height }) =>
tf.slice(imgTensor, [0, y, x, 0], [1, height, width, numChannels])
)
return faceTensors return faceTensors
}) })
......
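With the widened signature, extractFaceTensors accepts plain Rects alongside FaceDetection results. A usage sketch under the conventions of the demo pages above (detectionNet, input and alignedFaceBoxes are assumed set up as there; the returned tensors must still be disposed manually):

// FaceDetection results are rescaled to the image dimensions and floored...
const faceTensors = faceapi.extractFaceTensors(input.canvases[0], detections)

// ...while pixel-space Rects (e.g. from FaceLandmarks.align) are sliced as-is
const alignedTensors = faceapi.extractFaceTensors(input.canvases[0], alignedFaceBoxes)

faceTensors.concat(alignedTensors).forEach(t => t.dispose())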
import { FaceDetection } from './faceDetectionNet/FaceDetection'; import { FaceDetection } from './faceDetectionNet/FaceDetection';
import { Rect } from './Rect';
import { createCanvas, getContext2dOrThrow } from './utils'; import { createCanvas, getContext2dOrThrow } from './utils';
/** /**
* Extracts the image regions containing the detected faces. * Extracts the image regions containing the detected faces.
* *
* @param input The image that face detection has been performed on. * @param input The image that face detection has been performed on.
* @param detections The face detection results for that image. * @param detections The face detection results or face bounding boxes for that image.
* @returns The Canvases of the corresponding image region for each detected face. * @returns The Canvases of the corresponding image region for each detected face.
*/ */
export function extractFaces( export function extractFaces(
image: HTMLCanvasElement, image: HTMLCanvasElement,
detections: FaceDetection[] detections: Array<FaceDetection|Rect>
): HTMLCanvasElement[] { ): HTMLCanvasElement[] {
const ctx = getContext2dOrThrow(image) const ctx = getContext2dOrThrow(image)
return detections.map(det => { const boxes = detections.map(
const { x, y, width, height } = det.forSize(image.width, image.height).getBox().floor() det => det instanceof FaceDetection
? det.forSize(image.width, image.height).getBox().floor()
: det
)
return boxes.map(({ x, y, width, height }) => {
const faceImg = createCanvas({ width, height }) const faceImg = createCanvas({ width, height })
getContext2dOrThrow(faceImg) getContext2dOrThrow(faceImg)
.putImageData(ctx.getImageData(x, y, width, height), 0, 0) .putImageData(ctx.getImageData(x, y, width, height), 0, 0)
......
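The demo page earlier in this PR exercises both accepted input types; condensed (extractFaces itself is synchronous and returns the cropped canvases directly):

// crop faces using detection results (rescaled and floored internally)
const faceImages = faceapi.extractFaces(input.canvases[0], locations)

// crop again using the pixel-space boxes returned by FaceLandmarks.align
const alignedFaceImages = faceapi.extractFaces(input.canvases[0], alignedFaceBoxes)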
import { Point, IPoint } from '../Point'; import { getCenterPoint } from '../commons/getCenterPoint';
import { FaceDetection } from '../faceDetectionNet/FaceDetection';
import { Point } from '../Point';
import { Rect } from '../Rect';
import { Dimensions } from '../types'; import { Dimensions } from '../types';
// face alignment constants
const relX = 0.5
const relY = 0.43
const relScale = 0.45
export class FaceLandmarks { export class FaceLandmarks {
private _faceLandmarks: Point[]
private _imageWidth: number private _imageWidth: number
private _imageHeight: number private _imageHeight: number
private _shift: Point private _shift: Point
private _faceLandmarks: Point[]
constructor( constructor(
relativeFaceLandmarkPositions: Point[], relativeFaceLandmarkPositions: Point[],
...@@ -21,41 +29,53 @@ export class FaceLandmarks { ...@@ -21,41 +29,53 @@ export class FaceLandmarks {
) )
} }
public getPositions() { public getShift(): Point {
return new Point(this._shift.x, this._shift.y)
}
public getImageWidth(): number {
return this._imageWidth
}
public getImageHeight(): number {
return this._imageHeight
}
public getPositions(): Point[] {
return this._faceLandmarks return this._faceLandmarks
} }
public getRelativePositions() { public getRelativePositions(): Point[] {
return this._faceLandmarks.map( return this._faceLandmarks.map(
pt => pt.sub(this._shift).div(new Point(this._imageWidth, this._imageHeight)) pt => pt.sub(this._shift).div(new Point(this._imageWidth, this._imageHeight))
) )
} }
public getJawOutline() { public getJawOutline(): Point[] {
return this._faceLandmarks.slice(0, 17) return this._faceLandmarks.slice(0, 17)
} }
public getLeftEyeBrow() { public getLeftEyeBrow(): Point[] {
return this._faceLandmarks.slice(17, 22) return this._faceLandmarks.slice(17, 22)
} }
public getRightEyeBrow() { public getRightEyeBrow(): Point[] {
return this._faceLandmarks.slice(22, 27) return this._faceLandmarks.slice(22, 27)
} }
public getNose() { public getNose(): Point[] {
return this._faceLandmarks.slice(27, 36) return this._faceLandmarks.slice(27, 36)
} }
public getLeftEye() { public getLeftEye(): Point[] {
return this._faceLandmarks.slice(36, 42) return this._faceLandmarks.slice(36, 42)
} }
public getRightEye() { public getRightEye(): Point[] {
return this._faceLandmarks.slice(42, 48) return this._faceLandmarks.slice(42, 48)
} }
public getMouth() { public getMouth(): Point[] {
return this._faceLandmarks.slice(48, 68) return this._faceLandmarks.slice(48, 68)
} }
...@@ -73,4 +93,46 @@ export class FaceLandmarks { ...@@ -73,4 +93,46 @@ export class FaceLandmarks {
new Point(x, y) new Point(x, y)
) )
} }
/**
* Aligns the face landmarks after face detection from the relative positions of the face's
* bounding box, or from its current shift. This function should be used to align the face images
* after face detection has been performed, before they are passed to the face recognition net.
* This will make the computed face descriptor more accurate.
*
* @param detection (optional) The bounding box of the face or the face detection result. If
* no argument is passed, the positions of the face landmarks are assumed to be relative to
* its current shift.
* @returns The bounding box of the aligned face.
*/
public align(
detection?: FaceDetection | Rect
): Rect {
if (detection) {
const box = detection instanceof FaceDetection
? detection.getBox().floor()
: detection
return this.shift(box.x, box.y).align()
}
const centers = [
this.getLeftEye(),
this.getRightEye(),
this.getMouth()
].map(getCenterPoint)
const [leftEyeCenter, rightEyeCenter, mouthCenter] = centers
const distToMouth = (pt: Point) => mouthCenter.sub(pt).magnitude()
const eyeToMouthDist = (distToMouth(leftEyeCenter) + distToMouth(rightEyeCenter)) / 2
const size = Math.floor(eyeToMouthDist / relScale)
const refPoint = getCenterPoint(centers)
// TODO: pad in case rectangle is out of image bounds
const x = Math.floor(Math.max(0, refPoint.x - (relX * size)))
const y = Math.floor(Math.max(0, refPoint.y - (relY * size)))
return new Rect(x, y, size, size)
}
} }
\ No newline at end of file
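To make the alignment constants concrete: with an eye-to-mouth distance of 45px, align yields a square of size floor(45 / 0.45) = 100, and with the centroid of the eye and mouth centers at (120, 90) the box origin lands at (120 - 0.5 * 100, 90 - 0.43 * 100) = (70, 47). A sketch of the intended call sequence, mirroring the demo page above:

// detect landmarks on the cropped face, align against the detected box,
// then re-crop the source image with the aligned square
const faceLandmarks = await landmarkNet.detectLandmarks(faceCanvas)
const alignedBox = faceLandmarks.align(locations[i]) // Rect(x, y, size, size)
const [alignedFace] = faceapi.extractFaces(input.canvases[0], [alignedBox])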
...@@ -4,7 +4,7 @@ import * as tf from '@tensorflow/tfjs-core'; ...@@ -4,7 +4,7 @@ import * as tf from '@tensorflow/tfjs-core';
* Pads the smaller dimension of an image tensor with zeros, such that width === height. * Pads the smaller dimension of an image tensor with zeros, such that width === height.
* *
* @param imgTensor The image tensor. * @param imgTensor The image tensor.
* @param isCenterImage (optional, default: false) If true, add padding on both sides of the image, such that the image * @param isCenterImage (optional, default: false) If true, add equal padding on both sides of the image, such that the image ends up centered.
* @returns The padded tensor with width === height. * @returns The padded tensor with width === height.
*/ */
export function padToSquare( export function padToSquare(
......
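A minimal sketch of padToSquare on a dummy tensor (the import path is an assumption; shapes follow the NHWC convention used above):

import * as tf from '@tensorflow/tfjs-core'
import { padToSquare } from './padToSquare'

// a 1 x 100 x 150 x 3 input comes back as 1 x 150 x 150 x 3; with
// isCenterImage = true the 50 zero rows are split evenly top and bottom
const squared = padToSquare(tf.zeros([1, 100, 150, 3]) as tf.Tensor4D, true)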
import * as tf from '@tensorflow/tfjs-core';
import { FaceDetection } from './faceDetectionNet/FaceDetection'; import { FaceDetection } from './faceDetectionNet/FaceDetection';
import { FaceLandmarks } from './faceLandmarkNet/FaceLandmarks'; import { FaceLandmarks } from './faceLandmarkNet/FaceLandmarks';
import { Dimensions, DrawBoxOptions, DrawLandmarksOptions, DrawOptions, DrawTextOptions } from './types';
import { Point } from './Point'; import { Point } from './Point';
import { Dimensions, DrawBoxOptions, DrawLandmarksOptions, DrawOptions, DrawTextOptions } from './types';
export function isFloat(num: number) { export function isFloat(num: number) {
return num % 1 !== 0 return num % 1 !== 0
...@@ -68,6 +70,18 @@ export function bufferToImage(buf: Blob): Promise<HTMLImageElement> { ...@@ -68,6 +70,18 @@ export function bufferToImage(buf: Blob): Promise<HTMLImageElement> {
}) })
} }
export async function imageTensorToCanvas(
imgTensor: tf.Tensor4D,
canvas?: HTMLCanvasElement
): Promise<HTMLCanvasElement> {
const targetCanvas = canvas || document.createElement('canvas')
const [_, height, width, numChannels] = imgTensor.shape
await tf.toPixels(imgTensor.as3D(height, width, numChannels).toInt(), targetCanvas)
return targetCanvas
}
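imageTensorToCanvas pairs naturally with extractFaceTensors for visual debugging; a sketch, with input and detections assumed set up as in the earlier examples:

// render a 4D face tensor into a fresh canvas and attach it to the page
const [faceTensor] = faceapi.extractFaceTensors(input, detections)
const faceCanvas = await faceapi.imageTensorToCanvas(faceTensor)
document.body.appendChild(faceCanvas)
faceTensor.dispose()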
export function getDefaultDrawOptions(): DrawOptions { export function getDefaultDrawOptions(): DrawOptions {
return { return {
color: 'blue', color: 'blue',
......