Commit aa200d18 by vincent

build dist

parent 8263bfd2
@@ -10,4 +10,7 @@ export declare class Point implements IPoint {
     sub(pt: IPoint): Point;
     mul(pt: IPoint): Point;
     div(pt: IPoint): Point;
+    abs(): Point;
+    magnitude(): number;
+    floor(): Point;
 }
@@ -15,6 +15,15 @@ var Point = /** @class */ (function () {
     Point.prototype.div = function (pt) {
         return new Point(this.x / pt.x, this.y / pt.y);
     };
+    Point.prototype.abs = function () {
+        return new Point(Math.abs(this.x), Math.abs(this.y));
+    };
+    Point.prototype.magnitude = function () {
+        return Math.sqrt(Math.pow(this.x, 2) + Math.pow(this.y, 2));
+    };
+    Point.prototype.floor = function () {
+        return new Point(Math.floor(this.x), Math.floor(this.y));
+    };
     return Point;
 }());
 export { Point };
{"version":3,"file":"Point.js","sourceRoot":"","sources":["../src/Point.ts"],"names":[],"mappings":"AAKA;IAIE,eAAY,CAAS,EAAE,CAAS;QAC9B,IAAI,CAAC,CAAC,GAAG,CAAC,CAAA;QACV,IAAI,CAAC,CAAC,GAAG,CAAC,CAAA;IACZ,CAAC;IAEM,mBAAG,GAAV,UAAW,EAAU;QACnB,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,CAAA;IAChD,CAAC;IAEM,mBAAG,GAAV,UAAW,EAAU;QACnB,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,CAAA;IAChD,CAAC;IAEM,mBAAG,GAAV,UAAW,EAAU;QACnB,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,CAAA;IAChD,CAAC;IAEM,mBAAG,GAAV,UAAW,EAAU;QACnB,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,CAAA;IAChD,CAAC;IACH,YAAC;AAAD,CAAC,AAxBD,IAwBC"} {"version":3,"file":"Point.js","sourceRoot":"","sources":["../src/Point.ts"],"names":[],"mappings":"AAKA;IAIE,eAAY,CAAS,EAAE,CAAS;QAC9B,IAAI,CAAC,CAAC,GAAG,CAAC,CAAA;QACV,IAAI,CAAC,CAAC,GAAG,CAAC,CAAA;IACZ,CAAC;IAEM,mBAAG,GAAV,UAAW,EAAU;QACnB,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,CAAA;IAChD,CAAC;IAEM,mBAAG,GAAV,UAAW,EAAU;QACnB,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,CAAA;IAChD,CAAC;IAEM,mBAAG,GAAV,UAAW,EAAU;QACnB,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,CAAA;IAChD,CAAC;IAEM,mBAAG,GAAV,UAAW,EAAU;QACnB,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,GAAG,EAAE,CAAC,CAAC,CAAC,CAAA;IAChD,CAAC;IAEM,mBAAG,GAAV;QACE,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAA;IACtD,CAAC;IAEM,yBAAS,GAAhB;QACE,OAAO,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,CAAC,GAAG,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAA;IAC7D,CAAC;IAEM,qBAAK,GAAZ;QACE,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,CAAA;IAC1D,CAAC;IACH,YAAC;AAAD,CAAC,AApCD,IAoCC"}
\ No newline at end of file \ No newline at end of file
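Note: a quick sketch of what the new Point helpers return (the sample values are hypothetical, not taken from this commit):

import { Point } from './Point';

const pt = new Point(-3.7, 4.2);
pt.abs();       // new Point(3.7, 4.2)
pt.floor();     // new Point(-4, 4), since Math.floor rounds toward -Infinity
pt.magnitude(); // sqrt(3.7^2 + 4.2^2) ≈ 5.6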
import { FaceLandmarks } from './faceLandmarkNet/FaceLandmarks';
import { Rect } from './Rect';
/**
 * Aligns the face landmarks after face detection from the relative positions of the face's
 * bounding box, or its current shift. This function should be used to align the face images
 * after face detection has been performed, before they are passed to the face recognition net.
 * This will make the computed face descriptor more accurate.
 *
 * @param detection (optional) The bounding box of the face or the face detection result. If
 * no argument is passed, the positions of the face landmarks are assumed to be relative to
 * its current shift.
 * @returns The bounding box of the aligned face.
 */
export declare function alignFace(faceLandmarks: FaceLandmarks, detection?: Rect): Rect;
import { getCenterPoint } from './commons/getCenterPoint';
import { FaceDetection } from './faceDetectionNet/FaceDetection';
import { Rect } from './Rect';
var relX = 0.5;
var relY = 0.43;
var relScale = 0.45;
/**
 * Aligns the face landmarks after face detection from the relative positions of the face's
 * bounding box, or its current shift. This function should be used to align the face images
 * after face detection has been performed, before they are passed to the face recognition net.
 * This will make the computed face descriptor more accurate.
 *
 * @param detection (optional) The bounding box of the face or the face detection result. If
 * no argument is passed, the positions of the face landmarks are assumed to be relative to
 * its current shift.
 * @returns The bounding box of the aligned face.
 */
export function alignFace(faceLandmarks, detection) {
if (detection) {
var box = detection instanceof FaceDetection
? detection.getBox().floor()
: detection;
return alignFace(faceLandmarks.shift(box.x, box.y));
}
var centers = [
faceLandmarks.getLeftEye(),
faceLandmarks.getRightEye(),
faceLandmarks.getMouth()
].map(getCenterPoint);
var leftEyeCenter = centers[0], rightEyeCenter = centers[1], mouthCenter = centers[2];
var distToMouth = function (pt) { return mouthCenter.sub(pt).magnitude(); };
var eyeToMouthDist = (distToMouth(leftEyeCenter) + distToMouth(rightEyeCenter)) / 2;
var size = Math.floor(eyeToMouthDist / relScale);
var refPoint = getCenterPoint(centers);
// TODO: pad in case rectangle is out of image bounds
var x = Math.floor(Math.max(0, refPoint.x - (relX * size)));
var y = Math.floor(Math.max(0, refPoint.y - (relY * size)));
return new Rect(x, y, size, size);
}
//# sourceMappingURL=alignFace.js.map
\ No newline at end of file
{"version":3,"file":"alignFace.js","sourceRoot":"","sources":["../src/alignFace.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,cAAc,EAAE,MAAM,0BAA0B,CAAC;AAC1D,OAAO,EAAE,aAAa,EAAE,MAAM,kCAAkC,CAAC;AAGjE,OAAO,EAAE,IAAI,EAAE,MAAM,QAAQ,CAAC;AAE9B,IAAM,IAAI,GAAG,GAAG,CAAA;AAChB,IAAM,IAAI,GAAG,IAAI,CAAA;AACjB,IAAM,QAAQ,GAAG,IAAI,CAAA;AAErB;;;;;;;;;;GAUG;AACH,MAAM,oBACJ,aAA4B,EAC5B,SAAgB;IAEhB,IAAI,SAAS,EAAE;QACb,IAAM,GAAG,GAAG,SAAS,YAAY,aAAa;YAC5C,CAAC,CAAC,SAAS,CAAC,MAAM,EAAE,CAAC,KAAK,EAAE;YAC5B,CAAC,CAAC,SAAS,CAAA;QAEb,OAAO,SAAS,CAAC,aAAa,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,EAAE,GAAG,CAAC,CAAC,CAAC,CAAC,CAAA;KACpD;IAED,IAAM,OAAO,GAAG;QACd,aAAa,CAAC,UAAU,EAAE;QAC1B,aAAa,CAAC,WAAW,EAAE;QAC3B,aAAa,CAAC,QAAQ,EAAE;KACzB,CAAC,GAAG,CAAC,cAAc,CAAC,CAAA;IAEd,IAAA,0BAAa,EAAE,2BAAc,EAAE,wBAAW,CAAW;IAC5D,IAAM,WAAW,GAAG,UAAC,EAAS,IAAK,OAAA,WAAW,CAAC,GAAG,CAAC,EAAE,CAAC,CAAC,SAAS,EAAE,EAA/B,CAA+B,CAAA;IAClE,IAAM,cAAc,GAAG,CAAC,WAAW,CAAC,aAAa,CAAC,GAAG,WAAW,CAAC,cAAc,CAAC,CAAC,GAAG,CAAC,CAAA;IAErF,IAAM,IAAI,GAAG,IAAI,CAAC,KAAK,CAAC,cAAc,GAAG,QAAQ,CAAC,CAAA;IAElD,IAAM,QAAQ,GAAG,cAAc,CAAC,OAAO,CAAC,CAAA;IACxC,qDAAqD;IACrD,IAAM,CAAC,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,GAAG,CAAC,IAAI,GAAG,IAAI,CAAC,CAAC,CAAC,CAAA;IAC7D,IAAM,CAAC,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,GAAG,CAAC,IAAI,GAAG,IAAI,CAAC,CAAC,CAAC,CAAA;IAE7D,OAAO,IAAI,IAAI,CAAC,CAAC,EAAE,CAAC,EAAE,IAAI,EAAE,IAAI,CAAC,CAAA;AACnC,CAAC"}
\ No newline at end of file
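For context, a usage sketch of the new alignFace export. The faceLandmarks, faceDetection and input values are assumed to come from the landmark and detection nets; they are not part of this diff:

import { alignFace } from './alignFace';
import { extractFaceTensors } from './extractFaceTensors';
import { FaceLandmarks } from './faceLandmarkNet/FaceLandmarks';
import { FaceDetection } from './faceDetectionNet/FaceDetection';
import * as tf from '@tensorflow/tfjs-core';

declare const faceLandmarks: FaceLandmarks; // assumed landmark result
declare const faceDetection: FaceDetection; // assumed detection result
declare const input: tf.Tensor4D;           // assumed input image tensor

// square Rect of the aligned face, derived from the eye and mouth centers
const alignedRect = alignFace(faceLandmarks, faceDetection.getBox());
const [alignedFaceTensor] = extractFaceTensors(input, [alignedRect]);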
import { Point } from '../Point';
export declare function getCenterPoint(pts: Point[]): Point;
import { Point } from '../Point';
export function getCenterPoint(pts) {
return pts.reduce(function (sum, pt) { return sum.add(pt); }, new Point(0, 0))
.div(new Point(pts.length, pts.length));
}
//# sourceMappingURL=getCenterPoint.js.map
\ No newline at end of file
{"version":3,"file":"getCenterPoint.js","sourceRoot":"","sources":["../../src/commons/getCenterPoint.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,KAAK,EAAE,MAAM,UAAU,CAAC;AAEjC,MAAM,yBAAyB,GAAY;IACzC,OAAO,GAAG,CAAC,MAAM,CAAC,UAAC,GAAG,EAAE,EAAE,IAAK,OAAA,GAAG,CAAC,GAAG,CAAC,EAAE,CAAC,EAAX,CAAW,EAAE,IAAI,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;SACzD,GAAG,CAAC,IAAI,KAAK,CAAC,GAAG,CAAC,MAAM,EAAE,GAAG,CAAC,MAAM,CAAC,CAAC,CAAA;AAC3C,CAAC"}
\ No newline at end of file
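getCenterPoint is a plain centroid: sum the points, then divide by the count. A worked example with made-up coordinates:

import { Point } from '../Point';
import { getCenterPoint } from './getCenterPoint';

// ((0 + 4 + 2) / 3, (0 + 0 + 6) / 3) = new Point(2, 2)
const center = getCenterPoint([new Point(0, 0), new Point(4, 0), new Point(2, 6)]);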
 import * as tf from '@tensorflow/tfjs-core';
 import { FaceDetection } from './faceDetectionNet/FaceDetection';
 import { NetInput } from './NetInput';
+import { Rect } from './Rect';
 import { TNetInput } from './types';
 /**
  * Extracts the tensors of the image regions containing the detected faces.
- * Returned tensors have to be disposed manually once you don't need them anymore!
- * Useful if you want to compute the face descriptors for the face
- * images. Using this method is faster then extracting a canvas for each face and
+ * Useful if you want to compute the face descriptors for the face images.
+ * Using this method is faster than extracting a canvas for each face and
  * converting them to tensors individually.
  *
  * @param input The image that face detection has been performed on.
- * @param detections The face detection results for that image.
+ * @param detections The face detection results or face bounding boxes for that image.
  * @returns Tensors of the corresponding image region for each detected face.
  */
-export declare function extractFaceTensors(image: tf.Tensor | NetInput | TNetInput, detections: FaceDetection[]): tf.Tensor4D[];
+export declare function extractFaceTensors(image: tf.Tensor | NetInput | TNetInput, detections: Array<FaceDetection | Rect>): tf.Tensor4D[];
 import * as tf from '@tensorflow/tfjs-core';
+import { FaceDetection } from './faceDetectionNet/FaceDetection';
 import { getImageTensor } from './getImageTensor';
 /**
  * Extracts the tensors of the image regions containing the detected faces.
- * Returned tensors have to be disposed manually once you don't need them anymore!
- * Useful if you want to compute the face descriptors for the face
- * images. Using this method is faster then extracting a canvas for each face and
+ * Useful if you want to compute the face descriptors for the face images.
+ * Using this method is faster than extracting a canvas for each face and
  * converting them to tensors individually.
  *
  * @param input The image that face detection has been performed on.
- * @param detections The face detection results for that image.
+ * @param detections The face detection results or face bounding boxes for that image.
  * @returns Tensors of the corresponding image region for each detected face.
  */
 export function extractFaceTensors(image, detections) {
@@ -16,8 +16,11 @@ export function extractFaceTensors(image, detections) {
     var imgTensor = getImageTensor(image);
     // TODO handle batches
     var _a = imgTensor.shape, batchSize = _a[0], imgHeight = _a[1], imgWidth = _a[2], numChannels = _a[3];
-    var faceTensors = detections.map(function (det) {
-        var _a = det.forSize(imgWidth, imgHeight).getBox().floor(), x = _a.x, y = _a.y, width = _a.width, height = _a.height;
+    var boxes = detections.map(function (det) { return det instanceof FaceDetection
+        ? det.forSize(imgWidth, imgHeight).getBox().floor()
+        : det; });
+    var faceTensors = boxes.map(function (_a) {
+        var x = _a.x, y = _a.y, width = _a.width, height = _a.height;
         return tf.slice(imgTensor, [0, y, x, 0], [1, height, width, numChannels]);
     });
     return faceTensors;
{"version":3,"file":"extractFaceTensors.js","sourceRoot":"","sources":["../src/extractFaceTensors.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,MAAM,uBAAuB,CAAC;AAG5C,OAAO,EAAE,cAAc,EAAE,MAAM,kBAAkB,CAAC;AAIlD;;;;;;;;;;GAUG;AACH,MAAM,6BACJ,KAAuC,EACvC,UAA2B;IAE3B,OAAO,EAAE,CAAC,IAAI,CAAC;QACb,IAAM,SAAS,GAAG,cAAc,CAAC,KAAK,CAAC,CAAA;QAEvC,sBAAsB;QAChB,IAAA,oBAA+D,EAA9D,iBAAS,EAAE,iBAAS,EAAE,gBAAQ,EAAE,mBAAW,CAAmB;QAErE,IAAM,WAAW,GAAG,UAAU,CAAC,GAAG,CAAC,UAAA,GAAG;YAC9B,IAAA,sDAA2E,EAAzE,QAAC,EAAE,QAAC,EAAE,gBAAK,EAAE,kBAAM,CAAsD;YACjF,OAAO,EAAE,CAAC,KAAK,CAAC,SAAS,EAAE,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,MAAM,EAAE,KAAK,EAAE,WAAW,CAAC,CAAC,CAAA;QAC3E,CAAC,CAAC,CAAA;QAEF,OAAO,WAAW,CAAA;IACpB,CAAC,CAAC,CAAA;AACJ,CAAC"} {"version":3,"file":"extractFaceTensors.js","sourceRoot":"","sources":["../src/extractFaceTensors.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,MAAM,uBAAuB,CAAC;AAE5C,OAAO,EAAE,aAAa,EAAE,MAAM,kCAAkC,CAAC;AACjE,OAAO,EAAE,cAAc,EAAE,MAAM,kBAAkB,CAAC;AAKlD;;;;;;;;;GASG;AACH,MAAM,6BACJ,KAAuC,EACvC,UAAqC;IAErC,OAAO,EAAE,CAAC,IAAI,CAAC;QACb,IAAM,SAAS,GAAG,cAAc,CAAC,KAAK,CAAC,CAAA;QAEvC,sBAAsB;QAChB,IAAA,oBAA+D,EAA9D,iBAAS,EAAE,iBAAS,EAAE,gBAAQ,EAAE,mBAAW,CAAmB;QAErE,IAAM,KAAK,GAAG,UAAU,CAAC,GAAG,CAC1B,UAAA,GAAG,IAAI,OAAA,GAAG,YAAY,aAAa;YACjC,CAAC,CAAC,GAAG,CAAC,OAAO,CAAC,QAAQ,EAAE,SAAS,CAAC,CAAC,MAAM,EAAE,CAAC,KAAK,EAAE;YACnD,CAAC,CAAC,GAAG,EAFA,CAEA,CACR,CAAA;QACD,IAAM,WAAW,GAAG,KAAK,CAAC,GAAG,CAAC,UAAC,EAAuB;gBAArB,QAAC,EAAE,QAAC,EAAE,gBAAK,EAAE,kBAAM;YAClD,OAAA,EAAE,CAAC,KAAK,CAAC,SAAS,EAAE,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,MAAM,EAAE,KAAK,EAAE,WAAW,CAAC,CAAC;QAAlE,CAAkE,CACnE,CAAA;QAED,OAAO,WAAW,CAAA;IACpB,CAAC,CAAC,CAAA;AACJ,CAAC"}
\ No newline at end of file \ No newline at end of file
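A usage sketch with assumed inputs; as of this commit, plain Rects are accepted alongside FaceDetection results. The returned slices are still allocated on the backend, so they presumably need manual disposal, as the removed doc line used to note:

import * as tf from '@tensorflow/tfjs-core';
import { FaceDetection } from './faceDetectionNet/FaceDetection';
import { extractFaceTensors } from './extractFaceTensors';

declare const img: tf.Tensor4D;             // assumed input image tensor
declare const detections: FaceDetection[];  // assumed detection results

const faceTensors = extractFaceTensors(img, detections);
// ... run the recognition net on each slice ...
faceTensors.forEach(t => t.dispose()); // free the slices once done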
 import { FaceDetection } from './faceDetectionNet/FaceDetection';
+import { Rect } from './Rect';
 /**
  * Extracts the image regions containing the detected faces.
  *
  * @param input The image that face detection has been performed on.
- * @param detections The face detection results for that image.
+ * @param detections The face detection results or face bounding boxes for that image.
  * @returns The Canvases of the corresponding image region for each detected face.
  */
-export declare function extractFaces(image: HTMLCanvasElement, detections: FaceDetection[]): HTMLCanvasElement[];
+export declare function extractFaces(image: HTMLCanvasElement, detections: Array<FaceDetection | Rect>): HTMLCanvasElement[];
+import { FaceDetection } from './faceDetectionNet/FaceDetection';
 import { createCanvas, getContext2dOrThrow } from './utils';
 /**
  * Extracts the image regions containing the detected faces.
  *
  * @param input The image that face detection has been performed on.
- * @param detections The face detection results for that image.
+ * @param detections The face detection results or face bounding boxes for that image.
  * @returns The Canvases of the corresponding image region for each detected face.
  */
 export function extractFaces(image, detections) {
     var ctx = getContext2dOrThrow(image);
-    return detections.map(function (det) {
-        var _a = det.forSize(image.width, image.height).getBox().floor(), x = _a.x, y = _a.y, width = _a.width, height = _a.height;
+    var boxes = detections.map(function (det) { return det instanceof FaceDetection
+        ? det.forSize(image.width, image.height).getBox().floor()
+        : det; });
+    return boxes.map(function (_a) {
+        var x = _a.x, y = _a.y, width = _a.width, height = _a.height;
         var faceImg = createCanvas({ width: width, height: height });
         getContext2dOrThrow(faceImg)
             .putImageData(ctx.getImageData(x, y, width, height), 0, 0);
{"version":3,"file":"extractFaces.js","sourceRoot":"","sources":["../src/extractFaces.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,YAAY,EAAE,mBAAmB,EAAE,MAAM,SAAS,CAAC;AAE5D;;;;;;GAMG;AACH,MAAM,uBACJ,KAAwB,EACxB,UAA2B;IAE3B,IAAM,GAAG,GAAG,mBAAmB,CAAC,KAAK,CAAC,CAAA;IAEtC,OAAO,UAAU,CAAC,GAAG,CAAC,UAAA,GAAG;QACjB,IAAA,4DAAiF,EAA/E,QAAC,EAAE,QAAC,EAAE,gBAAK,EAAE,kBAAM,CAA4D;QACvF,IAAM,OAAO,GAAG,YAAY,CAAC,EAAE,KAAK,OAAA,EAAE,MAAM,QAAA,EAAE,CAAC,CAAA;QAC/C,mBAAmB,CAAC,OAAO,CAAC;aACzB,YAAY,CAAC,GAAG,CAAC,YAAY,CAAC,CAAC,EAAE,CAAC,EAAE,KAAK,EAAE,MAAM,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAA;QAC5D,OAAO,OAAO,CAAA;IAChB,CAAC,CAAC,CAAA;AACJ,CAAC"} {"version":3,"file":"extractFaces.js","sourceRoot":"","sources":["../src/extractFaces.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,aAAa,EAAE,MAAM,kCAAkC,CAAC;AAEjE,OAAO,EAAE,YAAY,EAAE,mBAAmB,EAAE,MAAM,SAAS,CAAC;AAE5D;;;;;;GAMG;AACH,MAAM,uBACJ,KAAwB,EACxB,UAAqC;IAErC,IAAM,GAAG,GAAG,mBAAmB,CAAC,KAAK,CAAC,CAAA;IAEtC,IAAM,KAAK,GAAG,UAAU,CAAC,GAAG,CAC1B,UAAA,GAAG,IAAI,OAAA,GAAG,YAAY,aAAa;QACjC,CAAC,CAAC,GAAG,CAAC,OAAO,CAAC,KAAK,CAAC,KAAK,EAAE,KAAK,CAAC,MAAM,CAAC,CAAC,MAAM,EAAE,CAAC,KAAK,EAAE;QACzD,CAAC,CAAC,GAAG,EAFA,CAEA,CACR,CAAA;IACD,OAAO,KAAK,CAAC,GAAG,CAAC,UAAC,EAAuB;YAArB,QAAC,EAAE,QAAC,EAAE,gBAAK,EAAE,kBAAM;QACrC,IAAM,OAAO,GAAG,YAAY,CAAC,EAAE,KAAK,OAAA,EAAE,MAAM,QAAA,EAAE,CAAC,CAAA;QAC/C,mBAAmB,CAAC,OAAO,CAAC;aACzB,YAAY,CAAC,GAAG,CAAC,YAAY,CAAC,CAAC,EAAE,CAAC,EAAE,KAAK,EAAE,MAAM,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAA;QAC5D,OAAO,OAAO,CAAA;IAChB,CAAC,CAAC,CAAA;AACJ,CAAC"}
\ No newline at end of file \ No newline at end of file
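And the canvas counterpart, with the same assumed inputs; since this commit a precomputed Rect (e.g. from face alignment) can be passed in place of a FaceDetection:

import { FaceDetection } from './faceDetectionNet/FaceDetection';
import { extractFaces } from './extractFaces';

declare const canvas: HTMLCanvasElement;    // canvas the detections refer to
declare const detections: FaceDetection[];  // assumed detection results

const faceCanvases = extractFaces(canvas, detections);
faceCanvases.forEach(c => document.body.appendChild(c));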
@@ -351,6 +351,22 @@
         reader.readAsDataURL(buf);
     });
 }
+function imageTensorToCanvas(imgTensor, canvas) {
+    return __awaiter$1(this, void 0, void 0, function () {
+        var targetCanvas, _a, _, height, width, numChannels;
+        return __generator$1(this, function (_b) {
+            switch (_b.label) {
+                case 0:
+                    targetCanvas = canvas || document.createElement('canvas');
+                    _a = imgTensor.shape, _ = _a[0], height = _a[1], width = _a[2], numChannels = _a[3];
+                    return [4 /*yield*/, toPixels(imgTensor.as3D(height, width, numChannels).toInt(), targetCanvas)];
+                case 1:
+                    _b.sent();
+                    return [2 /*return*/, targetCanvas];
+            }
+        });
+    });
+}
 function getDefaultDrawOptions() {
     return {
         color: 'blue',
@@ -521,7 +537,7 @@
  * Pads the smaller dimension of an image tensor with zeros, such that width === height.
  *
  * @param imgTensor The image tensor.
- * @param isCenterImage (optional, default: false) If true, add padding on both sides of the image, such that the image
+ * @param isCenterImage (optional, default: false) If true, add padding on both sides of the image, such that the image is centered.
  * @returns The padded tensor with width === height.
  */
 function padToSquare(imgTensor, isCenterImage) {
@@ -1045,6 +1061,15 @@
     Point.prototype.div = function (pt) {
         return new Point(this.x / pt.x, this.y / pt.y);
     };
+    Point.prototype.abs = function () {
+        return new Point(Math.abs(this.x), Math.abs(this.y));
+    };
+    Point.prototype.magnitude = function () {
+        return Math.sqrt(Math.pow(this.x, 2) + Math.pow(this.y, 2));
+    };
+    Point.prototype.floor = function () {
+        return new Point(Math.floor(this.x), Math.floor(this.y));
+    };
     return Point;
 }());
@@ -1097,6 +1122,15 @@
     };
 }
+function getCenterPoint(pts) {
+    return pts.reduce(function (sum, pt) { return sum.add(pt); }, new Point(0, 0))
+        .div(new Point(pts.length, pts.length));
+}
+// face alignment constants
+var relX = 0.5;
+var relY = 0.43;
+var relScale = 0.45;
 var FaceLandmarks = /** @class */ (function () {
     function FaceLandmarks(relativeFaceLandmarkPositions, imageDims, shift) {
         if (shift === void 0) { shift = new Point(0, 0); }
@@ -1106,6 +1140,15 @@
         this._shift = shift;
         this._faceLandmarks = relativeFaceLandmarkPositions.map(function (pt) { return pt.mul(new Point(width, height)).add(shift); });
     }
+    FaceLandmarks.prototype.getShift = function () {
+        return new Point(this._shift.x, this._shift.y);
+    };
+    FaceLandmarks.prototype.getImageWidth = function () {
+        return this._imageWidth;
+    };
+    FaceLandmarks.prototype.getImageHeight = function () {
+        return this._imageHeight;
+    };
     FaceLandmarks.prototype.getPositions = function () {
         return this._faceLandmarks;
     };
@@ -1140,6 +1183,39 @@
     FaceLandmarks.prototype.shift = function (x, y) {
         return new FaceLandmarks(this.getRelativePositions(), { width: this._imageWidth, height: this._imageHeight }, new Point(x, y));
     };
+    /**
+     * Aligns the face landmarks after face detection from the relative positions of the face's
+     * bounding box, or its current shift. This function should be used to align the face images
+     * after face detection has been performed, before they are passed to the face recognition net.
+     * This will make the computed face descriptor more accurate.
+     *
+     * @param detection (optional) The bounding box of the face or the face detection result. If
+     * no argument is passed, the positions of the face landmarks are assumed to be relative to
+     * its current shift.
+     * @returns The bounding box of the aligned face.
+     */
+    FaceLandmarks.prototype.align = function (detection) {
+        if (detection) {
+            var box = detection instanceof FaceDetection
+                ? detection.getBox().floor()
+                : detection;
+            return this.shift(box.x, box.y).align();
+        }
+        var centers = [
+            this.getLeftEye(),
+            this.getRightEye(),
+            this.getMouth()
+        ].map(getCenterPoint);
+        var leftEyeCenter = centers[0], rightEyeCenter = centers[1], mouthCenter = centers[2];
+        var distToMouth = function (pt) { return mouthCenter.sub(pt).magnitude(); };
+        var eyeToMouthDist = (distToMouth(leftEyeCenter) + distToMouth(rightEyeCenter)) / 2;
+        var size = Math.floor(eyeToMouthDist / relScale);
+        var refPoint = getCenterPoint(centers);
+        // TODO: pad in case rectangle is out of image bounds
+        var x = Math.floor(Math.max(0, refPoint.x - (relX * size)));
+        var y = Math.floor(Math.max(0, refPoint.y - (relY * size)));
+        return new Rect(x, y, size, size);
+    };
     return FaceLandmarks;
 }());
@@ -1415,13 +1491,16 @@
  * Extracts the image regions containing the detected faces.
  *
  * @param input The image that face detection has been performed on.
- * @param detections The face detection results for that image.
+ * @param detections The face detection results or face bounding boxes for that image.
  * @returns The Canvases of the corresponding image region for each detected face.
  */
 function extractFaces(image, detections) {
     var ctx = getContext2dOrThrow(image);
-    return detections.map(function (det) {
-        var _a = det.forSize(image.width, image.height).getBox().floor(), x = _a.x, y = _a.y, width = _a.width, height = _a.height;
+    var boxes = detections.map(function (det) { return det instanceof FaceDetection
+        ? det.forSize(image.width, image.height).getBox().floor()
+        : det; });
+    return boxes.map(function (_a) {
+        var x = _a.x, y = _a.y, width = _a.width, height = _a.height;
         var faceImg = createCanvas({ width: width, height: height });
         getContext2dOrThrow(faceImg)
             .putImageData(ctx.getImageData(x, y, width, height), 0, 0);
@@ -1431,13 +1510,12 @@
 /**
  * Extracts the tensors of the image regions containing the detected faces.
- * Returned tensors have to be disposed manually once you don't need them anymore!
- * Useful if you want to compute the face descriptors for the face
- * images. Using this method is faster then extracting a canvas for each face and
+ * Useful if you want to compute the face descriptors for the face images.
+ * Using this method is faster than extracting a canvas for each face and
  * converting them to tensors individually.
  *
  * @param input The image that face detection has been performed on.
- * @param detections The face detection results for that image.
+ * @param detections The face detection results or face bounding boxes for that image.
  * @returns Tensors of the corresponding image region for each detected face.
  */
 function extractFaceTensors(image$$1, detections) {
@@ -1445,8 +1523,11 @@
     var imgTensor = getImageTensor(image$$1);
     // TODO handle batches
     var _a = imgTensor.shape, batchSize = _a[0], imgHeight = _a[1], imgWidth = _a[2], numChannels = _a[3];
-    var faceTensors = detections.map(function (det) {
-        var _a = det.forSize(imgWidth, imgHeight).getBox().floor(), x = _a.x, y = _a.y, width = _a.width, height = _a.height;
+    var boxes = detections.map(function (det) { return det instanceof FaceDetection
+        ? det.forSize(imgWidth, imgHeight).getBox().floor()
+        : det; });
+    var faceTensors = boxes.map(function (_a) {
+        var x = _a.x, y = _a.y, width = _a.width, height = _a.height;
         return slice(imgTensor, [0, y, x, 0], [1, height, width, numChannels]);
     });
     return faceTensors;
@@ -1470,6 +1551,7 @@
 exports.createCanvasFromMedia = createCanvasFromMedia;
 exports.getMediaDimensions = getMediaDimensions;
 exports.bufferToImage = bufferToImage;
+exports.imageTensorToCanvas = imageTensorToCanvas;
 exports.getDefaultDrawOptions = getDefaultDrawOptions;
 exports.drawBox = drawBox;
 exports.drawText = drawText;
This source diff could not be displayed because it is too large.
@@ -2,9 +2,9 @@ import * as tf from '@tensorflow/tfjs-core';
 import { NetInput } from '../NetInput';
 import { FaceDetection } from './FaceDetection';
 export declare function faceDetectionNet(weights: Float32Array): {
-    forward: (input: string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement | (string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement)[] | NetInput | tf.Tensor<tf.Rank>) => {
+    forward: (input: string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement | (string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement)[] | tf.Tensor<tf.Rank> | NetInput) => {
         boxes: tf.Tensor<tf.Rank.R2>[];
         scores: tf.Tensor<tf.Rank.R1>[];
     };
-    locateFaces: (input: NetInput | tf.Tensor<tf.Rank>, minConfidence?: number, maxResults?: number) => Promise<FaceDetection[]>;
+    locateFaces: (input: tf.Tensor<tf.Rank> | NetInput, minConfidence?: number, maxResults?: number) => Promise<FaceDetection[]>;
 };
 import { Point } from '../Point';
+import { Rect } from '../Rect';
 import { Dimensions } from '../types';
 export declare class FaceLandmarks {
-    private _faceLandmarks;
     private _imageWidth;
     private _imageHeight;
     private _shift;
+    private _faceLandmarks;
     constructor(relativeFaceLandmarkPositions: Point[], imageDims: Dimensions, shift?: Point);
+    getShift(): Point;
+    getImageWidth(): number;
+    getImageHeight(): number;
     getPositions(): Point[];
     getRelativePositions(): Point[];
     getJawOutline(): Point[];
@@ -17,4 +21,16 @@ export declare class FaceLandmarks {
     getMouth(): Point[];
     forSize(width: number, height: number): FaceLandmarks;
     shift(x: number, y: number): FaceLandmarks;
+    /**
+     * Aligns the face landmarks after face detection from the relative positions of the face's
+     * bounding box, or its current shift. This function should be used to align the face images
+     * after face detection has been performed, before they are passed to the face recognition net.
+     * This will make the computed face descriptor more accurate.
+     *
+     * @param detection (optional) The bounding box of the face or the face detection result. If
+     * no argument is passed, the positions of the face landmarks are assumed to be relative to
+     * its current shift.
+     * @returns The bounding box of the aligned face.
+     */
+    align(detection?: Rect): Rect;
 }
+import { getCenterPoint } from '../commons/getCenterPoint';
+import { FaceDetection } from '../faceDetectionNet/FaceDetection';
 import { Point } from '../Point';
+import { Rect } from '../Rect';
+// face alignment constants
+var relX = 0.5;
+var relY = 0.43;
+var relScale = 0.45;
 var FaceLandmarks = /** @class */ (function () {
     function FaceLandmarks(relativeFaceLandmarkPositions, imageDims, shift) {
         if (shift === void 0) { shift = new Point(0, 0); }
@@ -8,6 +15,15 @@ var FaceLandmarks = /** @class */ (function () {
         this._shift = shift;
         this._faceLandmarks = relativeFaceLandmarkPositions.map(function (pt) { return pt.mul(new Point(width, height)).add(shift); });
     }
+    FaceLandmarks.prototype.getShift = function () {
+        return new Point(this._shift.x, this._shift.y);
+    };
+    FaceLandmarks.prototype.getImageWidth = function () {
+        return this._imageWidth;
+    };
+    FaceLandmarks.prototype.getImageHeight = function () {
+        return this._imageHeight;
+    };
     FaceLandmarks.prototype.getPositions = function () {
         return this._faceLandmarks;
     };
@@ -42,6 +58,39 @@ var FaceLandmarks = /** @class */ (function () {
     FaceLandmarks.prototype.shift = function (x, y) {
         return new FaceLandmarks(this.getRelativePositions(), { width: this._imageWidth, height: this._imageHeight }, new Point(x, y));
     };
+    /**
+     * Aligns the face landmarks after face detection from the relative positions of the face's
+     * bounding box, or its current shift. This function should be used to align the face images
+     * after face detection has been performed, before they are passed to the face recognition net.
+     * This will make the computed face descriptor more accurate.
+     *
+     * @param detection (optional) The bounding box of the face or the face detection result. If
+     * no argument is passed, the positions of the face landmarks are assumed to be relative to
+     * its current shift.
+     * @returns The bounding box of the aligned face.
+     */
+    FaceLandmarks.prototype.align = function (detection) {
+        if (detection) {
+            var box = detection instanceof FaceDetection
+                ? detection.getBox().floor()
+                : detection;
+            return this.shift(box.x, box.y).align();
+        }
+        var centers = [
+            this.getLeftEye(),
+            this.getRightEye(),
+            this.getMouth()
+        ].map(getCenterPoint);
+        var leftEyeCenter = centers[0], rightEyeCenter = centers[1], mouthCenter = centers[2];
+        var distToMouth = function (pt) { return mouthCenter.sub(pt).magnitude(); };
+        var eyeToMouthDist = (distToMouth(leftEyeCenter) + distToMouth(rightEyeCenter)) / 2;
+        var size = Math.floor(eyeToMouthDist / relScale);
+        var refPoint = getCenterPoint(centers);
+        // TODO: pad in case rectangle is out of image bounds
+        var x = Math.floor(Math.max(0, refPoint.x - (relX * size)));
+        var y = Math.floor(Math.max(0, refPoint.y - (relY * size)));
+        return new Rect(x, y, size, size);
+    };
     return FaceLandmarks;
 }());
 export { FaceLandmarks };
{"version":3,"file":"FaceLandmarks.js","sourceRoot":"","sources":["../../src/faceLandmarkNet/FaceLandmarks.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,KAAK,EAAU,MAAM,UAAU,CAAC;AAGzC;IAME,uBACE,6BAAsC,EACtC,SAAqB,EACrB,KAA8B;QAA9B,sBAAA,EAAA,YAAmB,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC;QAEtB,IAAA,uBAAK,EAAE,yBAAM,CAAc;QACnC,IAAI,CAAC,WAAW,GAAG,KAAK,CAAA;QACxB,IAAI,CAAC,YAAY,GAAG,MAAM,CAAA;QAC1B,IAAI,CAAC,MAAM,GAAG,KAAK,CAAA;QACnB,IAAI,CAAC,cAAc,GAAG,6BAA6B,CAAC,GAAG,CACrD,UAAA,EAAE,IAAI,OAAA,EAAE,CAAC,GAAG,CAAC,IAAI,KAAK,CAAC,KAAK,EAAE,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,EAA3C,CAA2C,CAClD,CAAA;IACH,CAAC;IAEM,oCAAY,GAAnB;QACE,OAAO,IAAI,CAAC,cAAc,CAAA;IAC5B,CAAC;IAEM,4CAAoB,GAA3B;QAAA,iBAIC;QAHC,OAAO,IAAI,CAAC,cAAc,CAAC,GAAG,CAC5B,UAAA,EAAE,IAAI,OAAA,EAAE,CAAC,GAAG,CAAC,KAAI,CAAC,MAAM,CAAC,CAAC,GAAG,CAAC,IAAI,KAAK,CAAC,KAAI,CAAC,WAAW,EAAE,KAAI,CAAC,YAAY,CAAC,CAAC,EAAvE,CAAuE,CAC9E,CAAA;IACH,CAAC;IAEM,qCAAa,GAApB;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,EAAE,EAAE,CAAC,CAAA;IACzC,CAAC;IAEM,sCAAc,GAArB;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,uCAAe,GAAtB;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,+BAAO,GAAd;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,kCAAU,GAAjB;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,mCAAW,GAAlB;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,gCAAQ,GAAf;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,+BAAO,GAAd,UAAe,KAAa,EAAE,MAAc;QAC1C,OAAO,IAAI,aAAa,CACtB,IAAI,CAAC,oBAAoB,EAAE,EAC3B,EAAE,KAAK,OAAA,EAAE,MAAM,QAAA,EAAE,CAClB,CAAA;IACH,CAAC;IAEM,6BAAK,GAAZ,UAAa,CAAS,EAAE,CAAS;QAC/B,OAAO,IAAI,aAAa,CACtB,IAAI,CAAC,oBAAoB,EAAE,EAC3B,EAAE,KAAK,EAAE,IAAI,CAAC,WAAW,EAAE,MAAM,EAAE,IAAI,CAAC,YAAY,EAAE,EACtD,IAAI,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAChB,CAAA;IACH,CAAC;IACH,oBAAC;AAAD,CAAC,AAxED,IAwEC"} 
{"version":3,"file":"FaceLandmarks.js","sourceRoot":"","sources":["../../src/faceLandmarkNet/FaceLandmarks.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,cAAc,EAAE,MAAM,2BAA2B,CAAC;AAC3D,OAAO,EAAE,aAAa,EAAE,MAAM,mCAAmC,CAAC;AAClE,OAAO,EAAE,KAAK,EAAE,MAAM,UAAU,CAAC;AACjC,OAAO,EAAE,IAAI,EAAE,MAAM,SAAS,CAAC;AAG/B,2BAA2B;AAC3B,IAAM,IAAI,GAAG,GAAG,CAAA;AAChB,IAAM,IAAI,GAAG,IAAI,CAAA;AACjB,IAAM,QAAQ,GAAG,IAAI,CAAA;AAErB;IAME,uBACE,6BAAsC,EACtC,SAAqB,EACrB,KAA8B;QAA9B,sBAAA,EAAA,YAAmB,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC;QAEtB,IAAA,uBAAK,EAAE,yBAAM,CAAc;QACnC,IAAI,CAAC,WAAW,GAAG,KAAK,CAAA;QACxB,IAAI,CAAC,YAAY,GAAG,MAAM,CAAA;QAC1B,IAAI,CAAC,MAAM,GAAG,KAAK,CAAA;QACnB,IAAI,CAAC,cAAc,GAAG,6BAA6B,CAAC,GAAG,CACrD,UAAA,EAAE,IAAI,OAAA,EAAE,CAAC,GAAG,CAAC,IAAI,KAAK,CAAC,KAAK,EAAE,MAAM,CAAC,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,EAA3C,CAA2C,CAClD,CAAA;IACH,CAAC;IAEM,gCAAQ,GAAf;QACE,OAAO,IAAI,KAAK,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,EAAE,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,CAAA;IAChD,CAAC;IAEM,qCAAa,GAApB;QACE,OAAO,IAAI,CAAC,WAAW,CAAA;IACzB,CAAC;IAEM,sCAAc,GAArB;QACE,OAAO,IAAI,CAAC,YAAY,CAAA;IAC1B,CAAC;IAEM,oCAAY,GAAnB;QACE,OAAO,IAAI,CAAC,cAAc,CAAA;IAC5B,CAAC;IAEM,4CAAoB,GAA3B;QAAA,iBAIC;QAHC,OAAO,IAAI,CAAC,cAAc,CAAC,GAAG,CAC5B,UAAA,EAAE,IAAI,OAAA,EAAE,CAAC,GAAG,CAAC,KAAI,CAAC,MAAM,CAAC,CAAC,GAAG,CAAC,IAAI,KAAK,CAAC,KAAI,CAAC,WAAW,EAAE,KAAI,CAAC,YAAY,CAAC,CAAC,EAAvE,CAAuE,CAC9E,CAAA;IACH,CAAC;IAEM,qCAAa,GAApB;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC,EAAE,EAAE,CAAC,CAAA;IACzC,CAAC;IAEM,sCAAc,GAArB;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,uCAAe,GAAtB;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,+BAAO,GAAd;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,kCAAU,GAAjB;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,mCAAW,GAAlB;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,gCAAQ,GAAf;QACE,OAAO,IAAI,CAAC,cAAc,CAAC,KAAK,CAAC,EAAE,EAAE,EAAE,CAAC,CAAA;IAC1C,CAAC;IAEM,+BAAO,GAAd,UAAe,KAAa,EAAE,MAAc;QAC1C,OAAO,IAAI,aAAa,CACtB,IAAI,CAAC,oBAAoB,EAAE,EAC3B,EAAE,KAAK,OAAA,EAAE,MAAM,QAAA,EAAE,CAClB,CAAA;IACH,CAAC;IAEM,6BAAK,GAAZ,UAAa,CAAS,EAAE,CAAS;QAC/B,OAAO,IAAI,aAAa,CACtB,IAAI,CAAC,oBAAoB,EAAE,EAC3B,EAAE,KAAK,EAAE,IAAI,CAAC,WAAW,EAAE,MAAM,EAAE,IAAI,CAAC,YAAY,EAAE,EACtD,IAAI,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAChB,CAAA;IACH,CAAC;IAED;;;;;;;;;;OAUG;IACI,6BAAK,GAAZ,UACE,SAAgB;QAEhB,IAAI,SAAS,EAAE;YACb,IAAM,GAAG,GAAG,SAAS,YAAY,aAAa;gBAC5C,CAAC,CAAC,SAAS,CAAC,MAAM,EAAE,CAAC,KAAK,EAAE;gBAC5B,CAAC,CAAC,SAAS,CAAA;YAEb,OAAO,IAAI,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,EAAE,GAAG,CAAC,CAAC,CAAC,CAAC,KAAK,EAAE,CAAA;SACxC;QAED,IAAM,OAAO,GAAG;YACd,IAAI,CAAC,UAAU,EAAE;YACjB,IAAI,CAAC,WAAW,EAAE;YAClB,IAAI,CAAC,QAAQ,EAAE;SAChB,CAAC,GAAG,CAAC,cAAc,CAAC,CAAA;QAEd,IAAA,0BAAa,EAAE,2BAAc,EAAE,wBAAW,CAAW;QAC5D,IAAM,WAAW,GAAG,UAAC,EAAS,IAAK,OAAA,WAAW,CAAC,GAAG,CAAC,EAAE,CAAC,CAAC,SAAS,EAAE,EAA/B,CAA+B,CAAA;QAClE,IAAM,cAAc,GAAG,CAAC,WAAW,CAAC,aAAa,CAAC,GAAG,WAAW,CAAC,cAAc,CAAC,CAAC,GAAG,CAAC,CAAA;QAErF,IAAM,IAAI,GAAG,IAAI,CAAC,KAAK,CAAC,cAAc,GAAG,QAAQ,CAAC,CAAA;QAElD,IAAM,QAAQ,GAAG,cAAc,CAAC,OAAO,CAAC,CAAA;QACxC,qDAAqD;QACrD,IAAM,CAAC,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,GAAG,CAAC,IAAI,GAAG,IAAI,CAAC,CAAC,CAAC,CAAA;QAC7D,IAAM,CAAC,GAAG,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAC,GAAG,CAAC,IAAI,GAAG,IAAI,CAAC,CAAC,CAAC,CAAA;QAE7D,OAAO,IAAI,IAAI,CAAC,CAAC,EAAE,CAAC,EAAE,IAAI,EAAE,IAAI,CAAC,CAAA;IACnC,CAAC;IACH,oBAAC;AAAD,CAAC,AA9HD,IA8HC"}
\ No newline at end of file \ No newline at end of file
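To see what align() computes, a worked example with hypothetical landmark centers:

// leftEye = (60, 60), rightEye = (100, 60), mouth = (80, 110)
// distToMouth(leftEye) = |(20, 50)| = sqrt(2900) ≈ 53.85, same for rightEye
// eyeToMouthDist ≈ 53.85, so size = floor(53.85 / 0.45) = 119
// refPoint = centroid of the three centers = (80, 76.67)
// x = floor(max(0, 80 - 0.5 * 119)) = 20
// y = floor(max(0, 76.67 - 0.43 * 119)) = 25
// => align() returns new Rect(20, 25, 119, 119)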
@@ -2,5 +2,5 @@ import * as tf from '@tensorflow/tfjs-core';
 import { NetInput } from '../NetInput';
 import { FaceLandmarks } from './FaceLandmarks';
 export declare function faceLandmarkNet(weights: Float32Array): {
-    detectLandmarks: (input: string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement | (string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement)[] | NetInput | tf.Tensor<tf.Rank>) => Promise<FaceLandmarks>;
+    detectLandmarks: (input: string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement | (string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement)[] | tf.Tensor<tf.Rank> | NetInput) => Promise<FaceLandmarks>;
 };
 import * as tf from '@tensorflow/tfjs-core';
 import { NetInput } from '../NetInput';
 export declare function faceRecognitionNet(weights: Float32Array): {
-    computeFaceDescriptor: (input: string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement | (string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement)[] | NetInput | tf.Tensor<tf.Rank>) => Promise<Int32Array | Uint8Array | Float32Array>;
-    computeFaceDescriptorSync: (input: string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement | (string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement)[] | NetInput | tf.Tensor<tf.Rank>) => Int32Array | Uint8Array | Float32Array;
-    forward: (input: string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement | (string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement)[] | NetInput | tf.Tensor<tf.Rank>) => tf.Tensor<tf.Rank.R2>;
+    computeFaceDescriptor: (input: string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement | (string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement)[] | tf.Tensor<tf.Rank> | NetInput) => Promise<Int32Array | Uint8Array | Float32Array>;
+    computeFaceDescriptorSync: (input: string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement | (string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement)[] | tf.Tensor<tf.Rank> | NetInput) => Int32Array | Uint8Array | Float32Array;
+    forward: (input: string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement | (string | HTMLCanvasElement | HTMLImageElement | HTMLVideoElement)[] | tf.Tensor<tf.Rank> | NetInput) => tf.Tensor<tf.Rank.R2>;
 };
import * as tf from '@tensorflow/tfjs-core';
export declare function imageTensorToCanvas(imgTensor: tf.Tensor4D, canvas?: HTMLCanvasElement): Promise<HTMLCanvasElement>;
import * as tslib_1 from "tslib";
import * as tf from '@tensorflow/tfjs-core';
export function imageTensorToCanvas(imgTensor, canvas) {
return tslib_1.__awaiter(this, void 0, void 0, function () {
var targetCanvas, _a, _, height, width, numChannels;
return tslib_1.__generator(this, function (_b) {
switch (_b.label) {
case 0:
targetCanvas = canvas || document.createElement('canvas');
_a = imgTensor.shape, _ = _a[0], height = _a[1], width = _a[2], numChannels = _a[3];
return [4 /*yield*/, tf.toPixels(imgTensor.as3D(height, width, numChannels).toInt(), targetCanvas)];
case 1:
_b.sent();
return [2 /*return*/, targetCanvas];
}
});
});
}
//# sourceMappingURL=imageTensorToCanvas.js.map
\ No newline at end of file
{"version":3,"file":"imageTensorToCanvas.js","sourceRoot":"","sources":["../src/imageTensorToCanvas.ts"],"names":[],"mappings":";AAAA,OAAO,KAAK,EAAE,MAAM,uBAAuB,CAAC;AAK5C,MAAM,8BACJ,SAAsB,EACtB,MAA0B;;;;;;oBAEpB,YAAY,GAAG,MAAM,IAAK,QAAQ,CAAC,aAAa,CAAC,QAAQ,CAAC,CAAA;oBAE1D,KAAkC,SAAS,CAAC,KAAK,EAAhD,CAAC,QAAA,EAAE,MAAM,QAAA,EAAE,KAAK,QAAA,EAAE,WAAW,QAAA,CAAmB;oBACvD,qBAAM,EAAE,CAAC,QAAQ,CAAC,SAAS,CAAC,IAAI,CAAC,MAAM,EAAE,KAAK,EAAE,WAAW,CAAC,CAAC,KAAK,EAAE,EAAE,YAAY,CAAC,EAAA;;oBAAnF,SAAmF,CAAA;oBAEnF,sBAAO,YAAY,EAAA;;;;CACpB"}
\ No newline at end of file
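A usage sketch for the new helper; imgTensor is an assumed [1, height, width, channels] tensor. The helper reshapes it to 3D, casts to int and draws it via tf.toPixels:

import * as tf from '@tensorflow/tfjs-core';
import { imageTensorToCanvas } from './imageTensorToCanvas';

declare const imgTensor: tf.Tensor4D; // assumed input image tensor

async function renderToPage() {
    // a new canvas is created when none is passed in
    const canvas = await imageTensorToCanvas(imgTensor);
    document.body.appendChild(canvas);
}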
@@ -3,7 +3,7 @@ import * as tf from '@tensorflow/tfjs-core';
 * Pads the smaller dimension of an image tensor with zeros, such that width === height.
 *
 * @param imgTensor The image tensor.
- * @param isCenterImage (optional, default: false) If true, add padding on both sides of the image, such that the image
+ * @param isCenterImage (optional, default: false) If true, add padding on both sides of the image, such that the image is centered.
 * @returns The padded tensor with width === height.
 */
 export declare function padToSquare(imgTensor: tf.Tensor4D, isCenterImage?: boolean): tf.Tensor4D;
@@ -3,7 +3,7 @@ import * as tf from '@tensorflow/tfjs-core';
 * Pads the smaller dimension of an image tensor with zeros, such that width === height.
 *
 * @param imgTensor The image tensor.
- * @param isCenterImage (optional, default: false) If true, add padding on both sides of the image, such that the image
+ * @param isCenterImage (optional, default: false) If true, add padding on both sides of the image, such that the image is centered.
 * @returns The padded tensor with width === height.
 */
 export function padToSquare(imgTensor, isCenterImage) {
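A sketch of the padding behavior, with illustrative shapes:

import * as tf from '@tensorflow/tfjs-core';
import { padToSquare } from './padToSquare';

// the height (100) is zero-padded up to the width (200); with
// isCenterImage = true the padding is split evenly between both sides
// instead of being added to one side only
const img = tf.zeros([1, 100, 200, 3]) as tf.Tensor4D;
const squared = padToSquare(img, true); // shape [1, 200, 200, 3]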
+import * as tf from '@tensorflow/tfjs-core';
 import { FaceDetection } from './faceDetectionNet/FaceDetection';
 import { FaceLandmarks } from './faceLandmarkNet/FaceLandmarks';
 import { Dimensions, DrawBoxOptions, DrawLandmarksOptions, DrawOptions, DrawTextOptions } from './types';
@@ -12,6 +13,7 @@ export declare function getMediaDimensions(media: HTMLImageElement | HTMLVideoEl
     height: number;
 };
 export declare function bufferToImage(buf: Blob): Promise<HTMLImageElement>;
+export declare function imageTensorToCanvas(imgTensor: tf.Tensor4D, canvas?: HTMLCanvasElement): Promise<HTMLCanvasElement>;
 export declare function getDefaultDrawOptions(): DrawOptions;
 export declare function drawBox(ctx: CanvasRenderingContext2D, x: number, y: number, w: number, h: number, options: DrawBoxOptions): void;
 export declare function drawText(ctx: CanvasRenderingContext2D, x: number, y: number, text: string, options: DrawTextOptions): void;
import * as tslib_1 from "tslib";
import * as tf from '@tensorflow/tfjs-core';
export function isFloat(num) { export function isFloat(num) {
return num % 1 !== 0; return num % 1 !== 0;
} }
...@@ -55,6 +57,22 @@ export function bufferToImage(buf) { ...@@ -55,6 +57,22 @@ export function bufferToImage(buf) {
reader.readAsDataURL(buf); reader.readAsDataURL(buf);
}); });
} }
export function imageTensorToCanvas(imgTensor, canvas) {
return tslib_1.__awaiter(this, void 0, void 0, function () {
var targetCanvas, _a, _, height, width, numChannels;
return tslib_1.__generator(this, function (_b) {
switch (_b.label) {
case 0:
targetCanvas = canvas || document.createElement('canvas');
_a = imgTensor.shape, _ = _a[0], height = _a[1], width = _a[2], numChannels = _a[3];
return [4 /*yield*/, tf.toPixels(imgTensor.as3D(height, width, numChannels).toInt(), targetCanvas)];
case 1:
_b.sent();
return [2 /*return*/, targetCanvas];
}
});
});
}
export function getDefaultDrawOptions() { export function getDefaultDrawOptions() {
return { return {
color: 'blue', color: 'blue',
......