Commit 17b72bd4 by vincent


remove FaceDetectionWithLandmarks and FullFaceDescription and instead produce dynamically extensible objects
parent 387a03b4
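In short: the dedicated result classes are replaced by plain objects that each task extends property by property. A hedged sketch of how the old classes map onto the new composed types (type names come from the factories added below; the alias names are only illustrative):

// FaceDetectionWithLandmarks  ->  WithFaceLandmarks<WithFaceDetection<{}>>
// FullFaceDescription         ->  WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>
type DetectionResult = WithFaceDetection<{}>                 // { detection: FaceDetection }
type LandmarksResult = WithFaceLandmarks<DetectionResult>    // + landmarks, unshiftedLandmarks, alignedRect
type DescriptorResult = WithFaceDescriptor<LandmarksResult>  // + descriptor: Float32Array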
...@@ -13,4 +13,8 @@ export class FaceDetection extends ObjectDetection implements IFaceDetecion {
) {
super(score, score, '', relativeBox, imageDims)
}
public forSize(width: number, height: number): FaceDetection {
return super.forSize(width, height)
}
}
\ No newline at end of file
import { FaceDetection } from './FaceDetection';
import { FaceLandmarks } from './FaceLandmarks';
import { FaceLandmarks68 } from './FaceLandmarks68';
export interface IFaceDetectionWithLandmarks<TFaceLandmarks extends FaceLandmarks = FaceLandmarks68> {
detection: FaceDetection,
landmarks: TFaceLandmarks
}
export class FaceDetectionWithLandmarks<TFaceLandmarks extends FaceLandmarks = FaceLandmarks68>
implements IFaceDetectionWithLandmarks<TFaceLandmarks> {
private _detection: FaceDetection
private _unshiftedLandmarks: TFaceLandmarks
constructor(
detection: FaceDetection,
unshiftedLandmarks: TFaceLandmarks
) {
this._detection = detection
this._unshiftedLandmarks = unshiftedLandmarks
}
public get detection(): FaceDetection { return this._detection }
public get unshiftedLandmarks(): TFaceLandmarks { return this._unshiftedLandmarks }
public get alignedRect(): FaceDetection {
const rect = this.landmarks.align()
const { imageDims } = this.detection
return new FaceDetection(this._detection.score, rect.rescale(imageDims.reverse()), imageDims)
}
public get landmarks(): TFaceLandmarks {
const { x, y } = this.detection.box
return this._unshiftedLandmarks.shiftBy(x, y)
}
// aliases for backward compatibility
get faceDetection(): FaceDetection { return this.detection }
get faceLandmarks(): TFaceLandmarks { return this.landmarks }
public forSize(width: number, height: number): FaceDetectionWithLandmarks<TFaceLandmarks> {
const resizedDetection = this._detection.forSize(width, height)
const resizedLandmarks = this._unshiftedLandmarks.forSize<TFaceLandmarks>(resizedDetection.box.width, resizedDetection.box.height)
return new FaceDetectionWithLandmarks<TFaceLandmarks>(resizedDetection, resizedLandmarks)
}
}
\ No newline at end of file
import { FaceDetection } from './FaceDetection';
import { FaceDetectionWithLandmarks, IFaceDetectionWithLandmarks } from './FaceDetectionWithLandmarks';
import { FaceLandmarks } from './FaceLandmarks';
import { FaceLandmarks68 } from './FaceLandmarks68';
export interface IFullFaceDescription<TFaceLandmarks extends FaceLandmarks = FaceLandmarks68>
extends IFaceDetectionWithLandmarks<TFaceLandmarks> {
detection: FaceDetection,
landmarks: TFaceLandmarks,
descriptor: Float32Array
}
export class FullFaceDescription<TFaceLandmarks extends FaceLandmarks = FaceLandmarks68>
extends FaceDetectionWithLandmarks<TFaceLandmarks>
implements IFullFaceDescription<TFaceLandmarks> {
private _descriptor: Float32Array
constructor(
detection: FaceDetection,
unshiftedLandmarks: TFaceLandmarks,
descriptor: Float32Array
) {
super(detection, unshiftedLandmarks)
this._descriptor = descriptor
}
public get descriptor(): Float32Array {
return this._descriptor
}
public forSize(width: number, height: number): FullFaceDescription<TFaceLandmarks> {
const { detection, landmarks } = super.forSize(width, height)
return new FullFaceDescription<TFaceLandmarks>(detection, landmarks, this.descriptor)
}
}
\ No newline at end of file
export * from './FaceDetection';
export * from './FaceLandmarks';
export * from './FaceLandmarks5';
export * from './FaceLandmarks68';
export * from './FaceMatch';
export * from './LabeledFaceDescriptors';
\ No newline at end of file
export type WithFaceDescriptor<TSource> = TSource & {
descriptor: Float32Array
}
export function extendWithFaceDescriptor<
TSource
> (
sourceObj: TSource,
descriptor: Float32Array
): WithFaceDescriptor<TSource> {
const extension = { descriptor }
return Object.assign({}, sourceObj, extension)
}
import { FaceDetection } from '../classes/FaceDetection';
export type WithFaceDetection<TSource> = TSource & {
detection: FaceDetection
}
export function extendWithFaceDetection<
TSource
> (
sourceObj: TSource,
detection: FaceDetection
): WithFaceDetection<TSource> {
const extension = { detection }
return Object.assign({}, sourceObj, extension)
}
// TODO
export type FaceExpressions = number[]
export type WithFaceExpressions<TSource> = TSource & {
expressions: FaceExpressions
}
export function extendWithFaceExpressions<
TSource
> (
sourceObj: TSource,
expressions: FaceExpressions
): WithFaceExpressions<TSource> {
const extension = { expressions }
return Object.assign({}, sourceObj, extension)
}
\ No newline at end of file
import { FaceDetection } from '../classes/FaceDetection';
import { FaceLandmarks } from '../classes/FaceLandmarks';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { WithFaceDetection } from './WithFaceDetection';
export type WithFaceLandmarks<
TSource extends WithFaceDetection<{}>,
TFaceLandmarks extends FaceLandmarks = FaceLandmarks68
> = TSource & {
landmarks: TFaceLandmarks
unshiftedLandmarks: TFaceLandmarks
alignedRect: FaceDetection
}
export function extendWithFaceLandmarks<
TSource extends WithFaceDetection<{}>,
TFaceLandmarks extends FaceLandmarks = FaceLandmarks68
> (
sourceObj: TSource,
unshiftedLandmarks: TFaceLandmarks
): WithFaceLandmarks<TSource, TFaceLandmarks> {
const { box: shift } = sourceObj.detection
const landmarks = unshiftedLandmarks.shiftBy<TFaceLandmarks>(shift.x, shift.y)
const rect = landmarks.align()
const { imageDims } = sourceObj.detection
const alignedRect = new FaceDetection(sourceObj.detection.score, rect.rescale(imageDims.reverse()), imageDims)
const extension = {
landmarks,
unshiftedLandmarks,
alignedRect
}
return Object.assign({}, sourceObj, extension)
}
\ No newline at end of file
export * from './WithFaceDescriptor'
export * from './WithFaceDetection'
export * from './WithFaceExpressions'
export * from './WithFaceLandmarks'
\ No newline at end of file
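Taken together, these factories let any source object be extended step by step. A small sketch of composing them by hand, mirroring the unit tests further below (concrete Rect/Point values are only illustrative, and the import paths assume the sketch sits next to the factory modules):

import { Point, Rect } from 'tfjs-image-recognition-base'
import { FaceDetection } from '../classes/FaceDetection'
import { FaceLandmarks68 } from '../classes/FaceLandmarks68'
import { extendWithFaceDetection } from './WithFaceDetection'
import { extendWithFaceLandmarks } from './WithFaceLandmarks'

const detection = new FaceDetection(1.0, new Rect(0, 0, 0.5, 0.5), { width: 100, height: 100 })
const unshiftedLandmarks = new FaceLandmarks68(Array(68).fill(0).map((_, i) => new Point(i / 100, i / 100)), { width: 100, height: 100 })

// {} -> WithFaceDetection<{}> -> WithFaceLandmarks<WithFaceDetection<{}>>
const withDetection = extendWithFaceDetection({ srcProp: 'any source object' }, detection)
const withLandmarks = extendWithFaceLandmarks(withDetection, unshiftedLandmarks)
// withLandmarks now carries srcProp, detection, landmarks, unshiftedLandmarks and alignedRect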
import * as tf from '@tensorflow/tfjs-core';
import { TNetInput } from 'tfjs-image-recognition-base';
import { extractFaces, extractFaceTensors } from '../dom';
import { extendWithFaceDescriptor, WithFaceDescriptor } from '../factories/WithFaceDescriptor';
import { WithFaceDetection } from '../factories/WithFaceDetection';
import { WithFaceLandmarks } from '../factories/WithFaceLandmarks';
import { ComposableTask } from './ComposableTask';
import { nets } from './nets';
export class ComputeFaceDescriptorsTaskBase<TReturn, TParentReturn> extends ComposableTask<TReturn> {
constructor(
protected parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>,
protected input: TNetInput
) {
super()
}
}
export class ComputeAllFaceDescriptorsTask<
TSource extends WithFaceLandmarks<WithFaceDetection<{}>>
> extends ComputeFaceDescriptorsTaskBase<WithFaceDescriptor<TSource>[], TSource[]> {
public async run(): Promise<WithFaceDescriptor<TSource>[]> {
const parentResults = await this.parentTask
const alignedRects = parentResults.map(({ alignedRect }) => alignedRect)
const alignedFaces: Array<HTMLCanvasElement | tf.Tensor3D> = this.input instanceof tf.Tensor
? await extractFaceTensors(this.input, alignedRects)
: await extractFaces(this.input, alignedRects)
const results = await Promise.all(parentResults.map(async (parentResult, i) => {
const descriptor = await nets.faceRecognitionNet.computeFaceDescriptor(alignedFaces[i]) as Float32Array
return extendWithFaceDescriptor<TSource>(parentResult, descriptor)
}))
alignedFaces.forEach(f => f instanceof tf.Tensor && f.dispose())
return results
}
}
export class ComputeSingleFaceDescriptorTask<
TSource extends WithFaceLandmarks<WithFaceDetection<{}>>
> extends ComputeFaceDescriptorsTaskBase<WithFaceDescriptor<TSource> | undefined, TSource | undefined> {
public async run(): Promise<WithFaceDescriptor<TSource> | undefined> {
const parentResult = await this.parentTask
if (!parentResult) {
return
}
const { alignedRect } = parentResult
const alignedFaces: Array<HTMLCanvasElement | tf.Tensor3D> = this.input instanceof tf.Tensor
? await extractFaceTensors(this.input, [alignedRect])
: await extractFaces(this.input, [alignedRect])
...@@ -55,6 +60,6 @@ export class ComputeSingleFaceDescriptorTask extends ComputeFaceDescriptorsTaskB
alignedFaces.forEach(f => f instanceof tf.Tensor && f.dispose())
return extendWithFaceDescriptor(parentResult, descriptor)
}
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { TNetInput } from 'tfjs-image-recognition-base';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { extractFaces, extractFaceTensors } from '../dom';
import { FaceLandmark68Net } from '../faceLandmarkNet/FaceLandmark68Net';
import { FaceLandmark68TinyNet } from '../faceLandmarkNet/FaceLandmark68TinyNet';
import { WithFaceDetection } from '../factories/WithFaceDetection';
import { extendWithFaceLandmarks, WithFaceLandmarks } from '../factories/WithFaceLandmarks';
import { ComposableTask } from './ComposableTask';
import { ComputeAllFaceDescriptorsTask, ComputeSingleFaceDescriptorTask } from './ComputeFaceDescriptorsTasks';
import { nets } from './nets';
import { PredictAllFaceExpressionsTask, PredictSingleFaceExpressionTask } from './PredictFaceExpressionsTask';
export class DetectFaceLandmarksTaskBase<TReturn, TParentReturn> extends ComposableTask<TReturn> {
constructor(
protected parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>,
protected input: TNetInput,
protected useTinyLandmarkNet: boolean
) {
...@@ -27,11 +28,14 @@ export class DetectFaceLandmarksTaskBase<ReturnType, DetectFacesReturnType> exte
}
}
export class DetectAllFaceLandmarksTask<
TSource extends WithFaceDetection<{}>
> extends DetectFaceLandmarksTaskBase<WithFaceLandmarks<TSource>[], TSource[]> {
public async run(): Promise<WithFaceLandmarks<TSource>[]> {
const parentResults = await this.parentTask
const detections = parentResults.map(res => res.detection)
const faces: Array<HTMLCanvasElement | tf.Tensor3D> = this.input instanceof tf.Tensor
? await extractFaceTensors(this.input, detections)
...@@ -43,25 +47,32 @@ export class DetectAllFaceLandmarksTask extends DetectFaceLandmarksTaskBase<Face
faces.forEach(f => f instanceof tf.Tensor && f.dispose())
return parentResults.map((parentResult, i) =>
extendWithFaceLandmarks<TSource>(parentResult, faceLandmarksByFace[i])
)
}
withFaceExpressions(): PredictAllFaceExpressionsTask<WithFaceLandmarks<TSource>> {
return new PredictAllFaceExpressionsTask<WithFaceLandmarks<TSource>>(this, this.input)
}
withFaceDescriptors(): ComputeAllFaceDescriptorsTask<WithFaceLandmarks<TSource>> {
return new ComputeAllFaceDescriptorsTask<WithFaceLandmarks<TSource>>(this, this.input)
}
}
export class DetectSingleFaceLandmarksTask<
TSource extends WithFaceDetection<{}>
> extends DetectFaceLandmarksTaskBase<WithFaceLandmarks<TSource> | undefined, TSource | undefined> {
public async run(): Promise<WithFaceLandmarks<TSource> | undefined> {
const parentResult = await this.parentTask
if (!parentResult) {
return
}
const { detection } = parentResult
const faces: Array<HTMLCanvasElement | tf.Tensor3D> = this.input instanceof tf.Tensor
? await extractFaceTensors(this.input, [detection])
: await extractFaces(this.input, [detection])
...@@ -71,13 +82,14 @@ export class DetectSingleFaceLandmarksTask extends DetectFaceLandmarksTaskBase<F
faces.forEach(f => f instanceof tf.Tensor && f.dispose())
return extendWithFaceLandmarks<TSource>(parentResult, landmarks)
}
withFaceExpression(): PredictSingleFaceExpressionTask<WithFaceLandmarks<TSource>> {
return new PredictSingleFaceExpressionTask<WithFaceLandmarks<TSource>>(this, this.input)
}
withFaceDescriptor(): ComputeSingleFaceDescriptorTask<WithFaceLandmarks<TSource>> {
return new ComputeSingleFaceDescriptorTask<WithFaceLandmarks<TSource>>(this, this.input)
}
}
\ No newline at end of file
...@@ -2,6 +2,7 @@ import { TNetInput } from 'tfjs-image-recognition-base';
import { TinyYolov2Options } from 'tfjs-tiny-yolov2';
import { FaceDetection } from '../classes/FaceDetection';
import { extendWithFaceDetection, WithFaceDetection } from '../factories/WithFaceDetection';
import { MtcnnOptions } from '../mtcnn/MtcnnOptions';
import { SsdMobilenetv1Options } from '../ssdMobilenetv1/SsdMobilenetv1Options';
import { TinyFaceDetectorOptions } from '../tinyFaceDetector/TinyFaceDetectorOptions';
...@@ -9,6 +10,7 @@ import { ComposableTask } from './ComposableTask';
import { DetectAllFaceLandmarksTask, DetectSingleFaceLandmarksTask } from './DetectFaceLandmarksTasks';
import { nets } from './nets';
import { FaceDetectionOptions } from './types';
import { PredictAllFaceExpressionsTask, PredictSingleFaceExpressionTask } from './PredictFaceExpressionsTask';
export class DetectFacesTaskBase<TReturn> extends ComposableTask<TReturn> {
constructor(
...@@ -27,7 +29,7 @@ export class DetectAllFacesTask extends DetectFacesTaskBase<FaceDetection[]> {
if (options instanceof MtcnnOptions) {
return (await nets.mtcnn.forward(input, options))
.map(result => result.detection)
}
const faceDetectionFunction = options instanceof TinyFaceDetectorOptions
...@@ -49,10 +51,27 @@ export class DetectAllFacesTask extends DetectFacesTaskBase<FaceDetection[]> {
return faceDetectionFunction(input)
}
private runAndExtendWithFaceDetections(): Promise<WithFaceDetection<{}>[]> {
return new Promise<WithFaceDetection<{}>[]>(async res => {
const detections = await this.run()
return res(detections.map(detection => extendWithFaceDetection({}, detection)))
})
}
withFaceLandmarks(useTinyLandmarkNet: boolean = false): DetectAllFaceLandmarksTask<WithFaceDetection<{}>> {
return new DetectAllFaceLandmarksTask<WithFaceDetection<{}>>(
this.runAndExtendWithFaceDetections(),
this.input,
useTinyLandmarkNet
)
}
withFaceExpressions(): PredictAllFaceExpressionsTask<WithFaceDetection<{}>> {
return new PredictAllFaceExpressionsTask<WithFaceDetection<{}>>(
this.runAndExtendWithFaceDetections(),
this.input
)
}
}
export class DetectSingleFaceTask extends DetectFacesTaskBase<FaceDetection | undefined> {
...@@ -68,8 +87,26 @@ export class DetectSingleFaceTask extends DetectFacesTaskBase<FaceDetection | un
return faceDetectionWithHighestScore;
}
private runAndExtendWithFaceDetection(): Promise<WithFaceDetection<{}>> {
return new Promise<WithFaceDetection<{}>>(async res => {
const detection = await this.run()
return detection ? res(extendWithFaceDetection<{}>({}, detection)) : null
})
}
withFaceLandmarks(useTinyLandmarkNet: boolean = false): DetectSingleFaceLandmarksTask<WithFaceDetection<{}>> {
return new DetectSingleFaceLandmarksTask<WithFaceDetection<{}>>(
this.runAndExtendWithFaceDetection(),
this.input,
useTinyLandmarkNet
)
}
withFaceExpression(): PredictSingleFaceExpressionTask<WithFaceDetection<{}>> {
return new PredictSingleFaceExpressionTask<WithFaceDetection<{}>>(
this.runAndExtendWithFaceDetection(),
this.input
)
}
}
\ No newline at end of file
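The detect-faces tasks now wrap their raw FaceDetection results via runAndExtendWithFaceDetections before handing them to the downstream tasks, so the chained API yields the composed types at every step. A usage sketch (assuming the SSD Mobilenet v1 weights are loaded and `input` is a valid TNetInput such as an HTMLImageElement):

const options = new SsdMobilenetv1Options({ minConfidence: 0.5 })
// FaceDetection[]
const detections = await detectAllFaces(input, options)
// WithFaceLandmarks<WithFaceDetection<{}>>[]
const withLandmarks = await detectAllFaces(input, options).withFaceLandmarks()
// WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>[]
const withDescriptors = await detectAllFaces(input, options).withFaceLandmarks().withFaceDescriptors()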
import { FaceMatch } from '../classes/FaceMatch';
import { LabeledFaceDescriptors } from '../classes/LabeledFaceDescriptors';
import { euclideanDistance } from '../euclideanDistance';
import { WithFaceDescriptor } from '../factories';
export class FaceMatcher {
...@@ -9,7 +9,7 @@ export class FaceMatcher {
private _distanceThreshold: number
constructor(
inputs: LabeledFaceDescriptors | WithFaceDescriptor<any> | Float32Array | Array<LabeledFaceDescriptors | WithFaceDescriptor<any> | Float32Array>,
distanceThreshold: number = 0.6
) {
...@@ -29,15 +29,15 @@ export class FaceMatcher {
return desc
}
if (desc instanceof Float32Array) {
return new LabeledFaceDescriptors(createUniqueLabel(), [desc])
}
if (desc.descriptor && desc.descriptor instanceof Float32Array) {
return new LabeledFaceDescriptors(createUniqueLabel(), [desc.descriptor])
}
throw new Error(`FaceRecognizer.constructor - expected inputs to be of type LabeledFaceDescriptors | WithFaceDescriptor<any> | Float32Array | Array<LabeledFaceDescriptors | WithFaceDescriptor<any> | Float32Array>`)
})
}
......
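Since FaceMatcher now accepts WithFaceDescriptor<any> inputs, descriptor-carrying results from the task chain can be passed in directly. A hedged sketch (findBestMatch is assumed from the existing FaceMatcher API and is not part of this diff; `options`, `referenceImage` and `queryImage` are assumed inputs):

const referenceResults = await detectAllFaces(referenceImage, options).withFaceLandmarks().withFaceDescriptors()
const matcher = new FaceMatcher(referenceResults, 0.6)
const queryResults = await detectAllFaces(queryImage, options).withFaceLandmarks().withFaceDescriptors()
queryResults.forEach(({ descriptor }) => {
// look up the closest labeled descriptor within the 0.6 distance threshold
const bestMatch = matcher.findBestMatch(descriptor)
})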
import { TNetInput } from 'tfjs-image-recognition-base';
import { WithFaceDetection } from '../factories/WithFaceDetection';
import { extendWithFaceExpressions, WithFaceExpressions } from '../factories/WithFaceExpressions';
import { ComposableTask } from './ComposableTask';
import { DetectAllFaceLandmarksTask, DetectSingleFaceLandmarksTask } from './DetectFaceLandmarksTasks';
export class PredictFaceExpressionsTaskBase<TReturn, TParentReturn> extends ComposableTask<TReturn> {
constructor(
protected parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>,
protected input: TNetInput
) {
super()
}
}
export class PredictAllFaceExpressionsTask<
TSource extends WithFaceDetection<{}>
> extends PredictFaceExpressionsTaskBase<WithFaceExpressions<TSource>[], TSource[]> {
public async run(): Promise<WithFaceExpressions<TSource>[]> {
const parentResults = await this.parentTask
// TODO: implement me
return parentResults.map(parentResult => extendWithFaceExpressions<TSource>(parentResult, []))
}
withFaceLandmarks(): DetectAllFaceLandmarksTask<WithFaceExpressions<TSource>> {
return new DetectAllFaceLandmarksTask(this, this.input, false)
}
}
export class PredictSingleFaceExpressionTask<
TSource extends WithFaceDetection<{}>
> extends PredictFaceExpressionsTaskBase<WithFaceExpressions<TSource> | undefined, TSource | undefined> {
public async run(): Promise<WithFaceExpressions<TSource> | undefined> {
const parentResult = await this.parentTask
if (!parentResult) {
return
}
// TODO: implement me
return extendWithFaceExpressions(parentResult, [])
}
withFaceLandmarks(): DetectSingleFaceLandmarksTask<WithFaceExpressions<TSource>> {
return new DetectSingleFaceLandmarksTask(this, this.input, false)
}
}
\ No newline at end of file
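The expression tasks are still stubs in this commit (run() currently extends the parent result with an empty expressions array), but they already slot into the chain and fix the result types. A minimal sketch of how they are meant to be called, with `options` and `input` assumed as in the earlier sketch:

// WithFaceExpressions<WithFaceDetection<{}>>[]
const withExpressions = await detectAllFaces(input, options).withFaceExpressions()
// expressions can also be combined with landmarks:
// WithFaceLandmarks<WithFaceExpressions<WithFaceDetection<{}>>>[]
const withBoth = await detectAllFaces(input, options).withFaceExpressions().withFaceLandmarks()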
import { TNetInput } from 'tfjs-image-recognition-base';
import { ITinyYolov2Options, TinyYolov2Options } from 'tfjs-tiny-yolov2';
import { WithFaceDescriptor, WithFaceDetection, WithFaceLandmarks } from '../factories';
import { IMtcnnOptions, MtcnnOptions } from '../mtcnn/MtcnnOptions';
import { SsdMobilenetv1Options } from '../ssdMobilenetv1';
import { detectAllFaces } from './detectFaces';
...@@ -11,7 +11,7 @@ import { detectAllFaces } from './detectFaces';
export async function allFacesSsdMobilenetv1(
input: TNetInput,
minConfidence?: number
): Promise<WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>[]> {
return await detectAllFaces(input, new SsdMobilenetv1Options(minConfidence ? { minConfidence } : {}))
.withFaceLandmarks()
.withFaceDescriptors()
...@@ -20,7 +20,7 @@ export async function allFacesSsdMobilenetv1(
export async function allFacesTinyYolov2(
input: TNetInput,
forwardParams: ITinyYolov2Options = {}
): Promise<WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>[]> {
return await detectAllFaces(input, new TinyYolov2Options(forwardParams))
.withFaceLandmarks()
.withFaceDescriptors()
...@@ -29,7 +29,7 @@ export async function allFacesMtcnn(
export async function allFacesMtcnn(
input: TNetInput,
forwardParams: IMtcnnOptions = {}
): Promise<WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>[]> {
return await detectAllFaces(input, new MtcnnOptions(forwardParams))
.withFaceLandmarks()
.withFaceDescriptors()
......
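The allFaces* convenience wrappers keep their signatures but now resolve to the composed object type instead of FullFaceDescription. For example (sketch, assuming the SSD Mobilenet v1, landmark and recognition models are loaded):

// WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>[]
const fullResults = await allFacesSsdMobilenetv1(input, 0.5)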
...@@ -2,12 +2,13 @@ import { TNetInput } from 'tfjs-image-recognition-base';
import { ITinyYolov2Options } from 'tfjs-tiny-yolov2';
import { FaceDetection } from '../classes/FaceDetection';
import { FaceLandmarks5 } from '../classes/FaceLandmarks5';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { FaceLandmark68Net } from '../faceLandmarkNet/FaceLandmark68Net';
import { FaceLandmark68TinyNet } from '../faceLandmarkNet/FaceLandmark68TinyNet';
import { FaceRecognitionNet } from '../faceRecognitionNet/FaceRecognitionNet';
import { WithFaceDetection } from '../factories/WithFaceDetection';
import { WithFaceLandmarks } from '../factories/WithFaceLandmarks';
import { Mtcnn } from '../mtcnn/Mtcnn';
import { MtcnnOptions } from '../mtcnn/MtcnnOptions';
import { SsdMobilenetv1 } from '../ssdMobilenetv1/SsdMobilenetv1';
...@@ -64,7 +65,7 @@ export const tinyYolov2 = (input: TNetInput, options: ITinyYolov2Options): Promi
* @param options (optional, default: see MtcnnOptions constructor for default parameters).
* @returns Bounding box of each face with score and 5 point face landmarks.
*/
export const mtcnn = (input: TNetInput, options: MtcnnOptions): Promise<WithFaceLandmarks<WithFaceDetection<{}>, FaceLandmarks5>[]> =>
nets.mtcnn.forward(input, options)
/**
......
...@@ -10,6 +10,7 @@ export * from './classes/index';
export * from './dom/index'
export * from './faceLandmarkNet/index';
export * from './faceRecognitionNet/index';
export * from './factories/index';
export * from './globalApi/index';
export * from './mtcnn/index';
export * from './ssdMobilenetv1/index';
......
...@@ -2,8 +2,8 @@ import * as tf from '@tensorflow/tfjs-core';
import { NetInput, NeuralNetwork, Point, Rect, TNetInput, toNetInput } from 'tfjs-image-recognition-base';
import { FaceDetection } from '../classes/FaceDetection';
import { FaceLandmarks5 } from '../classes/FaceLandmarks5';
import { extendWithFaceDetection, extendWithFaceLandmarks } from '../factories';
import { bgrToRgbTensor } from './bgrToRgbTensor';
import { CELL_SIZE } from './config';
import { extractParams } from './extractParams';
...@@ -14,7 +14,7 @@ import { pyramidDown } from './pyramidDown';
import { stage1 } from './stage1';
import { stage2 } from './stage2';
import { stage3 } from './stage3';
import { MtcnnResult, NetParams } from './types';
export class Mtcnn extends NeuralNetwork<NetParams> {
...@@ -25,7 +25,7 @@ export class Mtcnn extends NeuralNetwork<NetParams> {
public async forwardInput(
input: NetInput,
forwardParams: IMtcnnOptions = {}
): Promise<{ results: MtcnnResult[], stats: any }> {
const { params } = this
...@@ -101,7 +101,9 @@ export class Mtcnn extends NeuralNetwork<NetParams> {
const out3 = await stage3(inputCanvas, out2.boxes, scoreThresholds[2], params.onet, stats)
stats.total_stage3 = Date.now() - ts
const results = out3.boxes.map((box, idx) => extendWithFaceLandmarks(
extendWithFaceDetection<{}>(
{},
new FaceDetection(
out3.scores[idx],
new Rect(
...@@ -114,6 +116,7 @@ export class Mtcnn extends NeuralNetwork<NetParams> {
height,
width
}
)
),
new FaceLandmarks5(
out3.points[idx].map(pt => pt.sub(new Point(box.left, box.top)).div(new Point(box.width, box.height))),
...@@ -127,7 +130,7 @@ export class Mtcnn extends NeuralNetwork<NetParams> {
public async forward(
input: TNetInput,
forwardParams: IMtcnnOptions = {}
): Promise<MtcnnResult[]> {
return (
await this.forwardInput(
await toNetInput(input),
...@@ -139,7 +142,7 @@ export class Mtcnn extends NeuralNetwork<NetParams> {
public async forwardWithStats(
input: TNetInput,
forwardParams: IMtcnnOptions = {}
): Promise<{ results: MtcnnResult[], stats: any }> {
return this.forwardInput(
await toNetInput(input),
forwardParams
......
import * as tf from '@tensorflow/tfjs-core';
import { ConvParams, FCParams } from 'tfjs-tiny-yolov2';
import { FaceDetection } from '../classes/FaceDetection';
import { FaceLandmarks5 } from '../classes/FaceLandmarks5';
import { WithFaceDetection, WithFaceLandmarks } from '../factories';
export type SharedParams = {
conv1: ConvParams
...@@ -40,3 +40,5 @@ export type NetParams = {
rnet: RNetParams
onet: ONetParams
}
export type MtcnnResult = WithFaceLandmarks<WithFaceDetection<{}>, FaceLandmarks5>
import { IDimensions } from 'tfjs-image-recognition-base';
import { FaceDetection } from './classes/FaceDetection';
import { FaceLandmarks } from './classes/FaceLandmarks';
import { extendWithFaceDetection } from './factories/WithFaceDetection';
import { extendWithFaceLandmarks } from './factories/WithFaceLandmarks';
export function resizeResults<T>(obj: T, { width, height }: IDimensions): T {
const hasLandmarks = obj['unshiftedLandmarks'] && obj['unshiftedLandmarks'] instanceof FaceLandmarks
const hasDetection = obj['detection'] && obj['detection'] instanceof FaceDetection
if (hasLandmarks) {
const resizedDetection = obj['detection'].forSize(width, height)
const resizedLandmarks = obj['unshiftedLandmarks'].forSize(resizedDetection.box.width, resizedDetection.box.height)
return extendWithFaceLandmarks(extendWithFaceDetection(obj as any, resizedDetection), resizedLandmarks)
}
if (hasDetection) {
return extendWithFaceDetection(obj as any, obj['detection'].forSize(width, height))
}
if (obj instanceof FaceLandmarks || obj instanceof FaceDetection) {
return (obj as any).forSize(width, height)
}
return obj
}
\ No newline at end of file
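resizeResults works on any of the shapes above by duck-typing on the detection and unshiftedLandmarks properties. A usage sketch for rescaling a result to a display size such as a canvas overlay (`input` and `options` assumed as before, dimensions only illustrative):

const results = await detectAllFaces(input, options).withFaceLandmarks()
// returns a new object with detection box and landmark points rescaled to 800x600
const resized = resizeResults(results[0], { width: 800, height: 600 })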
import { FaceLandmarks } from '../src/classes/FaceLandmarks';
import { FaceLandmarks68 } from '../src/classes/FaceLandmarks68';
import { WithFaceDetection } from '../src/factories/WithFaceDetection';
import { WithFaceLandmarks } from '../src/factories/WithFaceLandmarks';
import { ExpectedFaceDetectionWithLandmarks, expectPointsClose, expectRectClose, sortByFaceDetection } from './utils';
export type BoxAndLandmarksDeltas = {
maxScoreDelta: number
...@@ -10,7 +11,7 @@ export type BoxAndLandmarksDeltas = {
}
export function expectFaceDetectionsWithLandmarks<TFaceLandmarks extends FaceLandmarks = FaceLandmarks68>(
results: WithFaceLandmarks<WithFaceDetection<{}>, TFaceLandmarks>[],
allExpectedFullFaceDescriptions: ExpectedFaceDetectionWithLandmarks[],
expectedScores: number[],
deltas: BoxAndLandmarksDeltas
...@@ -29,6 +30,6 @@ export function expectFaceDetectionsWithLandmarks<TFaceLan
const { detection, landmarks } = sortedResults[i]
expect(Math.abs(detection.score - expected.score)).toBeLessThan(deltas.maxScoreDelta)
expectRectClose(detection.box, expected.detection, deltas.maxBoxDelta)
expectPointsClose(landmarks.positions, expected.landmarks, deltas.maxLandmarksDelta)
})
}
\ No newline at end of file
import { euclideanDistance } from '../src/euclideanDistance';
import { WithFaceDescriptor } from '../src/factories/WithFaceDescriptor';
import { WithFaceDetection } from '../src/factories/WithFaceDetection';
import { WithFaceLandmarks } from '../src/factories/WithFaceLandmarks';
import { BoxAndLandmarksDeltas } from './expectFaceDetectionsWithLandmarks';
import { ExpectedFullFaceDescription, expectPointClose, expectRectClose, sortByFaceDetection } from './utils';
...@@ -8,7 +10,7 @@ export type FullFaceDescriptionDeltas = BoxAndLandmarksDeltas & {
}
export function expectFullFaceDescriptions(
results: WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>[],
allExpectedFullFaceDescriptions: ExpectedFullFaceDescription[],
expectedScores: number[],
deltas: FullFaceDescriptionDeltas
......
import { Rect } from '../../../src';
import { FaceDetection } from '../../../src/classes/FaceDetection';
import { extendWithFaceDetection } from '../../../src/factories/WithFaceDetection';
const detection = new FaceDetection(1.0, new Rect(0, 0, 0.5, 0.5), { width: 100, height: 100 })
describe('extendWithFaceDetection', () => {
it('returns WithFaceDetection', () => {
const withFaceDetection = extendWithFaceDetection({}, detection)
expect(withFaceDetection.detection).toEqual(detection)
})
it('extends source object', () => {
const srcProp = { foo: true }
const withFaceDetection = extendWithFaceDetection({ srcProp }, detection)
expect(withFaceDetection.detection).toEqual(detection)
expect(withFaceDetection.srcProp).toEqual(srcProp)
})
})
import { Point, Rect } from '../../../src';
import { FaceDetection } from '../../../src/classes/FaceDetection';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { extendWithFaceDetection } from '../../../src/factories/WithFaceDetection';
import { extendWithFaceLandmarks } from '../../../src/factories/WithFaceLandmarks';
const detection = new FaceDetection(1.0, new Rect(0.5, 0.5, 0.5, 0.5), { width: 100, height: 100 })
const unshiftedLandmarks = new FaceLandmarks68(Array(68).fill(0).map((_, i) => new Point(i / 100, i / 100)), { width: 100, height: 100 })
const makeSrcObjectWithFaceDetection = <T> (srcObject: T) => extendWithFaceDetection(srcObject, detection)
describe('extendWithFaceLandmarks', () => {
it('returns WithFaceLandmarks', () => {
const srcObj = {}
const srcObjWithFaceDetection = makeSrcObjectWithFaceDetection(srcObj)
const withFaceLandmarks = extendWithFaceLandmarks(srcObjWithFaceDetection, unshiftedLandmarks)
expect(withFaceLandmarks.detection).toEqual(detection)
expect(withFaceLandmarks.unshiftedLandmarks).toEqual(unshiftedLandmarks)
expect(withFaceLandmarks.alignedRect instanceof FaceDetection).toBe(true)
expect(withFaceLandmarks.landmarks instanceof FaceLandmarks68).toBe(true)
})
it('extends source object', () => {
const srcObj = { srcProp: { foo: true } }
const srcObjWithFaceDetection = makeSrcObjectWithFaceDetection(srcObj)
const withFaceLandmarks = extendWithFaceLandmarks(srcObjWithFaceDetection, unshiftedLandmarks)
expect(withFaceLandmarks.srcProp).toEqual(srcObj.srcProp)
expect(withFaceLandmarks.detection).toEqual(detection)
expect(withFaceLandmarks.unshiftedLandmarks).toEqual(unshiftedLandmarks)
expect(withFaceLandmarks.alignedRect instanceof FaceDetection).toBe(true)
expect(withFaceLandmarks.landmarks instanceof FaceLandmarks68).toBe(true)
})
})
import { IPoint, IRect } from '../../../src';
import { FaceLandmarks5 } from '../../../src/classes/FaceLandmarks5';
import { WithFaceDetection } from '../../../src/factories/WithFaceDetection';
import { WithFaceLandmarks } from '../../../src/factories/WithFaceLandmarks';
import { BoxAndLandmarksDeltas, expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { sortBoxes, sortByDistanceToOrigin } from '../../utils';
...@@ -14,7 +15,7 @@ export const expectedMtcnnBoxes: IRect[] = sortBoxes([
])
export function expectMtcnnResults(
results: WithFaceLandmarks<WithFaceDetection<{}>, FaceLandmarks5>[],
expectedMtcnnFaceLandmarks: IPoint[][],
expectedScores: number[],
deltas: BoxAndLandmarksDeltas
......
import * as faceapi from '../../../src';
import { MtcnnOptions } from '../../../src/mtcnn/MtcnnOptions';
import { loadImage } from '../../env';
import { expectFaceDetections } from '../../expectFaceDetections';
...@@ -92,9 +91,10 @@ describe('mtcnn', () => {
maxLandmarksDelta: 6,
maxDescriptorDelta: 0.2
}
expect(!!result).toBeTruthy()
expectFullFaceDescriptions(
result ? [result] : [],
[expectedFullFaceDescriptions[0]],
[expectedScores[0]],
deltas
......
import { Point } from 'tfjs-image-recognition-base';
import { Rect } from '../../src';
import { FaceDetection } from '../../src/classes/FaceDetection';
import { FaceLandmarks68 } from '../../src/classes/FaceLandmarks68';
import { extendWithFaceDetection } from '../../src/factories/WithFaceDetection';
import { extendWithFaceLandmarks } from '../../src/factories/WithFaceLandmarks';
import { resizeResults } from '../../src/resizeResults';
import { expectPointsClose, expectRectClose } from '../utils';
const detection = new FaceDetection(1.0, new Rect(0, 0, 0.5, 0.5), { width: 100, height: 100 })
const unshiftedLandmarks = new FaceLandmarks68(Array(68).fill(0).map((_, i) => new Point(i / 100, i / 100)), { width: 100, height: 100 })
describe('resizeResults', () => {
it('resizes FaceDetection', () => {
const width = 200
const height = 400
const expected = detection.forSize(width, height)
const resized = resizeResults(detection, { width, height })
expect(resized.imageWidth).toEqual(width)
expect(resized.imageHeight).toEqual(height)
expectRectClose(resized.box, expected.box, 0)
})
it('resizes FaceLandmarks', () => {
const width = 200
const height = 400
const expected = unshiftedLandmarks.forSize(width, height)
const resized = resizeResults(unshiftedLandmarks, { width, height })
expect(resized.imageWidth).toEqual(width)
expect(resized.imageHeight).toEqual(height)
expectPointsClose(resized.positions, expected.positions, 0)
})
it('resizes WithFaceDetection', () => {
const width = 200
const height = 400
const expected = detection.forSize(width, height)
const resized = resizeResults(extendWithFaceDetection({}, detection), { width, height })
expect(resized.detection.imageWidth).toEqual(width)
expect(resized.detection.imageHeight).toEqual(height)
expectRectClose(resized.detection.box, expected.box, 0)
})
it('resizes WithFaceLandmarks', () => {
const width = 200
const height = 400
const expectedRect = detection.forSize(width, height)
const expectedLandmarks = unshiftedLandmarks.forSize(expectedRect.box.width, expectedRect.box.height)
const resized = resizeResults(
extendWithFaceLandmarks(
extendWithFaceDetection({}, detection),
unshiftedLandmarks
),
{ width, height }
)
expect(resized.detection.imageWidth).toEqual(width)
expect(resized.detection.imageHeight).toEqual(height)
expectRectClose(resized.detection.box, expectedRect.box, 0)
expect(resized.unshiftedLandmarks.imageWidth).toEqual(expectedRect.box.width)
expect(resized.unshiftedLandmarks.imageHeight).toEqual(expectedRect.box.height)
expectPointsClose(resized.unshiftedLandmarks.positions, expectedLandmarks.positions, 0)
})
})
...@@ -7,7 +7,6 @@ import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWit
import { expectedSsdBoxes } from './expectedBoxes';
import { loadImage } from '../../env';
import * as tf from '@tensorflow/tfjs-core';
describe('ssdMobilenetv1 - node', () => {
...@@ -90,9 +89,9 @@ describe('ssdMobilenetv1 - node', () => {
maxDescriptorDelta: 0.2
}
expect(!!result).toBeTruthy()
expectFullFaceDescriptions(
result ? [result] : [],
[expectedFullFaceDescriptions[2]],
[expectedScores[2]],
deltas
......
...@@ -6,7 +6,6 @@ import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectedSsdBoxes } from './expectedBoxes';
import { loadImage } from '../../env';
describe('ssdMobilenetv1', () => {
...@@ -89,9 +88,9 @@ describe('ssdMobilenetv1', () => {
maxDescriptorDelta: 0.2
}
expect(!!result).toBeTruthy()
expectFullFaceDescriptions(
result ? [result] : [],
[expectedFullFaceDescriptions[2]],
[expectedScores[2]],
deltas
......
...@@ -7,7 +7,6 @@ import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWit
import { expectedTinyFaceDetectorBoxes } from './expectedBoxes';
import { loadImage } from '../../env';
import * as tf from '@tensorflow/tfjs-core';
describe('tinyFaceDetector - node', () => {
...@@ -89,9 +88,10 @@ describe('tinyFaceDetector - node', () => {
maxLandmarksDelta: 10,
maxDescriptorDelta: 0.2
}
expect(!!result).toBeTruthy()
expectFullFaceDescriptions(
result ? [result] : [],
[expectedFullFaceDescriptions[2]],
[expectedScores[2]],
deltas
......
...@@ -6,7 +6,6 @@ import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectedTinyFaceDetectorBoxes } from './expectedBoxes';
import { loadImage } from '../../env';
describe('tinyFaceDetector', () => {
...@@ -88,9 +87,10 @@ describe('tinyFaceDetector', () => {
maxLandmarksDelta: 10,
maxDescriptorDelta: 0.2
}
expect(!!result).toBeTruthy()
expectFullFaceDescriptions(
result ? [result] : [],
[expectedFullFaceDescriptions[2]],
[expectedScores[2]],
deltas
......
import * as tf from '@tensorflow/tfjs-core';
import * as faceapi from '../src';
import { FaceRecognitionNet, IPoint, IRect, Mtcnn, TinyYolov2 } from '../src/';
import { FaceDetection } from '../src/classes/FaceDetection';
import { FaceLandmarks } from '../src/classes/FaceLandmarks';
import { FaceLandmark68Net } from '../src/faceLandmarkNet/FaceLandmark68Net';
...@@ -12,7 +11,7 @@ import { TinyFaceDetector } from '../src/tinyFaceDetector/TinyFaceDetector';
import { initNet, loadJson } from './env';
export function expectMaxDelta(val1: number, val2: number, maxDelta: number) {
expect(Math.abs(val1 - val2)).toBeLessThanOrEqual(maxDelta)
}
export async function expectAllTensorsReleased(fn: () => any) {
...@@ -30,7 +29,16 @@ export function expectPointClose(
expectedPoint: IPoint,
maxDelta: number
) {
expect(pointDistance(result, expectedPoint)).toBeLessThanOrEqual(maxDelta)
}
export function expectPointsClose(
results: IPoint[],
expectedPoints: IPoint[],
maxDelta: number
) {
expect(results.length).toEqual(expectedPoints.length)
results.forEach((pt, j) => expectPointClose(pt, expectedPoints[j], maxDelta))
}
export function expectRectClose(
......