Commit fdf28275 by vincent

testcases for allFaces, allFacesMtcnn

parent d098169b
import * as faceapi from '../../../src';
import { describeWithNets, expectAllTensorsReleased, expectRectClose, expectPointClose } from '../../utils';
import { expectedSsdBoxes } from './expectedResults';
import { NetInput } from '../../../src/NetInput';
import { toNetInput } from '../../../src';
import * as tf from '@tensorflow/tfjs-core';
import { Point } from '../../../src/Point';
describe('allFaces', () => {
let imgEl: HTMLImageElement
let facesFaceLandmarkPositions: Point[][]
let facesFaceDescriptors: number[][]
beforeAll(async () => {
const img = await (await fetch('base/test/images/faces.jpg')).blob()
imgEl = await faceapi.bufferToImage(img)
facesFaceLandmarkPositions = await (await fetch('base/test/data/facesFaceLandmarkPositions.json')).json()
facesFaceDescriptors = await (await fetch('base/test/data/facesFaceDescriptorsSsd.json')).json()
})
describeWithNets('computes full face descriptions', { withAllFaces: true }, ({ allFaces }) => {
const expectedScores = [0.97, 0.88, 0.83, 0.82, 0.59, 0.52]
const maxBoxDelta = 5
const maxLandmarkPointsDelta = 1
it('scores > 0.8', async () => {
const results = await allFaces(imgEl, 0.8)
expect(results.length).toEqual(4)
results.forEach(({ detection, landmarks, descriptor }, i) => {
expect(detection.getImageWidth()).toEqual(imgEl.width)
expect(detection.getImageHeight()).toEqual(imgEl.height)
expect(detection.getScore()).toBeCloseTo(expectedScores[i], 2)
expectRectClose(detection.getBox(), expectedSsdBoxes[i], maxBoxDelta)
landmarks.getPositions().forEach((pt, j) => expectPointClose(pt, facesFaceLandmarkPositions[i][j], maxLandmarkPointsDelta))
expect(descriptor).toEqual(new Float32Array(facesFaceDescriptors[i]))
})
})
it('scores > 0.5', async () => {
const results = await allFaces(imgEl, 0.5)
expect(results.length).toEqual(6)
results.forEach(({ detection, landmarks, descriptor }, i) => {
expect(detection.getImageWidth()).toEqual(imgEl.width)
expect(detection.getImageHeight()).toEqual(imgEl.height)
expect(detection.getScore()).toBeCloseTo(expectedScores[i], 2)
expectRectClose(detection.getBox(), expectedSsdBoxes[i], maxBoxDelta)
landmarks.getPositions().forEach((pt, j) => expectPointClose(pt, facesFaceLandmarkPositions[i][j], maxLandmarkPointsDelta))
expect(descriptor).toEqual(new Float32Array(facesFaceDescriptors[i]))
})
})
})
describeWithNets('no memory leaks', { withAllFaces: true }, ({ allFaces }) => {
it('single image element', async () => {
await expectAllTensorsReleased(async () => {
await allFaces(imgEl)
})
})
it('single tf.Tensor3D', async () => {
const tensor = tf.fromPixels(imgEl)
await expectAllTensorsReleased(async () => {
const netInput = (new NetInput([tensor])).managed()
await allFaces(netInput)
})
tensor.dispose()
})
it('single batch size 1 tf.Tensor4Ds', async () => {
const tensor = tf.tidy(() => tf.fromPixels(imgEl).expandDims()) as tf.Tensor4D
await expectAllTensorsReleased(async () => {
await allFaces(await toNetInput(tensor, true))
})
tensor.dispose()
})
})
})
\ No newline at end of file
import * as faceapi from '../../../src';
import { FaceLandmarks5 } from '../../../src/mtcnn/FaceLandmarks5';
import { NetInput } from '../../../src/NetInput';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { expectMtcnnResults } from './expectedResults';
describe('allFacesMtcnn', () => {
let imgEl: HTMLImageElement
let facesFaceDescriptors: number[][]
beforeAll(async () => {
const img = await (await fetch('base/test/images/faces.jpg')).blob()
imgEl = await faceapi.bufferToImage(img)
facesFaceDescriptors = await (await fetch('base/test/data/facesFaceDescriptorsMtcnn.json')).json()
})
describeWithNets('computes full face descriptions', { withAllFacesMtcnn: true }, ({ allFacesMtcnn }) => {
it('minFaceSize = 20', async () => {
const forwardParams = {
minFaceSize: 20
}
const results = await allFacesMtcnn(imgEl, forwardParams)
expect(results.length).toEqual(6)
const mtcnnResult = results.map(res => ({
faceDetection: res.detection,
faceLandmarks: res.landmarks as FaceLandmarks5
}))
expectMtcnnResults(mtcnnResult, [0, 1, 2, 3, 4, 5], 1, 1)
results.forEach(({ descriptor }, i) => {
expect(descriptor).toEqual(new Float32Array(facesFaceDescriptors[i]))
})
})
})
describeWithNets('no memory leaks', { withAllFacesMtcnn: true }, ({ allFacesMtcnn }) => {
it('single image element', async () => {
await expectAllTensorsReleased(async () => {
await allFacesMtcnn(imgEl)
})
})
})
})
\ No newline at end of file
import * as faceapi from '../../../src';
import { FaceLandmarks5 } from '../../../src/mtcnn/FaceLandmarks5';
import { Point } from '../../../src/Point';
import { expectMaxDelta, expectPointClose, expectRectClose } from '../../utils';
// Reference bounding boxes for the faces in base/test/images/faces.jpg as
// produced by the ssd face detection net (compared via expectRectClose).
export const expectedSsdBoxes = [
{ x: 48, y: 253, width: 104, height: 129 },
{ x: 260, y: 227, width: 76, height: 117 },
{ x: 466, y: 165, width: 88, height: 130 },
{ x: 234, y: 36, width: 84, height: 119 },
{ x: 577, y: 65, width: 84, height: 105 },
{ x: 84, y: 14, width: 79, height: 132 }
]
// Reference bounding boxes for the same image as produced by the mtcnn net.
// Note the face ordering differs from expectedSsdBoxes; expectMtcnnResults
// maps result indices onto this array via its boxOrder argument.
export const expectedMtcnnBoxes = [
{ x: 70, y: 21, width: 112, height: 112 },
{ x: 36, y: 250, width: 133, height: 132 },
{ x: 221, y: 43, width: 112, height: 111 },
{ x: 247, y: 231, width: 106, height: 107 },
{ x: 566, y: 67, width: 104, height: 104 },
{ x: 451, y: 176, width: 122, height: 122 }
]
// Reference 5 point landmark positions per face, index-aligned with
// expectedMtcnnBoxes. Presumably ordered eyes, nose, mouth corners as is
// usual for mtcnn 5 point landmarks -- TODO confirm against FaceLandmarks5.
export const expectedMtcnnFaceLandmarks = [
[new Point(117, 58), new Point(156, 63), new Point(141, 86), new Point(109, 98), new Point(147, 104)],
[new Point(82, 292), new Point(134, 304), new Point(104, 330), new Point(72, 342), new Point(120, 353)],
[new Point(261, 82), new Point(306, 83), new Point(282, 113), new Point(257, 124), new Point(306, 126)],
[new Point(277, 273), new Point(318, 273), new Point(295, 300), new Point(279, 311), new Point(316, 313)],
[new Point(607, 110), new Point(645, 115), new Point(626, 138), new Point(601, 144), new Point(639, 150)],
[new Point(489, 224), new Point(534, 223), new Point(507, 250), new Point(493, 271), new Point(530, 270)]
]
/**
 * Asserts that every mtcnn result holds a valid FaceDetection and
 * FaceLandmarks5, and that its box, landmark positions and score lie
 * within the given tolerances of the reference data.
 *
 * @param results The mtcnn results to verify.
 * @param boxOrder Maps result index i to the reference face index, since
 *   different forward params may yield the faces in a different order.
 * @param maxBoxDelta Max allowed deviation of box coordinates.
 * @param maxLandmarkPointsDelta Max allowed deviation of landmark points.
 */
export function expectMtcnnResults(
results: { faceDetection: faceapi.FaceDetection, faceLandmarks: faceapi.FaceLandmarks5 }[],
boxOrder: number[],
maxBoxDelta: number,
maxLandmarkPointsDelta: number
) {
  results.forEach(({ faceDetection, faceLandmarks }, i) => {
    const refIdx = boxOrder[i]
    expect(faceDetection instanceof faceapi.FaceDetection).toBe(true)
    expect(faceLandmarks instanceof faceapi.FaceLandmarks5).toBe(true)
    expectRectClose(faceDetection.getBox(), expectedMtcnnBoxes[refIdx], maxBoxDelta)
    const refPoints = expectedMtcnnFaceLandmarks[refIdx]
    faceLandmarks.getPositions().forEach((pt, j) => expectPointClose(pt, refPoints[j], maxLandmarkPointsDelta))
    expectMaxDelta(faceDetection.getScore(), 0.99, 0.01)
  })
}
\ No newline at end of file
import * as faceapi from '../../../src'; import * as faceapi from '../../../src';
import { describeWithNets, expectAllTensorsReleased, expectRectClose } from '../../utils'; import { describeWithNets, expectAllTensorsReleased, expectRectClose } from '../../utils';
import { expectedSsdBoxes } from './expectedResults';
const expectedBoxes = [
{ x: 48, y: 253, width: 104, height: 129 },
{ x: 260, y: 227, width: 76, height: 117 },
{ x: 466, y: 165, width: 88, height: 130 },
{ x: 234, y: 36, width: 84, height: 119 },
{ x: 577, y: 65, width: 84, height: 105 },
{ x: 84, y: 14, width: 79, height: 132 }
]
describe('faceDetectionNet', () => { describe('faceDetectionNet', () => {
...@@ -32,7 +24,7 @@ describe('faceDetectionNet', () => { ...@@ -32,7 +24,7 @@ describe('faceDetectionNet', () => {
expect(det.getImageWidth()).toEqual(imgEl.width) expect(det.getImageWidth()).toEqual(imgEl.width)
expect(det.getImageHeight()).toEqual(imgEl.height) expect(det.getImageHeight()).toEqual(imgEl.height)
expect(det.getScore()).toBeCloseTo(expectedScores[i], 2) expect(det.getScore()).toBeCloseTo(expectedScores[i], 2)
expectRectClose(det.getBox(), expectedBoxes[i], maxBoxDelta) expectRectClose(det.getBox(), expectedSsdBoxes[i], maxBoxDelta)
}) })
}) })
...@@ -44,7 +36,7 @@ describe('faceDetectionNet', () => { ...@@ -44,7 +36,7 @@ describe('faceDetectionNet', () => {
expect(det.getImageWidth()).toEqual(imgEl.width) expect(det.getImageWidth()).toEqual(imgEl.width)
expect(det.getImageHeight()).toEqual(imgEl.height) expect(det.getImageHeight()).toEqual(imgEl.height)
expect(det.getScore()).toBeCloseTo(expectedScores[i], 2) expect(det.getScore()).toBeCloseTo(expectedScores[i], 2)
expectRectClose(det.getBox(), expectedBoxes[i], maxBoxDelta) expectRectClose(det.getBox(), expectedSsdBoxes[i], maxBoxDelta)
}) })
}) })
...@@ -63,7 +55,7 @@ describe('faceDetectionNet', () => { ...@@ -63,7 +55,7 @@ describe('faceDetectionNet', () => {
expect(det.getImageWidth()).toEqual(imgEl.width) expect(det.getImageWidth()).toEqual(imgEl.width)
expect(det.getImageHeight()).toEqual(imgEl.height) expect(det.getImageHeight()).toEqual(imgEl.height)
expect(det.getScore()).toBeCloseTo(expectedScores[i], 2) expect(det.getScore()).toBeCloseTo(expectedScores[i], 2)
expectRectClose(det.getBox(), expectedBoxes[i], maxBoxDelta) expectRectClose(det.getBox(), expectedSsdBoxes[i], maxBoxDelta)
}) })
}) })
...@@ -75,7 +67,7 @@ describe('faceDetectionNet', () => { ...@@ -75,7 +67,7 @@ describe('faceDetectionNet', () => {
expect(det.getImageWidth()).toEqual(imgEl.width) expect(det.getImageWidth()).toEqual(imgEl.width)
expect(det.getImageHeight()).toEqual(imgEl.height) expect(det.getImageHeight()).toEqual(imgEl.height)
expect(det.getScore()).toBeCloseTo(expectedScores[i], 2) expect(det.getScore()).toBeCloseTo(expectedScores[i], 2)
expectRectClose(det.getBox(), expectedBoxes[i], maxBoxDelta) expectRectClose(det.getBox(), expectedSsdBoxes[i], maxBoxDelta)
}) })
}) })
......
...@@ -4,7 +4,7 @@ import * as faceapi from '../../../src'; ...@@ -4,7 +4,7 @@ import * as faceapi from '../../../src';
import { isTensor3D } from '../../../src/commons/isTensor'; import { isTensor3D } from '../../../src/commons/isTensor';
import { Point } from '../../../src/Point'; import { Point } from '../../../src/Point';
import { Dimensions, TMediaElement } from '../../../src/types'; import { Dimensions, TMediaElement } from '../../../src/types';
import { expectMaxDelta, expectAllTensorsReleased, tensor3D, describeWithNets } from '../../utils'; import { expectMaxDelta, expectAllTensorsReleased, describeWithNets } from '../../utils';
import { NetInput } from '../../../src/NetInput'; import { NetInput } from '../../../src/NetInput';
import { toNetInput } from '../../../src'; import { toNetInput } from '../../../src';
......
import * as faceapi from '../../../src'; import * as faceapi from '../../../src';
import { MtcnnResult } from '../../../src/mtcnn/types'; import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { Point } from '../../../src/Point'; import { expectMtcnnResults } from './expectedResults';
import { describeWithNets, expectAllTensorsReleased, expectPointClose, expectRectClose, expectMaxDelta } from '../../utils';
const expectedBoxes = [
{ x: 70, y: 21, width: 112, height: 112 },
{ x: 36, y: 250, width: 133, height: 132 },
{ x: 221, y: 43, width: 112, height: 111 },
{ x: 247, y: 231, width: 106, height: 107 },
{ x: 566, y: 67, width: 104, height: 104 },
{ x: 451, y: 176, width: 122, height: 122 }
]
const expectedFaceLandmarks = [
[new Point(117, 58), new Point(156, 63), new Point(141, 86), new Point(109, 98), new Point(147, 104)],
[new Point(82, 292), new Point(134, 304), new Point(104, 330), new Point(72, 342), new Point(120, 353)],
[new Point(261, 82), new Point(306, 83), new Point(282, 113), new Point(257, 124), new Point(306, 126)],
[new Point(277, 273), new Point(318, 273), new Point(295, 300), new Point(279, 311), new Point(316, 313)],
[new Point(607, 110), new Point(645, 115), new Point(626, 138), new Point(601, 144), new Point(639, 150)],
[new Point(489, 224), new Point(534, 223), new Point(507, 250), new Point(493, 271), new Point(530, 270)]
]
describe('mtcnn', () => { describe('mtcnn', () => {
...@@ -32,21 +14,6 @@ describe('mtcnn', () => { ...@@ -32,21 +14,6 @@ describe('mtcnn', () => {
describeWithNets('uncompressed weights', { withMtcnn: { quantized: false } }, ({ mtcnn }) => { describeWithNets('uncompressed weights', { withMtcnn: { quantized: false } }, ({ mtcnn }) => {
function expectResults(
results: MtcnnResult[],
boxOrder: number[],
maxBoxDelta: number,
maxLandmarkPointsDelta: number
) {
results.forEach((result, i) => {
const { faceDetection, faceLandmarks } = result
expect(faceDetection instanceof faceapi.FaceDetection).toBe(true)
expect(faceLandmarks instanceof faceapi.FaceLandmarks5).toBe(true)
expectRectClose(faceDetection.getBox(), expectedBoxes[boxOrder[i]], maxBoxDelta)
faceLandmarks.getPositions().forEach((pt, j) => expectPointClose(pt, expectedFaceLandmarks[boxOrder[i]][j], maxLandmarkPointsDelta))
expectMaxDelta(faceDetection.getScore(), 0.99, 0.01)
})
}
it('minFaceSize = 20, finds all faces', async () => { it('minFaceSize = 20, finds all faces', async () => {
const forwardParams = { const forwardParams = {
...@@ -55,7 +22,7 @@ describe('mtcnn', () => { ...@@ -55,7 +22,7 @@ describe('mtcnn', () => {
const results = await mtcnn.forward(imgEl, forwardParams) const results = await mtcnn.forward(imgEl, forwardParams)
expect(results.length).toEqual(6) expect(results.length).toEqual(6)
expectResults(results, [0, 1, 2, 3, 4, 5], 1, 1) expectMtcnnResults(results, [0, 1, 2, 3, 4, 5], 1, 1)
}) })
it('minFaceSize = 80, finds all faces', async () => { it('minFaceSize = 80, finds all faces', async () => {
...@@ -66,7 +33,7 @@ describe('mtcnn', () => { ...@@ -66,7 +33,7 @@ describe('mtcnn', () => {
const results = await mtcnn.forward(imgEl, forwardParams) const results = await mtcnn.forward(imgEl, forwardParams)
expect(results.length).toEqual(6) expect(results.length).toEqual(6)
expectResults(results, [0, 5, 3, 1, 2, 4], 12, 12) expectMtcnnResults(results, [0, 5, 3, 1, 2, 4], 12, 12)
}) })
it('all optional params passed, finds all faces', async () => { it('all optional params passed, finds all faces', async () => {
...@@ -79,7 +46,7 @@ describe('mtcnn', () => { ...@@ -79,7 +46,7 @@ describe('mtcnn', () => {
const results = await mtcnn.forward(imgEl, forwardParams) const results = await mtcnn.forward(imgEl, forwardParams)
expect(results.length).toEqual(6) expect(results.length).toEqual(6)
expectResults(results, [5, 1, 4, 3, 2, 0], 6, 10) expectMtcnnResults(results, [5, 1, 4, 3, 2, 0], 6, 10)
}) })
it('scale steps passed, finds all faces', async () => { it('scale steps passed, finds all faces', async () => {
...@@ -89,7 +56,7 @@ describe('mtcnn', () => { ...@@ -89,7 +56,7 @@ describe('mtcnn', () => {
const results = await mtcnn.forward(imgEl, forwardParams) const results = await mtcnn.forward(imgEl, forwardParams)
expect(results.length).toEqual(6) expect(results.length).toEqual(6)
expectResults(results, [5, 1, 3, 0, 2, 4], 7, 15) expectMtcnnResults(results, [5, 1, 3, 0, 2, 4], 7, 15)
}) })
}) })
......
...@@ -4,6 +4,8 @@ import { IRect } from '../build/Rect'; ...@@ -4,6 +4,8 @@ import { IRect } from '../build/Rect';
import * as faceapi from '../src/'; import * as faceapi from '../src/';
import { NeuralNetwork } from '../src/commons/NeuralNetwork'; import { NeuralNetwork } from '../src/commons/NeuralNetwork';
import { IPoint } from '../src/'; import { IPoint } from '../src/';
import { allFacesFactory, allFacesMtcnnFactory } from '../src/allFacesFactory';
import { allFacesMtcnnFunction, allFacesFunction } from '../src/globalApi';
export function zeros(length: number): Float32Array { export function zeros(length: number): Float32Array {
return new Float32Array(length) return new Float32Array(length)
...@@ -57,9 +59,14 @@ export type InjectNetArgs = { ...@@ -57,9 +59,14 @@ export type InjectNetArgs = {
faceLandmarkNet: faceapi.FaceLandmarkNet faceLandmarkNet: faceapi.FaceLandmarkNet
faceRecognitionNet: faceapi.FaceRecognitionNet faceRecognitionNet: faceapi.FaceRecognitionNet
mtcnn: faceapi.Mtcnn mtcnn: faceapi.Mtcnn
allFaces: allFacesFunction
allFacesMtcnn: allFacesMtcnnFunction
} }
export type DescribeWithNetsOptions = { export type DescribeWithNetsOptions = {
withAllFaces?: boolean
withAllFacesMtcnn?: boolean
withFaceDetectionNet?: WithNetOptions withFaceDetectionNet?: WithNetOptions
withFaceLandmarkNet?: WithNetOptions withFaceLandmarkNet?: WithNetOptions
withFaceRecognitionNet?: WithNetOptions withFaceRecognitionNet?: WithNetOptions
...@@ -92,37 +99,43 @@ export function describeWithNets( ...@@ -92,37 +99,43 @@ export function describeWithNets(
let faceLandmarkNet: faceapi.FaceLandmarkNet = new faceapi.FaceLandmarkNet() let faceLandmarkNet: faceapi.FaceLandmarkNet = new faceapi.FaceLandmarkNet()
let faceRecognitionNet: faceapi.FaceRecognitionNet = new faceapi.FaceRecognitionNet() let faceRecognitionNet: faceapi.FaceRecognitionNet = new faceapi.FaceRecognitionNet()
let mtcnn: faceapi.Mtcnn = new faceapi.Mtcnn() let mtcnn: faceapi.Mtcnn = new faceapi.Mtcnn()
let allFaces = allFacesFactory(faceDetectionNet, faceLandmarkNet, faceRecognitionNet)
let allFacesMtcnn = allFacesMtcnnFactory(mtcnn, faceRecognitionNet)
beforeAll(async () => { beforeAll(async () => {
const { const {
withFaceDetectionNet, withFaceDetectionNet,
withFaceLandmarkNet, withFaceLandmarkNet,
withFaceRecognitionNet, withFaceRecognitionNet,
withMtcnn withMtcnn,
withAllFaces,
withAllFacesMtcnn
} = options } = options
if (withFaceDetectionNet) { if (withFaceDetectionNet || withAllFaces) {
await initNet<faceapi.FaceDetectionNet>( await initNet<faceapi.FaceDetectionNet>(
faceDetectionNet, faceDetectionNet,
!withFaceDetectionNet.quantized && 'face_detection_model.weights' !!withFaceDetectionNet && !withFaceDetectionNet.quantized && 'face_detection_model.weights'
) )
} }
if (withFaceLandmarkNet) { if (withFaceLandmarkNet || withAllFaces) {
await initNet<faceapi.FaceLandmarkNet>( await initNet<faceapi.FaceLandmarkNet>(
faceLandmarkNet, faceLandmarkNet,
!withFaceLandmarkNet.quantized && 'face_landmark_68_model.weights' !!withFaceLandmarkNet && !withFaceLandmarkNet.quantized && 'face_landmark_68_model.weights'
) )
} }
if (withFaceRecognitionNet) {
if (withFaceRecognitionNet || withAllFaces || withAllFacesMtcnn) {
await initNet<faceapi.FaceRecognitionNet>( await initNet<faceapi.FaceRecognitionNet>(
faceRecognitionNet, faceRecognitionNet,
!withFaceRecognitionNet.quantized && 'face_recognition_model.weights' // TODO: figure out why quantized weights results in NaNs in testcases
'face_recognition_model.weights'
) )
} }
if (withMtcnn) { if (withMtcnn || withAllFacesMtcnn) {
await initNet<faceapi.Mtcnn>( await initNet<faceapi.Mtcnn>(
mtcnn, mtcnn,
!withMtcnn.quantized && 'mtcnn_model.weights' !!withMtcnn && !withMtcnn.quantized && 'mtcnn_model.weights'
) )
} }
}) })
...@@ -134,7 +147,7 @@ export function describeWithNets( ...@@ -134,7 +147,7 @@ export function describeWithNets(
mtcnn && mtcnn.dispose() mtcnn && mtcnn.dispose()
}) })
specDefinitions({ faceDetectionNet, faceLandmarkNet, faceRecognitionNet, mtcnn }) specDefinitions({ faceDetectionNet, faceLandmarkNet, faceRecognitionNet, mtcnn, allFaces, allFacesMtcnn })
}) })
} }
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment