Commit 96b41df9 by vincent

added describeWithNets test utility, which handles net creation and disposing of net params for the testbench

added describeWithNets test utility, which handles net creation and disposing of net params for the testbench
parent 9fef1428
import * as faceapi from '../../../src'; import * as faceapi from '../../../src';
import { FaceDetection } from '../../../src/faceDetectionNet/FaceDetection'; import { FaceDetection } from '../../../src/faceDetectionNet/FaceDetection';
import { IRect } from '../../../src/Rect'; import { IRect } from '../../../src/Rect';
import { expectAllTensorsReleased, expectMaxDelta } from '../../utils'; import { describeWithNets, expectAllTensorsReleased, expectMaxDelta } from '../../utils';
function expectRectClose( function expectRectClose(
result: IRect, result: IRect,
...@@ -33,19 +33,11 @@ describe('faceDetectionNet', () => { ...@@ -33,19 +33,11 @@ describe('faceDetectionNet', () => {
imgEl = await faceapi.bufferToImage(img) imgEl = await faceapi.bufferToImage(img)
}) })
describe('uncompressed weights', () => { describeWithNets('uncompressed weights', { withFaceDetectionNet: { quantized: false } }, ({ faceDetectionNet }) => {
let faceDetectionNet: faceapi.FaceDetectionNet
const expectedScores = [0.98, 0.89, 0.82, 0.75, 0.58, 0.55] const expectedScores = [0.98, 0.89, 0.82, 0.75, 0.58, 0.55]
const maxBoxDelta = 1 const maxBoxDelta = 1
beforeAll(async () => {
const res = await fetch('base/weights/uncompressed/face_detection_model.weights')
const weights = new Float32Array(await res.arrayBuffer())
faceDetectionNet = faceapi.faceDetectionNet(weights)
})
it('scores > 0.8', async () => { it('scores > 0.8', async () => {
const detections = await faceDetectionNet.locateFaces(imgEl) as FaceDetection[] const detections = await faceDetectionNet.locateFaces(imgEl) as FaceDetection[]
...@@ -72,18 +64,11 @@ describe('faceDetectionNet', () => { ...@@ -72,18 +64,11 @@ describe('faceDetectionNet', () => {
}) })
describe('quantized weights', () => { describeWithNets('quantized weights', { withFaceDetectionNet: { quantized: true } }, ({ faceDetectionNet }) => {
let faceDetectionNet: faceapi.FaceDetectionNet
const expectedScores = [0.97, 0.88, 0.83, 0.82, 0.59, 0.52] const expectedScores = [0.97, 0.88, 0.83, 0.82, 0.59, 0.52]
const maxBoxDelta = 5 const maxBoxDelta = 5
beforeAll(async () => {
faceDetectionNet = new faceapi.FaceDetectionNet()
await faceDetectionNet.load('base/weights')
})
it('scores > 0.8', async () => { it('scores > 0.8', async () => {
const detections = await faceDetectionNet.locateFaces(imgEl) as FaceDetection[] const detections = await faceDetectionNet.locateFaces(imgEl) as FaceDetection[]
......
...@@ -5,7 +5,7 @@ import { isTensor3D } from '../../../src/commons/isTensor'; ...@@ -5,7 +5,7 @@ import { isTensor3D } from '../../../src/commons/isTensor';
import { FaceLandmarks } from '../../../src/faceLandmarkNet/FaceLandmarks'; import { FaceLandmarks } from '../../../src/faceLandmarkNet/FaceLandmarks';
import { Point } from '../../../src/Point'; import { Point } from '../../../src/Point';
import { Dimensions, TMediaElement } from '../../../src/types'; import { Dimensions, TMediaElement } from '../../../src/types';
import { expectMaxDelta, expectAllTensorsReleased, tensor3D } from '../../utils'; import { expectMaxDelta, expectAllTensorsReleased, tensor3D, describeWithNets } from '../../utils';
import { NetInput } from '../../../src/NetInput'; import { NetInput } from '../../../src/NetInput';
import { toNetInput } from '../../../src'; import { toNetInput } from '../../../src';
...@@ -38,15 +38,7 @@ describe('faceLandmarkNet', () => { ...@@ -38,15 +38,7 @@ describe('faceLandmarkNet', () => {
faceLandmarkPositionsRect = await (await fetch('base/test/data/faceLandmarkPositionsRect.json')).json() faceLandmarkPositionsRect = await (await fetch('base/test/data/faceLandmarkPositionsRect.json')).json()
}) })
describe('uncompressed weights', () => { describeWithNets('uncompressed weights', { withFaceLandmarkNet: { quantized: false } }, ({ faceLandmarkNet }) => {
let faceLandmarkNet: faceapi.FaceLandmarkNet
beforeAll(async () => {
const res = await fetch('base/weights/uncompressed/face_landmark_68_model.weights')
const weights = new Float32Array(await res.arrayBuffer())
faceLandmarkNet = faceapi.faceLandmarkNet(weights)
})
it('computes face landmarks for squared input', async () => { it('computes face landmarks for squared input', async () => {
const { width, height } = imgEl1 const { width, height } = imgEl1
...@@ -78,14 +70,7 @@ describe('faceLandmarkNet', () => { ...@@ -78,14 +70,7 @@ describe('faceLandmarkNet', () => {
}) })
describe('quantized weights', () => { describeWithNets('quantized weights', { withFaceLandmarkNet: { quantized: true } }, ({ faceLandmarkNet }) => {
let faceLandmarkNet: faceapi.FaceLandmarkNet
beforeAll(async () => {
faceLandmarkNet = new faceapi.FaceLandmarkNet()
await faceLandmarkNet.load('base/weights')
})
it('computes face landmarks for squared input', async () => { it('computes face landmarks for squared input', async () => {
const { width, height } = imgEl1 const { width, height } = imgEl1
...@@ -117,15 +102,7 @@ describe('faceLandmarkNet', () => { ...@@ -117,15 +102,7 @@ describe('faceLandmarkNet', () => {
}) })
describe('batch inputs', () => { describeWithNets('batch inputs', { withFaceLandmarkNet: { quantized: false } }, ({ faceLandmarkNet }) => {
let faceLandmarkNet: faceapi.FaceLandmarkNet
beforeAll(async () => {
const res = await fetch('base/weights/uncompressed/face_landmark_68_model.weights')
const weights = new Float32Array(await res.arrayBuffer())
faceLandmarkNet = faceapi.faceLandmarkNet(weights)
})
it('computes face landmarks for batch of image elements', async () => { it('computes face landmarks for batch of image elements', async () => {
const inputs = [imgEl1, imgEl2, imgElRect] const inputs = [imgEl1, imgEl2, imgElRect]
...@@ -229,14 +206,7 @@ describe('faceLandmarkNet', () => { ...@@ -229,14 +206,7 @@ describe('faceLandmarkNet', () => {
}) })
describe('no memory leaks', () => { describeWithNets('no memory leaks', { withFaceLandmarkNet: { quantized: true } }, ({ faceLandmarkNet }) => {
let faceLandmarkNet: faceapi.FaceLandmarkNet
beforeAll(async () => {
faceLandmarkNet = new faceapi.FaceLandmarkNet()
await faceLandmarkNet.load('base/weights')
})
describe('NeuralNetwork, uncompressed model', () => { describe('NeuralNetwork, uncompressed model', () => {
......
...@@ -2,7 +2,7 @@ import * as tf from '@tensorflow/tfjs-core'; ...@@ -2,7 +2,7 @@ import * as tf from '@tensorflow/tfjs-core';
import * as faceapi from '../../../src'; import * as faceapi from '../../../src';
import { NetInput } from '../../../src/NetInput'; import { NetInput } from '../../../src/NetInput';
import { expectAllTensorsReleased } from '../../utils'; import { expectAllTensorsReleased, describeWithNets } from '../../utils';
import { toNetInput } from '../../../src'; import { toNetInput } from '../../../src';
describe('faceRecognitionNet', () => { describe('faceRecognitionNet', () => {
...@@ -26,15 +26,7 @@ describe('faceRecognitionNet', () => { ...@@ -26,15 +26,7 @@ describe('faceRecognitionNet', () => {
faceDescriptorRect = await (await fetch('base/test/data/faceDescriptorRect.json')).json() faceDescriptorRect = await (await fetch('base/test/data/faceDescriptorRect.json')).json()
}) })
describe('uncompressed weights', () => { describeWithNets('uncompressed weights', { withFaceRecognitionNet: { quantized: false } }, ({ faceRecognitionNet }) => {
let faceRecognitionNet: faceapi.FaceRecognitionNet
beforeAll(async () => {
const res = await fetch('base/weights/uncompressed/face_recognition_model.weights')
const weights = new Float32Array(await res.arrayBuffer())
faceRecognitionNet = faceapi.faceRecognitionNet(weights)
})
it('computes face descriptor for squared input', async () => { it('computes face descriptor for squared input', async () => {
const result = await faceRecognitionNet.computeFaceDescriptor(imgEl1) as Float32Array const result = await faceRecognitionNet.computeFaceDescriptor(imgEl1) as Float32Array
...@@ -52,14 +44,7 @@ describe('faceRecognitionNet', () => { ...@@ -52,14 +44,7 @@ describe('faceRecognitionNet', () => {
// TODO: figure out why descriptors return NaN in the test cases // TODO: figure out why descriptors return NaN in the test cases
/* /*
describe('quantized weights', () => { describeWithNets('quantized weights', { withFaceRecognitionNet: { quantized: true } }, ({ faceRecognitionNet }) => {
let faceRecognitionNet: faceapi.FaceRecognitionNet
beforeAll(async () => {
faceRecognitionNet = new faceapi.FaceRecognitionNet()
await faceRecognitionNet.load('base/weights')
})
it('computes face descriptor for squared input', async () => { it('computes face descriptor for squared input', async () => {
const result = await faceRecognitionNet.computeFaceDescriptor(imgEl1) as Float32Array const result = await faceRecognitionNet.computeFaceDescriptor(imgEl1) as Float32Array
...@@ -76,15 +61,7 @@ describe('faceRecognitionNet', () => { ...@@ -76,15 +61,7 @@ describe('faceRecognitionNet', () => {
}) })
*/ */
describe('batch inputs', () => { describeWithNets('batch inputs', { withFaceRecognitionNet: { quantized: false } }, ({ faceRecognitionNet }) => {
let faceRecognitionNet: faceapi.FaceRecognitionNet
beforeAll(async () => {
const res = await fetch('base/weights/uncompressed/face_recognition_model.weights')
const weights = new Float32Array(await res.arrayBuffer())
faceRecognitionNet = faceapi.faceRecognitionNet(weights)
})
it('computes face descriptors for batch of image elements', async () => { it('computes face descriptors for batch of image elements', async () => {
const inputs = [imgEl1, imgEl2, imgElRect] const inputs = [imgEl1, imgEl2, imgElRect]
...@@ -156,19 +133,7 @@ describe('faceRecognitionNet', () => { ...@@ -156,19 +133,7 @@ describe('faceRecognitionNet', () => {
}) })
describe('no memory leaks', () => { describeWithNets('no memory leaks', { withFaceRecognitionNet: { quantized: false } }, ({ faceRecognitionNet }) => {
let faceRecognitionNet: faceapi.FaceRecognitionNet
beforeAll(async () => {
const res = await fetch('base/weights/uncompressed/face_recognition_model.weights')
const weights = new Float32Array(await res.arrayBuffer())
faceRecognitionNet = faceapi.faceRecognitionNet(weights)
})
afterAll(async () => {
faceRecognitionNet.dispose()
})
describe('NeuralNetwork, uncompressed model', () => { describe('NeuralNetwork, uncompressed model', () => {
......
import * as tf from '@tensorflow/tfjs-core'; import * as tf from '@tensorflow/tfjs-core';
import * as faceapi from '../src/';
import { NeuralNetwork } from '../src/commons/NeuralNetwork';
export function zeros(length: number): Float32Array { export function zeros(length: number): Float32Array {
return new Float32Array(length) return new Float32Array(length)
} }
...@@ -20,4 +23,85 @@ export async function expectAllTensorsReleased(fn: () => any) { ...@@ -20,4 +23,85 @@ export async function expectAllTensorsReleased(fn: () => any) {
export function tensor3D() { export function tensor3D() {
return tf.tensor3d([[[0]]]) return tf.tensor3d([[[0]]])
} }
\ No newline at end of file
// Per-net options accepted by describeWithNets: when `quantized` is true the
// net is loaded from the quantized weight manifest, otherwise from the
// uncompressed .weights file.
export type WithNetOptions = {
  quantized?: boolean
}
// The set of net instances injected into a describeWithNets spec definition.
// All three nets are always present; only the ones requested via
// DescribeWithNetsOptions are actually loaded with weights.
export type InjectNetArgs = {
  faceDetectionNet: faceapi.FaceDetectionNet
  faceLandmarkNet: faceapi.FaceLandmarkNet
  faceRecognitionNet: faceapi.FaceRecognitionNet
}
// Selects which nets describeWithNets should initialize before the specs run,
// and with which weight variant (see WithNetOptions).
export type DescribeWithNetsOptions = {
  withFaceDetectionNet?: WithNetOptions
  withFaceLandmarkNet?: WithNetOptions
  withFaceRecognitionNet?: WithNetOptions
}
// Fetches a binary weights file from the karma server and returns its raw
// contents reinterpreted as a Float32Array.
async function loadNetWeights(uri: string): Promise<Float32Array> {
  const res = await fetch(uri)
  const buf = await res.arrayBuffer()
  return new Float32Array(buf)
}
// Loads weights into an already constructed net. A truthy string filename
// means "load the uncompressed weights file of that name"; a falsy value
// means "load the (quantized) manifest from base/weights".
// NOTE(review): the `string | boolean` type admits `true`, which would build
// the bogus URI `base/weights/uncompressed/true` — callers only ever pass a
// string or `false`; consider tightening to `string | false`.
async function initNet<TNet extends NeuralNetwork<any>>(
  net: TNet,
  uncompressedFilename: string | boolean
) {
  const weightsOrUri = uncompressedFilename
    ? await loadNetWeights(`base/weights/uncompressed/${uncompressedFilename}`)
    : 'base/weights'
  await net.load(weightsOrUri)
}
// Wraps jasmine's `describe`, constructing the face-api nets up front,
// loading weights for the requested ones in `beforeAll`, and disposing all
// nets in `afterAll` so spec files don't repeat that boilerplate.
//
// The net instances are created eagerly and passed to `specDefinitions`
// immediately; `beforeAll` later mutates those same instances by loading
// their weights, so the references the specs captured stay valid.
export function describeWithNets(
  description: string,
  options: DescribeWithNetsOptions,
  specDefinitions: (nets: InjectNetArgs) => void
) {
  describe(description, () => {
    const nets: InjectNetArgs = {
      faceDetectionNet: new faceapi.FaceDetectionNet(),
      faceLandmarkNet: new faceapi.FaceLandmarkNet(),
      faceRecognitionNet: new faceapi.FaceRecognitionNet()
    }

    beforeAll(async () => {
      const {
        withFaceDetectionNet,
        withFaceLandmarkNet,
        withFaceRecognitionNet
      } = options

      // For each requested net: quantized -> load manifest from 'base/weights',
      // otherwise fetch the named uncompressed .weights file.
      if (withFaceDetectionNet) {
        await initNet<faceapi.FaceDetectionNet>(
          nets.faceDetectionNet,
          !withFaceDetectionNet.quantized && 'face_detection_model.weights'
        )
      }
      if (withFaceLandmarkNet) {
        await initNet<faceapi.FaceLandmarkNet>(
          nets.faceLandmarkNet,
          !withFaceLandmarkNet.quantized && 'face_landmark_68_model.weights'
        )
      }
      if (withFaceRecognitionNet) {
        await initNet<faceapi.FaceRecognitionNet>(
          nets.faceRecognitionNet,
          !withFaceRecognitionNet.quantized && 'face_recognition_model.weights'
        )
      }
    })

    afterAll(() => {
      // Release tensor memory for every net, whether or not it was loaded.
      nets.faceDetectionNet && nets.faceDetectionNet.dispose()
      nets.faceLandmarkNet && nets.faceLandmarkNet.dispose()
      nets.faceRecognitionNet && nets.faceRecognitionNet.dispose()
    })

    specDefinitions(nets)
  })
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment