Commit 397c05ae by vincent

Finished restructuring the test cases and added npm scripts for running specific test suites.

parent 3c3c83d0
......@@ -14,6 +14,20 @@ const dataFiles = [
nocache: false
}))
// All test suite directories under test/tests. When the UUT (unit under test)
// env var names one of them, every other suite is excluded so karma only runs
// the selected suite; when UUT is unset, nothing is excluded and all suites run.
const testSuites = [
  'dom',
  'faceLandmarkNet',
  'faceRecognitionNet',
  'ssdMobilenetv1',
  'tinyFaceDetector',
  'mtcnn',
  'tinyYolov2'
]

const exclude = !process.env.UUT
  ? []
  : testSuites
      .filter(suite => suite !== process.env.UUT)
      .map(suite => `test/tests/${suite}/*.ts`)
module.exports = function(config) {
config.set({
frameworks: ['jasmine', 'karma-typescript'],
......@@ -21,6 +35,7 @@ module.exports = function(config) {
'src/**/*.ts',
'test/**/*.ts'
].concat(dataFiles),
exclude,
preprocessors: {
'**/*.ts': ['karma-typescript']
},
......
......@@ -11,7 +11,13 @@
"tsc": "tsc",
"tsc-es6": "tsc --p tsconfig.es6.json",
"build": "npm run rollup && npm run rollup-min && npm run tsc && npm run tsc-es6",
"test": "karma start"
"test": "karma start",
"test-facelandmarknets": "set UUT=faceLandmarkNet&& karma start",
"test-facerecognitionnet": "set UUT=faceRecognitionNet&& karma start",
"test-ssdmobilenetv1": "set UUT=ssdMobilenetv1&& karma start",
"test-tinyfacedetector": "set UUT=tinyFaceDetector&& karma start",
"test-mtcnn": "set UUT=mtcnn&& karma start",
"test-tinyyolov2": "set UUT=tinyYolov2&& karma start"
},
"keywords": [
"face",
......
import { IRect } from '../src';
import { FaceDetection } from '../src/classes/FaceDetection';
import { expectRectClose, sortBoxes, sortFaceDetections } from './utils';
import { expectRectClose, sortFaceDetections } from './utils';
export function expectDetectionResults(
export function expectFaceDetections(
results: FaceDetection[],
allExpectedFaceDetections: IRect[],
expectedScores: number[],
......
......@@ -32,38 +32,6 @@ describe('faceLandmark68Net', () => {
faceLandmarkPositionsRect = await fetchJson<Point[]>('base/test/data/faceLandmarkPositionsRect.json')
})
describeWithNets('uncompressed weights', { withFaceLandmark68Net: { quantized: false } }, ({ faceLandmark68Net }) => {
it('computes face landmarks for squared input', async () => {
const { width, height } = imgEl1
const result = await faceLandmark68Net.detectLandmarks(imgEl1) as FaceLandmarks68
expect(result.imageWidth).toEqual(width)
expect(result.imageHeight).toEqual(height)
expect(result.shift.x).toEqual(0)
expect(result.shift.y).toEqual(0)
result.positions.forEach((pt, i) => {
const { x, y } = faceLandmarkPositions1[i]
expectPointClose(pt, { x, y }, 1)
})
})
it('computes face landmarks for rectangular input', async () => {
const { width, height } = imgElRect
const result = await faceLandmark68Net.detectLandmarks(imgElRect) as FaceLandmarks68
expect(result.imageWidth).toEqual(width)
expect(result.imageHeight).toEqual(height)
expect(result.shift.x).toEqual(0)
expect(result.shift.y).toEqual(0)
result.positions.forEach((pt, i) => {
const { x, y } = faceLandmarkPositionsRect[i]
expectPointClose(pt, { x, y }, 2)
})
})
})
describeWithNets('quantized weights', { withFaceLandmark68Net: { quantized: true } }, ({ faceLandmark68Net }) => {
it('computes face landmarks for squared input', async () => {
......
import { fetchImage, fetchJson, Point } from '../../../src';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { describeWithNets, expectPointClose } from '../../utils';
describe('faceLandmark68Net, uncompressed', () => {

  let imgEl1: HTMLImageElement
  let imgElRect: HTMLImageElement
  let faceLandmarkPositions1: Point[]
  let faceLandmarkPositionsRect: Point[]

  // Shared assertions: the result must cover the full input image (no shift)
  // and every predicted landmark must lie within maxDelta px of its reference.
  function assertLandmarkResult(
    result: FaceLandmarks68,
    img: HTMLImageElement,
    expectedPositions: Point[],
    maxDelta: number
  ) {
    expect(result.imageWidth).toEqual(img.width)
    expect(result.imageHeight).toEqual(img.height)
    expect(result.shift.x).toEqual(0)
    expect(result.shift.y).toEqual(0)
    result.positions.forEach((pt, i) => {
      const { x, y } = expectedPositions[i]
      expectPointClose(pt, { x, y }, maxDelta)
    })
  }

  beforeAll(async () => {
    imgEl1 = await fetchImage('base/test/images/face1.png')
    imgElRect = await fetchImage('base/test/images/face_rectangular.png')
    faceLandmarkPositions1 = await fetchJson<Point[]>('base/test/data/faceLandmarkPositions1.json')
    faceLandmarkPositionsRect = await fetchJson<Point[]>('base/test/data/faceLandmarkPositionsRect.json')
  })

  describeWithNets('uncompressed weights', { withFaceLandmark68Net: { quantized: false } }, ({ faceLandmark68Net }) => {

    it('computes face landmarks for squared input', async () => {
      const result = await faceLandmark68Net.detectLandmarks(imgEl1) as FaceLandmarks68
      assertLandmarkResult(result, imgEl1, faceLandmarkPositions1, 1)
    })

    it('computes face landmarks for rectangular input', async () => {
      const result = await faceLandmark68Net.detectLandmarks(imgElRect) as FaceLandmarks68
      assertLandmarkResult(result, imgElRect, faceLandmarkPositionsRect, 2)
    })
  })
})
......@@ -32,38 +32,6 @@ describe('faceLandmark68TinyNet', () => {
faceLandmarkPositionsRect = await fetchJson<Point[]>('base/test/data/faceLandmarkPositionsRectTiny.json')
})
describeWithNets('uncompressed weights', { withFaceLandmark68TinyNet: { quantized: false } }, ({ faceLandmark68TinyNet }) => {
it('computes face landmarks for squared input', async () => {
const { width, height } = imgEl1
const result = await faceLandmark68TinyNet.detectLandmarks(imgEl1) as FaceLandmarks68
expect(result.imageWidth).toEqual(width)
expect(result.imageHeight).toEqual(height)
expect(result.shift.x).toEqual(0)
expect(result.shift.y).toEqual(0)
result.positions.forEach((pt, i) => {
const { x, y } = faceLandmarkPositions1[i]
expectPointClose(pt, { x, y }, 5)
})
})
it('computes face landmarks for rectangular input', async () => {
const { width, height } = imgElRect
const result = await faceLandmark68TinyNet.detectLandmarks(imgElRect) as FaceLandmarks68
expect(result.imageWidth).toEqual(width)
expect(result.imageHeight).toEqual(height)
expect(result.shift.x).toEqual(0)
expect(result.shift.y).toEqual(0)
result.positions.forEach((pt, i) => {
const { x, y } = faceLandmarkPositionsRect[i]
expectPointClose(pt, { x, y }, 5)
})
})
})
describeWithNets('quantized weights', { withFaceLandmark68TinyNet: { quantized: true } }, ({ faceLandmark68TinyNet }) => {
it('computes face landmarks for squared input', async () => {
......@@ -96,7 +64,7 @@ describe('faceLandmark68TinyNet', () => {
})
describeWithNets('batch inputs', { withFaceLandmark68TinyNet: { quantized: false } }, ({ faceLandmark68TinyNet }) => {
describeWithNets('batch inputs', { withFaceLandmark68TinyNet: { quantized: true } }, ({ faceLandmark68TinyNet }) => {
it('computes face landmarks for batch of image elements', async () => {
const inputs = [imgEl1, imgEl2, imgElRect]
......
import { fetchImage, fetchJson, Point } from '../../../src';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { describeWithNets, expectPointClose } from '../../utils';
describe('faceLandmark68TinyNet, uncompressed', () => {

  let imgEl1: HTMLImageElement
  let imgElRect: HTMLImageElement
  let faceLandmarkPositions1: Point[]
  let faceLandmarkPositionsRect: Point[]

  // Shared assertions: the result must cover the full input image (no shift)
  // and every predicted landmark must lie within maxDelta px of its reference.
  function assertLandmarkResult(
    result: FaceLandmarks68,
    img: HTMLImageElement,
    expectedPositions: Point[],
    maxDelta: number
  ) {
    expect(result.imageWidth).toEqual(img.width)
    expect(result.imageHeight).toEqual(img.height)
    expect(result.shift.x).toEqual(0)
    expect(result.shift.y).toEqual(0)
    result.positions.forEach((pt, i) => {
      const { x, y } = expectedPositions[i]
      expectPointClose(pt, { x, y }, maxDelta)
    })
  }

  beforeAll(async () => {
    imgEl1 = await fetchImage('base/test/images/face1.png')
    imgElRect = await fetchImage('base/test/images/face_rectangular.png')
    faceLandmarkPositions1 = await fetchJson<Point[]>('base/test/data/faceLandmarkPositions1Tiny.json')
    faceLandmarkPositionsRect = await fetchJson<Point[]>('base/test/data/faceLandmarkPositionsRectTiny.json')
  })

  describeWithNets('uncompressed weights', { withFaceLandmark68TinyNet: { quantized: false } }, ({ faceLandmark68TinyNet }) => {

    it('computes face landmarks for squared input', async () => {
      const result = await faceLandmark68TinyNet.detectLandmarks(imgEl1) as FaceLandmarks68
      assertLandmarkResult(result, imgEl1, faceLandmarkPositions1, 5)
    })

    it('computes face landmarks for rectangular input', async () => {
      const result = await faceLandmark68TinyNet.detectLandmarks(imgElRect) as FaceLandmarks68
      assertLandmarkResult(result, imgElRect, faceLandmarkPositionsRect, 5)
    })
  })
})
......@@ -22,8 +22,7 @@ describe('faceRecognitionNet', () => {
faceDescriptor2 = await fetchJson<number[]>('base/test/data/faceDescriptor2.json')
faceDescriptorRect = await fetchJson<number[]>('base/test/data/faceDescriptorRect.json')
})
describeWithNets('uncompressed weights', { withFaceRecognitionNet: { quantized: false } }, ({ faceRecognitionNet }) => {
describeWithNets('quantized weights', { withFaceRecognitionNet: { quantized: true } }, ({ faceRecognitionNet }) => {
it('computes face descriptor for squared input', async () => {
const result = await faceRecognitionNet.computeFaceDescriptor(imgEl1) as Float32Array
......@@ -39,26 +38,8 @@ describe('faceRecognitionNet', () => {
})
// TODO: figure out why descriptors return NaN in the test cases
/*
describeWithNets('quantized weights', { withFaceRecognitionNet: { quantized: true } }, ({ faceRecognitionNet }) => {
it('computes face descriptor for squared input', async () => {
const result = await faceRecognitionNet.computeFaceDescriptor(imgEl1) as Float32Array
expect(result.length).toEqual(128)
expect(result).toEqual(new Float32Array(faceDescriptor1))
})
it('computes face descriptor for rectangular input', async () => {
const result = await faceRecognitionNet.computeFaceDescriptor(imgElRect) as Float32Array
expect(result.length).toEqual(128)
expect(result).toEqual(new Float32Array(faceDescriptorRect))
})
})
*/
describeWithNets('batch inputs', { withFaceRecognitionNet: { quantized: false } }, ({ faceRecognitionNet }) => {
describeWithNets('batch inputs', { withFaceRecognitionNet: { quantized: true } }, ({ faceRecognitionNet }) => {
it('computes face descriptors for batch of image elements', async () => {
const inputs = [imgEl1, imgEl2, imgElRect]
......@@ -113,7 +94,7 @@ describe('faceRecognitionNet', () => {
})
describeWithNets('no memory leaks', { withFaceRecognitionNet: { quantized: false } }, ({ faceRecognitionNet }) => {
describeWithNets('no memory leaks', { withFaceRecognitionNet: { quantized: true } }, ({ faceRecognitionNet }) => {
describe('NeuralNetwork, uncompressed model', () => {
......
import { fetchImage, fetchJson } from '../../../src';
import { euclideanDistance } from '../../../src/euclideanDistance';
import { describeWithNets } from '../../utils';
// TODO: figure out why quantized weights result in NaNs in test cases
// (apparently net weight values differ when loading with karma)
xdescribe('faceRecognitionNet, uncompressed', () => {
let imgEl1: HTMLImageElement
let imgElRect: HTMLImageElement
let faceDescriptor1: number[]
let faceDescriptorRect: number[]
beforeAll(async () => {
imgEl1 = await fetchImage('base/test/images/face1.png')
imgElRect = await fetchImage('base/test/images/face_rectangular.png')
faceDescriptor1 = await fetchJson<number[]>('base/test/data/faceDescriptor1.json')
faceDescriptorRect = await fetchJson<number[]>('base/test/data/faceDescriptorRect.json')
})
describeWithNets('uncompressed weights', { withFaceRecognitionNet: { quantized: false } }, ({ faceRecognitionNet }) => {
it('computes face descriptor for squared input', async () => {
const result = await faceRecognitionNet.computeFaceDescriptor(imgEl1) as Float32Array
expect(result.length).toEqual(128)
expect(euclideanDistance(result, faceDescriptor1)).toBeLessThan(0.1)
})
it('computes face descriptor for rectangular input', async () => {
const result = await faceRecognitionNet.computeFaceDescriptor(imgElRect) as Float32Array
expect(result.length).toEqual(128)
expect(euclideanDistance(result, faceDescriptorRect)).toBeLessThan(0.1)
})
})
})
\ No newline at end of file
......@@ -16,13 +16,13 @@ export const expectedMtcnnBoxes: IRect[] = sortBoxes([
export function expectMtcnnResults(
results: FaceDetectionWithLandmarks<FaceLandmarks5>[],
expectedMtcnnFaceLandmarks: IPoint[][],
expectedScores: number[],
deltas: BoxAndLandmarksDeltas
) {
const expectedMtcnnFaceLandmarksSorted = sortByDistanceToOrigin(expectedMtcnnFaceLandmarks, obj => obj[0])
const expectedResults = expectedMtcnnBoxes
.map((detection, i) => ({ detection, landmarks: expectedMtcnnFaceLandmarksSorted[i] }))
const expectedScores = results.map(_ => 1.0)
return expectFaceDetectionsWithLandmarks<FaceLandmarks5>(results, expectedResults, expectedScores, deltas)
}
\ No newline at end of file
import * as faceapi from '../../../src';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { expectMtcnnResults } from './expectMtcnnResults';
import { IPoint, fetchImage, fetchJson } from '../../../src';
describe('mtcnn.forward', () => {

  let imgEl: HTMLImageElement
  let expectedMtcnnLandmarks: IPoint[][]

  beforeAll(async () => {
    imgEl = await fetchImage('base/test/images/faces.jpg')
    expectedMtcnnLandmarks = await fetchJson<IPoint[][]>('base/test/data/mtcnnFaceLandmarkPositions.json')
  })

  describeWithNets('uncompressed weights', { withMtcnn: { quantized: false } }, ({ mtcnn }) => {

    it('minFaceSize = 20, finds all faces', async () => {
      const detections = await mtcnn.forward(imgEl, { minFaceSize: 20 })
      expect(detections.length).toEqual(6)
      expectMtcnnResults(detections, expectedMtcnnLandmarks, [1.0, 1.0, 1.0, 1.0, 0.99, 0.99], { maxBoxDelta: 2, maxLandmarksDelta: 5 })
    })

    it('minFaceSize = 80, finds all faces', async () => {
      // Larger minFaceSize coarsens the pyramid, hence the looser tolerances.
      const detections = await mtcnn.forward(imgEl, { minFaceSize: 80 })
      expect(detections.length).toEqual(6)
      expectMtcnnResults(detections, expectedMtcnnLandmarks, [1.0, 1.0, 1.0, 1.0, 1.0, 0.99], { maxBoxDelta: 15, maxLandmarksDelta: 13 })
    })

    it('all optional params passed, finds all faces', async () => {
      const detections = await mtcnn.forward(imgEl, {
        maxNumScales: 10,
        scaleFactor: 0.8,
        scoreThresholds: [0.8, 0.8, 0.9],
        minFaceSize: 20
      })
      expect(detections.length).toEqual(6)
      expectMtcnnResults(detections, expectedMtcnnLandmarks, [1.0, 1.0, 1.0, 0.99, 1.0, 1.0], { maxBoxDelta: 8, maxLandmarksDelta: 7 })
    })

    it('scale steps passed, finds all faces', async () => {
      const detections = await mtcnn.forward(imgEl, { scaleSteps: [0.6, 0.4, 0.2, 0.15, 0.1, 0.08, 0.02] })
      expect(detections.length).toEqual(6)
      expectMtcnnResults(detections, expectedMtcnnLandmarks, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], { maxBoxDelta: 8, maxLandmarksDelta: 10 })
    })
  })

  describe('no memory leaks', () => {

    describe('NeuralNetwork, uncompressed model', () => {
      it('disposes all param tensors', async () => {
        await expectAllTensorsReleased(async () => {
          const response = await fetch('base/weights_uncompressed/mtcnn_model.weights')
          const weightData = new Float32Array(await response.arrayBuffer())
          const net = faceapi.createMtcnn(weightData)
          net.dispose()
        })
      })
    })

    describe('NeuralNetwork, quantized model', () => {
      it('disposes all param tensors', async () => {
        await expectAllTensorsReleased(async () => {
          const net = new faceapi.Mtcnn()
          await net.load('base/weights')
          net.dispose()
        })
      })
    })
  })
})
\ No newline at end of file
import * as faceapi from '../../../src';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { expectMtcnnResults } from './expectMtcnnResults';
import { IPoint, fetchImage, fetchJson } from '../../../src';
import { describeWithNets, expectAllTensorsReleased, assembleExpectedFullFaceDescriptions, ExpectedFullFaceDescription } from '../../utils';
import { expectedMtcnnBoxes } from './expectMtcnnResults';
import { fetchImage } from '../../../src';
import { MtcnnOptions } from '../../../src/mtcnn/MtcnnOptions';
import { expectFaceDetections } from '../../expectFaceDetections';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
describe('mtcnn', () => {
let imgEl: HTMLImageElement
let expectedMtcnnLandmarks: IPoint[][]
let expectedFullFaceDescriptions: ExpectedFullFaceDescription[]
const expectedScores = [1.0, 1.0, 1.0, 1.0, 0.99, 0.99]
beforeAll(async () => {
imgEl = await fetchImage('base/test/images/faces.jpg')
expectedMtcnnLandmarks = await fetchJson<IPoint[][]>('base/test/data/mtcnnFaceLandmarkPositions.json')
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedMtcnnBoxes)
})
describeWithNets('uncompressed weights', { withMtcnn: { quantized: false } }, ({ mtcnn }) => {
describeWithNets('detectAllFaces', { withAllFacesMtcnn: true }, () => {
it('minFaceSize = 20, finds all faces', async () => {
const forwardParams = {
it('detectAllFaces', async () => {
const options = new MtcnnOptions({
minFaceSize: 20
}
const results = await mtcnn.forward(imgEl, forwardParams)
expect(results.length).toEqual(6)
const deltas = {
maxBoxDelta: 2,
maxLandmarksDelta: 5
}
expectMtcnnResults(results, expectedMtcnnLandmarks, deltas)
})
it('minFaceSize = 80, finds all faces', async () => {
const forwardParams = {
minFaceSize: 80
}
})
const results = await mtcnn.forward(imgEl, forwardParams)
const results = await faceapi.detectAllFaces(imgEl, options)
const maxBoxDelta = 2
expect(results.length).toEqual(6)
const deltas = {
maxBoxDelta: 15,
maxLandmarksDelta: 13
}
expectMtcnnResults(results, expectedMtcnnLandmarks, deltas)
expectFaceDetections(results, expectedMtcnnBoxes, expectedScores, maxBoxDelta)
})
it('all optional params passed, finds all faces', async () => {
const forwardParams = {
maxNumScales: 10,
scaleFactor: 0.8,
scoreThresholds: [0.8, 0.8, 0.9],
it('detectAllFaces.withFaceLandmarks().withFaceDescriptors()', async () => {
const options = new MtcnnOptions({
minFaceSize: 20
}
})
const results = await mtcnn.forward(imgEl, forwardParams)
expect(results.length).toEqual(6)
const results = await faceapi
.detectAllFaces(imgEl, options)
.withFaceLandmarks()
const deltas = {
maxBoxDelta: 8,
maxLandmarksDelta: 7
maxBoxDelta: 2,
maxLandmarksDelta: 6
}
expectMtcnnResults(results, expectedMtcnnLandmarks, deltas)
expect(results.length).toEqual(6)
expectFaceDetectionsWithLandmarks(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
it('scale steps passed, finds all faces', async () => {
const forwardParams = {
scaleSteps: [0.6, 0.4, 0.2, 0.15, 0.1, 0.08, 0.02]
}
it('detectAllFaces.withFaceLandmarks().withFaceDescriptors()', async () => {
const options = new MtcnnOptions({
minFaceSize: 20
})
const results = await mtcnn.forward(imgEl, forwardParams)
expect(results.length).toEqual(6)
const results = await faceapi
.detectAllFaces(imgEl, options)
.withFaceLandmarks()
.withFaceDescriptors()
const deltas = {
maxBoxDelta: 8,
maxLandmarksDelta: 10
maxBoxDelta: 2,
maxLandmarksDelta: 6,
maxDescriptorDelta: 0.4
}
expectMtcnnResults(results, expectedMtcnnLandmarks, deltas)
})
})
describe('no memory leaks', () => {
describe('NeuralNetwork, uncompressed model', () => {
it('disposes all param tensors', async () => {
await expectAllTensorsReleased(async () => {
const res = await fetch('base/weights_uncompressed/mtcnn_model.weights')
const weights = new Float32Array(await res.arrayBuffer())
const net = faceapi.createMtcnn(weights)
net.dispose()
})
})
expect(results.length).toEqual(6)
expectFullFaceDescriptions(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
describe('NeuralNetwork, quantized model', () => {
it('disposes all param tensors', async () => {
await expectAllTensorsReleased(async () => {
const net = new faceapi.Mtcnn()
await net.load('base/weights')
net.dispose()
})
it('no memory leaks', async () => {
await expectAllTensorsReleased(async () => {
await faceapi
.detectAllFaces(imgEl, new MtcnnOptions({ minFaceSize: 200 }))
.withFaceLandmarks()
.withFaceDescriptors()
})
})
})
......
import * as faceapi from '../../../src';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { expectDetectionResults } from '../../expectDetectionResults';
import { fetchImage } from '../../../src';
import { expectedSsdBoxes } from './expectedBoxes';
describe('faceDetectionNet', () => {

  let imgEl: HTMLImageElement

  beforeAll(async () => {
    imgEl = await fetchImage('base/test/images/faces.jpg')
  })

  describeWithNets('uncompressed weights', { withFaceDetectionNet: { quantized: false } }, ({ faceDetectionNet }) => {

    it('scores > 0.8', async () => {
      // -1 marks faces that do not clear the confidence threshold.
      const expectedScores = [-1, -1, 0.98, 0.88, 0.81, -1]
      const results = await faceDetectionNet.locateFaces(imgEl, { minConfidence: 0.8 }) as faceapi.FaceDetection[]
      expect(results.length).toEqual(3)
      expectDetectionResults(results, expectedSsdBoxes, expectedScores, 3)
    })

    it('scores > 0.5', async () => {
      const expectedScores = [0.57, 0.74, 0.98, 0.88, 0.81, 0.58]
      const results = await faceDetectionNet.locateFaces(imgEl, { minConfidence: 0.5 }) as faceapi.FaceDetection[]
      expect(results.length).toEqual(6)
      expectDetectionResults(results, expectedSsdBoxes, expectedScores, 3)
    })
  })

  describeWithNets('quantized weights', { withFaceDetectionNet: { quantized: true } }, ({ faceDetectionNet }) => {

    it('scores > 0.8', async () => {
      const expectedScores = [-1, 0.81, 0.97, 0.88, 0.84, -1]
      const results = await faceDetectionNet.locateFaces(imgEl, { minConfidence: 0.8 }) as faceapi.FaceDetection[]
      expect(results.length).toEqual(4)
      expectDetectionResults(results, expectedSsdBoxes, expectedScores, 4)
    })

    it('scores > 0.5', async () => {
      const expectedScores = [0.54, 0.81, 0.97, 0.88, 0.84, 0.61]
      const results = await faceDetectionNet.locateFaces(imgEl, { minConfidence: 0.5 }) as faceapi.FaceDetection[]
      expect(results.length).toEqual(6)
      expectDetectionResults(results, expectedSsdBoxes, expectedScores, 5)
    })
  })

  describe('no memory leaks', () => {

    describe('NeuralNetwork, uncompressed model', () => {
      it('disposes all param tensors', async () => {
        await expectAllTensorsReleased(async () => {
          const response = await fetch('base/weights_uncompressed/ssd_mobilenetv1_model.weights')
          const weightData = new Float32Array(await response.arrayBuffer())
          const net = faceapi.createFaceDetectionNet(weightData)
          net.dispose()
        })
      })
    })

    describe('NeuralNetwork, quantized model', () => {
      it('disposes all param tensors', async () => {
        await expectAllTensorsReleased(async () => {
          const net = new faceapi.FaceDetectionNet()
          await net.load('base/weights')
          net.dispose()
        })
      })
    })
  })
})
\ No newline at end of file
import * as faceapi from '../../../src';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { expectFaceDetections } from '../../expectFaceDetections';
import { fetchImage } from '../../../src';
import { expectedSsdBoxes } from './expectedBoxes';
describe('ssdMobilenetv1.locateFaces', () => {

  let imgEl: HTMLImageElement

  beforeAll(async () => {
    imgEl = await fetchImage('base/test/images/faces.jpg')
  })

  describeWithNets('quantized weights', { withSsdMobilenetv1: { quantized: true } }, ({ ssdMobilenetv1 }) => {

    it('scores > 0.8', async () => {
      // Only 4 of the 6 faces clear the 0.8 threshold (-1 marks a miss).
      const expectedScores = [-1, 0.81, 0.97, 0.88, 0.84, -1]
      const results = await ssdMobilenetv1.locateFaces(imgEl, { minConfidence: 0.8 }) as faceapi.FaceDetection[]
      expect(results.length).toEqual(4)
      expectFaceDetections(results, expectedSsdBoxes, expectedScores, 4)
    })

    it('scores > 0.5', async () => {
      const expectedScores = [0.54, 0.81, 0.97, 0.88, 0.84, 0.61]
      const results = await ssdMobilenetv1.locateFaces(imgEl, { minConfidence: 0.5 }) as faceapi.FaceDetection[]
      expect(results.length).toEqual(6)
      expectFaceDetections(results, expectedSsdBoxes, expectedScores, 5)
    })

    it('no memory leaks', async () => {
      await expectAllTensorsReleased(async () => {
        const net = new faceapi.SsdMobilenetv1()
        await net.load('base/weights')
        net.dispose()
      })
    })
  })
})
\ No newline at end of file
import * as faceapi from '../../../src';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { expectFaceDetections } from '../../expectFaceDetections';
import { fetchImage } from '../../../src';
import { expectedSsdBoxes } from './expectedBoxes';
describe('ssdMobilenetv1.locateFaces, uncompressed', () => {

  let imgEl: HTMLImageElement

  beforeAll(async () => {
    imgEl = await fetchImage('base/test/images/faces.jpg')
  })

  describeWithNets('uncompressed weights', { withSsdMobilenetv1: { quantized: false } }, ({ ssdMobilenetv1 }) => {

    it('scores > 0.8', async () => {
      // Only 3 of the 6 faces clear the 0.8 threshold (-1 marks a miss).
      const expectedScores = [-1, -1, 0.98, 0.88, 0.81, -1]
      const results = await ssdMobilenetv1.locateFaces(imgEl, { minConfidence: 0.8 }) as faceapi.FaceDetection[]
      expect(results.length).toEqual(3)
      expectFaceDetections(results, expectedSsdBoxes, expectedScores, 3)
    })

    it('scores > 0.5', async () => {
      const expectedScores = [0.57, 0.74, 0.98, 0.88, 0.81, 0.58]
      const results = await ssdMobilenetv1.locateFaces(imgEl, { minConfidence: 0.5 }) as faceapi.FaceDetection[]
      expect(results.length).toEqual(6)
      expectFaceDetections(results, expectedSsdBoxes, expectedScores, 3)
    })

    it('no memory leaks', async () => {
      await expectAllTensorsReleased(async () => {
        const response = await fetch('base/weights_uncompressed/ssd_mobilenetv1_model.weights')
        const weightData = new Float32Array(await response.arrayBuffer())
        const net = faceapi.createSsdMobilenetv1(weightData)
        net.dispose()
      })
    })
  })
})
\ No newline at end of file
import * as faceapi from '../../../src';
import { describeWithNets, expectAllTensorsReleased, assembleExpectedFullFaceDescriptions, ExpectedFullFaceDescription } from '../../utils';
import { fetchImage, SsdMobilenetv1Options } from '../../../src';
import { expectFaceDetections } from '../../expectFaceDetections';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectedSsdBoxes } from './expectedBoxes';
describe('ssdMobilenetv1', () => {

  let imgEl: HTMLImageElement
  let expectedFullFaceDescriptions: ExpectedFullFaceDescription[]
  const expectedScores = [0.54, 0.81, 0.97, 0.88, 0.84, 0.61]

  beforeAll(async () => {
    imgEl = await fetchImage('base/test/images/faces.jpg')
    expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedSsdBoxes)
  })

  describeWithNets('globalApi', { withAllFacesSsdMobilenetv1: true }, () => {

    it('detectAllFaces', async () => {
      const results = await faceapi.detectAllFaces(imgEl, new SsdMobilenetv1Options({ minConfidence: 0.5 }))
      expect(results.length).toEqual(6)
      expectFaceDetections(results, expectedSsdBoxes, expectedScores, 5)
    })

    it('detectAllFaces.withFaceLandmarks()', async () => {
      const results = await faceapi
        .detectAllFaces(imgEl, new SsdMobilenetv1Options({ minConfidence: 0.5 }))
        .withFaceLandmarks()
      expect(results.length).toEqual(6)
      expectFaceDetectionsWithLandmarks(results, expectedFullFaceDescriptions, expectedScores, { maxBoxDelta: 5, maxLandmarksDelta: 1 })
    })

    it('detectAllFaces.withFaceLandmarks().withFaceDescriptors()', async () => {
      const results = await faceapi
        .detectAllFaces(imgEl, new SsdMobilenetv1Options({ minConfidence: 0.5 }))
        .withFaceLandmarks()
        .withFaceDescriptors()
      expect(results.length).toEqual(6)
      expectFullFaceDescriptions(results, expectedFullFaceDescriptions, expectedScores, { maxBoxDelta: 5, maxLandmarksDelta: 1, maxDescriptorDelta: 0.01 })
    })

    it('no memory leaks', async () => {
      await expectAllTensorsReleased(async () => {
        await faceapi
          .detectAllFaces(imgEl, new SsdMobilenetv1Options())
          .withFaceLandmarks()
          .withFaceDescriptors()
      })
    })
  })
})
\ No newline at end of file
import { IRect } from '../../../src';
import { sortBoxes } from '../../utils';
// Reference bounding boxes for the six faces in test/images/faces.jpg as
// produced by the tiny face detector. The literal order is immaterial:
// sortBoxes normalizes it so detection results can be compared
// index-by-index (ordering criterion defined by sortBoxes in ../../utils).
export const expectedTinyFaceDetectorBoxes: IRect[] = sortBoxes([
{ x: 29, y: 264, width: 139, height: 137 },
{ x: 224, y: 240, width: 147, height: 128 },
{ x: 547, y: 81, width: 136, height: 114 },
{ x: 214, y: 53, width: 124, height: 119 },
{ x: 430, y: 183, width: 162, height: 143 },
{ x: 54, y: 33, width: 134, height: 114 }
])
import * as faceapi from '../../../src';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { expectFaceDetections } from '../../expectFaceDetections';
import { fetchImage } from '../../../src';
import { expectedTinyFaceDetectorBoxes } from './expectedBoxes';
describe('tinyFaceDetector.locateFaces', () => {

  let imgEl: HTMLImageElement

  beforeAll(async () => {
    imgEl = await fetchImage('base/test/images/faces.jpg')
  })

  describeWithNets('quantized weights', { withTinyFaceDetector: { quantized: true } }, ({ tinyFaceDetector }) => {

    it('inputSize 320, finds all faces', async () => {
      // Smaller input size gives coarser boxes, hence the large box tolerance.
      const expectedScores = [0.77, 0.75, 0.88, 0.77, 0.83, 0.85]
      const results = await tinyFaceDetector.locateFaces(imgEl, { inputSize: 320 }) as faceapi.FaceDetection[]
      expect(results.length).toEqual(6)
      expectFaceDetections(results, expectedTinyFaceDetectorBoxes, expectedScores, 36)
    })

    it('inputSize 416, finds all faces', async () => {
      const expectedScores = [0.7, 0.82, 0.93, 0.86, 0.79, 0.84]
      const results = await tinyFaceDetector.locateFaces(imgEl, { inputSize: 416 }) as faceapi.FaceDetection[]
      expect(results.length).toEqual(6)
      expectFaceDetections(results, expectedTinyFaceDetectorBoxes, expectedScores, 1)
    })

    it('no memory leaks', async () => {
      await expectAllTensorsReleased(async () => {
        const net = new faceapi.TinyFaceDetector()
        await net.load('base/weights')
        net.dispose()
      })
    })
  })
})
\ No newline at end of file
import * as faceapi from '../../../src';
import { describeWithNets, expectAllTensorsReleased, assembleExpectedFullFaceDescriptions, ExpectedFullFaceDescription } from '../../utils';
import { fetchImage, TinyFaceDetectorOptions } from '../../../src';
import { expectFaceDetections } from '../../expectFaceDetections';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectedTinyFaceDetectorBoxes } from './expectedBoxes';
describe('tinyFaceDetector', () => {

  let imgEl: HTMLImageElement
  let expectedFullFaceDescriptions: ExpectedFullFaceDescription[]
  const expectedScores = [0.7, 0.82, 0.93, 0.86, 0.79, 0.84]

  beforeAll(async () => {
    imgEl = await fetchImage('base/test/images/faces.jpg')
    expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes)
  })

  describeWithNets('globalApi', { withAllFacesTinyFaceDetector: true }, () => {

    it('detectAllFaces', async () => {
      const results = await faceapi.detectAllFaces(imgEl, new TinyFaceDetectorOptions({ inputSize: 416 }))
      expect(results.length).toEqual(6)
      expectFaceDetections(results, expectedTinyFaceDetectorBoxes, expectedScores, 1)
    })

    it('detectAllFaces.withFaceLandmarks()', async () => {
      const results = await faceapi
        .detectAllFaces(imgEl, new TinyFaceDetectorOptions({ inputSize: 416 }))
        .withFaceLandmarks()
      expect(results.length).toEqual(6)
      expectFaceDetectionsWithLandmarks(results, expectedFullFaceDescriptions, expectedScores, { maxBoxDelta: 1, maxLandmarksDelta: 10 })
    })

    it('detectAllFaces.withFaceLandmarks().withFaceDescriptors()', async () => {
      const results = await faceapi
        .detectAllFaces(imgEl, new TinyFaceDetectorOptions({ inputSize: 416 }))
        .withFaceLandmarks()
        .withFaceDescriptors()
      expect(results.length).toEqual(6)
      expectFullFaceDescriptions(results, expectedFullFaceDescriptions, expectedScores, { maxBoxDelta: 1, maxLandmarksDelta: 10, maxDescriptorDelta: 0.2 })
    })

    it('no memory leaks', async () => {
      await expectAllTensorsReleased(async () => {
        await faceapi
          .detectAllFaces(imgEl, new TinyFaceDetectorOptions())
          .withFaceLandmarks()
          .withFaceDescriptors()
      })
    })
  })
})
\ No newline at end of file
import { TinyYolov2SizeType } from 'tfjs-tiny-yolov2';
import { fetchImage, TinyYolov2 } from '../../../src';
import { expectFaceDetections } from '../../expectFaceDetections';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { expectedTinyYolov2Boxes } from './expectedBoxes';
xdescribe('tinyYolov2.locateFaces', () => {

  let imgEl: HTMLImageElement

  beforeAll(async () => {
    imgEl = await fetchImage('base/test/images/faces.jpg')
  })

  describeWithNets('quantized weights', { withTinyYolov2: { quantized: true, withSeparableConv: false } }, ({ tinyYolov2 }) => {

    // Runs detection at the given input size and checks that all six faces
    // are found, with boxes matching the reference values within maxBoxDelta.
    const expectSixFaces = async (
      inputSize: number,
      expectedScores: number[],
      maxBoxDelta: number
    ) => {
      const detections = await tinyYolov2.locateFaces(imgEl, { inputSize })
      expect(detections.length).toEqual(6)
      expectFaceDetections(detections, expectedTinyYolov2Boxes, expectedScores, maxBoxDelta)
    }

    it('inputSize lg, finds all faces', async () => {
      await expectSixFaces(TinyYolov2SizeType.LG, [0.8, 0.85, 0.86, 0.83, 0.86, 0.81], 4)
    })

    it('inputSize md, finds all faces', async () => {
      await expectSixFaces(TinyYolov2SizeType.MD, [0.89, 0.81, 0.82, 0.72, 0.81, 0.86], 27)
    })

    it('inputSize custom, finds all faces', async () => {
      await expectSixFaces(416, [0.89, 0.81, 0.82, 0.72, 0.81, 0.86], 27)
    })

    it('no memory leaks', async () => {
      // Loading and disposing the net must release every parameter tensor.
      await expectAllTensorsReleased(async () => {
        const net = new TinyYolov2(false)
        await net.load('base/weights_unused')
        net.dispose()
      })
    })

  })

})
\ No newline at end of file
import { TinyYolov2SizeType } from 'tfjs-tiny-yolov2';
import { createTinyYolov2, fetchImage, TinyYolov2 } from '../../../src';
import { expectDetectionResults } from '../../expectDetectionResults';
import { createTinyYolov2, fetchImage } from '../../../src';
import { expectFaceDetections } from '../../expectFaceDetections';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { expectedTinyYolov2Boxes } from './expectedBoxes';
describe('tinyYolov2', () => {
xdescribe('tinyYolov2.locateFaces, uncompressed', () => {
let imgEl: HTMLImageElement
......@@ -13,40 +13,6 @@ describe('tinyYolov2', () => {
imgEl = await fetchImage('base/test/images/faces.jpg')
})
describeWithNets('quantized weights', { withTinyYolov2: { quantized: true, withSeparableConv: false } }, ({ tinyYolov2 }) => {
it('inputSize lg, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2SizeType.LG })
const expectedScores = [0.8, 0.85, 0.86, 0.83, 0.86, 0.81]
const maxBoxDelta = 4
expect(detections.length).toEqual(6)
expectDetectionResults(detections, expectedTinyYolov2Boxes, expectedScores, maxBoxDelta)
})
it('inputSize md, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2SizeType.MD })
const expectedScores = [0.89, 0.81, 0.82, 0.72, 0.81, 0.86]
const maxBoxDelta = 27
expect(detections.length).toEqual(6)
expectDetectionResults(detections, expectedTinyYolov2Boxes, expectedScores, maxBoxDelta)
})
it('inputSize custom, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: 416 })
const expectedScores = [0.89, 0.81, 0.82, 0.72, 0.81, 0.86]
const maxBoxDelta = 27
expect(detections.length).toEqual(6)
expectDetectionResults(detections, expectedTinyYolov2Boxes, expectedScores, maxBoxDelta)
})
})
describeWithNets('uncompressed weights', { withTinyYolov2: { quantized: false, withSeparableConv: false } }, ({ tinyYolov2 }) => {
it('inputSize lg, finds all faces', async () => {
......@@ -56,7 +22,7 @@ describe('tinyYolov2', () => {
const maxBoxDelta = 1
expect(detections.length).toEqual(6)
expectDetectionResults(detections, expectedTinyYolov2Boxes, expectedScores, maxBoxDelta)
expectFaceDetections(detections, expectedTinyYolov2Boxes, expectedScores, maxBoxDelta)
})
it('inputSize md, finds all faces', async () => {
......@@ -66,7 +32,7 @@ describe('tinyYolov2', () => {
const maxBoxDelta = 24
expect(detections.length).toEqual(6)
expectDetectionResults(detections, expectedTinyYolov2Boxes, expectedScores, maxBoxDelta)
expectFaceDetections(detections, expectedTinyYolov2Boxes, expectedScores, maxBoxDelta)
})
it('inputSize custom, finds all faces', async () => {
......@@ -76,36 +42,16 @@ describe('tinyYolov2', () => {
const maxBoxDelta = 24
expect(detections.length).toEqual(6)
expectDetectionResults(detections, expectedTinyYolov2Boxes, expectedScores, maxBoxDelta)
expectFaceDetections(detections, expectedTinyYolov2Boxes, expectedScores, maxBoxDelta)
})
})
describe('no memory leaks', () => {
describe('NeuralNetwork, uncompressed model', () => {
it('disposes all param tensors', async () => {
await expectAllTensorsReleased(async () => {
const res = await fetch('base/weights_uncompressed/tiny_yolov2_model.weights')
const weights = new Float32Array(await res.arrayBuffer())
const net = createTinyYolov2(weights, false)
net.dispose()
})
})
})
describe('NeuralNetwork, quantized model', () => {
it('disposes all param tensors', async () => {
await expectAllTensorsReleased(async () => {
const net = new TinyYolov2(false)
await net.load('base/weights_unused')
net.dispose()
})
it('no memory leaks', async () => {
await expectAllTensorsReleased(async () => {
const res = await fetch('base/weights_uncompressed/tiny_yolov2_model.weights')
const weights = new Float32Array(await res.arrayBuffer())
const net = createTinyYolov2(weights, false)
net.dispose()
})
})
})
......
import { TinyYolov2SizeType } from 'tfjs-tiny-yolov2';
import { createTinyYolov2, fetchImage, TinyYolov2 } from '../../../src';
import { expectDetectionResults } from '../../expectDetectionResults';
import { expectFaceDetections } from '../../expectFaceDetections';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { expectedTinyYolov2Boxes } from './expectedBoxes';
describe('tinyYolov2, with separable convolutions', () => {
xdescribe('tinyYolov2.locateFaces, with separable convolutions', () => {
let imgEl: HTMLImageElement
......@@ -22,7 +22,7 @@ describe('tinyYolov2, with separable convolutions', () => {
const maxBoxDelta = 25
expect(detections.length).toEqual(6)
expectDetectionResults(detections, expectedTinyYolov2Boxes, expectedScores, maxBoxDelta)
expectFaceDetections(detections, expectedTinyYolov2Boxes, expectedScores, maxBoxDelta)
})
it('inputSize md, finds all faces', async () => {
......@@ -32,7 +32,7 @@ describe('tinyYolov2, with separable convolutions', () => {
const maxBoxDelta = 34
expect(detections.length).toEqual(6)
expectDetectionResults(detections, expectedTinyYolov2Boxes, expectedScores, maxBoxDelta)
expectFaceDetections(detections, expectedTinyYolov2Boxes, expectedScores, maxBoxDelta)
})
it('inputSize custom, finds all faces', async () => {
......@@ -42,7 +42,7 @@ describe('tinyYolov2, with separable convolutions', () => {
const maxBoxDelta = 34
expect(detections.length).toEqual(6)
expectDetectionResults(detections, expectedTinyYolov2Boxes, expectedScores, maxBoxDelta)
expectFaceDetections(detections, expectedTinyYolov2Boxes, expectedScores, maxBoxDelta)
})
})
......
import * as tf from '@tensorflow/tfjs-core';
import { FaceDetectionNet, FaceRecognitionNet, IPoint, IRect, Mtcnn, NeuralNetwork, TinyYolov2 } from '../src/';
import * as faceapi from '../src';
import { FaceRecognitionNet, IPoint, IRect, Mtcnn, NeuralNetwork, TinyYolov2 } from '../src/';
import { FaceDetection } from '../src/classes/FaceDetection';
import { FaceDetectionWithLandmarks } from '../src/classes/FaceDetectionWithLandmarks';
import { FaceLandmarks } from '../src/classes/FaceLandmarks';
import { FaceLandmark68Net } from '../src/faceLandmarkNet/FaceLandmark68Net';
import { FaceLandmark68TinyNet } from '../src/faceLandmarkNet/FaceLandmark68TinyNet';
import { SsdMobilenetv1 } from '../src/ssdMobilenetv1/SsdMobilenetv1';
import { TinyFaceDetector } from '../src/tinyFaceDetector/TinyFaceDetector';
jasmine.DEFAULT_TIMEOUT_INTERVAL = 60000
......@@ -96,7 +98,8 @@ export type WithTinyYolov2Options = WithNetOptions & {
}
export type InjectNetArgs = {
faceDetectionNet: FaceDetectionNet
ssdMobilenetv1: SsdMobilenetv1
tinyFaceDetector: TinyFaceDetector
faceLandmark68Net: FaceLandmark68Net
faceLandmark68TinyNet: FaceLandmark68TinyNet
faceRecognitionNet: FaceRecognitionNet
......@@ -107,9 +110,11 @@ export type InjectNetArgs = {
export type DescribeWithNetsOptions = {
withAllFacesSsdMobilenetv1?: boolean
withAllFacesTinyFaceDetector?: boolean
withAllFacesTinyYolov2?: boolean
withAllFacesMtcnn?: boolean
withFaceDetectionNet?: WithNetOptions
withSsdMobilenetv1?: WithNetOptions
withTinyFaceDetector?: WithNetOptions
withFaceLandmark68Net?: WithNetOptions
withFaceLandmark68TinyNet?: WithNetOptions
withFaceRecognitionNet?: WithNetOptions
......@@ -126,11 +131,10 @@ async function initNet<TNet extends NeuralNetwork<any>>(
uncompressedFilename: string | boolean,
isUnusedModel: boolean = false
) {
await net.load(
uncompressedFilename
? await loadNetWeights(`base/weights_uncompressed/${uncompressedFilename}`)
: (isUnusedModel ? 'base/weights_unused' : 'base/weights')
)
const url = uncompressedFilename
? await loadNetWeights(`base/weights_uncompressed/${uncompressedFilename}`)
: (isUnusedModel ? 'base/weights_unused' : 'base/weights')
await net.load(url)
}
export function describeWithNets(
......@@ -140,19 +144,24 @@ export function describeWithNets(
) {
describe(description, () => {
let faceDetectionNet: FaceDetectionNet = new FaceDetectionNet()
let faceLandmark68Net: FaceLandmark68Net = new FaceLandmark68Net()
let faceLandmark68TinyNet: FaceLandmark68TinyNet = new FaceLandmark68TinyNet()
let faceRecognitionNet: FaceRecognitionNet = new FaceRecognitionNet()
let mtcnn: Mtcnn = new Mtcnn()
let tinyYolov2: TinyYolov2 = new TinyYolov2(options.withTinyYolov2 && options.withTinyYolov2.withSeparableConv)
const {
ssdMobilenetv1,
tinyFaceDetector,
faceLandmark68Net,
faceLandmark68TinyNet,
faceRecognitionNet,
mtcnn,
tinyYolov2
} = faceapi.nets
beforeAll(async () => {
const {
withAllFacesSsdMobilenetv1,
withAllFacesTinyFaceDetector,
withAllFacesTinyYolov2,
withAllFacesMtcnn,
withFaceDetectionNet,
withSsdMobilenetv1,
withTinyFaceDetector,
withFaceLandmark68Net,
withFaceLandmark68TinyNet,
withFaceRecognitionNet,
......@@ -160,14 +169,21 @@ export function describeWithNets(
withTinyYolov2
} = options
if (withFaceDetectionNet || withAllFacesSsdMobilenetv1) {
await initNet<FaceDetectionNet>(
faceDetectionNet,
!!withFaceDetectionNet && !withFaceDetectionNet.quantized && 'ssd_mobilenetv1_model.weights'
if (withSsdMobilenetv1 || withAllFacesSsdMobilenetv1) {
await initNet<SsdMobilenetv1>(
ssdMobilenetv1,
!!withSsdMobilenetv1 && !withSsdMobilenetv1.quantized && 'ssd_mobilenetv1_model.weights'
)
}
if (withTinyFaceDetector || withAllFacesTinyFaceDetector) {
await initNet<TinyFaceDetector>(
tinyFaceDetector,
!!withTinyFaceDetector && !withTinyFaceDetector.quantized && 'tiny_face_detector_model.weights'
)
}
if (withFaceLandmark68Net || withAllFacesSsdMobilenetv1 || withAllFacesTinyYolov2) {
if (withFaceLandmark68Net || withAllFacesSsdMobilenetv1 || withAllFacesTinyFaceDetector|| withAllFacesMtcnn || withAllFacesTinyYolov2) {
await initNet<FaceLandmark68Net>(
faceLandmark68Net,
!!withFaceLandmark68Net && !withFaceLandmark68Net.quantized && 'face_landmark_68_model.weights'
......@@ -181,10 +197,11 @@ export function describeWithNets(
)
}
if (withFaceRecognitionNet || withAllFacesSsdMobilenetv1 || withAllFacesMtcnn || withAllFacesTinyYolov2) {
if (withFaceRecognitionNet || withAllFacesSsdMobilenetv1 || withAllFacesTinyFaceDetector|| withAllFacesMtcnn || withAllFacesTinyYolov2) {
await initNet<FaceRecognitionNet>(
faceRecognitionNet,
// TODO: figure out why quantized weights results in NaNs in testcases
// apparently (net weight values differ when loading with karma)
'face_recognition_model.weights'
)
}
......@@ -200,21 +217,23 @@ export function describeWithNets(
await initNet<TinyYolov2>(
tinyYolov2,
!!withTinyYolov2 && !withTinyYolov2.quantized && 'tiny_yolov2_model.weights',
withTinyYolov2 && withTinyYolov2.withSeparableConv === false
true
)
}
})
afterAll(() => {
faceDetectionNet && faceDetectionNet.dispose()
faceLandmark68Net && faceLandmark68Net.dispose()
faceRecognitionNet && faceRecognitionNet.dispose()
mtcnn && mtcnn.dispose(),
tinyYolov2 && tinyYolov2.dispose()
ssdMobilenetv1.isLoaded && ssdMobilenetv1.dispose()
faceLandmark68Net.isLoaded && faceLandmark68Net.dispose()
faceRecognitionNet.isLoaded && faceRecognitionNet.dispose()
mtcnn.isLoaded && mtcnn.dispose()
tinyFaceDetector.isLoaded && tinyFaceDetector.dispose()
tinyYolov2.isLoaded && tinyYolov2.dispose()
})
specDefinitions({
faceDetectionNet,
ssdMobilenetv1,
tinyFaceDetector,
faceLandmark68Net,
faceLandmark68TinyNet,
faceRecognitionNet,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment