Commit 2f50aa11 by vincent

added allFacesTinyYolov2 + tiny yolov2 face recognition example

parent 3e8ae6ca
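
This commit wires the Tiny Yolov2 detector into the same detect → landmarks → descriptor pipeline already used by allFaces (SSD Mobilenet v1) and allFacesMtcnn. A minimal usage sketch of the new entry point, assuming the model weights are served from the app root as in the bundled examples (the inputImg element id is illustrative):

  // load detector, landmark and recognition weights
  await faceapi.loadTinyYolov2Model('/')
  await faceapi.loadFaceLandmarkModel('/')
  await faceapi.loadFaceRecognitionModel('/')

  const input = document.getElementById('inputImg') as HTMLImageElement

  // inputSize: 'xs' | 'sm' | 'md' | 'lg' (224 / 320 / 416 / 608),
  // scoreThreshold defaults to 0.5; a third argument toggles batch processing
  const fullFaceDescriptions = await faceapi.allFacesTinyYolov2(input, { inputSize: 'lg' })

  fullFaceDescriptions
    .map(fd => fd.forSize(input.width, input.height))
    .forEach(({ detection, landmarks, descriptor }) => {
      // descriptor is a 128-dimensional face embedding, ready for matching
      console.log(detection.getScore(), detection.getBox())
    })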
@@ -147,6 +147,10 @@ function renderNavBar(navbarId, exampleUri) {
       name: 'Tiny Yolov2 Face Detection Webcam'
     },
     {
+      uri: 'tiny_yolov2_face_recognition',
+      name: 'Tiny Yolov2 Face Recognition'
+    },
+    {
       uri: 'batch_face_landmarks',
       name: 'Batch Face Landmarks'
     },
...
@@ -32,6 +32,7 @@ app.get('/mtcnn_face_recognition_webcam', (req, res) => res.sendFile(path.join(v
 app.get('/tiny_yolov2_face_detection', (req, res) => res.sendFile(path.join(viewsDir, 'tinyYolov2FaceDetection.html')))
 app.get('/tiny_yolov2_face_detection_video', (req, res) => res.sendFile(path.join(viewsDir, 'tinyYolov2FaceDetectionVideo.html')))
 app.get('/tiny_yolov2_face_detection_webcam', (req, res) => res.sendFile(path.join(viewsDir, 'tinyYolov2FaceDetectionWebcam.html')))
+app.get('/tiny_yolov2_face_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'tinyYolov2FaceRecognition.html')))
 app.get('/batch_face_landmarks', (req, res) => res.sendFile(path.join(viewsDir, 'batchFaceLandmarks.html')))
 app.get('/batch_face_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'batchFaceRecognition.html')))
...
@@ -128,7 +128,7 @@
   }

   $(document).ready(function() {
-    renderNavBar('#navbar', 'tiny_yolov2_face_detection_video')
+    renderNavBar('#navbar', 'tiny_yolov2_face_detection_webcam')
     const sizeTypeSelect = $('#sizeType')
     sizeTypeSelect.val(sizeType)
...
<!DOCTYPE html>
<html>
<head>
  <script src="face-api.js"></script>
  <script src="commons.js"></script>
  <link rel="stylesheet" href="styles.css">
  <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
  <script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
  <script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
  <div id="navbar"></div>
  <div class="center-content page-container">
    <div class="progress" id="loader">
      <div class="indeterminate"></div>
    </div>
    <div style="position: relative" class="margin">
      <img id="inputImg" src="" style="max-width: 800px;" />
      <canvas id="overlay" />
    </div>
    <div class="row side-by-side">
      <div id="selectList"></div>
      <div class="row">
        <label for="imgUrlInput">Get image from URL:</label>
        <input id="imgUrlInput" type="text" class="bold">
      </div>
      <button
        class="waves-effect waves-light btn"
        onclick="loadImageFromUrl()"
      >
        Ok
      </button>
      <p>
        <input type="checkbox" id="useBatchProcessing" onchange="onChangeUseBatchProcessing(event)" />
        <label for="useBatchProcessing">Use Batch Processing</label>
      </p>
    </div>
    <div class="row side-by-side">
      <div class="row input-field" style="margin-right: 20px;">
        <select id="sizeType">
          <option value="" disabled selected>Input Size:</option>
          <option value="xs">XS: 224 x 224</option>
          <option value="sm">SM: 320 x 320</option>
          <option value="md">MD: 416 x 416</option>
          <option value="lg">LG: 608 x 608</option>
        </select>
        <label>Input Size</label>
      </div>
      <div class="row">
        <label for="scoreThreshold">Score Threshold:</label>
        <input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
      </div>
      <button
        class="waves-effect waves-light btn"
        onclick="onDecreaseThreshold()"
      >
        <i class="material-icons left">-</i>
      </button>
      <button
        class="waves-effect waves-light btn"
        onclick="onIncreaseThreshold()"
      >
        <i class="material-icons left">+</i>
      </button>
    </div>
    <div class="row side-by-side">
      <div class="row">
        <label for="maxDistance">Max Descriptor Distance:</label>
        <input disabled value="0.6" id="maxDistance" type="text" class="bold">
      </div>
      <button
        class="waves-effect waves-light btn button-sm"
        onclick="onDecreaseMaxDistance()"
      >
        <i class="material-icons left">-</i>
      </button>
      <button
        class="waves-effect waves-light btn button-sm"
        onclick="onIncreaseMaxDistance()"
      >
        <i class="material-icons left">+</i>
      </button>
    </div>
  </div>
  <script>
    let maxDistance = 0.6
    let useBatchProcessing = false
    let trainDescriptorsByClass = []

    let scoreThreshold = 0.5
    let sizeType = 'lg'

    function onIncreaseThreshold() {
      scoreThreshold = Math.min(faceapi.round(scoreThreshold + 0.1), 1.0)
      $('#scoreThreshold').val(scoreThreshold)
      updateResults()
    }

    function onDecreaseThreshold() {
      scoreThreshold = Math.max(faceapi.round(scoreThreshold - 0.1), 0.1)
      $('#scoreThreshold').val(scoreThreshold)
      updateResults()
    }

    function onSizeTypeChanged(e) {
      sizeType = e.target.value
      $('#sizeType').val(sizeType)
      updateResults()
    }

    function onChangeUseBatchProcessing(e) {
      useBatchProcessing = $(e.target).prop('checked')
    }

    function onIncreaseMaxDistance() {
      maxDistance = Math.min(faceapi.round(maxDistance + 0.1), 1.0)
      $('#maxDistance').val(maxDistance)
      updateResults()
    }

    function onDecreaseMaxDistance() {
      maxDistance = Math.max(faceapi.round(maxDistance - 0.1), 0.1)
      $('#maxDistance').val(maxDistance)
      updateResults()
    }
    async function loadImageFromUrl() {
      const img = await requestExternalImage($('#imgUrlInput').val())
      $('#inputImg').get(0).src = img.src
      updateResults()
    }
    async function updateResults() {
      const inputImgEl = $('#inputImg').get(0)
      const { width, height } = inputImgEl

      const canvas = $('#overlay').get(0)
      canvas.width = width
      canvas.height = height

      const forwardParams = {
        inputSize: sizeType,
        scoreThreshold
      }

      const fullFaceDescriptions = (await faceapi.allFacesTinyYolov2(inputImgEl, forwardParams, useBatchProcessing))
        .map(fd => fd.forSize(width, height))

      fullFaceDescriptions.forEach(({ detection, descriptor }) => {
        faceapi.drawDetection('overlay', [detection], { withScore: false })

        const bestMatch = getBestMatch(trainDescriptorsByClass, descriptor)
        const text = `${bestMatch.distance < maxDistance ? bestMatch.className : 'unknown'} (${bestMatch.distance})`
        const { x, y, height: boxHeight } = detection.getBox()
        faceapi.drawText(
          canvas.getContext('2d'),
          x,
          y + boxHeight,
          text,
          Object.assign(faceapi.getDefaultDrawOptions(), { color: 'red', fontSize: 16 })
        )
      })
    }
    async function onSelectionChanged(uri) {
      const imgBuf = await fetchImage(uri)
      $('#inputImg').get(0).src = (await faceapi.bufferToImage(imgBuf)).src
      updateResults()
    }

    async function run() {
      await faceapi.loadTinyYolov2Model('/')
      await faceapi.loadFaceLandmarkModel('/')
      await faceapi.loadFaceRecognitionModel('/')
      trainDescriptorsByClass = await initTrainDescriptorsByClass(faceapi.recognitionNet, 1)
      $('#loader').hide()
      onSelectionChanged($('#selectList select').val())
    }

    $(document).ready(function() {
      renderNavBar('#navbar', 'tiny_yolov2_face_recognition')
      renderImageSelectList(
        '#selectList',
        async (uri) => {
          await onSelectionChanged(uri)
        },
        'bbt1.jpg'
      )
      const sizeTypeSelect = $('#sizeType')
      sizeTypeSelect.val(sizeType)
      sizeTypeSelect.on('change', onSizeTypeChanged)
      sizeTypeSelect.material_select()

      run()
    })
  </script>
</body>
</html>
\ No newline at end of file
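
A note on the recognition step in this page: getBestMatch and initTrainDescriptorsByClass come from the shared example helpers (commons.js), not from this diff. The matching itself is a nearest-neighbor lookup under euclidean distance between 128-dimensional descriptors; a minimal sketch of the idea (the helper name is illustrative):

  // distance between two face descriptors (arrays of 128 numbers)
  function euclideanDistance(d1: number[], d2: number[]): number {
    return Math.sqrt(
      d1
        .map((val, i) => val - d2[i])
        .reduce((acc, diff) => acc + diff * diff, 0)
    )
  }

  // a detection is labeled with the closest class, or 'unknown' when even
  // the best distance exceeds maxDistance (0.6 by default in this page)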
@@ -166,7 +166,7 @@ export class NetInput {
   }

   /**
-   * By setting the isManaged flag, all newly created tensors will be automatically
+   * By setting the isManaged flag, all newly created tensors will be
    * automatically disposed after the batch tensor has been created
    */
   public managed() {
...
+import { TinyYolov2 } from '.';
 import { extractFaceTensors } from './extractFaceTensors';
+import { FaceDetection } from './FaceDetection';
 import { FaceDetectionNet } from './faceDetectionNet/FaceDetectionNet';
 import { FaceLandmarkNet } from './faceLandmarkNet/FaceLandmarkNet';
 import { FaceLandmarks68 } from './faceLandmarkNet/FaceLandmarks68';
@@ -7,6 +9,7 @@ import { FullFaceDescription } from './FullFaceDescription';
 import { Mtcnn } from './mtcnn/Mtcnn';
 import { MtcnnForwardParams } from './mtcnn/types';
 import { Rect } from './Rect';
+import { TinyYolov2ForwardParams } from './tinyYolov2/types';
 import { TNetInput } from './types';

 function computeDescriptorsFactory(
@@ -27,8 +30,8 @@ function computeDescriptorsFactory(
   }
 }

-export function allFacesFactory(
-  detectionNet: FaceDetectionNet,
+function allFacesFactory(
+  detectFaces: (input: TNetInput) => Promise<FaceDetection[]>,
   landmarkNet: FaceLandmarkNet,
   recognitionNet: FaceRecognitionNet
 ) {
@@ -36,11 +39,10 @@ export function allFacesFactory(
   return async function(
     input: TNetInput,
-    minConfidence: number = 0.8,
     useBatchProcessing: boolean = false
   ): Promise<FullFaceDescription[]> {

-    const detections = await detectionNet.locateFaces(input, minConfidence)
+    const detections = await detectFaces(input)
     const faceTensors = await extractFaceTensors(input, detections)

     const faceLandmarksByFace = useBatchProcessing
@@ -68,6 +70,38 @@ export function allFacesFactory(
   }
 }
+export function allFacesSsdMobilenetv1Factory(
+  ssdMobilenetv1: FaceDetectionNet,
+  landmarkNet: FaceLandmarkNet,
+  recognitionNet: FaceRecognitionNet
+) {
+  return async function(
+    input: TNetInput,
+    minConfidence: number = 0.8,
+    useBatchProcessing: boolean = false
+  ): Promise<FullFaceDescription[]> {
+    const detectFaces = (input: TNetInput) => ssdMobilenetv1.locateFaces(input, minConfidence)
+    const allFaces = allFacesFactory(detectFaces, landmarkNet, recognitionNet)
+    return allFaces(input, useBatchProcessing)
+  }
+}
+
+export function allFacesTinyYolov2Factory(
+  tinyYolov2: TinyYolov2,
+  landmarkNet: FaceLandmarkNet,
+  recognitionNet: FaceRecognitionNet
+) {
+  return async function(
+    input: TNetInput,
+    forwardParams: TinyYolov2ForwardParams = {},
+    useBatchProcessing: boolean = false
+  ): Promise<FullFaceDescription[]> {
+    const detectFaces = (input: TNetInput) => tinyYolov2.locateFaces(input, forwardParams)
+    const allFaces = allFacesFactory(detectFaces, landmarkNet, recognitionNet)
+    return allFaces(input, useBatchProcessing)
+  }
+}
+
 export function allFacesMtcnnFactory(
   mtcnn: Mtcnn,
   recognitionNet: FaceRecognitionNet
...
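
The net effect of the refactor above: allFacesFactory is now detector-agnostic, taking a detectFaces callback instead of a concrete FaceDetectionNet, while each public factory just closes over its own detector options (minConfidence for SSD, forwardParams for Tiny Yolov2). A hypothetical sketch of plugging yet another detector into the same pattern (myDetector and MyParams are illustrative, not part of this commit):

  type MyParams = { scoreThreshold?: number }

  export function allFacesMyDetectorFactory(
    myDetector: { locateFaces(input: TNetInput, params: MyParams): Promise<FaceDetection[]> },
    landmarkNet: FaceLandmarkNet,
    recognitionNet: FaceRecognitionNet
  ) {
    return async function(
      input: TNetInput,
      params: MyParams = {},
      useBatchProcessing: boolean = false
    ): Promise<FullFaceDescription[]> {
      // bind the detector-specific options into the uniform detectFaces signature
      const detectFaces = (input: TNetInput) => myDetector.locateFaces(input, params)
      return allFacesFactory(detectFaces, landmarkNet, recognitionNet)(input, useBatchProcessing)
    }
  }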
 import * as tf from '@tensorflow/tfjs-core';

-import { allFacesFactory, allFacesMtcnnFactory } from './allFacesFactory';
+import { allFacesMtcnnFactory, allFacesSsdMobilenetv1Factory, allFacesTinyYolov2Factory } from './allFacesFactory';
 import { FaceDetection } from './FaceDetection';
 import { FaceDetectionNet } from './faceDetectionNet/FaceDetectionNet';
 import { FaceLandmarkNet } from './faceLandmarkNet/FaceLandmarkNet';
@@ -21,15 +21,15 @@ export const recognitionNet = new FaceRecognitionNet()
 // nets need more specific names, to avoid ambiguity in future
 // when alternative net implementations are provided
 export const nets = {
-  ssdMobilenet: detectionNet,
+  ssdMobilenetv1: detectionNet,
   faceLandmark68Net: landmarkNet,
   faceRecognitionNet: recognitionNet,
   mtcnn: new Mtcnn(),
   tinyYolov2: new TinyYolov2()
 }

-export function loadFaceDetectionModel(url: string) {
-  return nets.ssdMobilenet.load(url)
+export function loadSsdMobilenetv1Model(url: string) {
+  return nets.ssdMobilenetv1.load(url)
 }

 export function loadFaceLandmarkModel(url: string) {
@@ -48,9 +48,13 @@ export function loadTinyYolov2Model(url: string) {
   return nets.tinyYolov2.load(url)
 }

+export function loadFaceDetectionModel(url: string) {
+  return loadSsdMobilenetv1Model(url)
+}
+
 export function loadModels(url: string) {
   return Promise.all([
-    loadFaceDetectionModel(url),
+    loadSsdMobilenetv1Model(url),
     loadFaceLandmarkModel(url),
     loadFaceRecognitionModel(url),
     loadMtcnnModel(url),
@@ -63,7 +67,7 @@ export function locateFaces(
   minConfidence?: number,
   maxResults?: number
 ): Promise<FaceDetection[]> {
-  return nets.ssdMobilenet.locateFaces(input, minConfidence, maxResults)
+  return nets.ssdMobilenetv1.locateFaces(input, minConfidence, maxResults)
 }

 export function detectLandmarks(
@@ -92,14 +96,26 @@ export function tinyYolov2(
   return nets.tinyYolov2.locateFaces(input, forwardParams)
 }

-export type allFacesFunction = (
+export type allFacesSsdMobilenetv1Function = (
   input: tf.Tensor | NetInput | TNetInput,
   minConfidence?: number,
   useBatchProcessing?: boolean
 ) => Promise<FullFaceDescription[]>

-export const allFaces: allFacesFunction = allFacesFactory(
-  nets.ssdMobilenet,
+export const allFacesSsdMobilenetv1: allFacesSsdMobilenetv1Function = allFacesSsdMobilenetv1Factory(
+  nets.ssdMobilenetv1,
+  nets.faceLandmark68Net,
+  nets.faceRecognitionNet
+)
+
+export type allFacesTinyYolov2Function = (
+  input: tf.Tensor | NetInput | TNetInput,
+  forwardParams?: TinyYolov2ForwardParams,
+  useBatchProcessing?: boolean
+) => Promise<FullFaceDescription[]>
+
+export const allFacesTinyYolov2: allFacesTinyYolov2Function = allFacesTinyYolov2Factory(
+  nets.tinyYolov2,
   nets.faceLandmark68Net,
   nets.faceRecognitionNet
 )
@@ -113,4 +129,6 @@ export type allFacesMtcnnFunction = (
 export const allFacesMtcnn: allFacesMtcnnFunction = allFacesMtcnnFactory(
   nets.mtcnn,
   nets.faceRecognitionNet
 )
+
+export const allFaces = allFacesSsdMobilenetv1
\ No newline at end of file
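
Worth noting: the renames in this file stay backwards compatible. loadFaceDetectionModel is kept and now delegates to loadSsdMobilenetv1Model, and allFaces is re-exported as an alias of allFacesSsdMobilenetv1, so existing callers keep working (inputImgEl below is any image input, as in the example page):

  // equivalent ways to load the SSD Mobilenet v1 detector weights
  await faceapi.loadFaceDetectionModel('/')
  await faceapi.loadSsdMobilenetv1Model('/')

  // equivalent pipelines (minConfidence defaults to 0.8)
  const a = await faceapi.allFaces(inputImgEl, 0.8)
  const b = await faceapi.allFacesSsdMobilenetv1(inputImgEl, 0.8)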
@@ -88,7 +88,7 @@ export class TinyYolov2 extends NeuralNetwork<NetParams> {
     : _inputSize

   if (typeof inputSize !== 'number') {
-    throw new Error(`TinyYolov2 - unkown inputSize: ${inputSize}, expected number or one of xs | sm | md | lg`)
+    throw new Error(`TinyYolov2 - unknown inputSize: ${inputSize}, expected number or one of xs | sm | md | lg`)
   }

   const netInput = await toNetInput(input, true)
...
@@ -4,7 +4,7 @@ export function getDefaultParams(params: TinyYolov2ForwardParams) {
   return Object.assign(
     {},
     {
-      sizeType: SizeType.MD,
+      inputSize: SizeType.MD,
       scoreThreshold: 0.5
     },
     params
...
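
This rename matters because getDefaultParams merges via Object.assign: with the default keyed as sizeType while the forward pass reads inputSize, a caller omitting inputSize ended up with no inputSize default at all. After the fix, partial params merge as expected; a quick check of the behavior (values from the diff):

  getDefaultParams({ scoreThreshold: 0.7 })
  // -> { inputSize: SizeType.MD, scoreThreshold: 0.7 }

  getDefaultParams({})
  // -> { inputSize: SizeType.MD, scoreThreshold: 0.5 }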
@@ -6,7 +6,7 @@ import { toNetInput } from '../../../src';
 import * as tf from '@tensorflow/tfjs-core';
 import { Point } from '../../../src/Point';

-describe('allFaces', () => {
+describe('allFacesSsdMobilenetv1', () => {

   let imgEl: HTMLImageElement
   let facesFaceLandmarkPositions: Point[][]
@@ -19,14 +19,14 @@ describe('allFaces', () => {
     facesFaceDescriptors = await (await fetch('base/test/data/facesFaceDescriptorsSsd.json')).json()
   })

-  describeWithNets('computes full face descriptions', { withAllFaces: true }, ({ allFaces }) => {
+  describeWithNets('computes full face descriptions', { withAllFacesSsdMobilenetv1: true }, ({ allFacesSsdMobilenetv1 }) => {

     const expectedScores = [0.97, 0.88, 0.83, 0.82, 0.59, 0.52]
     const maxBoxDelta = 5
     const maxLandmarkPointsDelta = 1

     it('scores > 0.8', async () => {
-      const results = await allFaces(imgEl, 0.8)
+      const results = await allFacesSsdMobilenetv1(imgEl, 0.8)

       expect(results.length).toEqual(4)
       results.forEach(({ detection, landmarks, descriptor }, i) => {
@@ -40,7 +40,7 @@ describe('allFaces', () => {
     })

     it('scores > 0.5', async () => {
-      const results = await allFaces(imgEl, 0.5)
+      const results = await allFacesSsdMobilenetv1(imgEl, 0.5)

       expect(results.length).toEqual(6)
       results.forEach(({ detection, landmarks, descriptor }, i) => {
@@ -55,11 +55,11 @@ describe('allFaces', () => {
   })

-  describeWithNets('no memory leaks', { withAllFaces: true }, ({ allFaces }) => {
+  describeWithNets('no memory leaks', { withAllFacesSsdMobilenetv1: true }, ({ allFacesSsdMobilenetv1 }) => {

     it('single image element', async () => {
       await expectAllTensorsReleased(async () => {
-        await allFaces(imgEl)
+        await allFacesSsdMobilenetv1(imgEl)
       })
     })
@@ -68,7 +68,7 @@ describe('allFaces', () => {
       await expectAllTensorsReleased(async () => {
         const netInput = (new NetInput([tensor])).managed()
-        await allFaces(netInput)
+        await allFacesSsdMobilenetv1(netInput)
       })
       tensor.dispose()
@@ -78,7 +78,7 @@ describe('allFaces', () => {
       const tensor = tf.tidy(() => tf.fromPixels(imgEl).expandDims()) as tf.Tensor4D
       await expectAllTensorsReleased(async () => {
-        await allFaces(await toNetInput(tensor, true))
+        await allFacesSsdMobilenetv1(await toNetInput(tensor, true))
       })
       tensor.dispose()
...
import * as faceapi from '../../../src';
import { describeWithNets, expectAllTensorsReleased, expectRectClose, expectPointClose, expectMaxDelta } from '../../utils';
import { expectedTinyYolov2SeparableConvBoxes } from './expectedResults';
import { NetInput } from '../../../src/NetInput';
import { toNetInput } from '../../../src';
import * as tf from '@tensorflow/tfjs-core';
import { Point } from '../../../src/Point';
import { SizeType } from '../../../src/tinyYolov2/types';

describe('allFacesTinyYolov2', () => {

  let imgEl: HTMLImageElement
  let facesFaceLandmarkPositions: Point[][]
  let facesFaceDescriptors: number[][]

  beforeAll(async () => {
    const img = await (await fetch('base/test/images/faces.jpg')).blob()
    imgEl = await faceapi.bufferToImage(img)
    facesFaceLandmarkPositions = await (await fetch('base/test/data/facesFaceLandmarkPositions.json')).json()
    facesFaceDescriptors = await (await fetch('base/test/data/facesFaceDescriptorsSsd.json')).json()
  })

  describeWithNets('computes full face descriptions', { withAllFacesTinyYolov2: true }, ({ allFacesTinyYolov2 }) => {

    it('SizeType.LG', async () => {
      const expectedScores = [0.9, 0.9, 0.89, 0.85, 0.85, 0.85]
      const maxBoxDelta = 5
      const maxLandmarkPointsDelta = 10
      const maxDescriptorDelta = 0.06

      const results = await allFacesTinyYolov2(imgEl, { inputSize: SizeType.LG })
      const detectionOrder = [0, 2, 3, 4, 1, 5]

      expect(results.length).toEqual(6)
      results.forEach(({ detection, landmarks, descriptor }, i) => {
        expect(detection.getImageWidth()).toEqual(imgEl.width)
        expect(detection.getImageHeight()).toEqual(imgEl.height)
        expect(detection.getScore()).toBeCloseTo(expectedScores[i], 2)
        expectRectClose(detection.getBox(), expectedTinyYolov2SeparableConvBoxes[i], maxBoxDelta)
        landmarks.getPositions().forEach((pt, j) => expectPointClose(pt, facesFaceLandmarkPositions[detectionOrder[i]][j], maxLandmarkPointsDelta))
        descriptor.forEach((val, j) => expectMaxDelta(val, facesFaceDescriptors[detectionOrder[i]][j], maxDescriptorDelta))
      })
    })

    it('SizeType.MD', async () => {
      const expectedScores = [0.85, 0.85, 0.84, 0.83, 0.8, 0.8]
      const maxBoxDelta = 17
      const maxLandmarkPointsDelta = 16
      const maxDescriptorDelta = 0.05

      const results = await allFacesTinyYolov2(imgEl, { inputSize: SizeType.MD })
      const boxOrder = [5, 1, 4, 3, 2, 0]
      const detectionOrder = [5, 2, 1, 4, 3, 0]

      expect(results.length).toEqual(6)
      results.forEach(({ detection, landmarks, descriptor }, i) => {
        expect(detection.getImageWidth()).toEqual(imgEl.width)
        expect(detection.getImageHeight()).toEqual(imgEl.height)
        expect(detection.getScore()).toBeCloseTo(expectedScores[i], 2)
        expectRectClose(detection.getBox(), expectedTinyYolov2SeparableConvBoxes[boxOrder[i]], maxBoxDelta)
        landmarks.getPositions().forEach((pt, j) => expectPointClose(pt, facesFaceLandmarkPositions[detectionOrder[i]][j], maxLandmarkPointsDelta))
        descriptor.forEach((val, j) => expectMaxDelta(val, facesFaceDescriptors[detectionOrder[i]][j], maxDescriptorDelta))
      })
    })

  })

  describeWithNets('no memory leaks', { withAllFacesTinyYolov2: true }, ({ allFacesTinyYolov2 }) => {

    it('single image element', async () => {
      await expectAllTensorsReleased(async () => {
        await allFacesTinyYolov2(imgEl)
      })
    })

    it('single tf.Tensor3D', async () => {
      const tensor = tf.fromPixels(imgEl)

      await expectAllTensorsReleased(async () => {
        const netInput = (new NetInput([tensor])).managed()
        await allFacesTinyYolov2(netInput)
      })

      tensor.dispose()
    })

    it('single batch size 1 tf.Tensor4Ds', async () => {
      const tensor = tf.tidy(() => tf.fromPixels(imgEl).expandDims()) as tf.Tensor4D

      await expectAllTensorsReleased(async () => {
        await allFacesTinyYolov2(await toNetInput(tensor, true))
      })

      tensor.dispose()
    })

  })
})
\ No newline at end of file
@@ -4,8 +4,8 @@ import { IRect } from '../build/Rect';
 import * as faceapi from '../src/';
 import { NeuralNetwork } from '../src/commons/NeuralNetwork';
 import { IPoint } from '../src/';
-import { allFacesFactory, allFacesMtcnnFactory } from '../src/allFacesFactory';
-import { allFacesMtcnnFunction, allFacesFunction } from '../src/globalApi';
+import { allFacesMtcnnFactory, allFacesSsdMobilenetv1Factory, allFacesTinyYolov2Factory } from '../src/allFacesFactory';
+import { allFacesMtcnnFunction, allFacesSsdMobilenetv1Function, allFacesTinyYolov2, allFacesTinyYolov2Function } from '../src/globalApi';

 export function zeros(length: number): Float32Array {
   return new Float32Array(length)
@@ -59,7 +59,8 @@ export type WithTinyYolov2Options = WithNetOptions & {
 }

 export type InjectNetArgs = {
-  allFaces: allFacesFunction
+  allFacesSsdMobilenetv1: allFacesSsdMobilenetv1Function
+  allFacesTinyYolov2: allFacesTinyYolov2Function
   allFacesMtcnn: allFacesMtcnnFunction
   faceDetectionNet: faceapi.FaceDetectionNet
   faceLandmarkNet: faceapi.FaceLandmarkNet
@@ -70,7 +71,8 @@ export type InjectNetArgs = {

 export type DescribeWithNetsOptions = {
-  withAllFaces?: boolean
+  withAllFacesSsdMobilenetv1?: boolean
+  withAllFacesTinyYolov2?: boolean
   withAllFacesMtcnn?: boolean
   withFaceDetectionNet?: WithNetOptions
   withFaceLandmarkNet?: WithNetOptions
@@ -107,12 +109,14 @@ export function describeWithNets(
   let faceRecognitionNet: faceapi.FaceRecognitionNet = new faceapi.FaceRecognitionNet()
   let mtcnn: faceapi.Mtcnn = new faceapi.Mtcnn()
   let tinyYolov2: faceapi.TinyYolov2 = new faceapi.TinyYolov2(options.withTinyYolov2 && options.withTinyYolov2.withSeparableConv)
-  let allFaces = allFacesFactory(faceDetectionNet, faceLandmarkNet, faceRecognitionNet)
+  let allFacesSsdMobilenetv1 = allFacesSsdMobilenetv1Factory(faceDetectionNet, faceLandmarkNet, faceRecognitionNet)
+  let allFacesTinyYolov2 = allFacesTinyYolov2Factory(tinyYolov2, faceLandmarkNet, faceRecognitionNet)
   let allFacesMtcnn = allFacesMtcnnFactory(mtcnn, faceRecognitionNet)

   beforeAll(async () => {
     const {
-      withAllFaces,
+      withAllFacesSsdMobilenetv1,
+      withAllFacesTinyYolov2,
       withAllFacesMtcnn,
       withFaceDetectionNet,
       withFaceLandmarkNet,
@@ -121,21 +125,21 @@ export function describeWithNets(
       withTinyYolov2
     } = options

-    if (withFaceDetectionNet || withAllFaces) {
+    if (withFaceDetectionNet || withAllFacesSsdMobilenetv1) {
       await initNet<faceapi.FaceDetectionNet>(
         faceDetectionNet,
         !!withFaceDetectionNet && !withFaceDetectionNet.quantized && 'ssd_mobilenetv1_model.weights'
       )
     }

-    if (withFaceLandmarkNet || withAllFaces) {
+    if (withFaceLandmarkNet || withAllFacesSsdMobilenetv1 || withAllFacesTinyYolov2) {
       await initNet<faceapi.FaceLandmarkNet>(
         faceLandmarkNet,
         !!withFaceLandmarkNet && !withFaceLandmarkNet.quantized && 'face_landmark_68_model.weights'
       )
     }

-    if (withFaceRecognitionNet || withAllFaces || withAllFacesMtcnn) {
+    if (withFaceRecognitionNet || withAllFacesSsdMobilenetv1 || withAllFacesMtcnn || withAllFacesTinyYolov2) {
       await initNet<faceapi.FaceRecognitionNet>(
         faceRecognitionNet,
         // TODO: figure out why quantized weights results in NaNs in testcases
@@ -150,11 +154,11 @@ export function describeWithNets(
       )
     }

-    if (withTinyYolov2) {
+    if (withTinyYolov2 || withAllFacesTinyYolov2) {
       await initNet<faceapi.TinyYolov2>(
         tinyYolov2,
         !!withTinyYolov2 && !withTinyYolov2.quantized && 'tiny_yolov2_model.weights',
-        withTinyYolov2.withSeparableConv === false
+        withTinyYolov2 && withTinyYolov2.withSeparableConv === false
       )
     }
   })
@@ -167,7 +171,16 @@ export function describeWithNets(
     tinyYolov2 && tinyYolov2.dispose()
   })

-  specDefinitions({ allFaces, allFacesMtcnn, faceDetectionNet, faceLandmarkNet, faceRecognitionNet, mtcnn, tinyYolov2 })
+  specDefinitions({
+    allFacesSsdMobilenetv1,
+    allFacesTinyYolov2,
+    allFacesMtcnn,
+    faceDetectionNet,
+    faceLandmarkNet,
+    faceRecognitionNet,
+    mtcnn,
+    tinyYolov2
+  })
   })
 }