Commit 5061313f by vincent

test cases for face expression recognition

parent 622a49af
...@@ -42,7 +42,11 @@ export class FaceExpressionNet extends FaceProcessor<FaceFeatureExtractorParams> ...@@ -42,7 +42,11 @@ export class FaceExpressionNet extends FaceProcessor<FaceFeatureExtractorParams>
public async predictExpressions(input: TNetInput) { public async predictExpressions(input: TNetInput) {
const netInput = await toNetInput(input) const netInput = await toNetInput(input)
const out = await this.forwardInput(netInput) const out = await this.forwardInput(netInput)
const probabilitesByBatch = await Promise.all(tf.unstack(out).map(t => t.data())) const probabilitesByBatch = await Promise.all(tf.unstack(out).map(async t => {
const data = await t.data()
t.dispose()
return data
}))
out.dispose() out.dispose()
const predictionsByBatch = probabilitesByBatch const predictionsByBatch = probabilitesByBatch
...@@ -53,11 +57,6 @@ export class FaceExpressionNet extends FaceProcessor<FaceFeatureExtractorParams> ...@@ -53,11 +57,6 @@ export class FaceExpressionNet extends FaceProcessor<FaceFeatureExtractorParams>
: predictionsByBatch[0] : predictionsByBatch[0]
} }
// Releases all tensors held by this net. The shared feature extractor is
// disposed first, then the processor's own weights via super.dispose().
// `throwOnRedispose` is forwarded so double-dispose behaves consistently
// across both nets.
public dispose(throwOnRedispose: boolean = true) {
this.faceFeatureExtractor.dispose(throwOnRedispose)
super.dispose(throwOnRedispose)
}
protected getDefaultModelName(): string { protected getDefaultModelName(): string {
return 'face_expression_model' return 'face_expression_model'
} }
......
...@@ -52,10 +52,6 @@ export class DetectAllFaceLandmarksTask< ...@@ -52,10 +52,6 @@ export class DetectAllFaceLandmarksTask<
) )
} }
// Chains face expression prediction onto the landmark results of every
// detected face, reusing the same input for the composed task.
withFaceExpressions(): PredictAllFaceExpressionsTask<WithFaceLandmarks<TSource>> {
return new PredictAllFaceExpressionsTask<WithFaceLandmarks<TSource>>(this, this.input)
}
withFaceDescriptors(): ComputeAllFaceDescriptorsTask<WithFaceLandmarks<TSource>> { withFaceDescriptors(): ComputeAllFaceDescriptorsTask<WithFaceLandmarks<TSource>> {
return new ComputeAllFaceDescriptorsTask<WithFaceLandmarks<TSource>>(this, this.input) return new ComputeAllFaceDescriptorsTask<WithFaceLandmarks<TSource>>(this, this.input)
} }
...@@ -84,10 +80,6 @@ export class DetectSingleFaceLandmarksTask< ...@@ -84,10 +80,6 @@ export class DetectSingleFaceLandmarksTask<
return extendWithFaceLandmarks<TSource>(parentResult, landmarks) return extendWithFaceLandmarks<TSource>(parentResult, landmarks)
} }
// Chains face expression prediction onto the landmark result of the single
// detected face, reusing the same input for the composed task.
withFaceExpression(): PredictSingleFaceExpressionTask<WithFaceLandmarks<TSource>> {
return new PredictSingleFaceExpressionTask<WithFaceLandmarks<TSource>>(this, this.input)
}
withFaceDescriptor(): ComputeSingleFaceDescriptorTask<WithFaceLandmarks<TSource>> { withFaceDescriptor(): ComputeSingleFaceDescriptorTask<WithFaceLandmarks<TSource>> {
return new ComputeSingleFaceDescriptorTask<WithFaceLandmarks<TSource>>(this, this.input) return new ComputeSingleFaceDescriptorTask<WithFaceLandmarks<TSource>>(this, this.input)
} }
......
...@@ -102,7 +102,7 @@ export class DetectSingleFaceTask extends DetectFacesTaskBase<FaceDetection | un ...@@ -102,7 +102,7 @@ export class DetectSingleFaceTask extends DetectFacesTaskBase<FaceDetection | un
) )
} }
withFaceExpression(): PredictSingleFaceExpressionTask<WithFaceDetection<{}>> { withFaceExpressions(): PredictSingleFaceExpressionTask<WithFaceDetection<{}>> {
return new PredictSingleFaceExpressionTask<WithFaceDetection<{}>>( return new PredictSingleFaceExpressionTask<WithFaceDetection<{}>>(
this.runAndExtendWithFaceDetection(), this.runAndExtendWithFaceDetection(),
this.input this.input
......
import * as tf from '@tensorflow/tfjs-core';
import { createCanvasFromMedia, NetInput, toNetInput } from '../../../src';
import { FaceExpressionPrediction } from '../../../src/faceExpressionNet/types';
import { loadImage } from '../../env';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
describe('faceExpressionNet', () => {
let imgElAngry: HTMLImageElement
let imgElSurprised: HTMLImageElement
beforeAll(async () => {
imgElAngry = await loadImage('test/images/angry_cropped.jpg')
imgElSurprised = await loadImage('test/images/surprised_cropped.jpg')
})
describeWithNets('quantized weights', { withFaceExpressionNet: { quantized: true } }, ({ faceExpressionNet }) => {
it('recognizes facial expressions', async () => {
const result = await faceExpressionNet.predictExpressions(imgElAngry) as FaceExpressionPrediction[]
expect(Array.isArray(result)).toBe(true)
expect(result.length).toEqual(7)
const angry = result.find(res => res.expression === 'angry') as FaceExpressionPrediction
expect(angry).not.toBeUndefined()
expect(angry.probability).toBeGreaterThan(0.95)
})
})
describeWithNets('batch inputs', { withFaceExpressionNet: { quantized: true } }, ({ faceExpressionNet }) => {
it('recognizes facial expressions for batch of image elements', async () => {
const inputs = [imgElAngry, imgElSurprised]
const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressionPrediction[][]
expect(Array.isArray(results)).toBe(true)
expect(results.length).toEqual(2)
const [resultAngry, resultSurprised] = results
expect(Array.isArray(resultAngry)).toBe(true)
expect(resultAngry.length).toEqual(7)
expect(Array.isArray(resultSurprised)).toBe(true)
expect(resultSurprised.length).toEqual(7)
const angry = resultAngry.find(res => res.expression === 'angry') as FaceExpressionPrediction
const surprised = resultSurprised.find(res => res.expression === 'surprised') as FaceExpressionPrediction
expect(angry).not.toBeUndefined()
expect(angry.probability).toBeGreaterThan(0.95)
expect(surprised).not.toBeUndefined()
expect(surprised.probability).toBeGreaterThan(0.95)
})
it('computes face landmarks for batch of tf.Tensor3D', async () => {
const inputs = [imgElAngry, imgElSurprised].map(el => tf.fromPixels(createCanvasFromMedia(el)))
const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressionPrediction[][]
expect(Array.isArray(results)).toBe(true)
expect(results.length).toEqual(2)
const [resultAngry, resultSurprised] = results
expect(Array.isArray(resultAngry)).toBe(true)
expect(resultAngry.length).toEqual(7)
expect(Array.isArray(resultSurprised)).toBe(true)
expect(resultSurprised.length).toEqual(7)
const angry = resultAngry.find(res => res.expression === 'angry') as FaceExpressionPrediction
const surprised = resultSurprised.find(res => res.expression === 'surprised') as FaceExpressionPrediction
expect(angry).not.toBeUndefined()
expect(angry.probability).toBeGreaterThan(0.95)
expect(surprised).not.toBeUndefined()
expect(surprised.probability).toBeGreaterThan(0.95)
})
it('computes face landmarks for batch of mixed inputs', async () => {
const inputs = [imgElAngry, tf.fromPixels(createCanvasFromMedia(imgElSurprised))]
const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressionPrediction[][]
expect(Array.isArray(results)).toBe(true)
expect(results.length).toEqual(2)
const [resultAngry, resultSurprised] = results
expect(Array.isArray(resultAngry)).toBe(true)
expect(resultAngry.length).toEqual(7)
expect(Array.isArray(resultSurprised)).toBe(true)
expect(resultSurprised.length).toEqual(7)
const angry = resultAngry.find(res => res.expression === 'angry') as FaceExpressionPrediction
const surprised = resultSurprised.find(res => res.expression === 'surprised') as FaceExpressionPrediction
expect(angry).not.toBeUndefined()
expect(angry.probability).toBeGreaterThan(0.95)
expect(surprised).not.toBeUndefined()
expect(surprised.probability).toBeGreaterThan(0.95)
})
})
describeWithNets('no memory leaks', { withFaceExpressionNet: { quantized: true } }, ({ faceExpressionNet }) => {
describe('forwardInput', () => {
it('single image element', async () => {
await expectAllTensorsReleased(async () => {
const netInput = new NetInput([imgElAngry])
const outTensor = await faceExpressionNet.forwardInput(netInput)
outTensor.dispose()
})
})
it('multiple image elements', async () => {
await expectAllTensorsReleased(async () => {
const netInput = new NetInput([imgElAngry, imgElAngry])
const outTensor = await faceExpressionNet.forwardInput(netInput)
outTensor.dispose()
})
})
it('single tf.Tensor3D', async () => {
const tensor = tf.fromPixels(createCanvasFromMedia(imgElAngry))
await expectAllTensorsReleased(async () => {
const outTensor = await faceExpressionNet.forwardInput(await toNetInput(tensor))
outTensor.dispose()
})
tensor.dispose()
})
it('multiple tf.Tensor3Ds', async () => {
const tensors = [imgElAngry, imgElAngry, imgElAngry].map(el => tf.fromPixels(createCanvasFromMedia(el)))
await expectAllTensorsReleased(async () => {
const outTensor = await faceExpressionNet.forwardInput(await toNetInput(tensors))
outTensor.dispose()
})
tensors.forEach(t => t.dispose())
})
it('single batch size 1 tf.Tensor4Ds', async () => {
const tensor = tf.tidy(() => tf.fromPixels(createCanvasFromMedia(imgElAngry)).expandDims()) as tf.Tensor4D
await expectAllTensorsReleased(async () => {
const outTensor = await faceExpressionNet.forwardInput(await toNetInput(tensor))
outTensor.dispose()
})
tensor.dispose()
})
it('multiple batch size 1 tf.Tensor4Ds', async () => {
const tensors = [imgElAngry, imgElAngry, imgElAngry]
.map(el => tf.tidy(() => tf.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
await expectAllTensorsReleased(async () => {
const outTensor = await faceExpressionNet.forwardInput(await toNetInput(tensors))
outTensor.dispose()
})
tensors.forEach(t => t.dispose())
})
})
describe('predictExpressions', () => {
it('single image element', async () => {
await expectAllTensorsReleased(async () => {
await faceExpressionNet.predictExpressions(imgElAngry)
})
})
it('multiple image elements', async () => {
await expectAllTensorsReleased(async () => {
await faceExpressionNet.predictExpressions([imgElAngry, imgElAngry, imgElAngry])
})
})
it('single tf.Tensor3D', async () => {
const tensor = tf.fromPixels(createCanvasFromMedia(imgElAngry))
await expectAllTensorsReleased(async () => {
await faceExpressionNet.predictExpressions(tensor)
})
tensor.dispose()
})
it('multiple tf.Tensor3Ds', async () => {
const tensors = [imgElAngry, imgElAngry, imgElAngry].map(el => tf.fromPixels(createCanvasFromMedia(el)))
await expectAllTensorsReleased(async () => {
await faceExpressionNet.predictExpressions(tensors)
})
tensors.forEach(t => t.dispose())
})
it('single batch size 1 tf.Tensor4Ds', async () => {
const tensor = tf.tidy(() => tf.fromPixels(createCanvasFromMedia(imgElAngry)).expandDims()) as tf.Tensor4D
await expectAllTensorsReleased(async () => {
await faceExpressionNet.predictExpressions(tensor)
})
tensor.dispose()
})
it('multiple batch size 1 tf.Tensor4Ds', async () => {
const tensors = [imgElAngry, imgElAngry, imgElAngry]
.map(el => tf.tidy(() => tf.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
await expectAllTensorsReleased(async () => {
await faceExpressionNet.predictExpressions(tensors)
})
tensors.forEach(t => t.dispose())
})
})
})
})
...@@ -4,6 +4,7 @@ import * as faceapi from '../src'; ...@@ -4,6 +4,7 @@ import * as faceapi from '../src';
import { FaceRecognitionNet, IPoint, IRect, Mtcnn, TinyYolov2 } from '../src/'; import { FaceRecognitionNet, IPoint, IRect, Mtcnn, TinyYolov2 } from '../src/';
import { FaceDetection } from '../src/classes/FaceDetection'; import { FaceDetection } from '../src/classes/FaceDetection';
import { FaceLandmarks } from '../src/classes/FaceLandmarks'; import { FaceLandmarks } from '../src/classes/FaceLandmarks';
import { FaceExpressionNet } from '../src/faceExpressionNet/FaceExpressionNet';
import { FaceLandmark68Net } from '../src/faceLandmarkNet/FaceLandmark68Net'; import { FaceLandmark68Net } from '../src/faceLandmarkNet/FaceLandmark68Net';
import { FaceLandmark68TinyNet } from '../src/faceLandmarkNet/FaceLandmark68TinyNet'; import { FaceLandmark68TinyNet } from '../src/faceLandmarkNet/FaceLandmark68TinyNet';
import { SsdMobilenetv1 } from '../src/ssdMobilenetv1/SsdMobilenetv1'; import { SsdMobilenetv1 } from '../src/ssdMobilenetv1/SsdMobilenetv1';
...@@ -112,6 +113,7 @@ export type InjectNetArgs = { ...@@ -112,6 +113,7 @@ export type InjectNetArgs = {
faceLandmark68TinyNet: FaceLandmark68TinyNet faceLandmark68TinyNet: FaceLandmark68TinyNet
faceRecognitionNet: FaceRecognitionNet faceRecognitionNet: FaceRecognitionNet
mtcnn: Mtcnn mtcnn: Mtcnn
faceExpressionNet: FaceExpressionNet
tinyYolov2: TinyYolov2 tinyYolov2: TinyYolov2
} }
...@@ -126,6 +128,7 @@ export type DescribeWithNetsOptions = { ...@@ -126,6 +128,7 @@ export type DescribeWithNetsOptions = {
withFaceLandmark68TinyNet?: WithNetOptions withFaceLandmark68TinyNet?: WithNetOptions
withFaceRecognitionNet?: WithNetOptions withFaceRecognitionNet?: WithNetOptions
withMtcnn?: WithNetOptions withMtcnn?: WithNetOptions
withFaceExpressionNet?: WithNetOptions
withTinyYolov2?: WithTinyYolov2Options withTinyYolov2?: WithTinyYolov2Options
} }
...@@ -143,6 +146,7 @@ export function describeWithNets( ...@@ -143,6 +146,7 @@ export function describeWithNets(
faceLandmark68TinyNet, faceLandmark68TinyNet,
faceRecognitionNet, faceRecognitionNet,
mtcnn, mtcnn,
faceExpressionNet,
tinyYolov2 tinyYolov2
} = faceapi.nets } = faceapi.nets
...@@ -158,6 +162,7 @@ export function describeWithNets( ...@@ -158,6 +162,7 @@ export function describeWithNets(
withFaceLandmark68TinyNet, withFaceLandmark68TinyNet,
withFaceRecognitionNet, withFaceRecognitionNet,
withMtcnn, withMtcnn,
withFaceExpressionNet,
withTinyYolov2 withTinyYolov2
} = options } = options
...@@ -203,6 +208,13 @@ export function describeWithNets( ...@@ -203,6 +208,13 @@ export function describeWithNets(
) )
} }
if (withFaceExpressionNet) {
  await initNet<FaceExpressionNet>(
    faceExpressionNet,
    // inside this branch withFaceExpressionNet is guaranteed truthy, so the
    // former `!!withFaceExpressionNet &&` guard was redundant; the expression
    // yields the uncompressed weights filename unless quantized weights are
    // requested (falsy otherwise, matching the other initNet call sites)
    !withFaceExpressionNet.quantized && 'face_expression_model.weights'
  )
}
if (withTinyYolov2 || withAllFacesTinyYolov2) { if (withTinyYolov2 || withAllFacesTinyYolov2) {
await initNet<TinyYolov2>( await initNet<TinyYolov2>(
tinyYolov2, tinyYolov2,
...@@ -210,6 +222,8 @@ export function describeWithNets( ...@@ -210,6 +222,8 @@ export function describeWithNets(
true true
) )
} }
}) })
afterAll(() => { afterAll(() => {
...@@ -219,6 +233,7 @@ export function describeWithNets( ...@@ -219,6 +233,7 @@ export function describeWithNets(
mtcnn.isLoaded && mtcnn.dispose() mtcnn.isLoaded && mtcnn.dispose()
tinyFaceDetector.isLoaded && tinyFaceDetector.dispose() tinyFaceDetector.isLoaded && tinyFaceDetector.dispose()
tinyYolov2.isLoaded && tinyYolov2.dispose() tinyYolov2.isLoaded && tinyYolov2.dispose()
faceExpressionNet.isLoaded && faceExpressionNet.dispose()
}) })
specDefinitions({ specDefinitions({
...@@ -228,6 +243,7 @@ export function describeWithNets( ...@@ -228,6 +243,7 @@ export function describeWithNets(
faceLandmark68TinyNet, faceLandmark68TinyNet,
faceRecognitionNet, faceRecognitionNet,
mtcnn, mtcnn,
faceExpressionNet,
tinyYolov2 tinyYolov2
}) })
}) })
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment