Commit 5061313f by vincent

testcases for face expression recognition

parent 622a49af
@@ -42,7 +42,11 @@ export class FaceExpressionNet extends FaceProcessor<FaceFeatureExtractorParams>
  public async predictExpressions(input: TNetInput) {
    const netInput = await toNetInput(input)
    const out = await this.forwardInput(netInput)
-   const probabilitesByBatch = await Promise.all(tf.unstack(out).map(t => t.data()))
+   const probabilitesByBatch = await Promise.all(tf.unstack(out).map(async t => {
+     const data = await t.data()
+     t.dispose()
+     return data
+   }))
    out.dispose()

    const predictionsByBatch = probabilitesByBatch
@@ -53,11 +57,6 @@ export class FaceExpressionNet extends FaceProcessor<FaceFeatureExtractorParams>
      : predictionsByBatch[0]
  }

-  public dispose(throwOnRedispose: boolean = true) {
-    this.faceFeatureExtractor.dispose(throwOnRedispose)
-    super.dispose(throwOnRedispose)
-  }
-
  protected getDefaultModelName(): string {
    return 'face_expression_model'
  }
...
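The rewritten map callback reads each unstacked tensor's data and frees it immediately; otherwise every tensor produced by tf.unstack would stay allocated after prediction. A minimal sketch of the same dispose-after-read pattern outside the net (hypothetical helper, plain tfjs-core only):

import * as tf from '@tensorflow/tfjs-core'

// Hypothetical helper illustrating the pattern used above: unstack a batched
// output tensor, read each row onto the CPU, and dispose it right away.
async function readBatchProbabilities(out: tf.Tensor2D): Promise<Float32Array[]> {
  const rows = tf.unstack(out) // one newly allocated 1D tensor per batch item
  const data = await Promise.all(rows.map(async t => {
    const values = await t.data() as Float32Array
    t.dispose() // release the unstacked tensor as soon as its values are read
    return values
  }))
  out.dispose() // the batched output itself is no longer needed either
  return data
}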
@@ -52,10 +52,6 @@ export class DetectAllFaceLandmarksTask<
    )
  }

-  withFaceExpressions(): PredictAllFaceExpressionsTask<WithFaceLandmarks<TSource>> {
-    return new PredictAllFaceExpressionsTask<WithFaceLandmarks<TSource>>(this, this.input)
-  }
-
  withFaceDescriptors(): ComputeAllFaceDescriptorsTask<WithFaceLandmarks<TSource>> {
    return new ComputeAllFaceDescriptorsTask<WithFaceLandmarks<TSource>>(this, this.input)
  }
@@ -84,10 +80,6 @@ export class DetectSingleFaceLandmarksTask<
    return extendWithFaceLandmarks<TSource>(parentResult, landmarks)
  }

-  withFaceExpression(): PredictSingleFaceExpressionTask<WithFaceLandmarks<TSource>> {
-    return new PredictSingleFaceExpressionTask<WithFaceLandmarks<TSource>>(this, this.input)
-  }
-
  withFaceDescriptor(): ComputeSingleFaceDescriptorTask<WithFaceLandmarks<TSource>> {
    return new ComputeSingleFaceDescriptorTask<WithFaceLandmarks<TSource>>(this, this.input)
  }
...
@@ -102,7 +102,7 @@ export class DetectSingleFaceTask extends DetectFacesTaskBase<FaceDetection | un
    )
  }

-  withFaceExpression(): PredictSingleFaceExpressionTask<WithFaceDetection<{}>> {
+  withFaceExpressions(): PredictSingleFaceExpressionTask<WithFaceDetection<{}>> {
    return new PredictSingleFaceExpressionTask<WithFaceDetection<{}>>(
      this.runAndExtendWithFaceDetection(),
      this.input
...
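With the rename, the single-face task exposes withFaceExpressions() (plural), matching the all-faces chain used in the new tests. A hedged usage sketch, assuming an HTMLImageElement input and the faceapi namespace imported the way the test files do (an app would import the published package instead):

import * as faceapi from '../src'

// Illustrative only: detect one face and log its expression probabilities.
async function logExpressions(input: HTMLImageElement) {
  const result = await faceapi
    .detectSingleFace(input, new faceapi.TinyFaceDetectorOptions({ inputSize: 416 }))
    .withFaceExpressions() // renamed from withFaceExpression()
  if (result) {
    // at this commit, result.expressions is an array of { expression, probability } entries
    console.log(result.expressions)
  }
}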
import * as tf from '@tensorflow/tfjs-core';

import { createCanvasFromMedia, NetInput, toNetInput } from '../../../src';
import { FaceExpressionPrediction } from '../../../src/faceExpressionNet/types';
import { loadImage } from '../../env';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';

describe('faceExpressionNet', () => {

  let imgElAngry: HTMLImageElement
  let imgElSurprised: HTMLImageElement

  beforeAll(async () => {
    imgElAngry = await loadImage('test/images/angry_cropped.jpg')
    imgElSurprised = await loadImage('test/images/surprised_cropped.jpg')
  })

  describeWithNets('quantized weights', { withFaceExpressionNet: { quantized: true } }, ({ faceExpressionNet }) => {

    it('recognizes facial expressions', async () => {
      const result = await faceExpressionNet.predictExpressions(imgElAngry) as FaceExpressionPrediction[]
      expect(Array.isArray(result)).toBe(true)
      expect(result.length).toEqual(7)

      const angry = result.find(res => res.expression === 'angry') as FaceExpressionPrediction
      expect(angry).not.toBeUndefined()
      expect(angry.probability).toBeGreaterThan(0.95)
    })

  })

  describeWithNets('batch inputs', { withFaceExpressionNet: { quantized: true } }, ({ faceExpressionNet }) => {

    it('recognizes facial expressions for batch of image elements', async () => {
      const inputs = [imgElAngry, imgElSurprised]
      const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressionPrediction[][]
      expect(Array.isArray(results)).toBe(true)
      expect(results.length).toEqual(2)

      const [resultAngry, resultSurprised] = results
      expect(Array.isArray(resultAngry)).toBe(true)
      expect(resultAngry.length).toEqual(7)
      expect(Array.isArray(resultSurprised)).toBe(true)
      expect(resultSurprised.length).toEqual(7)

      const angry = resultAngry.find(res => res.expression === 'angry') as FaceExpressionPrediction
      const surprised = resultSurprised.find(res => res.expression === 'surprised') as FaceExpressionPrediction
      expect(angry).not.toBeUndefined()
      expect(angry.probability).toBeGreaterThan(0.95)
      expect(surprised).not.toBeUndefined()
      expect(surprised.probability).toBeGreaterThan(0.95)
    })

    it('recognizes facial expressions for batch of tf.Tensor3D', async () => {
      const inputs = [imgElAngry, imgElSurprised].map(el => tf.fromPixels(createCanvasFromMedia(el)))
      const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressionPrediction[][]
      expect(Array.isArray(results)).toBe(true)
      expect(results.length).toEqual(2)

      const [resultAngry, resultSurprised] = results
      expect(Array.isArray(resultAngry)).toBe(true)
      expect(resultAngry.length).toEqual(7)
      expect(Array.isArray(resultSurprised)).toBe(true)
      expect(resultSurprised.length).toEqual(7)

      const angry = resultAngry.find(res => res.expression === 'angry') as FaceExpressionPrediction
      const surprised = resultSurprised.find(res => res.expression === 'surprised') as FaceExpressionPrediction
      expect(angry).not.toBeUndefined()
      expect(angry.probability).toBeGreaterThan(0.95)
      expect(surprised).not.toBeUndefined()
      expect(surprised.probability).toBeGreaterThan(0.95)
    })

    it('recognizes facial expressions for batch of mixed inputs', async () => {
      const inputs = [imgElAngry, tf.fromPixels(createCanvasFromMedia(imgElSurprised))]
      const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressionPrediction[][]
      expect(Array.isArray(results)).toBe(true)
      expect(results.length).toEqual(2)

      const [resultAngry, resultSurprised] = results
      expect(Array.isArray(resultAngry)).toBe(true)
      expect(resultAngry.length).toEqual(7)
      expect(Array.isArray(resultSurprised)).toBe(true)
      expect(resultSurprised.length).toEqual(7)

      const angry = resultAngry.find(res => res.expression === 'angry') as FaceExpressionPrediction
      const surprised = resultSurprised.find(res => res.expression === 'surprised') as FaceExpressionPrediction
      expect(angry).not.toBeUndefined()
      expect(angry.probability).toBeGreaterThan(0.95)
      expect(surprised).not.toBeUndefined()
      expect(surprised.probability).toBeGreaterThan(0.95)
    })

  })

  describeWithNets('no memory leaks', { withFaceExpressionNet: { quantized: true } }, ({ faceExpressionNet }) => {

    describe('forwardInput', () => {

      it('single image element', async () => {
        await expectAllTensorsReleased(async () => {
          const netInput = new NetInput([imgElAngry])
          const outTensor = await faceExpressionNet.forwardInput(netInput)
          outTensor.dispose()
        })
      })

      it('multiple image elements', async () => {
        await expectAllTensorsReleased(async () => {
          const netInput = new NetInput([imgElAngry, imgElAngry])
          const outTensor = await faceExpressionNet.forwardInput(netInput)
          outTensor.dispose()
        })
      })

      it('single tf.Tensor3D', async () => {
        const tensor = tf.fromPixels(createCanvasFromMedia(imgElAngry))

        await expectAllTensorsReleased(async () => {
          const outTensor = await faceExpressionNet.forwardInput(await toNetInput(tensor))
          outTensor.dispose()
        })

        tensor.dispose()
      })

      it('multiple tf.Tensor3Ds', async () => {
        const tensors = [imgElAngry, imgElAngry, imgElAngry].map(el => tf.fromPixels(createCanvasFromMedia(el)))

        await expectAllTensorsReleased(async () => {
          const outTensor = await faceExpressionNet.forwardInput(await toNetInput(tensors))
          outTensor.dispose()
        })

        tensors.forEach(t => t.dispose())
      })

      it('single batch size 1 tf.Tensor4Ds', async () => {
        const tensor = tf.tidy(() => tf.fromPixels(createCanvasFromMedia(imgElAngry)).expandDims()) as tf.Tensor4D

        await expectAllTensorsReleased(async () => {
          const outTensor = await faceExpressionNet.forwardInput(await toNetInput(tensor))
          outTensor.dispose()
        })

        tensor.dispose()
      })

      it('multiple batch size 1 tf.Tensor4Ds', async () => {
        const tensors = [imgElAngry, imgElAngry, imgElAngry]
          .map(el => tf.tidy(() => tf.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]

        await expectAllTensorsReleased(async () => {
          const outTensor = await faceExpressionNet.forwardInput(await toNetInput(tensors))
          outTensor.dispose()
        })

        tensors.forEach(t => t.dispose())
      })

    })

    describe('predictExpressions', () => {

      it('single image element', async () => {
        await expectAllTensorsReleased(async () => {
          await faceExpressionNet.predictExpressions(imgElAngry)
        })
      })

      it('multiple image elements', async () => {
        await expectAllTensorsReleased(async () => {
          await faceExpressionNet.predictExpressions([imgElAngry, imgElAngry, imgElAngry])
        })
      })

      it('single tf.Tensor3D', async () => {
        const tensor = tf.fromPixels(createCanvasFromMedia(imgElAngry))

        await expectAllTensorsReleased(async () => {
          await faceExpressionNet.predictExpressions(tensor)
        })

        tensor.dispose()
      })

      it('multiple tf.Tensor3Ds', async () => {
        const tensors = [imgElAngry, imgElAngry, imgElAngry].map(el => tf.fromPixels(createCanvasFromMedia(el)))

        await expectAllTensorsReleased(async () => {
          await faceExpressionNet.predictExpressions(tensors)
        })

        tensors.forEach(t => t.dispose())
      })

      it('single batch size 1 tf.Tensor4Ds', async () => {
        const tensor = tf.tidy(() => tf.fromPixels(createCanvasFromMedia(imgElAngry)).expandDims()) as tf.Tensor4D

        await expectAllTensorsReleased(async () => {
          await faceExpressionNet.predictExpressions(tensor)
        })

        tensor.dispose()
      })

      it('multiple batch size 1 tf.Tensor4Ds', async () => {
        const tensors = [imgElAngry, imgElAngry, imgElAngry]
          .map(el => tf.tidy(() => tf.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]

        await expectAllTensorsReleased(async () => {
          await faceExpressionNet.predictExpressions(tensors)
        })

        tensors.forEach(t => t.dispose())
      })

    })

  })

})
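The "no memory leaks" specs lean on the expectAllTensorsReleased helper from the test utils. A hedged sketch of how such a check can be built on tf.memory() (a hypothetical stand-in; the real helper's implementation may differ):

import * as tf from '@tensorflow/tfjs-core'

// Hypothetical stand-in for an expectAllTensorsReleased-style helper:
// run a function and assert that the tfjs tensor count did not grow.
async function assertNoTensorLeak(fn: () => Promise<void>): Promise<void> {
  const numTensorsBefore = tf.memory().numTensors
  await fn()
  const numTensorsAfter = tf.memory().numTensors
  if (numTensorsAfter !== numTensorsBefore) {
    throw new Error(`leaked ${numTensorsAfter - numTensorsBefore} tensors`)
  }
}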
@@ -6,104 +6,272 @@ import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectedTinyFaceDetectorBoxes } from './expectedBoxes';
import { loadImage } from '../../env';
import { FaceExpressionPrediction } from '../../../src/faceExpressionNet/types';
import { WithFaceExpressions } from '../../../src/factories/WithFaceExpressions';

function expectFaceExpressions(results: WithFaceExpressions<{}>[]) {
  results.forEach((result, i) => {
    const happy = result.expressions.find(res => res.expression === 'happy') as FaceExpressionPrediction
    const neutral = result.expressions.find(res => res.expression === 'neutral') as FaceExpressionPrediction

    const happyProb = i === 4 ? 0 : 0.95
    const neutralProb = i === 4 ? 0.4 : 0

    expect(happy).not.toBeUndefined()
    expect(neutral).not.toBeUndefined()
    expect(happy.probability).toBeGreaterThanOrEqual(happyProb)
    expect(neutral.probability).toBeGreaterThanOrEqual(neutralProb)
  })
}

describe('tinyFaceDetector', () => {

  let imgEl: HTMLImageElement
  let expectedFullFaceDescriptions: ExpectedFullFaceDescription[]
  const expectedScores = [0.7, 0.82, 0.93, 0.86, 0.79, 0.84]

  const deltas = {
    maxScoreDelta: 0.05,
    maxBoxDelta: 5,
    maxLandmarksDelta: 10,
    maxDescriptorDelta: 0.2
  }

  beforeAll(async () => {
    imgEl = await loadImage('test/images/faces.jpg')
    expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes)
  })

  describeWithNets('globalApi', { withAllFacesTinyFaceDetector: true, withFaceExpressionNet: { quantized: false } }, () => {

    describe('detectAllFaces', () => {

      it('detectAllFaces', async () => {
        const options = new TinyFaceDetectorOptions({
          inputSize: 416
        })

        const results = await faceapi.detectAllFaces(imgEl, options)

        expect(results.length).toEqual(6)
        expectFaceDetections(results, expectedTinyFaceDetectorBoxes, expectedScores, deltas.maxScoreDelta, deltas.maxBoxDelta)
      })

      it('detectAllFaces.withFaceLandmarks()', async () => {
        const options = new TinyFaceDetectorOptions({
          inputSize: 416
        })

        const results = await faceapi
          .detectAllFaces(imgEl, options)
          .withFaceLandmarks()

        expect(results.length).toEqual(6)
        expectFaceDetectionsWithLandmarks(results, expectedFullFaceDescriptions, expectedScores, deltas)
      })

      it('detectAllFaces.withFaceExpressions()', async () => {
        const options = new TinyFaceDetectorOptions({
          inputSize: 416
        })

        const results = await faceapi
          .detectAllFaces(imgEl, options)
          .withFaceExpressions()

        expect(results.length).toEqual(6)
        expectFaceExpressions(results)
      })

      it('detectAllFaces.withFaceExpressions().withFaceLandmarks()', async () => {
        const options = new TinyFaceDetectorOptions({
          inputSize: 416
        })

        const results = await faceapi
          .detectAllFaces(imgEl, options)
          .withFaceExpressions()
          .withFaceLandmarks()

        expect(results.length).toEqual(6)
        expectFaceExpressions(results)
        expectFaceDetectionsWithLandmarks(results, expectedFullFaceDescriptions, expectedScores, deltas)
      })

      it('detectAllFaces.withFaceLandmarks().withFaceDescriptors()', async () => {
        const options = new TinyFaceDetectorOptions({
          inputSize: 416
        })

        const results = await faceapi
          .detectAllFaces(imgEl, options)
          .withFaceLandmarks()
          .withFaceDescriptors()

        expect(results.length).toEqual(6)
        expectFullFaceDescriptions(results, expectedFullFaceDescriptions, expectedScores, deltas)
      })

      it('detectAllFaces.withFaceExpressions().withFaceLandmarks().withFaceDescriptors()', async () => {
        const options = new TinyFaceDetectorOptions({
          inputSize: 416
        })

        const results = await faceapi
          .detectAllFaces(imgEl, options)
          .withFaceExpressions()
          .withFaceLandmarks()
          .withFaceDescriptors()

        expect(results.length).toEqual(6)
        expectFaceExpressions(results)
        expectFullFaceDescriptions(results, expectedFullFaceDescriptions, expectedScores, deltas)
      })

    })

    describe('detectSingleFace', () => {

      it('detectSingleFace', async () => {
        const options = new TinyFaceDetectorOptions({
          inputSize: 416
        })

        const result = await faceapi
          .detectSingleFace(imgEl, options)

        expect(!!result).toBeTruthy()
        expectFaceDetections(
          result ? [result] : [],
          [expectedTinyFaceDetectorBoxes[2]],
          [expectedScores[2]],
          deltas.maxScoreDelta,
          deltas.maxBoxDelta
        )
      })

      it('detectSingleFace.withFaceLandmarks()', async () => {
        const options = new TinyFaceDetectorOptions({
          inputSize: 416
        })

        const result = await faceapi
          .detectSingleFace(imgEl, options)
          .withFaceLandmarks()

        expect(!!result).toBeTruthy()
        expectFaceDetectionsWithLandmarks(
          result ? [result] : [],
          [expectedFullFaceDescriptions[2]],
          [expectedScores[2]],
          deltas
        )
      })

      it('detectSingleFace.withFaceExpressions()', async () => {
        const options = new TinyFaceDetectorOptions({
          inputSize: 416
        })

        const result = await faceapi
          .detectSingleFace(imgEl, options)
          .withFaceExpressions()

        expect(!!result).toBeTruthy()
        expectFaceDetections(
          result ? [result.detection] : [],
          [expectedTinyFaceDetectorBoxes[2]],
          [expectedScores[2]],
          deltas.maxScoreDelta,
          deltas.maxBoxDelta
        )
        result && expect((result.expressions.find(res => res.expression === 'happy') as FaceExpressionPrediction).probability)
          .toBeGreaterThanOrEqual(0.95)
      })

      it('detectSingleFace.withFaceExpressions().withFaceLandmarks()', async () => {
        const options = new TinyFaceDetectorOptions({
          inputSize: 416
        })

        const result = await faceapi
          .detectSingleFace(imgEl, options)
          .withFaceExpressions()
          .withFaceLandmarks()

        expect(!!result).toBeTruthy()
        expectFaceDetectionsWithLandmarks(
          result ? [result] : [],
          [expectedFullFaceDescriptions[2]],
          [expectedScores[2]],
          deltas
        )
        result && expect((result.expressions.find(res => res.expression === 'happy') as FaceExpressionPrediction).probability)
          .toBeGreaterThanOrEqual(0.95)
      })

      it('detectSingleFace.withFaceLandmarks().withFaceDescriptor()', async () => {
        const options = new TinyFaceDetectorOptions({
          inputSize: 416
        })

        const result = await faceapi
          .detectSingleFace(imgEl, options)
          .withFaceLandmarks()
          .withFaceDescriptor()

        expect(!!result).toBeTruthy()
        expectFullFaceDescriptions(
          result ? [result] : [],
          [expectedFullFaceDescriptions[2]],
          [expectedScores[2]],
          deltas
        )
      })

      it('detectSingleFace.withFaceExpressions().withFaceLandmarks().withFaceDescriptor()', async () => {
        const options = new TinyFaceDetectorOptions({
          inputSize: 416
        })

        const result = await faceapi
          .detectSingleFace(imgEl, options)
          .withFaceExpressions()
          .withFaceLandmarks()
          .withFaceDescriptor()

        expect(!!result).toBeTruthy()
        expectFullFaceDescriptions(
          result ? [result] : [],
          [expectedFullFaceDescriptions[2]],
          [expectedScores[2]],
          deltas
        )
        result && expect((result.expressions.find(res => res.expression === 'happy') as FaceExpressionPrediction).probability)
          .toBeGreaterThanOrEqual(0.95)
      })

    })

    describe('no memory leaks', () => {

      it('detectAllFaces', async () => {
        await expectAllTensorsReleased(async () => {
          await faceapi
            .detectAllFaces(imgEl, new TinyFaceDetectorOptions())
            .withFaceLandmarks()
            .withFaceDescriptors()
        })
      })

      it('detectSingleFace', async () => {
        await expectAllTensorsReleased(async () => {
          await faceapi
            .detectSingleFace(imgEl, new TinyFaceDetectorOptions())
            .withFaceLandmarks()
            .withFaceDescriptor()
        })
      })

    })
  })
...
@@ -4,6 +4,7 @@ import * as faceapi from '../src';
import { FaceRecognitionNet, IPoint, IRect, Mtcnn, TinyYolov2 } from '../src/';
import { FaceDetection } from '../src/classes/FaceDetection';
import { FaceLandmarks } from '../src/classes/FaceLandmarks';
+import { FaceExpressionNet } from '../src/faceExpressionNet/FaceExpressionNet';
import { FaceLandmark68Net } from '../src/faceLandmarkNet/FaceLandmark68Net';
import { FaceLandmark68TinyNet } from '../src/faceLandmarkNet/FaceLandmark68TinyNet';
import { SsdMobilenetv1 } from '../src/ssdMobilenetv1/SsdMobilenetv1';
@@ -112,6 +113,7 @@ export type InjectNetArgs = {
  faceLandmark68TinyNet: FaceLandmark68TinyNet
  faceRecognitionNet: FaceRecognitionNet
  mtcnn: Mtcnn
+  faceExpressionNet: FaceExpressionNet
  tinyYolov2: TinyYolov2
}
@@ -126,6 +128,7 @@ export type DescribeWithNetsOptions = {
  withFaceLandmark68TinyNet?: WithNetOptions
  withFaceRecognitionNet?: WithNetOptions
  withMtcnn?: WithNetOptions
+  withFaceExpressionNet?: WithNetOptions
  withTinyYolov2?: WithTinyYolov2Options
}
@@ -143,6 +146,7 @@ export function describeWithNets(
    faceLandmark68TinyNet,
    faceRecognitionNet,
    mtcnn,
+   faceExpressionNet,
    tinyYolov2
  } = faceapi.nets
@@ -158,6 +162,7 @@ export function describeWithNets(
    withFaceLandmark68TinyNet,
    withFaceRecognitionNet,
    withMtcnn,
+   withFaceExpressionNet,
    withTinyYolov2
  } = options
@@ -203,6 +208,13 @@ export function describeWithNets(
      )
    }

+   if (withFaceExpressionNet) {
+     await initNet<FaceExpressionNet>(
+       faceExpressionNet,
+       !!withFaceExpressionNet && !withFaceExpressionNet.quantized && 'face_expression_model.weights'
+     )
+   }
+
    if (withTinyYolov2 || withAllFacesTinyYolov2) {
      await initNet<TinyYolov2>(
        tinyYolov2,
@@ -210,6 +222,8 @@ export function describeWithNets(
        true
      )
    }
  })

  afterAll(() => {
@@ -219,6 +233,7 @@ export function describeWithNets(
    mtcnn.isLoaded && mtcnn.dispose()
    tinyFaceDetector.isLoaded && tinyFaceDetector.dispose()
    tinyYolov2.isLoaded && tinyYolov2.dispose()
+   faceExpressionNet.isLoaded && faceExpressionNet.dispose()
  })

  specDefinitions({
@@ -228,6 +243,7 @@ export function describeWithNets(
    faceLandmark68TinyNet,
    faceRecognitionNet,
    mtcnn,
+   faceExpressionNet,
    tinyYolov2
  })
})
...
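With this wiring in place, a spec opts into the expression net through describeWithNets the same way the new faceExpressionNet tests above do. A minimal illustrative example (the spec body itself is hypothetical):

// Example spec requesting the expression net; the injected instance is loaded
// by the beforeAll hook added above and disposed in afterAll.
describeWithNets('my expression spec', { withFaceExpressionNet: { quantized: true } }, ({ faceExpressionNet }) => {
  it('loads the model', () => {
    expect(faceExpressionNet.isLoaded).toBe(true)
  })
})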