Commit 5f37730d by vincent

Make test cases runnable in a Node.js environment

parent c8b3410d
......@@ -3,13 +3,17 @@ language: node_js
node_js:
- "10"
env:
- BACKEND_CPU=true EXCLUDE_UNCOMPRESSED=true
global:
- BACKEND_CPU=true EXCLUDE_UNCOMPRESSED=true
matrix:
- ENV=browser
- ENV=node
addons:
chrome: stable
before_install:
- export DISPLAY=:99.0
- sh -e /etc/init.d/xvfb start
- sleep 3 # give xvfb some time to start
script:
- npm run test-travis
- if [ $ENV == 'browser' ]; then npm run test-browser; fi
- if [ $ENV == 'node' ]; then npm run test-node; fi
- npm run build
\ No newline at end of file
// Jasmine runner configuration for the node test environment.
// When EXCLUDE_UNCOMPRESSED is set (e.g. on CI), specs exercising the large
// uncompressed weight fixtures are filtered out via a negated glob.
const spec_files = process.env.EXCLUDE_UNCOMPRESSED
  ? ['**/*.test.ts', '!**/*.uncompressed.test.ts']
  : ['**/*.test.ts']
module.exports = {
  spec_dir: 'test',
  spec_files,
  // run specs in declaration order for reproducible failures
  random: false
}
\ No newline at end of file
......@@ -22,10 +22,9 @@ let exclude = (
'faceRecognitionNet',
'ssdMobilenetv1',
'tinyFaceDetector',
'mtcnn',
'tinyYolov2'
'mtcnn'
]
: ['tinyYolov2']
: []
)
.filter(ex => ex !== process.env.UUT)
.map(ex => `test/tests/${ex}/*.ts`)
......
import * as tf from '@tensorflow/tfjs-core';
import { fetchNetWeights, NeuralNetwork } from 'tfjs-image-recognition-base';
import { env, fetchImage, fetchJson } from '../src';
// Shared test-environment bootstrap: exposes node-only modules (fs, path,
// node-canvas) when running under Node.js, and configures the karma/browser
// environment otherwise.
export let fs: any, path: any, canvas: any
// model loading and inference can be slow, especially on the CPU backend
jasmine.DEFAULT_TIMEOUT_INTERVAL = 60000
if (env.isNodejs()) {
  // require lazily so browser bundles never reference these node-only modules
  require('@tensorflow/tfjs-node')
  fs = require('fs')
  path = require('path')
  canvas = require('canvas')
  const { Canvas, Image, ImageData } = canvas
  // make the library create/consume node-canvas objects instead of DOM ones
  env.monkeyPatch({ Canvas, Image, ImageData })
} else {
  // karma passes the 'backend_cpu' arg when BACKEND_CPU=true (see .travis.yml)
  if ((window['__karma__'].config.jasmine.args as string[]).some(arg => arg === 'backend_cpu')) {
    tf.setBackend('cpu')
  }
}
/**
 * Loads the weights of the given network: from the local weights directory
 * when running under Node.js, otherwise over HTTP from the karma file server.
 *
 * @param net The network instance to initialize.
 * @param uncompressedFilename Name of an uncompressed weights file to fetch
 *   (browser only); falsy to load the default (quantized) weights.
 * @param isUnusedModel Whether to load from the unused-weights directory
 *   (browser only).
 */
export async function initNet<TNet extends NeuralNetwork<any>>(
  net: TNet,
  uncompressedFilename: string | boolean,
  isUnusedModel: boolean = false
) {
  if (env.isNodejs()) {
    await net.loadFromDisk(path.resolve(__dirname, '../weights'))
    return
  }
  let url: string
  if (uncompressedFilename) {
    url = await fetchNetWeights(`base/weights_uncompressed/${uncompressedFilename}`)
  } else {
    url = isUnusedModel ? 'base/weights_unused' : 'base/weights'
  }
  await net.load(url)
}
/**
 * Resolves a test image: via node-canvas in Node.js, via fetch (with karma's
 * 'base' path prefix) in the browser.
 *
 * @param uri Image path relative to the project root.
 */
export async function loadImage(uri: string): Promise<HTMLImageElement> {
  if (!env.isNodejs()) {
    return fetchImage(`base${uri.startsWith('/') ? '' : '/'}${uri}`)
  }
  return canvas.loadImage(path.resolve(__dirname, '../', uri))
}
/**
 * Resolves a JSON test fixture: read from disk in Node.js, fetched (with
 * karma's 'base' path prefix) in the browser.
 *
 * @param uri JSON file path relative to the project root.
 */
export async function loadJson<T>(uri: string): Promise<T> {
  if (env.isNodejs()) {
    const filePath = path.resolve(__dirname, '../', uri)
    const contents = fs.readFileSync(filePath).toString()
    return JSON.parse(contents)
  }
  return fetchJson<T>(`base${uri.startsWith('/') ? '' : '/'}${uri}`)
}
import { bufferToImage, extractFaceTensors, Rect, tf } from '../../../src';
import { createCanvasFromMedia, extractFaceTensors, Rect, tf } from '../../../src';
import { loadImage } from '../../env';
describe('extractFaceTensors', () => {
let imgTensor: tf.Tensor3D
beforeAll(async () => {
const img = await (await fetch('base/test/images/face1.png')).blob()
imgTensor = tf.fromPixels(await bufferToImage(img))
imgTensor = tf.fromPixels(createCanvasFromMedia(await loadImage('test/images/face1.png')))
})
describe('extracts tensors', () => {
......
import { bufferToImage, createCanvasFromMedia, extractFaces, Rect } from '../../../src';
import { createCanvasFromMedia, env, extractFaces, Rect } from '../../../src';
import { loadImage } from '../../env';
describe('extractFaces', () => {
let imgEl: HTMLImageElement, canvasEl: HTMLCanvasElement
let imgEl: HTMLImageElement, canvasEl: HTMLCanvasElement, Canvas: typeof HTMLCanvasElement
beforeAll(async () => {
const img = await (await fetch('base/test/images/face1.png')).blob()
imgEl = await bufferToImage(img)
imgEl = await loadImage('test/images/face1.png')
canvasEl = createCanvasFromMedia(imgEl)
Canvas = env.getEnv().Canvas
})
describe('extracts canvases', () => {
......@@ -17,7 +18,7 @@ describe('extractFaces', () => {
const canvases = await extractFaces(imgEl, [rect])
expect(canvases.length).toEqual(1)
expect(canvases[0] instanceof HTMLCanvasElement).toBe(true)
expect(canvases[0] instanceof Canvas).toBe(true)
expect(canvases[0].width).toEqual(50)
expect(canvases[0].height).toEqual(60)
})
......@@ -30,10 +31,10 @@ describe('extractFaces', () => {
const canvases = await extractFaces(imgEl, rects)
expect(canvases.length).toEqual(2)
expect(canvases[0] instanceof HTMLCanvasElement).toBe(true)
expect(canvases[0] instanceof Canvas).toBe(true)
expect(canvases[0].width).toEqual(50)
expect(canvases[0].height).toEqual(60)
expect(canvases[1] instanceof HTMLCanvasElement).toBe(true)
expect(canvases[1] instanceof Canvas).toBe(true)
expect(canvases[1].width).toEqual(70)
expect(canvases[1].height).toEqual(80)
})
......@@ -43,7 +44,7 @@ describe('extractFaces', () => {
const canvases = await extractFaces(canvasEl, [rect])
expect(canvases.length).toEqual(1)
expect(canvases[0] instanceof HTMLCanvasElement).toBe(true)
expect(canvases[0] instanceof Canvas).toBe(true)
expect(canvases[0].width).toEqual(50)
expect(canvases[0].height).toEqual(60)
})
......@@ -56,10 +57,10 @@ describe('extractFaces', () => {
const canvases = await extractFaces(canvasEl, rects)
expect(canvases.length).toEqual(2)
expect(canvases[0] instanceof HTMLCanvasElement).toBe(true)
expect(canvases[0] instanceof Canvas).toBe(true)
expect(canvases[0].width).toEqual(50)
expect(canvases[0].height).toEqual(60)
expect(canvases[1] instanceof HTMLCanvasElement).toBe(true)
expect(canvases[1] instanceof Canvas).toBe(true)
expect(canvases[1].width).toEqual(70)
expect(canvases[1].height).toEqual(80)
})
......
......@@ -3,21 +3,22 @@ import * as tf from '@tensorflow/tfjs-core';
import { FaceLandmark68NetBase } from '../../../src/faceLandmarkNet/FaceLandmark68NetBase';
class FakeFaceLandmark68NetBase extends FaceLandmark68NetBase<any> {
public runNet(_: any): any {
}
protected getDefaultModelName(): any {
throw new Error('FakeNeuralNetwork - getDefaultModelName not implemented')
protected getDefaultModelName(): string {
throw new Error('FakeFaceLandmark68NetBase - getDefaultModelName not implemented')
}
protected extractParams(_: any): any {
throw new Error('FakeNeuralNetwork - extractParams not implemented')
throw new Error('FakeFaceLandmark68NetBase - extractParams not implemented')
}
protected extractParamsFromWeigthMap(_: any): any {
throw new Error('FakeNeuralNetwork - extractParamsFromWeigthMap not implemented')
throw new Error('FakeFaceLandmark68NetBase - extractParamsFromWeigthMap not implemented')
}
// Stub override: this fake never runs inference. The error message previously
// said 'extractParamsFromWeigthMap' (copy-paste defect); it must name the
// method that is actually unimplemented.
public runNet(): any {
  throw new Error('FakeFaceLandmark68NetBase - runNet not implemented')
}
}
}
describe('FaceLandmark68NetBase', () => {
......
import * as tf from '@tensorflow/tfjs-core';
import { fetchImage, fetchJson, IDimensions, isTensor3D, NetInput, Point, TMediaElement, toNetInput } from '../../../src';
import { createCanvasFromMedia, IDimensions, isTensor3D, NetInput, Point, TMediaElement, toNetInput } from '../../../src';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { createFaceLandmarkNet } from '../../../src/faceLandmarkNet';
import { FaceLandmark68Net } from '../../../src/faceLandmarkNet/FaceLandmark68Net';
import { loadImage, loadJson } from '../../env';
import { describeWithNets, expectAllTensorsReleased, expectMaxDelta, expectPointClose } from '../../utils';
function getInputDims (input: tf.Tensor | TMediaElement): IDimensions {
......@@ -24,12 +24,12 @@ describe('faceLandmark68Net', () => {
let faceLandmarkPositionsRect: Point[]
beforeAll(async () => {
imgEl1 = await fetchImage('base/test/images/face1.png')
imgEl2 = await fetchImage('base/test/images/face2.png')
imgElRect = await fetchImage('base/test/images/face_rectangular.png')
faceLandmarkPositions1 = await fetchJson<Point[]>('base/test/data/faceLandmarkPositions1.json')
faceLandmarkPositions2 = await fetchJson<Point[]>('base/test/data/faceLandmarkPositions2.json')
faceLandmarkPositionsRect = await fetchJson<Point[]>('base/test/data/faceLandmarkPositionsRect.json')
imgEl1 = await loadImage('test/images/face1.png')
imgEl2 = await loadImage('test/images/face2.png')
imgElRect = await loadImage('test/images/face_rectangular.png')
faceLandmarkPositions1 = await loadJson<Point[]>('test/data/faceLandmarkPositions1.json')
faceLandmarkPositions2 = await loadJson<Point[]>('test/data/faceLandmarkPositions2.json')
faceLandmarkPositionsRect = await loadJson<Point[]>('test/data/faceLandmarkPositionsRect.json')
})
describeWithNets('quantized weights', { withFaceLandmark68Net: { quantized: true } }, ({ faceLandmark68Net }) => {
......@@ -85,14 +85,14 @@ describe('faceLandmark68Net', () => {
expect(result.shift.x).toEqual(0)
expect(result.shift.y).toEqual(0)
result.positions.forEach(({ x, y }, i) => {
expectMaxDelta(x, faceLandmarkPositions[batchIdx][i].x, 2)
expectMaxDelta(y, faceLandmarkPositions[batchIdx][i].y, 2)
expectMaxDelta(x, faceLandmarkPositions[batchIdx][i].x, 5)
expectMaxDelta(y, faceLandmarkPositions[batchIdx][i].y, 5)
})
})
})
it('computes face landmarks for batch of tf.Tensor3D', async () => {
const inputs = [imgEl1, imgEl2, imgElRect].map(el => tf.fromPixels(el))
const inputs = [imgEl1, imgEl2, imgElRect].map(el => tf.fromPixels(createCanvasFromMedia(el)))
const faceLandmarkPositions = [
faceLandmarkPositions1,
......@@ -117,7 +117,7 @@ describe('faceLandmark68Net', () => {
})
it('computes face landmarks for batch of mixed inputs', async () => {
const inputs = [imgEl1, tf.fromPixels(imgEl2), tf.fromPixels(imgElRect)]
const inputs = [imgEl1, tf.fromPixels(createCanvasFromMedia(imgEl2)), tf.fromPixels(createCanvasFromMedia(imgElRect))]
const faceLandmarkPositions = [
faceLandmarkPositions1,
......@@ -145,18 +145,6 @@ describe('faceLandmark68Net', () => {
describeWithNets('no memory leaks', { withFaceLandmark68Net: { quantized: true } }, ({ faceLandmark68Net }) => {
describe('NeuralNetwork, quantized model', () => {
it('disposes all param tensors', async () => {
await expectAllTensorsReleased(async () => {
const net = new FaceLandmark68Net()
await net.load('base/weights')
net.dispose()
})
})
})
describe('forwardInput', () => {
it('single image element', async () => {
......@@ -176,7 +164,7 @@ describe('faceLandmark68Net', () => {
})
it('single tf.Tensor3D', async () => {
const tensor = tf.fromPixels(imgEl1)
const tensor = tf.fromPixels(createCanvasFromMedia(imgEl1))
await expectAllTensorsReleased(async () => {
const netInput = new NetInput([tensor])
......@@ -188,7 +176,7 @@ describe('faceLandmark68Net', () => {
})
it('multiple tf.Tensor3Ds', async () => {
const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.fromPixels(el))
const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.fromPixels(createCanvasFromMedia(el)))
await expectAllTensorsReleased(async () => {
const netInput = new NetInput(tensors)
......@@ -200,7 +188,7 @@ describe('faceLandmark68Net', () => {
})
it('single batch size 1 tf.Tensor4Ds', async () => {
const tensor = tf.tidy(() => tf.fromPixels(imgEl1).expandDims()) as tf.Tensor4D
const tensor = tf.tidy(() => tf.fromPixels(createCanvasFromMedia(imgEl1)).expandDims()) as tf.Tensor4D
await expectAllTensorsReleased(async () => {
const outTensor = await faceLandmark68Net.forwardInput(await toNetInput(tensor))
......@@ -212,7 +200,7 @@ describe('faceLandmark68Net', () => {
it('multiple batch size 1 tf.Tensor4Ds', async () => {
const tensors = [imgEl1, imgEl1, imgEl1]
.map(el => tf.tidy(() => tf.fromPixels(el).expandDims())) as tf.Tensor4D[]
.map(el => tf.tidy(() => tf.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
await expectAllTensorsReleased(async () => {
const outTensor = await faceLandmark68Net.forwardInput(await toNetInput(tensors))
......@@ -239,7 +227,7 @@ describe('faceLandmark68Net', () => {
})
it('single tf.Tensor3D', async () => {
const tensor = tf.fromPixels(imgEl1)
const tensor = tf.fromPixels(createCanvasFromMedia(imgEl1))
await expectAllTensorsReleased(async () => {
await faceLandmark68Net.detectLandmarks(tensor)
......@@ -249,7 +237,7 @@ describe('faceLandmark68Net', () => {
})
it('multiple tf.Tensor3Ds', async () => {
const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.fromPixels(el))
const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.fromPixels(createCanvasFromMedia(el)))
await expectAllTensorsReleased(async () => {
......@@ -260,7 +248,7 @@ describe('faceLandmark68Net', () => {
})
it('single batch size 1 tf.Tensor4Ds', async () => {
const tensor = tf.tidy(() => tf.fromPixels(imgEl1).expandDims()) as tf.Tensor4D
const tensor = tf.tidy(() => tf.fromPixels(createCanvasFromMedia(imgEl1)).expandDims()) as tf.Tensor4D
await expectAllTensorsReleased(async () => {
await faceLandmark68Net.detectLandmarks(tensor)
......@@ -271,7 +259,7 @@ describe('faceLandmark68Net', () => {
it('multiple batch size 1 tf.Tensor4Ds', async () => {
const tensors = [imgEl1, imgEl1, imgEl1]
.map(el => tf.tidy(() => tf.fromPixels(el).expandDims())) as tf.Tensor4D[]
.map(el => tf.tidy(() => tf.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
await expectAllTensorsReleased(async () => {
await faceLandmark68Net.detectLandmarks(tensors)
......
import { fetchImage, fetchJson, Point } from '../../../src';
import { Point } from '../../../src';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { createFaceLandmarkNet } from '../../../src/faceLandmarkNet';
import { describeWithNets, expectAllTensorsReleased, expectPointClose } from '../../utils';
import { loadImage, loadJson } from '../../env';
import { describeWithNets, expectPointClose } from '../../utils';
describe('faceLandmark68Net, uncompressed', () => {
......@@ -11,10 +11,10 @@ describe('faceLandmark68Net, uncompressed', () => {
let faceLandmarkPositionsRect: Point[]
beforeAll(async () => {
imgEl1 = await fetchImage('base/test/images/face1.png')
imgElRect = await fetchImage('base/test/images/face_rectangular.png')
faceLandmarkPositions1 = await fetchJson<Point[]>('base/test/data/faceLandmarkPositions1.json')
faceLandmarkPositionsRect = await fetchJson<Point[]>('base/test/data/faceLandmarkPositionsRect.json')
imgEl1 = await loadImage('test/images/face1.png')
imgElRect = await loadImage('test/images/face_rectangular.png')
faceLandmarkPositions1 = await loadJson<Point[]>('test/data/faceLandmarkPositions1.json')
faceLandmarkPositionsRect = await loadJson<Point[]>('test/data/faceLandmarkPositionsRect.json')
})
describeWithNets('uncompressed weights', { withFaceLandmark68Net: { quantized: false } }, ({ faceLandmark68Net }) => {
......@@ -43,16 +43,7 @@ describe('faceLandmark68Net, uncompressed', () => {
expect(result.shift.y).toEqual(0)
result.positions.forEach((pt, i) => {
const { x, y } = faceLandmarkPositionsRect[i]
expectPointClose(pt, { x, y }, 2)
})
})
it('no memory leaks', async () => {
await expectAllTensorsReleased(async () => {
const res = await fetch('base/weights_uncompressed/face_landmark_68_model.weights')
const weights = new Float32Array(await res.arrayBuffer())
const net = createFaceLandmarkNet(weights)
net.dispose()
expectPointClose(pt, { x, y }, 5)
})
})
......
import * as tf from '@tensorflow/tfjs-core';
import { fetchImage, fetchJson, IDimensions, isTensor3D, NetInput, Point, TMediaElement, toNetInput } from '../../../src';
import { createCanvasFromMedia, IDimensions, isTensor3D, NetInput, Point, TMediaElement, toNetInput } from '../../../src';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { createFaceLandmarkNet } from '../../../src/faceLandmarkNet';
import { FaceLandmark68TinyNet } from '../../../src/faceLandmarkNet/FaceLandmark68TinyNet';
import { loadImage, loadJson } from '../../env';
import { describeWithNets, expectAllTensorsReleased, expectPointClose } from '../../utils';
function getInputDims (input: tf.Tensor | TMediaElement): IDimensions {
......@@ -24,12 +23,12 @@ describe('faceLandmark68TinyNet', () => {
let faceLandmarkPositionsRect: Point[]
beforeAll(async () => {
imgEl1 = await fetchImage('base/test/images/face1.png')
imgEl2 = await fetchImage('base/test/images/face2.png')
imgElRect = await fetchImage('base/test/images/face_rectangular.png')
faceLandmarkPositions1 = await fetchJson<Point[]>('base/test/data/faceLandmarkPositions1Tiny.json')
faceLandmarkPositions2 = await fetchJson<Point[]>('base/test/data/faceLandmarkPositions2Tiny.json')
faceLandmarkPositionsRect = await fetchJson<Point[]>('base/test/data/faceLandmarkPositionsRectTiny.json')
imgEl1 = await loadImage('test/images/face1.png')
imgEl2 = await loadImage('test/images/face2.png')
imgElRect = await loadImage('test/images/face_rectangular.png')
faceLandmarkPositions1 = await loadJson<Point[]>('test/data/faceLandmarkPositions1Tiny.json')
faceLandmarkPositions2 = await loadJson<Point[]>('test/data/faceLandmarkPositions2Tiny.json')
faceLandmarkPositionsRect = await loadJson<Point[]>('test/data/faceLandmarkPositionsRectTiny.json')
})
describeWithNets('quantized weights', { withFaceLandmark68TinyNet: { quantized: true } }, ({ faceLandmark68TinyNet }) => {
......@@ -92,7 +91,7 @@ describe('faceLandmark68TinyNet', () => {
})
it('computes face landmarks for batch of tf.Tensor3D', async () => {
const inputs = [imgEl1, imgEl2, imgElRect].map(el => tf.fromPixels(el))
const inputs = [imgEl1, imgEl2, imgElRect].map(el => tf.fromPixels(createCanvasFromMedia(el)))
const faceLandmarkPositions = [
faceLandmarkPositions1,
......@@ -117,7 +116,7 @@ describe('faceLandmark68TinyNet', () => {
})
it('computes face landmarks for batch of mixed inputs', async () => {
const inputs = [imgEl1, tf.fromPixels(imgEl2), tf.fromPixels(imgElRect)]
const inputs = [imgEl1, tf.fromPixels(createCanvasFromMedia(imgEl2)), tf.fromPixels(createCanvasFromMedia(imgElRect))]
const faceLandmarkPositions = [
faceLandmarkPositions1,
......@@ -145,17 +144,6 @@ describe('faceLandmark68TinyNet', () => {
describeWithNets('no memory leaks', { withFaceLandmark68TinyNet: { quantized: true } }, ({ faceLandmark68TinyNet }) => {
describe('NeuralNetwork, quantized model', () => {
it('disposes all param tensors', async () => {
await expectAllTensorsReleased(async () => {
const net = new FaceLandmark68TinyNet()
await net.load('base/weights')
net.dispose()
})
})
})
describe('forwardInput', () => {
......@@ -176,7 +164,7 @@ describe('faceLandmark68TinyNet', () => {
})
it('single tf.Tensor3D', async () => {
const tensor = tf.fromPixels(imgEl1)
const tensor = tf.fromPixels(createCanvasFromMedia(imgEl1))
await expectAllTensorsReleased(async () => {
const netInput = new NetInput([tensor])
......@@ -188,7 +176,7 @@ describe('faceLandmark68TinyNet', () => {
})
it('multiple tf.Tensor3Ds', async () => {
const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.fromPixels(el))
const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.fromPixels(createCanvasFromMedia(el)))
await expectAllTensorsReleased(async () => {
const netInput = new NetInput(tensors)
......@@ -200,7 +188,7 @@ describe('faceLandmark68TinyNet', () => {
})
it('single batch size 1 tf.Tensor4Ds', async () => {
const tensor = tf.tidy(() => tf.fromPixels(imgEl1).expandDims()) as tf.Tensor4D
const tensor = tf.tidy(() => tf.fromPixels(createCanvasFromMedia(imgEl1)).expandDims()) as tf.Tensor4D
await expectAllTensorsReleased(async () => {
const outTensor = await faceLandmark68TinyNet.forwardInput(await toNetInput(tensor))
......@@ -212,7 +200,7 @@ describe('faceLandmark68TinyNet', () => {
it('multiple batch size 1 tf.Tensor4Ds', async () => {
const tensors = [imgEl1, imgEl1, imgEl1]
.map(el => tf.tidy(() => tf.fromPixels(el).expandDims())) as tf.Tensor4D[]
.map(el => tf.tidy(() => tf.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
await expectAllTensorsReleased(async () => {
const outTensor = await faceLandmark68TinyNet.forwardInput(await toNetInput(tensors))
......@@ -239,7 +227,7 @@ describe('faceLandmark68TinyNet', () => {
})
it('single tf.Tensor3D', async () => {
const tensor = tf.fromPixels(imgEl1)
const tensor = tf.fromPixels(createCanvasFromMedia(imgEl1))
await expectAllTensorsReleased(async () => {
await faceLandmark68TinyNet.detectLandmarks(tensor)
......@@ -249,7 +237,7 @@ describe('faceLandmark68TinyNet', () => {
})
it('multiple tf.Tensor3Ds', async () => {
const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.fromPixels(el))
const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.fromPixels(createCanvasFromMedia(el)))
await expectAllTensorsReleased(async () => {
......@@ -260,7 +248,7 @@ describe('faceLandmark68TinyNet', () => {
})
it('single batch size 1 tf.Tensor4Ds', async () => {
const tensor = tf.tidy(() => tf.fromPixels(imgEl1).expandDims()) as tf.Tensor4D
const tensor = tf.tidy(() => tf.fromPixels(createCanvasFromMedia(imgEl1)).expandDims()) as tf.Tensor4D
await expectAllTensorsReleased(async () => {
await faceLandmark68TinyNet.detectLandmarks(tensor)
......@@ -271,7 +259,7 @@ describe('faceLandmark68TinyNet', () => {
it('multiple batch size 1 tf.Tensor4Ds', async () => {
const tensors = [imgEl1, imgEl1, imgEl1]
.map(el => tf.tidy(() => tf.fromPixels(el).expandDims())) as tf.Tensor4D[]
.map(el => tf.tidy(() => tf.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
await expectAllTensorsReleased(async () => {
await faceLandmark68TinyNet.detectLandmarks(tensors)
......
import { fetchImage, fetchJson, Point } from '../../../src';
import { Point } from '../../../src';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { createFaceLandmarkNet } from '../../../src/faceLandmarkNet';
import { describeWithNets, expectAllTensorsReleased, expectPointClose } from '../../utils';
import { loadImage, loadJson } from '../../env';
import { describeWithNets, expectPointClose } from '../../utils';
describe('faceLandmark68TinyNet, uncompressed', () => {
......@@ -11,10 +11,10 @@ describe('faceLandmark68TinyNet, uncompressed', () => {
let faceLandmarkPositionsRect: Point[]
beforeAll(async () => {
imgEl1 = await fetchImage('base/test/images/face1.png')
imgElRect = await fetchImage('base/test/images/face_rectangular.png')
faceLandmarkPositions1 = await fetchJson<Point[]>('base/test/data/faceLandmarkPositions1Tiny.json')
faceLandmarkPositionsRect = await fetchJson<Point[]>('base/test/data/faceLandmarkPositionsRectTiny.json')
imgEl1 = await loadImage('test/images/face1.png')
imgElRect = await loadImage('test/images/face_rectangular.png')
faceLandmarkPositions1 = await loadJson<Point[]>('test/data/faceLandmarkPositions1Tiny.json')
faceLandmarkPositionsRect = await loadJson<Point[]>('test/data/faceLandmarkPositionsRectTiny.json')
})
describeWithNets('uncompressed weights', { withFaceLandmark68TinyNet: { quantized: false } }, ({ faceLandmark68TinyNet }) => {
......@@ -47,15 +47,6 @@ describe('faceLandmark68TinyNet, uncompressed', () => {
})
})
it('no memory leaks', async () => {
await expectAllTensorsReleased(async () => {
const res = await fetch('base/weights_uncompressed/face_landmark_68_model.weights')
const weights = new Float32Array(await res.arrayBuffer())
const net = createFaceLandmarkNet(weights)
net.dispose()
})
})
})
})
......
import * as tf from '@tensorflow/tfjs-core';
import { FaceRecognitionNet, fetchImage, fetchJson, NetInput, toNetInput } from '../../../src';
import { createCanvasFromMedia, NetInput, toNetInput } from '../../../src';
import { euclideanDistance } from '../../../src/euclideanDistance';
import { loadImage, loadJson } from '../../env';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
describe('faceRecognitionNet', () => {
let imgEl1: HTMLImageElement
let imgEl2: HTMLImageElement
let imgElRect: HTMLImageElement
let imgEl1: HTMLCanvasElement
let imgEl2: HTMLCanvasElement
let imgElRect: HTMLCanvasElement
let faceDescriptor1: number[]
let faceDescriptor2: number[]
let faceDescriptorRect: number[]
beforeAll(async () => {
imgEl1 = await fetchImage('base/test/images/face1.png')
imgEl2 = await fetchImage('base/test/images/face2.png')
imgElRect = await fetchImage('base/test/images/face_rectangular.png')
faceDescriptor1 = await fetchJson<number[]>('base/test/data/faceDescriptor1.json')
faceDescriptor2 = await fetchJson<number[]>('base/test/data/faceDescriptor2.json')
faceDescriptorRect = await fetchJson<number[]>('base/test/data/faceDescriptorRect.json')
imgEl1 = createCanvasFromMedia(await loadImage('test/images/face1.png'))
imgEl2 = createCanvasFromMedia(await loadImage('test/images/face2.png'))
imgElRect = createCanvasFromMedia(await loadImage('test/images/face_rectangular.png'))
faceDescriptor1 = await loadJson<number[]>('test/data/faceDescriptor1.json')
faceDescriptor2 = await loadJson<number[]>('test/data/faceDescriptor2.json')
faceDescriptorRect = await loadJson<number[]>('test/data/faceDescriptorRect.json')
})
describeWithNets('quantized weights', { withFaceRecognitionNet: { quantized: true } }, ({ faceRecognitionNet }) => {
......@@ -96,18 +97,6 @@ describe('faceRecognitionNet', () => {
describeWithNets('no memory leaks', { withFaceRecognitionNet: { quantized: true } }, ({ faceRecognitionNet }) => {
describe('NeuralNetwork, quantized model', () => {
it('disposes all param tensors', async () => {
await expectAllTensorsReleased(async () => {
const net = new FaceRecognitionNet()
await net.load('base/weights')
net.dispose()
})
})
})
describe('forwardInput', () => {
it('single image element', async () => {
......
import { fetchImage, fetchJson } from '../../../src';
import { createCanvasFromMedia } from '../../../src';
import { euclideanDistance } from '../../../src/euclideanDistance';
import { createFaceRecognitionNet } from '../../../src/faceRecognitionNet';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { loadImage, loadJson } from '../../env';
import { describeWithNets } from '../../utils';
describe('faceRecognitionNet, uncompressed', () => {
let imgEl1: HTMLImageElement
let imgElRect: HTMLImageElement
let imgEl1: HTMLCanvasElement
let imgElRect: HTMLCanvasElement
let faceDescriptor1: number[]
let faceDescriptorRect: number[]
beforeAll(async () => {
imgEl1 = await fetchImage('base/test/images/face1.png')
imgElRect = await fetchImage('base/test/images/face_rectangular.png')
faceDescriptor1 = await fetchJson<number[]>('base/test/data/faceDescriptor1.json')
faceDescriptorRect = await fetchJson<number[]>('base/test/data/faceDescriptorRect.json')
imgEl1 = createCanvasFromMedia(await loadImage('test/images/face1.png'))
imgElRect = createCanvasFromMedia(await loadImage('test/images/face_rectangular.png'))
faceDescriptor1 = await loadJson<number[]>('test/data/faceDescriptor1.json')
faceDescriptorRect = await loadJson<number[]>('test/data/faceDescriptorRect.json')
})
describeWithNets('uncompressed weights', { withFaceRecognitionNet: { quantized: false } }, ({ faceRecognitionNet }) => {
......@@ -31,14 +31,5 @@ describe('faceRecognitionNet, uncompressed', () => {
expect(euclideanDistance(result, faceDescriptorRect)).toBeLessThan(0.1)
})
it('no memory leaks', async () => {
await expectAllTensorsReleased(async () => {
const res = await fetch('base/weights_uncompressed/face_recognition_model.weights')
const weights = new Float32Array(await res.arrayBuffer())
const net = createFaceRecognitionNet(weights)
net.dispose()
})
})
})
})
\ No newline at end of file
import * as faceapi from '../../../src';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { IPoint } from '../../../src';
import { loadImage, loadJson } from '../../env';
import { describeWithNets } from '../../utils';
import { expectMtcnnResults } from './expectMtcnnResults';
import { IPoint, fetchImage, fetchJson } from '../../../src';
describe('mtcnn.forward', () => {
......@@ -9,8 +9,8 @@ describe('mtcnn.forward', () => {
let expectedMtcnnLandmarks: IPoint[][]
beforeAll(async () => {
imgEl = await fetchImage('base/test/images/faces.jpg')
expectedMtcnnLandmarks = await fetchJson<IPoint[][]>('base/test/data/mtcnnFaceLandmarkPositions.json')
imgEl = await loadImage('test/images/faces.jpg')
expectedMtcnnLandmarks = await loadJson<IPoint[][]>('test/data/mtcnnFaceLandmarkPositions.json')
})
// "quantized" actually means loaded from manifest.json, since there is no quantization applied to the mtcnn model
......@@ -26,8 +26,8 @@ describe('mtcnn.forward', () => {
const deltas = {
maxScoreDelta: 0.01,
maxBoxDelta: 2,
maxLandmarksDelta: 5
maxBoxDelta: 10,
maxLandmarksDelta: 10
}
expectMtcnnResults(results, expectedMtcnnLandmarks, [1.0, 1.0, 1.0, 1.0, 0.99, 0.99], deltas)
})
......@@ -43,7 +43,7 @@ describe('mtcnn.forward', () => {
const deltas = {
maxScoreDelta: 0.01,
maxBoxDelta: 15,
maxLandmarksDelta: 13
maxLandmarksDelta: 15
}
expectMtcnnResults(results, expectedMtcnnLandmarks, [1.0, 1.0, 1.0, 1.0, 1.0, 0.99], deltas)
})
......@@ -61,8 +61,8 @@ describe('mtcnn.forward', () => {
const deltas = {
maxScoreDelta: 0.01,
maxBoxDelta: 8,
maxLandmarksDelta: 7
maxBoxDelta: 15,
maxLandmarksDelta: 20
}
expectMtcnnResults(results, expectedMtcnnLandmarks, [1.0, 1.0, 1.0, 0.99, 1.0, 1.0], deltas)
})
......@@ -77,20 +77,12 @@ describe('mtcnn.forward', () => {
const deltas = {
maxScoreDelta: 0.01,
maxBoxDelta: 8,
maxLandmarksDelta: 10
maxBoxDelta: 15,
maxLandmarksDelta: 15
}
expectMtcnnResults(results, expectedMtcnnLandmarks, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], deltas)
})
it('no memory leaks', async () => {
await expectAllTensorsReleased(async () => {
const net = new faceapi.Mtcnn()
await net.load('base/weights')
net.dispose()
})
})
})
})
\ No newline at end of file
import * as faceapi from '../../../src';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { IPoint } from '../../../src';
import { loadImage, loadJson } from '../../env';
import { describeWithNets } from '../../utils';
import { expectMtcnnResults } from './expectMtcnnResults';
import { IPoint, fetchImage, fetchJson } from '../../../src';
describe('mtcnn.forward', () => {
......@@ -9,8 +9,8 @@ describe('mtcnn.forward', () => {
let expectedMtcnnLandmarks: IPoint[][]
beforeAll(async () => {
imgEl = await fetchImage('base/test/images/faces.jpg')
expectedMtcnnLandmarks = await fetchJson<IPoint[][]>('base/test/data/mtcnnFaceLandmarkPositions.json')
imgEl = await loadImage('test/images/faces.jpg')
expectedMtcnnLandmarks = await loadJson<IPoint[][]>('test/data/mtcnnFaceLandmarkPositions.json')
})
describeWithNets('uncompressed weights', { withMtcnn: { quantized: false } }, ({ mtcnn }) => {
......@@ -25,8 +25,8 @@ describe('mtcnn.forward', () => {
const deltas = {
maxScoreDelta: 0.01,
maxBoxDelta: 2,
maxLandmarksDelta: 5
maxBoxDelta: 10,
maxLandmarksDelta: 10
}
expectMtcnnResults(results, expectedMtcnnLandmarks, [1.0, 1.0, 1.0, 1.0, 0.99, 0.99], deltas)
})
......@@ -42,7 +42,7 @@ describe('mtcnn.forward', () => {
const deltas = {
maxScoreDelta: 0.01,
maxBoxDelta: 15,
maxLandmarksDelta: 13
maxLandmarksDelta: 15
}
expectMtcnnResults(results, expectedMtcnnLandmarks, [1.0, 1.0, 1.0, 1.0, 1.0, 0.99], deltas)
})
......@@ -60,8 +60,8 @@ describe('mtcnn.forward', () => {
const deltas = {
maxScoreDelta: 0.01,
maxBoxDelta: 8,
maxLandmarksDelta: 7
maxBoxDelta: 15,
maxLandmarksDelta: 20
}
expectMtcnnResults(results, expectedMtcnnLandmarks, [1.0, 1.0, 1.0, 0.99, 1.0, 1.0], deltas)
})
......@@ -76,21 +76,12 @@ describe('mtcnn.forward', () => {
const deltas = {
maxScoreDelta: 0.01,
maxBoxDelta: 8,
maxLandmarksDelta: 10
maxBoxDelta: 15,
maxLandmarksDelta: 15
}
expectMtcnnResults(results, expectedMtcnnLandmarks, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0], deltas)
})
it('no memory leaks', async () => {
await expectAllTensorsReleased(async () => {
const res = await fetch('base/weights_uncompressed/mtcnn_model.weights')
const weights = new Float32Array(await res.arrayBuffer())
const net = faceapi.createMtcnn(weights)
net.dispose()
})
})
})
})
\ No newline at end of file
import * as faceapi from '../../../src';
import { describeWithNets, expectAllTensorsReleased, assembleExpectedFullFaceDescriptions, ExpectedFullFaceDescription } from '../../utils';
import { expectedMtcnnBoxes } from './expectMtcnnResults';
import { fetchImage } from '../../../src';
import { MtcnnOptions } from '../../../src/mtcnn/MtcnnOptions';
import { loadImage } from '../../env';
import { expectFaceDetections } from '../../expectFaceDetections';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import {
assembleExpectedFullFaceDescriptions,
describeWithNets,
expectAllTensorsReleased,
ExpectedFullFaceDescription,
} from '../../utils';
import { expectedMtcnnBoxes } from './expectMtcnnResults';
describe('mtcnn', () => {
......@@ -14,7 +19,7 @@ describe('mtcnn', () => {
const expectedScores = [1.0, 1.0, 1.0, 1.0, 0.99, 0.99]
beforeAll(async () => {
imgEl = await fetchImage('base/test/images/faces.jpg')
imgEl = await loadImage('test/images/faces.jpg')
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedMtcnnBoxes)
})
......@@ -27,7 +32,7 @@ describe('mtcnn', () => {
const results = await faceapi.detectAllFaces(imgEl, options)
const maxScoreDelta = 0.01
const maxBoxDelta = 2
const maxBoxDelta = 10
expect(results.length).toEqual(6)
expectFaceDetections(results, expectedMtcnnBoxes, expectedScores, maxScoreDelta, maxBoxDelta)
})
......@@ -43,7 +48,7 @@ describe('mtcnn', () => {
const deltas = {
maxScoreDelta: 0.01,
maxBoxDelta: 2,
maxBoxDelta: 10,
maxLandmarksDelta: 6
}
expect(results.length).toEqual(6)
......@@ -62,7 +67,7 @@ describe('mtcnn', () => {
const deltas = {
maxScoreDelta: 0.01,
maxBoxDelta: 2,
maxBoxDelta: 10,
maxLandmarksDelta: 6,
maxDescriptorDelta: 0.2
}
......
import * as faceapi from '../../../src';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { loadImage } from '../../env';
import { expectFaceDetections } from '../../expectFaceDetections';
import { fetchImage } from '../../../src';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { expectedSsdBoxes } from './expectedBoxes';
describe('ssdMobilenetv1.locateFaces', () => {
......@@ -9,18 +9,18 @@ describe('ssdMobilenetv1.locateFaces', () => {
let imgEl: HTMLImageElement
beforeAll(async () => {
imgEl = await fetchImage('base/test/images/faces.jpg')
imgEl = await loadImage('test/images/faces.jpg')
})
describeWithNets('quantized weights', { withSsdMobilenetv1: { quantized: true } }, ({ ssdMobilenetv1 }) => {
it('scores > 0.8', async () => {
const detections = await ssdMobilenetv1.locateFaces(imgEl, { minConfidence: 0.8 }) as faceapi.FaceDetection[]
it('scores > 0.7', async () => {
const detections = await ssdMobilenetv1.locateFaces(imgEl, { minConfidence: 0.7 }) as faceapi.FaceDetection[]
expect(detections.length).toEqual(4)
const expectedScores = [-1, 0.81, 0.97, 0.88, 0.84, -1]
const maxScoreDelta = 0.01
const maxScoreDelta = 0.05
const maxBoxDelta = 4
expectFaceDetections(detections, expectedSsdBoxes, expectedScores, maxScoreDelta, maxBoxDelta)
......@@ -32,20 +32,12 @@ describe('ssdMobilenetv1.locateFaces', () => {
expect(detections.length).toEqual(6)
const expectedScores = [0.54, 0.81, 0.97, 0.88, 0.84, 0.61]
const maxScoreDelta = 0.01
const maxScoreDelta = 0.05
const maxBoxDelta = 5
expectFaceDetections(detections, expectedSsdBoxes, expectedScores, maxScoreDelta, maxBoxDelta)
})
it('no memory leaks', async () => {
await expectAllTensorsReleased(async () => {
const net = new faceapi.SsdMobilenetv1()
await net.load('base/weights')
net.dispose()
})
})
})
})
\ No newline at end of file
import * as faceapi from '../../../src';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { loadImage } from '../../env';
import { expectFaceDetections } from '../../expectFaceDetections';
import { fetchImage } from '../../../src';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { expectedSsdBoxes } from './expectedBoxes';
describe('ssdMobilenetv1.locateFaces, uncompressed', () => {
......@@ -9,7 +9,7 @@ describe('ssdMobilenetv1.locateFaces, uncompressed', () => {
let imgEl: HTMLImageElement
beforeAll(async () => {
imgEl = await fetchImage('base/test/images/faces.jpg')
imgEl = await loadImage('test/images/faces.jpg')
})
describeWithNets('uncompressed weights', { withSsdMobilenetv1: { quantized: false } }, ({ ssdMobilenetv1 }) => {
......@@ -20,8 +20,8 @@ describe('ssdMobilenetv1.locateFaces, uncompressed', () => {
expect(detections.length).toEqual(3)
const expectedScores = [-1, -1, 0.98, 0.88, 0.81, -1]
const maxScoreDelta = 0.01
const maxBoxDelta = 3
const maxScoreDelta = 0.05
const maxBoxDelta = 5
expectFaceDetections(detections, expectedSsdBoxes, expectedScores, maxScoreDelta, maxBoxDelta)
})
......@@ -31,22 +31,13 @@ describe('ssdMobilenetv1.locateFaces, uncompressed', () => {
expect(detections.length).toEqual(6)
const expectedScores = [0.57, 0.74, 0.98, 0.88, 0.81, 0.58]
const maxScoreDelta = 0.01
const maxBoxDelta = 3
const expectedScores = [0.57, 0.76, 0.98, 0.88, 0.81, 0.58]
const maxScoreDelta = 0.05
const maxBoxDelta = 5
expectFaceDetections(detections, expectedSsdBoxes, expectedScores, maxScoreDelta, maxBoxDelta)
})
it('no memory leaks', async () => {
await expectAllTensorsReleased(async () => {
const res = await fetch('base/weights_uncompressed/ssd_mobilenetv1_model.weights')
const weights = new Float32Array(await res.arrayBuffer())
const net = faceapi.createSsdMobilenetv1(weights)
net.dispose()
})
})
})
})
\ No newline at end of file
......@@ -5,6 +5,7 @@ import { expectFaceDetections } from '../../expectFaceDetections';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectedSsdBoxes } from './expectedBoxes';
import { loadImage } from '../../env';
describe('ssdMobilenetv1', () => {
......@@ -13,7 +14,7 @@ describe('ssdMobilenetv1', () => {
const expectedScores = [0.54, 0.81, 0.97, 0.88, 0.84, 0.61]
beforeAll(async () => {
imgEl = await fetchImage('base/test/images/faces.jpg')
imgEl = await loadImage('test/images/faces.jpg')
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedSsdBoxes)
})
......@@ -26,7 +27,7 @@ describe('ssdMobilenetv1', () => {
const results = await faceapi.detectAllFaces(imgEl, options)
const maxScoreDelta = 0.01
const maxScoreDelta = 0.05
const maxBoxDelta = 5
expect(results.length).toEqual(6)
expectFaceDetections(results, expectedSsdBoxes, expectedScores, maxScoreDelta, maxBoxDelta)
......@@ -42,7 +43,7 @@ describe('ssdMobilenetv1', () => {
.withFaceLandmarks()
const deltas = {
maxScoreDelta: 0.01,
maxScoreDelta: 0.05,
maxBoxDelta: 5,
maxLandmarksDelta: 2
}
......@@ -61,7 +62,7 @@ describe('ssdMobilenetv1', () => {
.withFaceDescriptors()
const deltas = {
maxScoreDelta: 0.01,
maxScoreDelta: 0.05,
maxBoxDelta: 5,
maxLandmarksDelta: 2,
maxDescriptorDelta: 0.2
......
import * as faceapi from '../../../src';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { loadImage } from '../../env';
import { expectFaceDetections } from '../../expectFaceDetections';
import { fetchImage } from '../../../src';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { expectedTinyFaceDetectorBoxes } from './expectedBoxes';
describe('tinyFaceDetector.locateFaces', () => {
......@@ -9,7 +9,7 @@ describe('tinyFaceDetector.locateFaces', () => {
let imgEl: HTMLImageElement
beforeAll(async () => {
imgEl = await fetchImage('base/test/images/faces.jpg')
imgEl = await loadImage('test/images/faces.jpg')
})
describeWithNets('quantized weights', { withTinyFaceDetector: { quantized: true } }, ({ tinyFaceDetector }) => {
......@@ -20,7 +20,7 @@ describe('tinyFaceDetector.locateFaces', () => {
expect(detections.length).toEqual(6)
const expectedScores = [0.77, 0.75, 0.88, 0.77, 0.83, 0.85]
const maxScoreDelta = 0.01
const maxScoreDelta = 0.05
const maxBoxDelta = 40
expectFaceDetections(detections, expectedTinyFaceDetectorBoxes, expectedScores, maxScoreDelta, maxBoxDelta)
......@@ -32,20 +32,12 @@ describe('tinyFaceDetector.locateFaces', () => {
expect(detections.length).toEqual(6)
const expectedScores = [0.7, 0.82, 0.93, 0.86, 0.79, 0.84]
const maxScoreDelta = 0.01
const maxBoxDelta = 1
const maxScoreDelta = 0.05
const maxBoxDelta = 5
expectFaceDetections(detections, expectedTinyFaceDetectorBoxes, expectedScores, maxScoreDelta, maxBoxDelta)
})
it('no memory leaks', async () => {
await expectAllTensorsReleased(async () => {
const net = new faceapi.TinyFaceDetector()
await net.load('base/weights')
net.dispose()
})
})
})
})
\ No newline at end of file
......@@ -5,6 +5,7 @@ import { expectFaceDetections } from '../../expectFaceDetections';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectedTinyFaceDetectorBoxes } from './expectedBoxes';
import { loadImage } from '../../env';
describe('tinyFaceDetector', () => {
......@@ -13,7 +14,7 @@ describe('tinyFaceDetector', () => {
const expectedScores = [0.7, 0.82, 0.93, 0.86, 0.79, 0.84]
beforeAll(async () => {
imgEl = await fetchImage('base/test/images/faces.jpg')
imgEl = await loadImage('test/images/faces.jpg')
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes)
})
......@@ -26,8 +27,8 @@ describe('tinyFaceDetector', () => {
const results = await faceapi.detectAllFaces(imgEl, options)
const maxScoreDelta = 0.01
const maxBoxDelta = 1
const maxScoreDelta = 0.05
const maxBoxDelta = 5
expect(results.length).toEqual(6)
expectFaceDetections(results, expectedTinyFaceDetectorBoxes, expectedScores, maxScoreDelta, maxBoxDelta)
})
......@@ -42,8 +43,8 @@ describe('tinyFaceDetector', () => {
.withFaceLandmarks()
const deltas = {
maxScoreDelta: 0.01,
maxBoxDelta: 1,
maxScoreDelta: 0.05,
maxBoxDelta: 5,
maxLandmarksDelta: 10
}
expect(results.length).toEqual(6)
......@@ -61,8 +62,8 @@ describe('tinyFaceDetector', () => {
.withFaceDescriptors()
const deltas = {
maxScoreDelta: 0.01,
maxBoxDelta: 1,
maxScoreDelta: 0.05,
maxBoxDelta: 5,
maxLandmarksDelta: 10,
maxDescriptorDelta: 0.2
}
......
import { IRect } from '../../../src';
import { sortBoxes } from '../../utils';
// Ground-truth face bounding boxes for test/images/faces.jpg, used by the
// tinyYolov2 specs. sortBoxes normalizes the ordering so detection results
// can be compared index-wise regardless of the order the net returns them.
// Boxes for the standard (non-separable-conv) tiny yolo v2 model.
export const expectedTinyYolov2Boxes: IRect[] = sortBoxes([
{ x: 52, y: 263, width: 106, height: 102 },
{ x: 455, y: 191, width: 103, height: 97 },
{ x: 236, y: 57, width: 90, height: 85 },
{ x: 257, y: 243, width: 86, height: 95 },
{ x: 578, y: 76, width: 86, height: 91 },
{ x: 87, y: 30, width: 92, height: 93 }
])
// Boxes for the separable-convolution variant of the model; same faces, but
// the variant localizes slightly differently, hence separate expected values.
export const expectedTinyYolov2SeparableConvBoxes: IRect[] = sortBoxes([
{ x: 42, y: 257, width: 111, height: 121 },
{ x: 454, y: 175, width: 104, height: 121 },
{ x: 230, y: 45, width: 94, height: 104 },
{ x: 574, y: 62, width: 88, height: 113 },
{ x: 260, y: 233, width: 82, height: 104 },
{ x: 83, y: 24, width: 85, height: 111 }
])
import { TinyYolov2SizeType } from 'tfjs-tiny-yolov2';
import { fetchImage, TinyYolov2 } from '../../../src';
import { expectFaceDetections } from '../../expectFaceDetections';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { expectedTinyYolov2Boxes } from './expectedBoxes';
describe('tinyYolov2.locateFaces', () => {
let imgEl: HTMLImageElement
beforeAll(async () => {
imgEl = await fetchImage('base/test/images/faces.jpg')
})
describeWithNets('quantized weights', { withTinyYolov2: { quantized: true, withSeparableConv: false } }, ({ tinyYolov2 }) => {
it('inputSize lg, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2SizeType.LG })
const expectedScores = [0.8, 0.85, 0.86, 0.83, 0.86, 0.81]
const maxScoreDelta = 0.01
const maxBoxDelta = 4
expect(detections.length).toEqual(6)
expectFaceDetections(detections, expectedTinyYolov2Boxes, expectedScores, maxScoreDelta, maxBoxDelta)
})
it('inputSize md, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2SizeType.MD })
const expectedScores = [0.89, 0.81, 0.82, 0.72, 0.81, 0.86]
const maxScoreDelta = 0.01
const maxBoxDelta = 27
expect(detections.length).toEqual(6)
expectFaceDetections(detections, expectedTinyYolov2Boxes, expectedScores, maxScoreDelta, maxBoxDelta)
})
it('inputSize custom, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: 416 })
const expectedScores = [0.89, 0.81, 0.82, 0.72, 0.81, 0.86]
const maxScoreDelta = 0.01
const maxBoxDelta = 27
expect(detections.length).toEqual(6)
expectFaceDetections(detections, expectedTinyYolov2Boxes, expectedScores, maxScoreDelta, maxBoxDelta)
})
it('no memory leaks', async () => {
await expectAllTensorsReleased(async () => {
const net = new TinyYolov2(false)
await net.load('base/weights_unused')
net.dispose()
})
})
})
})
\ No newline at end of file
import { TinyYolov2SizeType } from 'tfjs-tiny-yolov2';
import { createTinyYolov2, fetchImage } from '../../../src';
import { expectFaceDetections } from '../../expectFaceDetections';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { expectedTinyYolov2Boxes } from './expectedBoxes';
describe('tinyYolov2.locateFaces, uncompressed', () => {
let imgEl: HTMLImageElement
beforeAll(async () => {
imgEl = await fetchImage('base/test/images/faces.jpg')
})
describeWithNets('uncompressed weights', { withTinyYolov2: { quantized: false, withSeparableConv: false } }, ({ tinyYolov2 }) => {
it('inputSize lg, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2SizeType.LG })
const expectedScores = [0.81, 0.85, 0.86, 0.83, 0.86, 0.81]
const maxScoreDelta = 0.01
const maxBoxDelta = 1
expect(detections.length).toEqual(6)
expectFaceDetections(detections, expectedTinyYolov2Boxes, expectedScores, maxScoreDelta, maxBoxDelta)
})
it('inputSize md, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2SizeType.MD })
const expectedScores = [0.89, 0.82, 0.82, 0.72, 0.81, 0.86]
const maxScoreDelta = 0.01
const maxBoxDelta = 24
expect(detections.length).toEqual(6)
expectFaceDetections(detections, expectedTinyYolov2Boxes, expectedScores, maxScoreDelta, maxBoxDelta)
})
it('inputSize custom, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: 416 })
const expectedScores = [0.89, 0.82, 0.82, 0.72, 0.81, 0.86]
const maxScoreDelta = 0.01
const maxBoxDelta = 24
expect(detections.length).toEqual(6)
expectFaceDetections(detections, expectedTinyYolov2Boxes, expectedScores, maxScoreDelta, maxBoxDelta)
})
it('no memory leaks', async () => {
await expectAllTensorsReleased(async () => {
const res = await fetch('base/weights_uncompressed/tiny_yolov2_model.weights')
const weights = new Float32Array(await res.arrayBuffer())
const net = createTinyYolov2(weights, false)
net.dispose()
})
})
})
})
\ No newline at end of file
import { TinyYolov2SizeType } from 'tfjs-tiny-yolov2';
import { createTinyYolov2, fetchImage, TinyYolov2 } from '../../../src';
import { expectFaceDetections } from '../../expectFaceDetections';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { expectedTinyYolov2Boxes } from './expectedBoxes';
describe('tinyYolov2.locateFaces, with separable convolutions', () => {
let imgEl: HTMLImageElement
beforeAll(async () => {
imgEl = await fetchImage('base/test/images/faces.jpg')
})
describeWithNets('quantized weights', { withTinyYolov2: { quantized: true } }, ({ tinyYolov2 }) => {
it('inputSize lg, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2SizeType.LG })
const expectedScores = [0.85, 0.88, 0.9, 0.85, 0.9, 0.85]
const maxScoreDelta = 0.01
const maxBoxDelta = 25
expect(detections.length).toEqual(6)
expectFaceDetections(detections, expectedTinyYolov2Boxes, expectedScores, maxScoreDelta, maxBoxDelta)
})
it('inputSize md, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2SizeType.MD })
const expectedScores = [0.85, 0.8, 0.8, 0.85, 0.85, 0.83]
const maxScoreDelta = 0.01
const maxBoxDelta = 34
expect(detections.length).toEqual(6)
expectFaceDetections(detections, expectedTinyYolov2Boxes, expectedScores, maxScoreDelta, maxBoxDelta)
})
it('inputSize custom, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: 416 })
const expectedScores = [0.85, 0.8, 0.8, 0.85, 0.85, 0.83]
const maxScoreDelta = 0.01
const maxBoxDelta = 34
expect(detections.length).toEqual(6)
expectFaceDetections(detections, expectedTinyYolov2Boxes, expectedScores, maxScoreDelta, maxBoxDelta)
})
})
describe('no memory leaks', () => {
describe('NeuralNetwork, uncompressed model', () => {
it('disposes all param tensors', async () => {
await expectAllTensorsReleased(async () => {
const res = await fetch('base/weights_uncompressed/tiny_yolov2_separable_conv_model.weights')
const weights = new Float32Array(await res.arrayBuffer())
const net = createTinyYolov2(weights)
net.dispose()
})
})
})
describe('NeuralNetwork, quantized model', () => {
it('disposes all param tensors', async () => {
await expectAllTensorsReleased(async () => {
const net = new TinyYolov2()
await net.load('base/weights')
net.dispose()
})
})
})
})
})
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import * as faceapi from '../src';
import { FaceRecognitionNet, IPoint, IRect, Mtcnn, NeuralNetwork, TinyYolov2 } from '../src/';
import { FaceRecognitionNet, IPoint, IRect, Mtcnn, TinyYolov2 } from '../src/';
import { FaceDetection } from '../src/classes/FaceDetection';
import { FaceLandmarks } from '../src/classes/FaceLandmarks';
import { FaceLandmark68Net } from '../src/faceLandmarkNet/FaceLandmark68Net';
import { FaceLandmark68TinyNet } from '../src/faceLandmarkNet/FaceLandmark68TinyNet';
import { SsdMobilenetv1 } from '../src/ssdMobilenetv1/SsdMobilenetv1';
import { TinyFaceDetector } from '../src/tinyFaceDetector/TinyFaceDetector';
jasmine.DEFAULT_TIMEOUT_INTERVAL = 60000
const args: string[] = window['__karma__'].config.jasmine.args
if (args.some(arg => arg === 'backend_cpu')) {
tf.setBackend('cpu')
}
import { initNet, loadJson } from './env';
export function expectMaxDelta(val1: number, val2: number, maxDelta: number) {
expect(Math.abs(val1 - val2)).toBeLessThan(maxDelta)
......@@ -84,8 +78,8 @@ export async function assembleExpectedFullFaceDescriptions(
detections: IRect[],
landmarksFile: string = 'facesFaceLandmarkPositions.json'
): Promise<ExpectedFullFaceDescription[]> {
const landmarks = await (await fetch(`base/test/data/${landmarksFile}`)).json()
const descriptors = await (await fetch('base/test/data/facesFaceDescriptors.json')).json()
const landmarks = await loadJson(`test/data/${landmarksFile}`)
const descriptors = await loadJson('test/data/facesFaceDescriptors.json')
return detections.map((detection, i) => ({
detection,
......@@ -112,7 +106,6 @@ export type InjectNetArgs = {
tinyYolov2: TinyYolov2
}
export type DescribeWithNetsOptions = {
withAllFacesSsdMobilenetv1?: boolean
withAllFacesTinyFaceDetector?: boolean
......@@ -127,21 +120,6 @@ export type DescribeWithNetsOptions = {
withTinyYolov2?: WithTinyYolov2Options
}
async function loadNetWeights(uri: string): Promise<Float32Array> {
return new Float32Array(await (await fetch(uri)).arrayBuffer())
}
async function initNet<TNet extends NeuralNetwork<any>>(
net: TNet,
uncompressedFilename: string | boolean,
isUnusedModel: boolean = false
) {
const url = uncompressedFilename
? await loadNetWeights(`base/weights_uncompressed/${uncompressedFilename}`)
: (isUnusedModel ? 'base/weights_unused' : 'base/weights')
await net.load(url)
}
export function describeWithNets(
description: string,
options: DescribeWithNetsOptions,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment