Commit 60801128 by vincent

update tfjs-core to latest

parent 3a314e8d
let spec_files = ['**/*.test.ts'].concat( let spec_files = ['**/*.test.ts'].concat(
process.env.EXCLUDE_UNCOMPRESSED process.env.EXCLUDE_UNCOMPRESSED
? ['!**/*.uncompressed.test.ts'] ? ['!**/*.uncompressed.test.ts']
......
...@@ -38,6 +38,7 @@ exclude = exclude.concat( ...@@ -38,6 +38,7 @@ exclude = exclude.concat(
// exclude nodejs tests // exclude nodejs tests
exclude = exclude.concat(['**/*.node.test.ts']) exclude = exclude.concat(['**/*.node.test.ts'])
exclude = exclude.concat(['test/env.node.ts'])
module.exports = function(config) { module.exports = function(config) {
......
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
"build": "rm -rf ./build && rm -rf ./dist && npm run rollup && npm run rollup-min && npm run tsc && npm run tsc-es6", "build": "rm -rf ./build && rm -rf ./dist && npm run rollup && npm run rollup-min && npm run tsc && npm run tsc-es6",
"test": "karma start", "test": "karma start",
"test-browser": "karma start --single-run", "test-browser": "karma start --single-run",
"test-node": "ts-node node_modules/jasmine/bin/jasmine --config=jasmine-node.js", "test-node": "ts-node -r ./test/env.node.ts node_modules/jasmine/bin/jasmine --config=jasmine-node.js",
"test-all": "npm run test-browser-exclude-uncompressed && npm run test-node-exclude-uncompressed", "test-all": "npm run test-browser-exclude-uncompressed && npm run test-node-exclude-uncompressed",
"test-all-include-uncompressed": "npm run test-browser && npm run test-node", "test-all-include-uncompressed": "npm run test-browser && npm run test-node",
"test-facelandmarknets": "set UUT=faceLandmarkNet&& karma start", "test-facelandmarknets": "set UUT=faceLandmarkNet&& karma start",
...@@ -26,7 +26,7 @@ ...@@ -26,7 +26,7 @@
"test-cpu": "set BACKEND_CPU=true&& karma start", "test-cpu": "set BACKEND_CPU=true&& karma start",
"test-exclude-uncompressed": "set EXCLUDE_UNCOMPRESSED=true&& karma start", "test-exclude-uncompressed": "set EXCLUDE_UNCOMPRESSED=true&& karma start",
"test-browser-exclude-uncompressed": "set EXCLUDE_UNCOMPRESSED=true&& karma start --single-run", "test-browser-exclude-uncompressed": "set EXCLUDE_UNCOMPRESSED=true&& karma start --single-run",
"test-node-exclude-uncompressed": "set EXCLUDE_UNCOMPRESSED=true&& ts-node node_modules/jasmine/bin/jasmine --config=jasmine-node.js", "test-node-exclude-uncompressed": "set EXCLUDE_UNCOMPRESSED=true&& npm run test-node",
"docs": "typedoc --options ./typedoc.config.js ./src" "docs": "typedoc --options ./typedoc.config.js ./src"
}, },
"keywords": [ "keywords": [
...@@ -39,27 +39,27 @@ ...@@ -39,27 +39,27 @@
"author": "justadudewhohacks", "author": "justadudewhohacks",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@tensorflow/tfjs-core": "1.2.2", "@tensorflow/tfjs-core": "1.2.9",
"tfjs-image-recognition-base": "^0.6.1", "tfjs-image-recognition-base": "^0.6.2",
"tslib": "^1.10.0" "tslib": "^1.10.0"
}, },
"devDependencies": { "devDependencies": {
"@tensorflow/tfjs-node": "^1.2.3", "@tensorflow/tfjs-node": "^1.2.9",
"@types/jasmine": "^3.3.13", "@types/jasmine": "^3.4.0",
"@types/node": "^12.0.10", "@types/node": "^12.7.5",
"canvas": "2.5.0", "canvas": "2.5.0",
"jasmine": "^3.4.0", "jasmine": "^3.4.0",
"jasmine-core": "^3.4.0", "jasmine-core": "^3.4.0",
"karma": "^4.1.0", "karma": "^4.3.0",
"karma-chrome-launcher": "^2.2.0", "karma-chrome-launcher": "^2.2.0",
"karma-jasmine": "^2.0.1", "karma-jasmine": "^2.0.1",
"karma-typescript": "^4.1.0", "karma-typescript": "^4.1.1",
"rollup": "^1.16.2", "rollup": "^1.21.2",
"rollup-plugin-commonjs": "^10.0.1", "rollup-plugin-commonjs": "^10.1.0",
"rollup-plugin-node-resolve": "^5.1.0", "rollup-plugin-node-resolve": "^5.2.0",
"rollup-plugin-typescript2": "^0.21.2", "rollup-plugin-typescript2": "^0.21.2",
"rollup-plugin-uglify": "^6.0.2", "rollup-plugin-uglify": "^6.0.3",
"ts-node": "^8.3.0", "ts-node": "^8.3.0",
"typescript": "^3.5.2" "typescript": "^3.6.3"
} }
} }
import { NeuralNetwork } from 'tfjs-image-recognition-base';
/**
 * Abstraction over the two test environments (browser via karma, Node.js
 * via jasmine + tfjs-node), so spec files can load fixtures and initialize
 * networks without knowing which environment they run in.
 */
export type TestEnv = {
  /** Resolves an image fixture by project-relative uri. */
  loadImage: (uri: string) => Promise<HTMLImageElement>
  /** Loads and parses a JSON fixture by project-relative uri. */
  loadJson: <T> (uri: string) => Promise<T>
  /**
   * Loads weights into the given network. The optional arguments are only
   * meaningful in the browser environment (uncompressed weight files /
   * the weights_unused directory); the Node.js implementation ignores them.
   */
  initNet: <TNet extends NeuralNetwork<any>>(
    net: TNet,
    uncompressedFilename?: string | boolean,
    isUnusedModel?: boolean
  ) => Promise<void>
}
\ No newline at end of file
import * as fs from 'fs';
import * as path from 'path';
import { NeuralNetwork } from 'tfjs-image-recognition-base';
import { env } from '../src';
import { TestEnv } from './Environment';
// Register the tfjs Node.js backend before any tensors are created.
require('@tensorflow/tfjs-node')
// node-canvas provides the DOM-like classes the library expects in a browser.
const canvas = require('canvas')
const { Canvas, Image, ImageData } = canvas
// Monkey-patch the face-api environment so image handling works under Node.js.
env.monkeyPatch({ Canvas, Image, ImageData })
/**
 * Loads an image fixture from disk via node-canvas, resolving the uri
 * relative to the project root (one directory above this file).
 */
async function loadImageNode(uri: string): Promise<HTMLImageElement> {
  const absolutePath = path.resolve(__dirname, '../', uri)
  return canvas.loadImage(absolutePath)
}
/**
 * Reads and parses a JSON fixture from disk, resolving the uri relative
 * to the project root (one directory above this file).
 */
async function loadJsonNode<T>(uri: string): Promise<T> {
  const absolutePath = path.resolve(__dirname, '../', uri)
  const contents = fs.readFileSync(absolutePath).toString()
  return JSON.parse(contents)
}
/**
 * Initializes a network by loading its weights from the local
 * weights directory at the project root.
 */
export async function initNetNode<TNet extends NeuralNetwork<any>>(net: TNet) {
  const weightsDir = path.resolve(__dirname, '../weights')
  await net.loadFromDisk(weightsDir)
}
// Bundle the Node.js implementations into a single TestEnv and expose it
// globally, so the shared test/env.ts can pick it up at runtime via
// getTestEnv() without importing Node-only modules.
const nodeTestEnv: TestEnv = {
  initNet: initNetNode,
  loadImage: loadImageNode,
  loadJson: loadJsonNode,
}

global['nodeTestEnv'] = nodeTestEnv
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core'; import * as tf from '@tensorflow/tfjs-core';
import { fetchNetWeights, NeuralNetwork } from 'tfjs-image-recognition-base'; import { fetchNetWeights, NeuralNetwork } from 'tfjs-image-recognition-base';
import { env, fetchImage, fetchJson } from '../src'; import { fetchImage, fetchJson } from '../src';
import { TestEnv } from './Environment';
export let fs: any, path: any, canvas: any
jasmine.DEFAULT_TIMEOUT_INTERVAL = 60000 jasmine.DEFAULT_TIMEOUT_INTERVAL = 60000
if (env.isNodejs()) { if (typeof window !== 'undefined' && window['__karma__'] && (window['__karma__'].config.jasmine.args as string[]).some(arg => arg === 'backend_cpu')) {
require('@tensorflow/tfjs-node')
fs = require('fs')
path = require('path')
canvas = require('canvas')
const { Canvas, Image, ImageData } = canvas
env.monkeyPatch({ Canvas, Image, ImageData })
} else {
if ((window['__karma__'].config.jasmine.args as string[]).some(arg => arg === 'backend_cpu')) {
tf.setBackend('cpu') tf.setBackend('cpu')
}
} }
export async function initNet<TNet extends NeuralNetwork<any>>( async function loadImageBrowser(uri: string): Promise<HTMLImageElement> {
return fetchImage(`base${uri.startsWith('/') ? '' : '/'}${uri}`)
}
async function loadJsonBrowser<T>(uri: string): Promise<T> {
return fetchJson<T>(`base${uri.startsWith('/') ? '' : '/'}${uri}`)
}
async function initNetBrowser<TNet extends NeuralNetwork<any>>(
net: TNet, net: TNet,
uncompressedFilename: string | boolean, uncompressedFilename: string | boolean,
isUnusedModel: boolean = false isUnusedModel: boolean = false
) { ) {
if (env.isNodejs()) {
await net.loadFromDisk(path.resolve(__dirname, '../weights'))
} else {
const url = uncompressedFilename const url = uncompressedFilename
? await fetchNetWeights(`base/weights_uncompressed/${uncompressedFilename}`) ? await fetchNetWeights(`base/weights_uncompressed/${uncompressedFilename}`)
: (isUnusedModel ? 'base/weights_unused' : 'base/weights') : (isUnusedModel ? 'base/weights_unused' : 'base/weights')
await net.load(url) await net.load(url)
}
} }
export async function loadImage(uri: string): Promise<HTMLImageElement> { const browserTestEnv: TestEnv = {
if (env.isNodejs()) { loadImage: loadImageBrowser,
return canvas.loadImage(path.resolve(__dirname, '../', uri)) loadJson: loadJsonBrowser,
} initNet: initNetBrowser
return fetchImage(`base${uri.startsWith('/') ? '' : '/'}${uri}`)
} }
export async function loadJson<T>(uri: string): Promise<T> { export function getTestEnv(): TestEnv {
if (env.isNodejs()) { return global['nodeTestEnv'] || browserTestEnv
return JSON.parse(fs.readFileSync(path.resolve(__dirname, '../', uri)).toString())
}
return fetchJson<T>(`base${uri.startsWith('/') ? '' : '/'}${uri}`)
} }
...@@ -2,7 +2,7 @@ import * as tf from '@tensorflow/tfjs-core'; ...@@ -2,7 +2,7 @@ import * as tf from '@tensorflow/tfjs-core';
import { createCanvasFromMedia, NetInput, toNetInput } from '../../../src'; import { createCanvasFromMedia, NetInput, toNetInput } from '../../../src';
import { AgeAndGenderPrediction } from '../../../src/ageGenderNet/types'; import { AgeAndGenderPrediction } from '../../../src/ageGenderNet/types';
import { loadImage } from '../../env'; import { getTestEnv } from '../../env';
import { describeWithBackend, describeWithNets, expectAllTensorsReleased } from '../../utils'; import { describeWithBackend, describeWithNets, expectAllTensorsReleased } from '../../utils';
function expectResultsAngry(result: AgeAndGenderPrediction) { function expectResultsAngry(result: AgeAndGenderPrediction) {
...@@ -25,8 +25,8 @@ describeWithBackend('ageGenderNet', () => { ...@@ -25,8 +25,8 @@ describeWithBackend('ageGenderNet', () => {
let imgElSurprised: HTMLImageElement let imgElSurprised: HTMLImageElement
beforeAll(async () => { beforeAll(async () => {
imgElAngry = await loadImage('test/images/angry_cropped.jpg') imgElAngry = await getTestEnv().loadImage('test/images/angry_cropped.jpg')
imgElSurprised = await loadImage('test/images/surprised_cropped.jpg') imgElSurprised = await getTestEnv().loadImage('test/images/surprised_cropped.jpg')
}) })
describeWithNets('quantized weights', { withAgeGenderNet: { quantized: true } }, ({ ageGenderNet }) => { describeWithNets('quantized weights', { withAgeGenderNet: { quantized: true } }, ({ ageGenderNet }) => {
......
import { createCanvasFromMedia, extractFaceTensors, Rect, tf } from '../../../src'; import { createCanvasFromMedia, extractFaceTensors, Rect, tf } from '../../../src';
import { loadImage } from '../../env'; import { getTestEnv } from '../../env';
import { describeWithBackend } from '../../utils'; import { describeWithBackend } from '../../utils';
describeWithBackend('extractFaceTensors', () => { describeWithBackend('extractFaceTensors', () => {
...@@ -7,7 +7,7 @@ describeWithBackend('extractFaceTensors', () => { ...@@ -7,7 +7,7 @@ describeWithBackend('extractFaceTensors', () => {
let imgTensor: tf.Tensor3D let imgTensor: tf.Tensor3D
beforeAll(async () => { beforeAll(async () => {
imgTensor = tf.browser.fromPixels(createCanvasFromMedia(await loadImage('test/images/face1.png'))) imgTensor = tf.browser.fromPixels(createCanvasFromMedia(await getTestEnv().loadImage('test/images/face1.png')))
}) })
describe('extracts tensors', () => { describe('extracts tensors', () => {
......
import { createCanvasFromMedia, env, extractFaces, Rect } from '../../../src'; import { createCanvasFromMedia, env, extractFaces, Rect } from '../../../src';
import { loadImage } from '../../env'; import { getTestEnv } from '../../env';
describe('extractFaces', () => { describe('extractFaces', () => {
let imgEl: HTMLImageElement, canvasEl: HTMLCanvasElement, Canvas: typeof HTMLCanvasElement let imgEl: HTMLImageElement, canvasEl: HTMLCanvasElement, Canvas: typeof HTMLCanvasElement
beforeAll(async () => { beforeAll(async () => {
imgEl = await loadImage('test/images/face1.png') imgEl = await getTestEnv().loadImage('test/images/face1.png')
canvasEl = createCanvasFromMedia(imgEl) canvasEl = createCanvasFromMedia(imgEl)
Canvas = env.getEnv().Canvas Canvas = env.getEnv().Canvas
}) })
......
import * as tf from '@tensorflow/tfjs-core'; import * as tf from '@tensorflow/tfjs-core';
import { createCanvasFromMedia, NetInput, toNetInput } from '../../../src'; import { createCanvasFromMedia, NetInput, toNetInput } from '../../../src';
import { loadImage } from '../../env';
import { describeWithBackend, describeWithNets, expectAllTensorsReleased } from '../../utils';
import { FaceExpressions } from '../../../src/faceExpressionNet/FaceExpressions'; import { FaceExpressions } from '../../../src/faceExpressionNet/FaceExpressions';
import { getTestEnv } from '../../env';
import { describeWithBackend, describeWithNets, expectAllTensorsReleased } from '../../utils';
describeWithBackend('faceExpressionNet', () => { describeWithBackend('faceExpressionNet', () => {
...@@ -11,8 +11,8 @@ describeWithBackend('faceExpressionNet', () => { ...@@ -11,8 +11,8 @@ describeWithBackend('faceExpressionNet', () => {
let imgElSurprised: HTMLImageElement let imgElSurprised: HTMLImageElement
beforeAll(async () => { beforeAll(async () => {
imgElAngry = await loadImage('test/images/angry_cropped.jpg') imgElAngry = await getTestEnv().loadImage('test/images/angry_cropped.jpg')
imgElSurprised = await loadImage('test/images/surprised_cropped.jpg') imgElSurprised = await getTestEnv().loadImage('test/images/surprised_cropped.jpg')
}) })
describeWithNets('quantized weights', { withFaceExpressionNet: { quantized: true } }, ({ faceExpressionNet }) => { describeWithNets('quantized weights', { withFaceExpressionNet: { quantized: true } }, ({ faceExpressionNet }) => {
......
...@@ -2,7 +2,7 @@ import * as tf from '@tensorflow/tfjs-core'; ...@@ -2,7 +2,7 @@ import * as tf from '@tensorflow/tfjs-core';
import { createCanvasFromMedia, IDimensions, isTensor3D, NetInput, Point, TMediaElement, toNetInput } from '../../../src'; import { createCanvasFromMedia, IDimensions, isTensor3D, NetInput, Point, TMediaElement, toNetInput } from '../../../src';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68'; import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { loadImage, loadJson } from '../../env'; import { getTestEnv } from '../../env';
import { import {
describeWithBackend, describeWithBackend,
describeWithNets, describeWithNets,
...@@ -29,12 +29,12 @@ describeWithBackend('faceLandmark68Net', () => { ...@@ -29,12 +29,12 @@ describeWithBackend('faceLandmark68Net', () => {
let faceLandmarkPositionsRect: Point[] let faceLandmarkPositionsRect: Point[]
beforeAll(async () => { beforeAll(async () => {
imgEl1 = await loadImage('test/images/face1.png') imgEl1 = await getTestEnv().loadImage('test/images/face1.png')
imgEl2 = await loadImage('test/images/face2.png') imgEl2 = await getTestEnv().loadImage('test/images/face2.png')
imgElRect = await loadImage('test/images/face_rectangular.png') imgElRect = await getTestEnv().loadImage('test/images/face_rectangular.png')
faceLandmarkPositions1 = await loadJson<Point[]>('test/data/faceLandmarkPositions1.json') faceLandmarkPositions1 = await getTestEnv().loadJson<Point[]>('test/data/faceLandmarkPositions1.json')
faceLandmarkPositions2 = await loadJson<Point[]>('test/data/faceLandmarkPositions2.json') faceLandmarkPositions2 = await getTestEnv().loadJson<Point[]>('test/data/faceLandmarkPositions2.json')
faceLandmarkPositionsRect = await loadJson<Point[]>('test/data/faceLandmarkPositionsRect.json') faceLandmarkPositionsRect = await getTestEnv().loadJson<Point[]>('test/data/faceLandmarkPositionsRect.json')
}) })
describeWithNets('quantized weights', { withFaceLandmark68Net: { quantized: true } }, ({ faceLandmark68Net }) => { describeWithNets('quantized weights', { withFaceLandmark68Net: { quantized: true } }, ({ faceLandmark68Net }) => {
......
import { Point } from '../../../src'; import { Point } from '../../../src';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68'; import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { loadImage, loadJson } from '../../env'; import { getTestEnv } from '../../env';
import { describeWithBackend, describeWithNets, expectPointClose } from '../../utils'; import { describeWithBackend, describeWithNets, expectPointClose } from '../../utils';
describeWithBackend('faceLandmark68Net, uncompressed', () => { describeWithBackend('faceLandmark68Net, uncompressed', () => {
...@@ -11,10 +11,10 @@ describeWithBackend('faceLandmark68Net, uncompressed', () => { ...@@ -11,10 +11,10 @@ describeWithBackend('faceLandmark68Net, uncompressed', () => {
let faceLandmarkPositionsRect: Point[] let faceLandmarkPositionsRect: Point[]
beforeAll(async () => { beforeAll(async () => {
imgEl1 = await loadImage('test/images/face1.png') imgEl1 = await getTestEnv().loadImage('test/images/face1.png')
imgElRect = await loadImage('test/images/face_rectangular.png') imgElRect = await getTestEnv().loadImage('test/images/face_rectangular.png')
faceLandmarkPositions1 = await loadJson<Point[]>('test/data/faceLandmarkPositions1.json') faceLandmarkPositions1 = await getTestEnv().loadJson<Point[]>('test/data/faceLandmarkPositions1.json')
faceLandmarkPositionsRect = await loadJson<Point[]>('test/data/faceLandmarkPositionsRect.json') faceLandmarkPositionsRect = await getTestEnv().loadJson<Point[]>('test/data/faceLandmarkPositionsRect.json')
}) })
describeWithNets('uncompressed weights', { withFaceLandmark68Net: { quantized: false } }, ({ faceLandmark68Net }) => { describeWithNets('uncompressed weights', { withFaceLandmark68Net: { quantized: false } }, ({ faceLandmark68Net }) => {
......
...@@ -2,7 +2,7 @@ import * as tf from '@tensorflow/tfjs-core'; ...@@ -2,7 +2,7 @@ import * as tf from '@tensorflow/tfjs-core';
import { createCanvasFromMedia, IDimensions, isTensor3D, NetInput, Point, TMediaElement, toNetInput } from '../../../src'; import { createCanvasFromMedia, IDimensions, isTensor3D, NetInput, Point, TMediaElement, toNetInput } from '../../../src';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68'; import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { loadImage, loadJson } from '../../env'; import { getTestEnv } from '../../env';
import { describeWithBackend, describeWithNets, expectAllTensorsReleased, expectPointClose } from '../../utils'; import { describeWithBackend, describeWithNets, expectAllTensorsReleased, expectPointClose } from '../../utils';
function getInputDims (input: tf.Tensor | TMediaElement): IDimensions { function getInputDims (input: tf.Tensor | TMediaElement): IDimensions {
...@@ -23,12 +23,12 @@ describeWithBackend('faceLandmark68TinyNet', () => { ...@@ -23,12 +23,12 @@ describeWithBackend('faceLandmark68TinyNet', () => {
let faceLandmarkPositionsRect: Point[] let faceLandmarkPositionsRect: Point[]
beforeAll(async () => { beforeAll(async () => {
imgEl1 = await loadImage('test/images/face1.png') imgEl1 = await getTestEnv().loadImage('test/images/face1.png')
imgEl2 = await loadImage('test/images/face2.png') imgEl2 = await getTestEnv().loadImage('test/images/face2.png')
imgElRect = await loadImage('test/images/face_rectangular.png') imgElRect = await getTestEnv().loadImage('test/images/face_rectangular.png')
faceLandmarkPositions1 = await loadJson<Point[]>('test/data/faceLandmarkPositions1Tiny.json') faceLandmarkPositions1 = await getTestEnv().loadJson<Point[]>('test/data/faceLandmarkPositions1Tiny.json')
faceLandmarkPositions2 = await loadJson<Point[]>('test/data/faceLandmarkPositions2Tiny.json') faceLandmarkPositions2 = await getTestEnv().loadJson<Point[]>('test/data/faceLandmarkPositions2Tiny.json')
faceLandmarkPositionsRect = await loadJson<Point[]>('test/data/faceLandmarkPositionsRectTiny.json') faceLandmarkPositionsRect = await getTestEnv().loadJson<Point[]>('test/data/faceLandmarkPositionsRectTiny.json')
}) })
describeWithNets('quantized weights', { withFaceLandmark68TinyNet: { quantized: true } }, ({ faceLandmark68TinyNet }) => { describeWithNets('quantized weights', { withFaceLandmark68TinyNet: { quantized: true } }, ({ faceLandmark68TinyNet }) => {
......
import { Point } from '../../../src'; import { Point } from '../../../src';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68'; import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { loadImage, loadJson } from '../../env'; import { getTestEnv } from '../../env';
import { describeWithNets, expectPointClose, describeWithBackend } from '../../utils'; import { describeWithBackend, describeWithNets, expectPointClose } from '../../utils';
describeWithBackend('faceLandmark68TinyNet, uncompressed', () => { describeWithBackend('faceLandmark68TinyNet, uncompressed', () => {
...@@ -11,10 +11,10 @@ describeWithBackend('faceLandmark68TinyNet, uncompressed', () => { ...@@ -11,10 +11,10 @@ describeWithBackend('faceLandmark68TinyNet, uncompressed', () => {
let faceLandmarkPositionsRect: Point[] let faceLandmarkPositionsRect: Point[]
beforeAll(async () => { beforeAll(async () => {
imgEl1 = await loadImage('test/images/face1.png') imgEl1 = await getTestEnv().loadImage('test/images/face1.png')
imgElRect = await loadImage('test/images/face_rectangular.png') imgElRect = await getTestEnv().loadImage('test/images/face_rectangular.png')
faceLandmarkPositions1 = await loadJson<Point[]>('test/data/faceLandmarkPositions1Tiny.json') faceLandmarkPositions1 = await getTestEnv().loadJson<Point[]>('test/data/faceLandmarkPositions1Tiny.json')
faceLandmarkPositionsRect = await loadJson<Point[]>('test/data/faceLandmarkPositionsRectTiny.json') faceLandmarkPositionsRect = await getTestEnv().loadJson<Point[]>('test/data/faceLandmarkPositionsRectTiny.json')
}) })
describeWithNets('uncompressed weights', { withFaceLandmark68TinyNet: { quantized: false } }, ({ faceLandmark68TinyNet }) => { describeWithNets('uncompressed weights', { withFaceLandmark68TinyNet: { quantized: false } }, ({ faceLandmark68TinyNet }) => {
......
...@@ -2,7 +2,7 @@ import * as tf from '@tensorflow/tfjs-core'; ...@@ -2,7 +2,7 @@ import * as tf from '@tensorflow/tfjs-core';
import { createCanvasFromMedia, NetInput, toNetInput } from '../../../src'; import { createCanvasFromMedia, NetInput, toNetInput } from '../../../src';
import { euclideanDistance } from '../../../src/euclideanDistance'; import { euclideanDistance } from '../../../src/euclideanDistance';
import { loadImage, loadJson } from '../../env'; import { getTestEnv } from '../../env';
import { describeWithBackend, describeWithNets, expectAllTensorsReleased } from '../../utils'; import { describeWithBackend, describeWithNets, expectAllTensorsReleased } from '../../utils';
describeWithBackend('faceRecognitionNet', () => { describeWithBackend('faceRecognitionNet', () => {
...@@ -15,12 +15,12 @@ describeWithBackend('faceRecognitionNet', () => { ...@@ -15,12 +15,12 @@ describeWithBackend('faceRecognitionNet', () => {
let faceDescriptorRect: number[] let faceDescriptorRect: number[]
beforeAll(async () => { beforeAll(async () => {
imgEl1 = createCanvasFromMedia(await loadImage('test/images/face1.png')) imgEl1 = createCanvasFromMedia(await getTestEnv().loadImage('test/images/face1.png'))
imgEl2 = createCanvasFromMedia(await loadImage('test/images/face2.png')) imgEl2 = createCanvasFromMedia(await getTestEnv().loadImage('test/images/face2.png'))
imgElRect = createCanvasFromMedia(await loadImage('test/images/face_rectangular.png')) imgElRect = createCanvasFromMedia(await getTestEnv().loadImage('test/images/face_rectangular.png'))
faceDescriptor1 = await loadJson<number[]>('test/data/faceDescriptor1.json') faceDescriptor1 = await getTestEnv().loadJson<number[]>('test/data/faceDescriptor1.json')
faceDescriptor2 = await loadJson<number[]>('test/data/faceDescriptor2.json') faceDescriptor2 = await getTestEnv().loadJson<number[]>('test/data/faceDescriptor2.json')
faceDescriptorRect = await loadJson<number[]>('test/data/faceDescriptorRect.json') faceDescriptorRect = await getTestEnv().loadJson<number[]>('test/data/faceDescriptorRect.json')
}) })
describeWithNets('quantized weights', { withFaceRecognitionNet: { quantized: true } }, ({ faceRecognitionNet }) => { describeWithNets('quantized weights', { withFaceRecognitionNet: { quantized: true } }, ({ faceRecognitionNet }) => {
......
import { createCanvasFromMedia } from '../../../src'; import { createCanvasFromMedia } from '../../../src';
import { euclideanDistance } from '../../../src/euclideanDistance'; import { euclideanDistance } from '../../../src/euclideanDistance';
import { loadImage, loadJson } from '../../env'; import { getTestEnv } from '../../env';
import { describeWithBackend, describeWithNets } from '../../utils'; import { describeWithBackend, describeWithNets } from '../../utils';
describeWithBackend('faceRecognitionNet, uncompressed', () => { describeWithBackend('faceRecognitionNet, uncompressed', () => {
...@@ -11,10 +11,10 @@ describeWithBackend('faceRecognitionNet, uncompressed', () => { ...@@ -11,10 +11,10 @@ describeWithBackend('faceRecognitionNet, uncompressed', () => {
let faceDescriptorRect: number[] let faceDescriptorRect: number[]
beforeAll(async () => { beforeAll(async () => {
imgEl1 = createCanvasFromMedia(await loadImage('test/images/face1.png')) imgEl1 = createCanvasFromMedia(await getTestEnv().loadImage('test/images/face1.png'))
imgElRect = createCanvasFromMedia(await loadImage('test/images/face_rectangular.png')) imgElRect = createCanvasFromMedia(await getTestEnv().loadImage('test/images/face_rectangular.png'))
faceDescriptor1 = await loadJson<number[]>('test/data/faceDescriptor1.json') faceDescriptor1 = await getTestEnv().loadJson<number[]>('test/data/faceDescriptor1.json')
faceDescriptorRect = await loadJson<number[]>('test/data/faceDescriptorRect.json') faceDescriptorRect = await getTestEnv().loadJson<number[]>('test/data/faceDescriptorRect.json')
}) })
describeWithNets('uncompressed weights', { withFaceRecognitionNet: { quantized: false } }, ({ faceRecognitionNet }) => { describeWithNets('uncompressed weights', { withFaceRecognitionNet: { quantized: false } }, ({ faceRecognitionNet }) => {
......
...@@ -3,7 +3,7 @@ import { WithAge } from '../../../src/factories/WithAge'; ...@@ -3,7 +3,7 @@ import { WithAge } from '../../../src/factories/WithAge';
import { WithFaceDetection } from '../../../src/factories/WithFaceDetection'; import { WithFaceDetection } from '../../../src/factories/WithFaceDetection';
import { WithFaceExpressions } from '../../../src/factories/WithFaceExpressions'; import { WithFaceExpressions } from '../../../src/factories/WithFaceExpressions';
import { WithGender } from '../../../src/factories/WithGender'; import { WithGender } from '../../../src/factories/WithGender';
import { loadImage } from '../../env'; import { getTestEnv } from '../../env';
import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes'; import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks'; import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions'; import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
...@@ -49,7 +49,7 @@ describeWithBackend('globalApi', () => { ...@@ -49,7 +49,7 @@ describeWithBackend('globalApi', () => {
let expectedFullFaceDescriptions: ExpectedFullFaceDescription[] let expectedFullFaceDescriptions: ExpectedFullFaceDescription[]
beforeAll(async () => { beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg') imgEl = await getTestEnv().loadImage('test/images/faces.jpg')
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes) expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes)
}) })
......
...@@ -2,7 +2,7 @@ import * as faceapi from '../../../src'; ...@@ -2,7 +2,7 @@ import * as faceapi from '../../../src';
import { WithAge } from '../../../src/factories/WithAge'; import { WithAge } from '../../../src/factories/WithAge';
import { WithFaceExpressions } from '../../../src/factories/WithFaceExpressions'; import { WithFaceExpressions } from '../../../src/factories/WithFaceExpressions';
import { WithGender } from '../../../src/factories/WithGender'; import { WithGender } from '../../../src/factories/WithGender';
import { loadImage } from '../../env'; import { getTestEnv } from '../../env';
import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes'; import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks'; import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions'; import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
...@@ -40,7 +40,7 @@ describeWithBackend('globalApi', () => { ...@@ -40,7 +40,7 @@ describeWithBackend('globalApi', () => {
let expectedFullFaceDescriptions: ExpectedFullFaceDescription[] let expectedFullFaceDescriptions: ExpectedFullFaceDescription[]
beforeAll(async () => { beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg') imgEl = await getTestEnv().loadImage('test/images/faces.jpg')
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes) expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes)
}) })
......
import { IPoint } from '../../../src'; import { IPoint } from '../../../src';
import { loadImage, loadJson } from '../../env'; import { getTestEnv } from '../../env';
import { describeWithBackend, describeWithNets } from '../../utils'; import { describeWithBackend, describeWithNets } from '../../utils';
import { expectMtcnnResults } from './expectMtcnnResults'; import { expectMtcnnResults } from './expectMtcnnResults';
...@@ -9,8 +9,8 @@ describeWithBackend('mtcnn.forward', () => { ...@@ -9,8 +9,8 @@ describeWithBackend('mtcnn.forward', () => {
let expectedMtcnnLandmarks: IPoint[][] let expectedMtcnnLandmarks: IPoint[][]
beforeAll(async () => { beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg') imgEl = await getTestEnv().loadImage('test/images/faces.jpg')
expectedMtcnnLandmarks = await loadJson<IPoint[][]>('test/data/mtcnnFaceLandmarkPositions.json') expectedMtcnnLandmarks = await getTestEnv().loadJson<IPoint[][]>('test/data/mtcnnFaceLandmarkPositions.json')
}) })
// "quantized" actually means loaded from manifest.json, since there is no quantization applied to the mtcnn model // "quantized" actually means loaded from manifest.json, since there is no quantization applied to the mtcnn model
......
import { IPoint } from '../../../src'; import { IPoint } from '../../../src';
import { loadImage, loadJson } from '../../env'; import { getTestEnv } from '../../env';
import { describeWithBackend, describeWithNets } from '../../utils'; import { describeWithBackend, describeWithNets } from '../../utils';
import { expectMtcnnResults } from './expectMtcnnResults'; import { expectMtcnnResults } from './expectMtcnnResults';
...@@ -9,8 +9,8 @@ describeWithBackend('mtcnn.forward', () => { ...@@ -9,8 +9,8 @@ describeWithBackend('mtcnn.forward', () => {
let expectedMtcnnLandmarks: IPoint[][] let expectedMtcnnLandmarks: IPoint[][]
beforeAll(async () => { beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg') imgEl = await getTestEnv().loadImage('test/images/faces.jpg')
expectedMtcnnLandmarks = await loadJson<IPoint[][]>('test/data/mtcnnFaceLandmarkPositions.json') expectedMtcnnLandmarks = await getTestEnv().loadJson<IPoint[][]>('test/data/mtcnnFaceLandmarkPositions.json')
}) })
describeWithNets('uncompressed weights', { withMtcnn: { quantized: false } }, ({ mtcnn }) => { describeWithNets('uncompressed weights', { withMtcnn: { quantized: false } }, ({ mtcnn }) => {
......
import * as faceapi from '../../../src'; import * as faceapi from '../../../src';
import { MtcnnOptions } from '../../../src/mtcnn/MtcnnOptions'; import { MtcnnOptions } from '../../../src/mtcnn/MtcnnOptions';
import { loadImage } from '../../env'; import { getTestEnv } from '../../env';
import { expectFaceDetections } from '../../expectFaceDetections'; import { expectFaceDetections } from '../../expectFaceDetections';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks'; import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions'; import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
...@@ -20,7 +20,7 @@ describeWithBackend('mtcnn', () => { ...@@ -20,7 +20,7 @@ describeWithBackend('mtcnn', () => {
const expectedScores = [1.0, 1.0, 1.0, 1.0, 0.99, 0.99] const expectedScores = [1.0, 1.0, 1.0, 1.0, 0.99, 0.99]
beforeAll(async () => { beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg') imgEl = await getTestEnv().loadImage('test/images/faces.jpg')
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedMtcnnBoxes) expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedMtcnnBoxes)
}) })
......
import * as faceapi from '../../../src'; import * as faceapi from '../../../src';
import { loadImage } from '../../env'; import { getTestEnv } from '../../env';
import { expectFaceDetections } from '../../expectFaceDetections'; import { expectFaceDetections } from '../../expectFaceDetections';
import { describeWithBackend, describeWithNets } from '../../utils'; import { describeWithBackend, describeWithNets } from '../../utils';
import { expectedSsdBoxes } from './expectedBoxes'; import { expectedSsdBoxes } from './expectedBoxes';
...@@ -9,7 +9,7 @@ describeWithBackend('ssdMobilenetv1.locateFaces', () => { ...@@ -9,7 +9,7 @@ describeWithBackend('ssdMobilenetv1.locateFaces', () => {
let imgEl: HTMLImageElement let imgEl: HTMLImageElement
beforeAll(async () => { beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg') imgEl = await getTestEnv().loadImage('test/images/faces.jpg')
}) })
describeWithNets('quantized weights', { withSsdMobilenetv1: { quantized: true } }, ({ ssdMobilenetv1 }) => { describeWithNets('quantized weights', { withSsdMobilenetv1: { quantized: true } }, ({ ssdMobilenetv1 }) => {
......
import * as faceapi from '../../../src'; import * as faceapi from '../../../src';
import { loadImage } from '../../env'; import { getTestEnv } from '../../env';
import { expectFaceDetections } from '../../expectFaceDetections'; import { expectFaceDetections } from '../../expectFaceDetections';
import { describeWithBackend, describeWithNets } from '../../utils'; import { describeWithBackend, describeWithNets } from '../../utils';
import { expectedSsdBoxes } from './expectedBoxes'; import { expectedSsdBoxes } from './expectedBoxes';
...@@ -9,7 +9,7 @@ describeWithBackend('ssdMobilenetv1.locateFaces, uncompressed', () => { ...@@ -9,7 +9,7 @@ describeWithBackend('ssdMobilenetv1.locateFaces, uncompressed', () => {
let imgEl: HTMLImageElement let imgEl: HTMLImageElement
beforeAll(async () => { beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg') imgEl = await getTestEnv().loadImage('test/images/faces.jpg')
}) })
describeWithNets('uncompressed weights', { withSsdMobilenetv1: { quantized: false } }, ({ ssdMobilenetv1 }) => { describeWithNets('uncompressed weights', { withSsdMobilenetv1: { quantized: false } }, ({ ssdMobilenetv1 }) => {
......
...@@ -5,8 +5,8 @@ import { expectFaceDetections } from '../../expectFaceDetections'; ...@@ -5,8 +5,8 @@ import { expectFaceDetections } from '../../expectFaceDetections';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions'; import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks'; import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectedSsdBoxes } from './expectedBoxes'; import { expectedSsdBoxes } from './expectedBoxes';
import { loadImage } from '../../env';
import * as tf from '@tensorflow/tfjs-core'; import * as tf from '@tensorflow/tfjs-core';
import { getTestEnv } from '../../env';
describe('ssdMobilenetv1 - node', () => { describe('ssdMobilenetv1 - node', () => {
...@@ -15,7 +15,7 @@ describe('ssdMobilenetv1 - node', () => { ...@@ -15,7 +15,7 @@ describe('ssdMobilenetv1 - node', () => {
const expectedScores = [0.54, 0.81, 0.97, 0.88, 0.84, 0.61] const expectedScores = [0.54, 0.81, 0.97, 0.88, 0.84, 0.61]
beforeAll(async () => { beforeAll(async () => {
imgTensor = tf.browser.fromPixels(createCanvasFromMedia(await loadImage('test/images/faces.jpg'))) imgTensor = tf.browser.fromPixels(createCanvasFromMedia(await getTestEnv().loadImage('test/images/faces.jpg')))
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedSsdBoxes) expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedSsdBoxes)
}) })
......
...@@ -5,7 +5,7 @@ import { expectFaceDetections } from '../../expectFaceDetections'; ...@@ -5,7 +5,7 @@ import { expectFaceDetections } from '../../expectFaceDetections';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions'; import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks'; import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectedSsdBoxes } from './expectedBoxes'; import { expectedSsdBoxes } from './expectedBoxes';
import { loadImage } from '../../env'; import { getTestEnv } from '../../env';
describeWithBackend('ssdMobilenetv1', () => { describeWithBackend('ssdMobilenetv1', () => {
...@@ -14,7 +14,7 @@ describeWithBackend('ssdMobilenetv1', () => { ...@@ -14,7 +14,7 @@ describeWithBackend('ssdMobilenetv1', () => {
const expectedScores = [0.54, 0.81, 0.97, 0.88, 0.84, 0.61] const expectedScores = [0.54, 0.81, 0.97, 0.88, 0.84, 0.61]
beforeAll(async () => { beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg') imgEl = await getTestEnv().loadImage('test/images/faces.jpg')
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedSsdBoxes) expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedSsdBoxes)
}) })
......
import * as faceapi from '../../../src'; import * as faceapi from '../../../src';
import { loadImage } from '../../env'; import { getTestEnv } from '../../env';
import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes';
import { expectFaceDetections } from '../../expectFaceDetections'; import { expectFaceDetections } from '../../expectFaceDetections';
import { describeWithBackend, describeWithNets } from '../../utils'; import { describeWithBackend, describeWithNets } from '../../utils';
import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes';
describeWithBackend('tinyFaceDetector.locateFaces', () => { describeWithBackend('tinyFaceDetector.locateFaces', () => {
let imgEl: HTMLImageElement let imgEl: HTMLImageElement
beforeAll(async () => { beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg') imgEl = await getTestEnv().loadImage('test/images/faces.jpg')
}) })
describeWithNets('quantized weights', { withTinyFaceDetector: { quantized: true } }, ({ tinyFaceDetector }) => { describeWithNets('quantized weights', { withTinyFaceDetector: { quantized: true } }, ({ tinyFaceDetector }) => {
......
...@@ -5,8 +5,8 @@ import { expectFaceDetections } from '../../expectFaceDetections'; ...@@ -5,8 +5,8 @@ import { expectFaceDetections } from '../../expectFaceDetections';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions'; import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks'; import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes'; import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes';
import { loadImage } from '../../env';
import * as tf from '@tensorflow/tfjs-core'; import * as tf from '@tensorflow/tfjs-core';
import { getTestEnv } from '../../env';
describe('tinyFaceDetector - node', () => { describe('tinyFaceDetector - node', () => {
...@@ -15,7 +15,7 @@ describe('tinyFaceDetector - node', () => { ...@@ -15,7 +15,7 @@ describe('tinyFaceDetector - node', () => {
const expectedScores = [0.7, 0.82, 0.93, 0.86, 0.79, 0.84] const expectedScores = [0.7, 0.82, 0.93, 0.86, 0.79, 0.84]
beforeAll(async () => { beforeAll(async () => {
imgTensor = tf.browser.fromPixels(createCanvasFromMedia(await loadImage('test/images/faces.jpg'))) imgTensor = tf.browser.fromPixels(createCanvasFromMedia(await getTestEnv().loadImage('test/images/faces.jpg')))
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes) expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes)
}) })
......
...@@ -5,7 +5,7 @@ import { expectFaceDetections } from '../../expectFaceDetections'; ...@@ -5,7 +5,7 @@ import { expectFaceDetections } from '../../expectFaceDetections';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions'; import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks'; import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes'; import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes';
import { loadImage } from '../../env'; import { getTestEnv } from '../../env';
describeWithBackend('tinyFaceDetector', () => { describeWithBackend('tinyFaceDetector', () => {
...@@ -20,7 +20,7 @@ describeWithBackend('tinyFaceDetector', () => { ...@@ -20,7 +20,7 @@ describeWithBackend('tinyFaceDetector', () => {
} }
beforeAll(async () => { beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg') imgEl = await getTestEnv().loadImage('test/images/faces.jpg')
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes) expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes)
}) })
......
...@@ -10,7 +10,7 @@ import { FaceLandmark68Net } from '../src/faceLandmarkNet/FaceLandmark68Net'; ...@@ -10,7 +10,7 @@ import { FaceLandmark68Net } from '../src/faceLandmarkNet/FaceLandmark68Net';
import { FaceLandmark68TinyNet } from '../src/faceLandmarkNet/FaceLandmark68TinyNet'; import { FaceLandmark68TinyNet } from '../src/faceLandmarkNet/FaceLandmark68TinyNet';
import { SsdMobilenetv1 } from '../src/ssdMobilenetv1/SsdMobilenetv1'; import { SsdMobilenetv1 } from '../src/ssdMobilenetv1/SsdMobilenetv1';
import { TinyFaceDetector } from '../src/tinyFaceDetector/TinyFaceDetector'; import { TinyFaceDetector } from '../src/tinyFaceDetector/TinyFaceDetector';
import { initNet, loadJson } from './env'; import { getTestEnv } from './env';
export function expectMaxDelta(val1: number, val2: number, maxDelta: number) { export function expectMaxDelta(val1: number, val2: number, maxDelta: number) {
expect(Math.abs(val1 - val2)).toBeLessThanOrEqual(maxDelta) expect(Math.abs(val1 - val2)).toBeLessThanOrEqual(maxDelta)
...@@ -93,8 +93,8 @@ export async function assembleExpectedFullFaceDescriptions( ...@@ -93,8 +93,8 @@ export async function assembleExpectedFullFaceDescriptions(
detections: IRect[], detections: IRect[],
landmarksFile: string = 'facesFaceLandmarkPositions.json' landmarksFile: string = 'facesFaceLandmarkPositions.json'
): Promise<ExpectedFullFaceDescription[]> { ): Promise<ExpectedFullFaceDescription[]> {
const landmarks = await loadJson<any[]>(`test/data/${landmarksFile}`) const landmarks = await getTestEnv().loadJson<any[]>(`test/data/${landmarksFile}`)
const descriptors = await loadJson<any[]>('test/data/facesFaceDescriptors.json') const descriptors = await getTestEnv().loadJson<any[]>('test/data/facesFaceDescriptors.json')
return detections.map((detection, i) => ({ return detections.map((detection, i) => ({
detection, detection,
...@@ -211,63 +211,63 @@ export function describeWithNets( ...@@ -211,63 +211,63 @@ export function describeWithNets(
} = options } = options
if (withSsdMobilenetv1 || withAllFacesSsdMobilenetv1) { if (withSsdMobilenetv1 || withAllFacesSsdMobilenetv1) {
await initNet<SsdMobilenetv1>( await getTestEnv().initNet<SsdMobilenetv1>(
ssdMobilenetv1, ssdMobilenetv1,
!!withSsdMobilenetv1 && !withSsdMobilenetv1.quantized && 'ssd_mobilenetv1_model.weights' !!withSsdMobilenetv1 && !withSsdMobilenetv1.quantized && 'ssd_mobilenetv1_model.weights'
) )
} }
if (withTinyFaceDetector || withAllFacesTinyFaceDetector) { if (withTinyFaceDetector || withAllFacesTinyFaceDetector) {
await initNet<TinyFaceDetector>( await getTestEnv().initNet<TinyFaceDetector>(
tinyFaceDetector, tinyFaceDetector,
!!withTinyFaceDetector && !withTinyFaceDetector.quantized && 'tiny_face_detector_model.weights' !!withTinyFaceDetector && !withTinyFaceDetector.quantized && 'tiny_face_detector_model.weights'
) )
} }
if (withFaceLandmark68Net || withAllFacesSsdMobilenetv1 || withAllFacesTinyFaceDetector|| withAllFacesMtcnn || withAllFacesTinyYolov2) { if (withFaceLandmark68Net || withAllFacesSsdMobilenetv1 || withAllFacesTinyFaceDetector|| withAllFacesMtcnn || withAllFacesTinyYolov2) {
await initNet<FaceLandmark68Net>( await getTestEnv().initNet<FaceLandmark68Net>(
faceLandmark68Net, faceLandmark68Net,
!!withFaceLandmark68Net && !withFaceLandmark68Net.quantized && 'face_landmark_68_model.weights' !!withFaceLandmark68Net && !withFaceLandmark68Net.quantized && 'face_landmark_68_model.weights'
) )
} }
if (withFaceLandmark68TinyNet) { if (withFaceLandmark68TinyNet) {
await initNet<FaceLandmark68TinyNet>( await getTestEnv().initNet<FaceLandmark68TinyNet>(
faceLandmark68TinyNet, faceLandmark68TinyNet,
!!withFaceLandmark68TinyNet && !withFaceLandmark68TinyNet.quantized && 'face_landmark_68_tiny_model.weights' !!withFaceLandmark68TinyNet && !withFaceLandmark68TinyNet.quantized && 'face_landmark_68_tiny_model.weights'
) )
} }
if (withFaceRecognitionNet || withAllFacesSsdMobilenetv1 || withAllFacesTinyFaceDetector|| withAllFacesMtcnn || withAllFacesTinyYolov2) { if (withFaceRecognitionNet || withAllFacesSsdMobilenetv1 || withAllFacesTinyFaceDetector|| withAllFacesMtcnn || withAllFacesTinyYolov2) {
await initNet<FaceRecognitionNet>( await getTestEnv().initNet<FaceRecognitionNet>(
faceRecognitionNet, faceRecognitionNet,
!!withFaceRecognitionNet && !withFaceRecognitionNet.quantized && 'face_recognition_model.weights' !!withFaceRecognitionNet && !withFaceRecognitionNet.quantized && 'face_recognition_model.weights'
) )
} }
if (withMtcnn || withAllFacesMtcnn) { if (withMtcnn || withAllFacesMtcnn) {
await initNet<Mtcnn>( await getTestEnv().initNet<Mtcnn>(
mtcnn, mtcnn,
!!withMtcnn && !withMtcnn.quantized && 'mtcnn_model.weights' !!withMtcnn && !withMtcnn.quantized && 'mtcnn_model.weights'
) )
} }
if (withFaceExpressionNet) { if (withFaceExpressionNet) {
await initNet<FaceExpressionNet>( await getTestEnv().initNet<FaceExpressionNet>(
faceExpressionNet, faceExpressionNet,
!!withFaceExpressionNet && !withFaceExpressionNet.quantized && 'face_expression_model.weights' !!withFaceExpressionNet && !withFaceExpressionNet.quantized && 'face_expression_model.weights'
) )
} }
if (withAgeGenderNet) { if (withAgeGenderNet) {
await initNet<AgeGenderNet>( await getTestEnv().initNet<AgeGenderNet>(
ageGenderNet, ageGenderNet,
!!withAgeGenderNet && !withAgeGenderNet.quantized && 'age_gender_model.weights' !!withAgeGenderNet && !withAgeGenderNet.quantized && 'age_gender_model.weights'
) )
} }
if (withTinyYolov2 || withAllFacesTinyYolov2) { if (withTinyYolov2 || withAllFacesTinyYolov2) {
await initNet<TinyYolov2>( await getTestEnv().initNet<TinyYolov2>(
tinyYolov2, tinyYolov2,
!!withTinyYolov2 && !withTinyYolov2.quantized && 'tiny_yolov2_model.weights', !!withTinyYolov2 && !withTinyYolov2.quantized && 'tiny_yolov2_model.weights',
true true
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
"suppressImplicitAnyIndexErrors": true, "suppressImplicitAnyIndexErrors": true,
"strictNullChecks": true, "strictNullChecks": true,
"importHelpers": true, "importHelpers": true,
"skipLibCheck": true,
"outDir": "build/commonjs", "outDir": "build/commonjs",
"module": "commonjs", "module": "commonjs",
"target": "es5", "target": "es5",
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment