Commit 60801128 by vincent

update tfjs-core to latest

parent 3a314e8d
let spec_files = ['**/*.test.ts'].concat(
process.env.EXCLUDE_UNCOMPRESSED
? ['!**/*.uncompressed.test.ts']
......
......@@ -38,6 +38,7 @@ exclude = exclude.concat(
// exclude nodejs tests
exclude = exclude.concat(['**/*.node.test.ts'])
exclude = exclude.concat(['test/env.node.ts'])
module.exports = function(config) {
......
......@@ -13,7 +13,7 @@
"build": "rm -rf ./build && rm -rf ./dist && npm run rollup && npm run rollup-min && npm run tsc && npm run tsc-es6",
"test": "karma start",
"test-browser": "karma start --single-run",
"test-node": "ts-node node_modules/jasmine/bin/jasmine --config=jasmine-node.js",
"test-node": "ts-node -r ./test/env.node.ts node_modules/jasmine/bin/jasmine --config=jasmine-node.js",
"test-all": "npm run test-browser-exclude-uncompressed && npm run test-node-exclude-uncompressed",
"test-all-include-uncompressed": "npm run test-browser && npm run test-node",
"test-facelandmarknets": "set UUT=faceLandmarkNet&& karma start",
......@@ -26,7 +26,7 @@
"test-cpu": "set BACKEND_CPU=true&& karma start",
"test-exclude-uncompressed": "set EXCLUDE_UNCOMPRESSED=true&& karma start",
"test-browser-exclude-uncompressed": "set EXCLUDE_UNCOMPRESSED=true&& karma start --single-run",
"test-node-exclude-uncompressed": "set EXCLUDE_UNCOMPRESSED=true&& ts-node node_modules/jasmine/bin/jasmine --config=jasmine-node.js",
"test-node-exclude-uncompressed": "set EXCLUDE_UNCOMPRESSED=true&& npm run test-node",
"docs": "typedoc --options ./typedoc.config.js ./src"
},
"keywords": [
......@@ -39,27 +39,27 @@
"author": "justadudewhohacks",
"license": "MIT",
"dependencies": {
"@tensorflow/tfjs-core": "1.2.2",
"tfjs-image-recognition-base": "^0.6.1",
"@tensorflow/tfjs-core": "1.2.9",
"tfjs-image-recognition-base": "^0.6.2",
"tslib": "^1.10.0"
},
"devDependencies": {
"@tensorflow/tfjs-node": "^1.2.3",
"@types/jasmine": "^3.3.13",
"@types/node": "^12.0.10",
"@tensorflow/tfjs-node": "^1.2.9",
"@types/jasmine": "^3.4.0",
"@types/node": "^12.7.5",
"canvas": "2.5.0",
"jasmine": "^3.4.0",
"jasmine-core": "^3.4.0",
"karma": "^4.1.0",
"karma": "^4.3.0",
"karma-chrome-launcher": "^2.2.0",
"karma-jasmine": "^2.0.1",
"karma-typescript": "^4.1.0",
"rollup": "^1.16.2",
"rollup-plugin-commonjs": "^10.0.1",
"rollup-plugin-node-resolve": "^5.1.0",
"karma-typescript": "^4.1.1",
"rollup": "^1.21.2",
"rollup-plugin-commonjs": "^10.1.0",
"rollup-plugin-node-resolve": "^5.2.0",
"rollup-plugin-typescript2": "^0.21.2",
"rollup-plugin-uglify": "^6.0.2",
"rollup-plugin-uglify": "^6.0.3",
"ts-node": "^8.3.0",
"typescript": "^3.5.2"
"typescript": "^3.6.3"
}
}
import { NeuralNetwork } from 'tfjs-image-recognition-base';
// Abstraction over the two test environments (browser vs. node):
// each environment supplies its own way of loading image/JSON fixtures
// and of initializing a neural network's weights.
export type TestEnv = {
  // Resolves a fixture uri (path relative to the project root) to an image element.
  loadImage: (uri: string) => Promise<HTMLImageElement>
  // Loads and parses a JSON fixture file.
  loadJson: <T> (uri: string) => Promise<T>
  // Loads the weights for the given net. uncompressedFilename selects an
  // uncompressed weight file when set (browser env only — TODO confirm);
  // isUnusedModel selects the unused-weights fixture directory.
  initNet: <TNet extends NeuralNetwork<any>>(
    net: TNet,
    uncompressedFilename?: string | boolean,
    isUnusedModel?: boolean
  ) => any
}
\ No newline at end of file
import * as fs from 'fs';
import * as path from 'path';
import { NeuralNetwork } from 'tfjs-image-recognition-base';
import { env } from '../src';
import { TestEnv } from './Environment';
require('@tensorflow/tfjs-node')
const canvas = require('canvas')
const { Canvas, Image, ImageData } = canvas
env.monkeyPatch({ Canvas, Image, ImageData })
// Loads a test image from disk via node-canvas, resolving the uri
// relative to the project root (one level above this test directory).
async function loadImageNode(uri: string): Promise<HTMLImageElement> {
  const imageFilePath = path.resolve(__dirname, '../', uri)
  return canvas.loadImage(imageFilePath)
}
// Reads and parses a JSON fixture from disk, resolving the uri
// relative to the project root (one level above this test directory).
async function loadJsonNode<T>(uri: string): Promise<T> {
  const jsonFilePath = path.resolve(__dirname, '../', uri)
  const rawContents = fs.readFileSync(jsonFilePath).toString()
  return JSON.parse(rawContents)
}
// Initializes a network in the node environment by loading its weights
// from the local weights directory at the project root.
export async function initNetNode<TNet extends NeuralNetwork<any>>(net: TNet) {
  const weightsDir = path.resolve(__dirname, '../weights')
  await net.loadFromDisk(weightsDir)
}
// The node test environment implementation, wired from the node-specific
// loaders above.
const nodeTestEnv: TestEnv = {
  loadImage: loadImageNode,
  loadJson: loadJsonNode,
  initNet: initNetNode
}
// Register on the global object so getTestEnv() (in env.ts) can pick this
// environment up without importing node-only modules in the browser bundle.
global['nodeTestEnv'] = nodeTestEnv
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { fetchNetWeights, NeuralNetwork } from 'tfjs-image-recognition-base';
import { env, fetchImage, fetchJson } from '../src';
export let fs: any, path: any, canvas: any
import { fetchImage, fetchJson } from '../src';
import { TestEnv } from './Environment';
jasmine.DEFAULT_TIMEOUT_INTERVAL = 60000
if (env.isNodejs()) {
require('@tensorflow/tfjs-node')
fs = require('fs')
path = require('path')
canvas = require('canvas')
const { Canvas, Image, ImageData } = canvas
env.monkeyPatch({ Canvas, Image, ImageData })
} else {
if ((window['__karma__'].config.jasmine.args as string[]).some(arg => arg === 'backend_cpu')) {
if (typeof window !== 'undefined' && window['__karma__'] && (window['__karma__'].config.jasmine.args as string[]).some(arg => arg === 'backend_cpu')) {
tf.setBackend('cpu')
}
}
export async function initNet<TNet extends NeuralNetwork<any>>(
// Fetches a test image via karma's 'base' path, inserting a separator
// slash only when the uri does not already begin with one.
async function loadImageBrowser(uri: string): Promise<HTMLImageElement> {
  const separator = uri.startsWith('/') ? '' : '/'
  return fetchImage('base' + separator + uri)
}
// Fetches a JSON fixture via karma's 'base' path, inserting a separator
// slash only when the uri does not already begin with one.
async function loadJsonBrowser<T>(uri: string): Promise<T> {
  const separator = uri.startsWith('/') ? '' : '/'
  return fetchJson<T>('base' + separator + uri)
}
async function initNetBrowser<TNet extends NeuralNetwork<any>>(
net: TNet,
uncompressedFilename: string | boolean,
isUnusedModel: boolean = false
) {
if (env.isNodejs()) {
await net.loadFromDisk(path.resolve(__dirname, '../weights'))
} else {
const url = uncompressedFilename
? await fetchNetWeights(`base/weights_uncompressed/${uncompressedFilename}`)
: (isUnusedModel ? 'base/weights_unused' : 'base/weights')
await net.load(url)
}
}
export async function loadImage(uri: string): Promise<HTMLImageElement> {
if (env.isNodejs()) {
return canvas.loadImage(path.resolve(__dirname, '../', uri))
}
return fetchImage(`base${uri.startsWith('/') ? '' : '/'}${uri}`)
// The browser test environment implementation, wired from the
// browser-specific loaders above.
const browserTestEnv: TestEnv = {
  loadImage: loadImageBrowser,
  loadJson: loadJsonBrowser,
  initNet: initNetBrowser
}
export async function loadJson<T>(uri: string): Promise<T> {
if (env.isNodejs()) {
return JSON.parse(fs.readFileSync(path.resolve(__dirname, '../', uri)).toString())
}
return fetchJson<T>(`base${uri.startsWith('/') ? '' : '/'}${uri}`)
// Returns the active test environment: the node env if env.node.ts
// registered one on the global object, otherwise the browser env.
export function getTestEnv(): TestEnv {
  const registeredNodeEnv = global['nodeTestEnv']
  return registeredNodeEnv ? registeredNodeEnv : browserTestEnv
}
......@@ -2,7 +2,7 @@ import * as tf from '@tensorflow/tfjs-core';
import { createCanvasFromMedia, NetInput, toNetInput } from '../../../src';
import { AgeAndGenderPrediction } from '../../../src/ageGenderNet/types';
import { loadImage } from '../../env';
import { getTestEnv } from '../../env';
import { describeWithBackend, describeWithNets, expectAllTensorsReleased } from '../../utils';
function expectResultsAngry(result: AgeAndGenderPrediction) {
......@@ -25,8 +25,8 @@ describeWithBackend('ageGenderNet', () => {
let imgElSurprised: HTMLImageElement
beforeAll(async () => {
imgElAngry = await loadImage('test/images/angry_cropped.jpg')
imgElSurprised = await loadImage('test/images/surprised_cropped.jpg')
imgElAngry = await getTestEnv().loadImage('test/images/angry_cropped.jpg')
imgElSurprised = await getTestEnv().loadImage('test/images/surprised_cropped.jpg')
})
describeWithNets('quantized weights', { withAgeGenderNet: { quantized: true } }, ({ ageGenderNet }) => {
......
import { createCanvasFromMedia, extractFaceTensors, Rect, tf } from '../../../src';
import { loadImage } from '../../env';
import { getTestEnv } from '../../env';
import { describeWithBackend } from '../../utils';
describeWithBackend('extractFaceTensors', () => {
......@@ -7,7 +7,7 @@ describeWithBackend('extractFaceTensors', () => {
let imgTensor: tf.Tensor3D
beforeAll(async () => {
imgTensor = tf.browser.fromPixels(createCanvasFromMedia(await loadImage('test/images/face1.png')))
imgTensor = tf.browser.fromPixels(createCanvasFromMedia(await getTestEnv().loadImage('test/images/face1.png')))
})
describe('extracts tensors', () => {
......
import { createCanvasFromMedia, env, extractFaces, Rect } from '../../../src';
import { loadImage } from '../../env';
import { getTestEnv } from '../../env';
describe('extractFaces', () => {
let imgEl: HTMLImageElement, canvasEl: HTMLCanvasElement, Canvas: typeof HTMLCanvasElement
beforeAll(async () => {
imgEl = await loadImage('test/images/face1.png')
imgEl = await getTestEnv().loadImage('test/images/face1.png')
canvasEl = createCanvasFromMedia(imgEl)
Canvas = env.getEnv().Canvas
})
......
import * as tf from '@tensorflow/tfjs-core';
import { createCanvasFromMedia, NetInput, toNetInput } from '../../../src';
import { loadImage } from '../../env';
import { describeWithBackend, describeWithNets, expectAllTensorsReleased } from '../../utils';
import { FaceExpressions } from '../../../src/faceExpressionNet/FaceExpressions';
import { getTestEnv } from '../../env';
import { describeWithBackend, describeWithNets, expectAllTensorsReleased } from '../../utils';
describeWithBackend('faceExpressionNet', () => {
......@@ -11,8 +11,8 @@ describeWithBackend('faceExpressionNet', () => {
let imgElSurprised: HTMLImageElement
beforeAll(async () => {
imgElAngry = await loadImage('test/images/angry_cropped.jpg')
imgElSurprised = await loadImage('test/images/surprised_cropped.jpg')
imgElAngry = await getTestEnv().loadImage('test/images/angry_cropped.jpg')
imgElSurprised = await getTestEnv().loadImage('test/images/surprised_cropped.jpg')
})
describeWithNets('quantized weights', { withFaceExpressionNet: { quantized: true } }, ({ faceExpressionNet }) => {
......
......@@ -2,7 +2,7 @@ import * as tf from '@tensorflow/tfjs-core';
import { createCanvasFromMedia, IDimensions, isTensor3D, NetInput, Point, TMediaElement, toNetInput } from '../../../src';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { loadImage, loadJson } from '../../env';
import { getTestEnv } from '../../env';
import {
describeWithBackend,
describeWithNets,
......@@ -29,12 +29,12 @@ describeWithBackend('faceLandmark68Net', () => {
let faceLandmarkPositionsRect: Point[]
beforeAll(async () => {
imgEl1 = await loadImage('test/images/face1.png')
imgEl2 = await loadImage('test/images/face2.png')
imgElRect = await loadImage('test/images/face_rectangular.png')
faceLandmarkPositions1 = await loadJson<Point[]>('test/data/faceLandmarkPositions1.json')
faceLandmarkPositions2 = await loadJson<Point[]>('test/data/faceLandmarkPositions2.json')
faceLandmarkPositionsRect = await loadJson<Point[]>('test/data/faceLandmarkPositionsRect.json')
imgEl1 = await getTestEnv().loadImage('test/images/face1.png')
imgEl2 = await getTestEnv().loadImage('test/images/face2.png')
imgElRect = await getTestEnv().loadImage('test/images/face_rectangular.png')
faceLandmarkPositions1 = await getTestEnv().loadJson<Point[]>('test/data/faceLandmarkPositions1.json')
faceLandmarkPositions2 = await getTestEnv().loadJson<Point[]>('test/data/faceLandmarkPositions2.json')
faceLandmarkPositionsRect = await getTestEnv().loadJson<Point[]>('test/data/faceLandmarkPositionsRect.json')
})
describeWithNets('quantized weights', { withFaceLandmark68Net: { quantized: true } }, ({ faceLandmark68Net }) => {
......
import { Point } from '../../../src';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { loadImage, loadJson } from '../../env';
import { getTestEnv } from '../../env';
import { describeWithBackend, describeWithNets, expectPointClose } from '../../utils';
describeWithBackend('faceLandmark68Net, uncompressed', () => {
......@@ -11,10 +11,10 @@ describeWithBackend('faceLandmark68Net, uncompressed', () => {
let faceLandmarkPositionsRect: Point[]
beforeAll(async () => {
imgEl1 = await loadImage('test/images/face1.png')
imgElRect = await loadImage('test/images/face_rectangular.png')
faceLandmarkPositions1 = await loadJson<Point[]>('test/data/faceLandmarkPositions1.json')
faceLandmarkPositionsRect = await loadJson<Point[]>('test/data/faceLandmarkPositionsRect.json')
imgEl1 = await getTestEnv().loadImage('test/images/face1.png')
imgElRect = await getTestEnv().loadImage('test/images/face_rectangular.png')
faceLandmarkPositions1 = await getTestEnv().loadJson<Point[]>('test/data/faceLandmarkPositions1.json')
faceLandmarkPositionsRect = await getTestEnv().loadJson<Point[]>('test/data/faceLandmarkPositionsRect.json')
})
describeWithNets('uncompressed weights', { withFaceLandmark68Net: { quantized: false } }, ({ faceLandmark68Net }) => {
......
......@@ -2,7 +2,7 @@ import * as tf from '@tensorflow/tfjs-core';
import { createCanvasFromMedia, IDimensions, isTensor3D, NetInput, Point, TMediaElement, toNetInput } from '../../../src';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { loadImage, loadJson } from '../../env';
import { getTestEnv } from '../../env';
import { describeWithBackend, describeWithNets, expectAllTensorsReleased, expectPointClose } from '../../utils';
function getInputDims (input: tf.Tensor | TMediaElement): IDimensions {
......@@ -23,12 +23,12 @@ describeWithBackend('faceLandmark68TinyNet', () => {
let faceLandmarkPositionsRect: Point[]
beforeAll(async () => {
imgEl1 = await loadImage('test/images/face1.png')
imgEl2 = await loadImage('test/images/face2.png')
imgElRect = await loadImage('test/images/face_rectangular.png')
faceLandmarkPositions1 = await loadJson<Point[]>('test/data/faceLandmarkPositions1Tiny.json')
faceLandmarkPositions2 = await loadJson<Point[]>('test/data/faceLandmarkPositions2Tiny.json')
faceLandmarkPositionsRect = await loadJson<Point[]>('test/data/faceLandmarkPositionsRectTiny.json')
imgEl1 = await getTestEnv().loadImage('test/images/face1.png')
imgEl2 = await getTestEnv().loadImage('test/images/face2.png')
imgElRect = await getTestEnv().loadImage('test/images/face_rectangular.png')
faceLandmarkPositions1 = await getTestEnv().loadJson<Point[]>('test/data/faceLandmarkPositions1Tiny.json')
faceLandmarkPositions2 = await getTestEnv().loadJson<Point[]>('test/data/faceLandmarkPositions2Tiny.json')
faceLandmarkPositionsRect = await getTestEnv().loadJson<Point[]>('test/data/faceLandmarkPositionsRectTiny.json')
})
describeWithNets('quantized weights', { withFaceLandmark68TinyNet: { quantized: true } }, ({ faceLandmark68TinyNet }) => {
......
import { Point } from '../../../src';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { loadImage, loadJson } from '../../env';
import { describeWithNets, expectPointClose, describeWithBackend } from '../../utils';
import { getTestEnv } from '../../env';
import { describeWithBackend, describeWithNets, expectPointClose } from '../../utils';
describeWithBackend('faceLandmark68TinyNet, uncompressed', () => {
......@@ -11,10 +11,10 @@ describeWithBackend('faceLandmark68TinyNet, uncompressed', () => {
let faceLandmarkPositionsRect: Point[]
beforeAll(async () => {
imgEl1 = await loadImage('test/images/face1.png')
imgElRect = await loadImage('test/images/face_rectangular.png')
faceLandmarkPositions1 = await loadJson<Point[]>('test/data/faceLandmarkPositions1Tiny.json')
faceLandmarkPositionsRect = await loadJson<Point[]>('test/data/faceLandmarkPositionsRectTiny.json')
imgEl1 = await getTestEnv().loadImage('test/images/face1.png')
imgElRect = await getTestEnv().loadImage('test/images/face_rectangular.png')
faceLandmarkPositions1 = await getTestEnv().loadJson<Point[]>('test/data/faceLandmarkPositions1Tiny.json')
faceLandmarkPositionsRect = await getTestEnv().loadJson<Point[]>('test/data/faceLandmarkPositionsRectTiny.json')
})
describeWithNets('uncompressed weights', { withFaceLandmark68TinyNet: { quantized: false } }, ({ faceLandmark68TinyNet }) => {
......
......@@ -2,7 +2,7 @@ import * as tf from '@tensorflow/tfjs-core';
import { createCanvasFromMedia, NetInput, toNetInput } from '../../../src';
import { euclideanDistance } from '../../../src/euclideanDistance';
import { loadImage, loadJson } from '../../env';
import { getTestEnv } from '../../env';
import { describeWithBackend, describeWithNets, expectAllTensorsReleased } from '../../utils';
describeWithBackend('faceRecognitionNet', () => {
......@@ -15,12 +15,12 @@ describeWithBackend('faceRecognitionNet', () => {
let faceDescriptorRect: number[]
beforeAll(async () => {
imgEl1 = createCanvasFromMedia(await loadImage('test/images/face1.png'))
imgEl2 = createCanvasFromMedia(await loadImage('test/images/face2.png'))
imgElRect = createCanvasFromMedia(await loadImage('test/images/face_rectangular.png'))
faceDescriptor1 = await loadJson<number[]>('test/data/faceDescriptor1.json')
faceDescriptor2 = await loadJson<number[]>('test/data/faceDescriptor2.json')
faceDescriptorRect = await loadJson<number[]>('test/data/faceDescriptorRect.json')
imgEl1 = createCanvasFromMedia(await getTestEnv().loadImage('test/images/face1.png'))
imgEl2 = createCanvasFromMedia(await getTestEnv().loadImage('test/images/face2.png'))
imgElRect = createCanvasFromMedia(await getTestEnv().loadImage('test/images/face_rectangular.png'))
faceDescriptor1 = await getTestEnv().loadJson<number[]>('test/data/faceDescriptor1.json')
faceDescriptor2 = await getTestEnv().loadJson<number[]>('test/data/faceDescriptor2.json')
faceDescriptorRect = await getTestEnv().loadJson<number[]>('test/data/faceDescriptorRect.json')
})
describeWithNets('quantized weights', { withFaceRecognitionNet: { quantized: true } }, ({ faceRecognitionNet }) => {
......
import { createCanvasFromMedia } from '../../../src';
import { euclideanDistance } from '../../../src/euclideanDistance';
import { loadImage, loadJson } from '../../env';
import { getTestEnv } from '../../env';
import { describeWithBackend, describeWithNets } from '../../utils';
describeWithBackend('faceRecognitionNet, uncompressed', () => {
......@@ -11,10 +11,10 @@ describeWithBackend('faceRecognitionNet, uncompressed', () => {
let faceDescriptorRect: number[]
beforeAll(async () => {
imgEl1 = createCanvasFromMedia(await loadImage('test/images/face1.png'))
imgElRect = createCanvasFromMedia(await loadImage('test/images/face_rectangular.png'))
faceDescriptor1 = await loadJson<number[]>('test/data/faceDescriptor1.json')
faceDescriptorRect = await loadJson<number[]>('test/data/faceDescriptorRect.json')
imgEl1 = createCanvasFromMedia(await getTestEnv().loadImage('test/images/face1.png'))
imgElRect = createCanvasFromMedia(await getTestEnv().loadImage('test/images/face_rectangular.png'))
faceDescriptor1 = await getTestEnv().loadJson<number[]>('test/data/faceDescriptor1.json')
faceDescriptorRect = await getTestEnv().loadJson<number[]>('test/data/faceDescriptorRect.json')
})
describeWithNets('uncompressed weights', { withFaceRecognitionNet: { quantized: false } }, ({ faceRecognitionNet }) => {
......
......@@ -3,7 +3,7 @@ import { WithAge } from '../../../src/factories/WithAge';
import { WithFaceDetection } from '../../../src/factories/WithFaceDetection';
import { WithFaceExpressions } from '../../../src/factories/WithFaceExpressions';
import { WithGender } from '../../../src/factories/WithGender';
import { loadImage } from '../../env';
import { getTestEnv } from '../../env';
import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
......@@ -49,7 +49,7 @@ describeWithBackend('globalApi', () => {
let expectedFullFaceDescriptions: ExpectedFullFaceDescription[]
beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg')
imgEl = await getTestEnv().loadImage('test/images/faces.jpg')
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes)
})
......
......@@ -2,7 +2,7 @@ import * as faceapi from '../../../src';
import { WithAge } from '../../../src/factories/WithAge';
import { WithFaceExpressions } from '../../../src/factories/WithFaceExpressions';
import { WithGender } from '../../../src/factories/WithGender';
import { loadImage } from '../../env';
import { getTestEnv } from '../../env';
import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
......@@ -40,7 +40,7 @@ describeWithBackend('globalApi', () => {
let expectedFullFaceDescriptions: ExpectedFullFaceDescription[]
beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg')
imgEl = await getTestEnv().loadImage('test/images/faces.jpg')
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes)
})
......
import { IPoint } from '../../../src';
import { loadImage, loadJson } from '../../env';
import { getTestEnv } from '../../env';
import { describeWithBackend, describeWithNets } from '../../utils';
import { expectMtcnnResults } from './expectMtcnnResults';
......@@ -9,8 +9,8 @@ describeWithBackend('mtcnn.forward', () => {
let expectedMtcnnLandmarks: IPoint[][]
beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg')
expectedMtcnnLandmarks = await loadJson<IPoint[][]>('test/data/mtcnnFaceLandmarkPositions.json')
imgEl = await getTestEnv().loadImage('test/images/faces.jpg')
expectedMtcnnLandmarks = await getTestEnv().loadJson<IPoint[][]>('test/data/mtcnnFaceLandmarkPositions.json')
})
// "quantized" actually means loaded from manifest.json, since there is no quantization applied to the mtcnn model
......
import { IPoint } from '../../../src';
import { loadImage, loadJson } from '../../env';
import { getTestEnv } from '../../env';
import { describeWithBackend, describeWithNets } from '../../utils';
import { expectMtcnnResults } from './expectMtcnnResults';
......@@ -9,8 +9,8 @@ describeWithBackend('mtcnn.forward', () => {
let expectedMtcnnLandmarks: IPoint[][]
beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg')
expectedMtcnnLandmarks = await loadJson<IPoint[][]>('test/data/mtcnnFaceLandmarkPositions.json')
imgEl = await getTestEnv().loadImage('test/images/faces.jpg')
expectedMtcnnLandmarks = await getTestEnv().loadJson<IPoint[][]>('test/data/mtcnnFaceLandmarkPositions.json')
})
describeWithNets('uncompressed weights', { withMtcnn: { quantized: false } }, ({ mtcnn }) => {
......
import * as faceapi from '../../../src';
import { MtcnnOptions } from '../../../src/mtcnn/MtcnnOptions';
import { loadImage } from '../../env';
import { getTestEnv } from '../../env';
import { expectFaceDetections } from '../../expectFaceDetections';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
......@@ -20,7 +20,7 @@ describeWithBackend('mtcnn', () => {
const expectedScores = [1.0, 1.0, 1.0, 1.0, 0.99, 0.99]
beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg')
imgEl = await getTestEnv().loadImage('test/images/faces.jpg')
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedMtcnnBoxes)
})
......
import * as faceapi from '../../../src';
import { loadImage } from '../../env';
import { getTestEnv } from '../../env';
import { expectFaceDetections } from '../../expectFaceDetections';
import { describeWithBackend, describeWithNets } from '../../utils';
import { expectedSsdBoxes } from './expectedBoxes';
......@@ -9,7 +9,7 @@ describeWithBackend('ssdMobilenetv1.locateFaces', () => {
let imgEl: HTMLImageElement
beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg')
imgEl = await getTestEnv().loadImage('test/images/faces.jpg')
})
describeWithNets('quantized weights', { withSsdMobilenetv1: { quantized: true } }, ({ ssdMobilenetv1 }) => {
......
import * as faceapi from '../../../src';
import { loadImage } from '../../env';
import { getTestEnv } from '../../env';
import { expectFaceDetections } from '../../expectFaceDetections';
import { describeWithBackend, describeWithNets } from '../../utils';
import { expectedSsdBoxes } from './expectedBoxes';
......@@ -9,7 +9,7 @@ describeWithBackend('ssdMobilenetv1.locateFaces, uncompressed', () => {
let imgEl: HTMLImageElement
beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg')
imgEl = await getTestEnv().loadImage('test/images/faces.jpg')
})
describeWithNets('uncompressed weights', { withSsdMobilenetv1: { quantized: false } }, ({ ssdMobilenetv1 }) => {
......
......@@ -5,8 +5,8 @@ import { expectFaceDetections } from '../../expectFaceDetections';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectedSsdBoxes } from './expectedBoxes';
import { loadImage } from '../../env';
import * as tf from '@tensorflow/tfjs-core';
import { getTestEnv } from '../../env';
describe('ssdMobilenetv1 - node', () => {
......@@ -15,7 +15,7 @@ describe('ssdMobilenetv1 - node', () => {
const expectedScores = [0.54, 0.81, 0.97, 0.88, 0.84, 0.61]
beforeAll(async () => {
imgTensor = tf.browser.fromPixels(createCanvasFromMedia(await loadImage('test/images/faces.jpg')))
imgTensor = tf.browser.fromPixels(createCanvasFromMedia(await getTestEnv().loadImage('test/images/faces.jpg')))
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedSsdBoxes)
})
......
......@@ -5,7 +5,7 @@ import { expectFaceDetections } from '../../expectFaceDetections';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectedSsdBoxes } from './expectedBoxes';
import { loadImage } from '../../env';
import { getTestEnv } from '../../env';
describeWithBackend('ssdMobilenetv1', () => {
......@@ -14,7 +14,7 @@ describeWithBackend('ssdMobilenetv1', () => {
const expectedScores = [0.54, 0.81, 0.97, 0.88, 0.84, 0.61]
beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg')
imgEl = await getTestEnv().loadImage('test/images/faces.jpg')
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedSsdBoxes)
})
......
import * as faceapi from '../../../src';
import { loadImage } from '../../env';
import { getTestEnv } from '../../env';
import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes';
import { expectFaceDetections } from '../../expectFaceDetections';
import { describeWithBackend, describeWithNets } from '../../utils';
import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes';
describeWithBackend('tinyFaceDetector.locateFaces', () => {
let imgEl: HTMLImageElement
beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg')
imgEl = await getTestEnv().loadImage('test/images/faces.jpg')
})
describeWithNets('quantized weights', { withTinyFaceDetector: { quantized: true } }, ({ tinyFaceDetector }) => {
......
......@@ -5,8 +5,8 @@ import { expectFaceDetections } from '../../expectFaceDetections';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes';
import { loadImage } from '../../env';
import * as tf from '@tensorflow/tfjs-core';
import { getTestEnv } from '../../env';
describe('tinyFaceDetector - node', () => {
......@@ -15,7 +15,7 @@ describe('tinyFaceDetector - node', () => {
const expectedScores = [0.7, 0.82, 0.93, 0.86, 0.79, 0.84]
beforeAll(async () => {
imgTensor = tf.browser.fromPixels(createCanvasFromMedia(await loadImage('test/images/faces.jpg')))
imgTensor = tf.browser.fromPixels(createCanvasFromMedia(await getTestEnv().loadImage('test/images/faces.jpg')))
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes)
})
......
......@@ -5,7 +5,7 @@ import { expectFaceDetections } from '../../expectFaceDetections';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes';
import { loadImage } from '../../env';
import { getTestEnv } from '../../env';
describeWithBackend('tinyFaceDetector', () => {
......@@ -20,7 +20,7 @@ describeWithBackend('tinyFaceDetector', () => {
}
beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg')
imgEl = await getTestEnv().loadImage('test/images/faces.jpg')
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes)
})
......
......@@ -10,7 +10,7 @@ import { FaceLandmark68Net } from '../src/faceLandmarkNet/FaceLandmark68Net';
import { FaceLandmark68TinyNet } from '../src/faceLandmarkNet/FaceLandmark68TinyNet';
import { SsdMobilenetv1 } from '../src/ssdMobilenetv1/SsdMobilenetv1';
import { TinyFaceDetector } from '../src/tinyFaceDetector/TinyFaceDetector';
import { initNet, loadJson } from './env';
import { getTestEnv } from './env';
export function expectMaxDelta(val1: number, val2: number, maxDelta: number) {
expect(Math.abs(val1 - val2)).toBeLessThanOrEqual(maxDelta)
......@@ -93,8 +93,8 @@ export async function assembleExpectedFullFaceDescriptions(
detections: IRect[],
landmarksFile: string = 'facesFaceLandmarkPositions.json'
): Promise<ExpectedFullFaceDescription[]> {
const landmarks = await loadJson<any[]>(`test/data/${landmarksFile}`)
const descriptors = await loadJson<any[]>('test/data/facesFaceDescriptors.json')
const landmarks = await getTestEnv().loadJson<any[]>(`test/data/${landmarksFile}`)
const descriptors = await getTestEnv().loadJson<any[]>('test/data/facesFaceDescriptors.json')
return detections.map((detection, i) => ({
detection,
......@@ -211,63 +211,63 @@ export function describeWithNets(
} = options
if (withSsdMobilenetv1 || withAllFacesSsdMobilenetv1) {
await initNet<SsdMobilenetv1>(
await getTestEnv().initNet<SsdMobilenetv1>(
ssdMobilenetv1,
!!withSsdMobilenetv1 && !withSsdMobilenetv1.quantized && 'ssd_mobilenetv1_model.weights'
)
}
if (withTinyFaceDetector || withAllFacesTinyFaceDetector) {
await initNet<TinyFaceDetector>(
await getTestEnv().initNet<TinyFaceDetector>(
tinyFaceDetector,
!!withTinyFaceDetector && !withTinyFaceDetector.quantized && 'tiny_face_detector_model.weights'
)
}
if (withFaceLandmark68Net || withAllFacesSsdMobilenetv1 || withAllFacesTinyFaceDetector|| withAllFacesMtcnn || withAllFacesTinyYolov2) {
await initNet<FaceLandmark68Net>(
await getTestEnv().initNet<FaceLandmark68Net>(
faceLandmark68Net,
!!withFaceLandmark68Net && !withFaceLandmark68Net.quantized && 'face_landmark_68_model.weights'
)
}
if (withFaceLandmark68TinyNet) {
await initNet<FaceLandmark68TinyNet>(
await getTestEnv().initNet<FaceLandmark68TinyNet>(
faceLandmark68TinyNet,
!!withFaceLandmark68TinyNet && !withFaceLandmark68TinyNet.quantized && 'face_landmark_68_tiny_model.weights'
)
}
if (withFaceRecognitionNet || withAllFacesSsdMobilenetv1 || withAllFacesTinyFaceDetector|| withAllFacesMtcnn || withAllFacesTinyYolov2) {
await initNet<FaceRecognitionNet>(
await getTestEnv().initNet<FaceRecognitionNet>(
faceRecognitionNet,
!!withFaceRecognitionNet && !withFaceRecognitionNet.quantized && 'face_recognition_model.weights'
)
}
if (withMtcnn || withAllFacesMtcnn) {
await initNet<Mtcnn>(
await getTestEnv().initNet<Mtcnn>(
mtcnn,
!!withMtcnn && !withMtcnn.quantized && 'mtcnn_model.weights'
)
}
if (withFaceExpressionNet) {
await initNet<FaceExpressionNet>(
await getTestEnv().initNet<FaceExpressionNet>(
faceExpressionNet,
!!withFaceExpressionNet && !withFaceExpressionNet.quantized && 'face_expression_model.weights'
)
}
if (withAgeGenderNet) {
await initNet<AgeGenderNet>(
await getTestEnv().initNet<AgeGenderNet>(
ageGenderNet,
!!withAgeGenderNet && !withAgeGenderNet.quantized && 'age_gender_model.weights'
)
}
if (withTinyYolov2 || withAllFacesTinyYolov2) {
await initNet<TinyYolov2>(
await getTestEnv().initNet<TinyYolov2>(
tinyYolov2,
!!withTinyYolov2 && !withTinyYolov2.quantized && 'tiny_yolov2_model.weights',
true
......
......@@ -18,6 +18,7 @@
"suppressImplicitAnyIndexErrors": true,
"strictNullChecks": true,
"importHelpers": true,
"skipLibCheck": true,
"outDir": "build/commonjs",
"module": "commonjs",
"target": "es5",
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment