Unverified commit cb52dab4 by Vincent Mühler, committed by GitHub

Merge pull request #504 from justadudewhohacks/remove-tfjs-image-recognition-base

remove tfjs image recognition base
parents c804dc68 0b4470af
@@ -26,13 +26,13 @@ function getFaceDetectorOptions() {
}
function onIncreaseMinConfidence() {
minConfidence = Math.min(faceapi.round(minConfidence + 0.1), 1.0)
minConfidence = Math.min(faceapi.utils.round(minConfidence + 0.1), 1.0)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onDecreaseMinConfidence() {
minConfidence = Math.max(faceapi.round(minConfidence - 0.1), 0.1)
minConfidence = Math.max(faceapi.utils.round(minConfidence - 0.1), 0.1)
$('#minConfidence').val(minConfidence)
updateResults()
}
@@ -51,24 +51,24 @@ function changeInputSize(size) {
}
function onIncreaseScoreThreshold() {
scoreThreshold = Math.min(faceapi.round(scoreThreshold + 0.1), 1.0)
scoreThreshold = Math.min(faceapi.utils.round(scoreThreshold + 0.1), 1.0)
$('#scoreThreshold').val(scoreThreshold)
updateResults()
}
function onDecreaseScoreThreshold() {
scoreThreshold = Math.max(faceapi.round(scoreThreshold - 0.1), 0.1)
scoreThreshold = Math.max(faceapi.utils.round(scoreThreshold - 0.1), 0.1)
$('#scoreThreshold').val(scoreThreshold)
updateResults()
}
function onIncreaseMinFaceSize() {
minFaceSize = Math.min(faceapi.round(minFaceSize + 20), 300)
minFaceSize = Math.min(faceapi.utils.round(minFaceSize + 20), 300)
$('#minFaceSize').val(minFaceSize)
}
function onDecreaseMinFaceSize() {
minFaceSize = Math.max(faceapi.round(minFaceSize - 20), 50)
minFaceSize = Math.max(faceapi.utils.round(minFaceSize - 20), 50)
$('#minFaceSize').val(minFaceSize)
}
......
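The pattern in all of the demo hunks above: the `round` helper moves from the top-level `faceapi` namespace to `faceapi.utils`. A migration sketch, assuming the `faceapi` browser global used by these demos and the helper's apparent two-decimal default (judging by the `round(age, 0)` calls below):

// before this PR
// const fps = faceapi.round(1000 / avgTimeInMs)

// after this PR
const fps = faceapi.utils.round(1000 / avgTimeInMs)      // two decimals by default
const ageLabel = `${faceapi.utils.round(age, 0)} years`  // zero decimals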
@@ -161,8 +161,8 @@
const { age, gender, genderProbability } = result
new faceapi.draw.DrawTextField(
[
`${faceapi.round(age, 0)} years`,
`${gender} (${faceapi.round(genderProbability)})`
`${faceapi.utils.round(age, 0)} years`,
`${gender} (${faceapi.utils.round(genderProbability)})`
],
result.detection.box.bottomLeft
).draw(canvas)
......
@@ -96,7 +96,7 @@
function displayTimeStats(timeInMs) {
$('#time').val(`${timeInMs} ms`)
$('#fps').val(`${faceapi.round(1000 / timeInMs)}`)
$('#fps').val(`${faceapi.utils.round(1000 / timeInMs)}`)
}
function displayImage(src) {
......
@@ -39,7 +39,7 @@
let descriptors = { desc1: null, desc2: null }
function updateResult() {
const distance = faceapi.round(
const distance = faceapi.utils.round(
faceapi.euclideanDistance(descriptors.desc1, descriptors.desc2)
)
let text = distance
......
@@ -156,7 +156,7 @@
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
$('#fps').val(`${faceapi.utils.round(1000 / avgTimeInMs)}`)
}
async function onPlay(videoEl) {
......
@@ -152,7 +152,7 @@
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
$('#fps').val(`${faceapi.utils.round(1000 / avgTimeInMs)}`)
}
function interpolateAgePredictions(age) {
@@ -192,8 +192,8 @@
const interpolatedAge = interpolateAgePredictions(age)
new faceapi.draw.DrawTextField(
[
`${faceapi.round(interpolatedAge, 0)} years`,
`${gender} (${faceapi.round(genderProbability)})`
`${faceapi.utils.round(interpolatedAge, 0)} years`,
`${gender} (${faceapi.utils.round(genderProbability)})`
],
result.detection.box.bottomLeft
).draw(canvas)
......
@@ -139,7 +139,7 @@
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
$('#fps').val(`${faceapi.utils.round(1000 / avgTimeInMs)}`)
}
async function onPlay() {
......
@@ -151,7 +151,7 @@
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
$('#fps').val(`${faceapi.utils.round(1000 / avgTimeInMs)}`)
}
async function onPlay() {
......
@@ -151,7 +151,7 @@
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
$('#fps').val(`${faceapi.utils.round(1000 / avgTimeInMs)}`)
}
async function onPlay() {
......
@@ -19,8 +19,8 @@ async function run() {
const { age, gender, genderProbability } = result
new faceapi.draw.DrawTextField(
[
`${faceapi.round(age, 0)} years`,
`${gender} (${faceapi.round(genderProbability)})`
`${faceapi.utils.round(age, 0)} years`,
`${gender} (${faceapi.utils.round(genderProbability)})`
],
result.detection.box.bottomLeft
).draw(out)
......
let spec_files = ['**/*.test.ts'].concat(
process.env.EXCLUDE_UNCOMPRESSED
? ['!**/*.uncompressed.test.ts']
: []
)
let spec_files = ['**/*.test.ts']
// exclude browser tests
spec_files = spec_files.concat(['!**/*.browser.test.ts'])
spec_files = spec_files.concat(['!test/tests.legacy/*'])
module.exports = {
spec_dir: 'test',
......
@@ -2,6 +2,7 @@ const dataFiles = [
'test/images/*.jpg',
'test/images/*.png',
'test/data/*.json',
'test/data/*.weights',
'test/media/*.mp4',
'weights/**/*',
'weights_uncompressed/**/*',
@@ -21,24 +22,17 @@ let exclude = (
'faceLandmarkNet',
'faceRecognitionNet',
'ssdMobilenetv1',
'tinyFaceDetector',
'mtcnn'
'tinyFaceDetector'
]
: []
)
.filter(ex => ex !== process.env.UUT)
.map(ex => `test/tests/${ex}/*.ts`)
exclude = exclude.concat(
process.env.EXCLUDE_UNCOMPRESSED
? ['**/*.uncompressed.test.ts']
: []
)
// exclude nodejs tests
exclude = exclude.concat(['**/*.node.test.ts'])
exclude = exclude.concat(['test/env.node.ts'])
exclude = exclude.concat(['test/tests-legacy/**/*.ts'])
module.exports = function(config) {
......
@@ -1487,9 +1487,9 @@
"dev": true
},
"fsevents": {
"version": "2.0.7",
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.0.7.tgz",
"integrity": "sha512-a7YT0SV3RB+DjYcppwVDLtn13UQnmg0SWZS7ezZD0UjnLwXmy8Zm21GMVGLaFGimIqcvyMQaOJBrop8MyOp1kQ==",
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.1.2.tgz",
"integrity": "sha512-R4wDiBwZ0KzpgOWetKDug1FZcYhqYnUYKtfZYt4mD5SBz76q0KR4Q9o7GIPamsVPGmW3EYPPJ0dOOjvx32ldZA==",
"dev": true,
"optional": true
},
@@ -3917,15 +3917,6 @@
"yallist": "^3.0.3"
}
},
"tfjs-image-recognition-base": {
"version": "0.6.2",
"resolved": "https://registry.npmjs.org/tfjs-image-recognition-base/-/tfjs-image-recognition-base-0.6.2.tgz",
"integrity": "sha512-ukxViVfAPw7s0KiGhwr3zrwsm+EVa2Z+4aEwKBWO43Rt48nbPyVvrHdL+WbxRynZYjklEE69ft66C8zzea7vFw==",
"requires": {
"@tensorflow/tfjs-core": "^1.2.9",
"tslib": "^1.10.0"
}
},
"through2": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/through2/-/through2-3.0.0.tgz",
......
@@ -11,22 +11,16 @@
"tsc": "tsc",
"tsc-es6": "tsc --p tsconfig.es6.json",
"build": "rm -rf ./build && rm -rf ./dist && npm run rollup && npm run rollup-min && npm run tsc && npm run tsc-es6",
"test": "karma start",
"test": "npm run test-browser && npm run test-node",
"test-browser": "karma start --single-run",
"test-node": "ts-node -r ./test/env.node.ts node_modules/jasmine/bin/jasmine --config=jasmine-node.js",
"test-all": "npm run test-browser-exclude-uncompressed && npm run test-node-exclude-uncompressed",
"test-all-include-uncompressed": "npm run test-browser && npm run test-node",
"test-facelandmarknets": "set UUT=faceLandmarkNet&& karma start",
"test-facerecognitionnet": "set UUT=faceRecognitionNet&& karma start",
"test-agegendernet": "set UUT=ageGenderNet&& karma start",
"test-ssdmobilenetv1": "set UUT=ssdMobilenetv1&& karma start",
"test-tinyfacedetector": "set UUT=tinyFaceDetector&& karma start",
"test-globalapi": "set UUT=globalApi&& karma start",
"test-mtcnn": "set UUT=mtcnn&& karma start",
"test-cpu": "set BACKEND_CPU=true&& karma start",
"test-exclude-uncompressed": "set EXCLUDE_UNCOMPRESSED=true&& karma start",
"test-browser-exclude-uncompressed": "set EXCLUDE_UNCOMPRESSED=true&& karma start --single-run",
"test-node-exclude-uncompressed": "set EXCLUDE_UNCOMPRESSED=true&& npm run test-node",
"docs": "typedoc --options ./typedoc.config.js ./src"
},
"keywords": [
@@ -40,7 +34,6 @@
"license": "MIT",
"dependencies": {
"@tensorflow/tfjs-core": "1.2.9",
"tfjs-image-recognition-base": "^0.6.2",
"tslib": "^1.10.0"
},
"devDependencies": {
......
import * as tf from '@tensorflow/tfjs-core';
import { ParamMapping } from './common';
import { getModelUris } from './common/getModelUris';
import { loadWeightMap } from './dom';
import { env } from './env';
export abstract class NeuralNetwork<TNetParams> {
protected _params: TNetParams | undefined = undefined
protected _paramMappings: ParamMapping[] = []
constructor(protected _name: string) {}
public get params(): TNetParams | undefined { return this._params }
public get paramMappings(): ParamMapping[] { return this._paramMappings }
public get isLoaded(): boolean { return !!this.params }
public getParamFromPath(paramPath: string): tf.Tensor {
const { obj, objProp } = this.traversePropertyPath(paramPath)
return obj[objProp]
}
public reassignParamFromPath(paramPath: string, tensor: tf.Tensor) {
const { obj, objProp } = this.traversePropertyPath(paramPath)
obj[objProp].dispose()
obj[objProp] = tensor
}
public getParamList() {
return this._paramMappings.map(({ paramPath }) => ({
path: paramPath,
tensor: this.getParamFromPath(paramPath)
}))
}
public getTrainableParams() {
return this.getParamList().filter(param => param.tensor instanceof tf.Variable)
}
public getFrozenParams() {
return this.getParamList().filter(param => !(param.tensor instanceof tf.Variable))
}
public variable() {
this.getFrozenParams().forEach(({ path, tensor }) => {
this.reassignParamFromPath(path, tensor.variable())
})
}
public freeze() {
this.getTrainableParams().forEach(({ path, tensor: variable }) => {
const tensor = tf.tensor(variable.dataSync())
variable.dispose()
this.reassignParamFromPath(path, tensor)
})
}
public dispose(throwOnRedispose: boolean = true) {
this.getParamList().forEach(param => {
if (throwOnRedispose && param.tensor.isDisposed) {
throw new Error(`param tensor has already been disposed for path ${param.path}`)
}
param.tensor.dispose()
})
this._params = undefined
}
public serializeParams(): Float32Array {
return new Float32Array(
this.getParamList()
.map(({ tensor }) => Array.from(tensor.dataSync()) as number[])
.reduce((flat, arr) => flat.concat(arr))
)
}
public async load(weightsOrUrl: Float32Array | string | undefined): Promise<void> {
if (weightsOrUrl instanceof Float32Array) {
this.extractWeights(weightsOrUrl)
return
}
await this.loadFromUri(weightsOrUrl)
}
public async loadFromUri(uri: string | undefined) {
if (uri && typeof uri !== 'string') {
throw new Error(`${this._name}.loadFromUri - expected model uri`)
}
const weightMap = await loadWeightMap(uri, this.getDefaultModelName())
this.loadFromWeightMap(weightMap)
}
public async loadFromDisk(filePath: string | undefined) {
if (filePath && typeof filePath !== 'string') {
throw new Error(`${this._name}.loadFromDisk - expected model file path`)
}
const { readFile } = env.getEnv()
const { manifestUri, modelBaseUri } = getModelUris(filePath, this.getDefaultModelName())
const fetchWeightsFromDisk = (filePaths: string[]) => Promise.all(
filePaths.map(filePath => readFile(filePath).then(buf => buf.buffer))
)
const loadWeights = tf.io.weightsLoaderFactory(fetchWeightsFromDisk)
const manifest = JSON.parse((await readFile(manifestUri)).toString())
const weightMap = await loadWeights(manifest, modelBaseUri)
this.loadFromWeightMap(weightMap)
}
public loadFromWeightMap(weightMap: tf.NamedTensorMap) {
const {
paramMappings,
params
} = this.extractParamsFromWeigthMap(weightMap)
this._paramMappings = paramMappings
this._params = params
}
public extractWeights(weights: Float32Array) {
const {
paramMappings,
params
} = this.extractParams(weights)
this._paramMappings = paramMappings
this._params = params
}
private traversePropertyPath(paramPath: string) {
if (!this.params) {
throw new Error(`traversePropertyPath - model has no loaded params`)
}
const result = paramPath.split('/').reduce((res: { nextObj: any, obj?: any, objProp?: string }, objProp) => {
if (!res.nextObj.hasOwnProperty(objProp)) {
throw new Error(`traversePropertyPath - object does not have property ${objProp}, for path ${paramPath}`)
}
return { obj: res.nextObj, objProp, nextObj: res.nextObj[objProp] }
}, { nextObj: this.params })
const { obj, objProp } = result
if (!obj || !objProp || !(obj[objProp] instanceof tf.Tensor)) {
throw new Error(`traversePropertyPath - parameter is not a tensor, for path ${paramPath}`)
}
return { obj, objProp }
}
protected abstract getDefaultModelName(): string
protected abstract extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): { params: TNetParams, paramMappings: ParamMapping[] }
protected abstract extractParams(weights: Float32Array): { params: TNetParams, paramMappings: ParamMapping[] }
}
\ No newline at end of file
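For orientation, a minimal sketch of what this inlined base class asks of a concrete net: a default model name plus the two param extraction routines; everything else (load, loadFromUri, loadFromDisk, dispose) is inherited. TinyNet and its weight layout are hypothetical; the factory helpers are the ones added under src/common in this diff:

import * as tf from '@tensorflow/tfjs-core';
import { extractFCParamsFactory, extractWeightsFactory, FCParams, ParamMapping } from './common';
import { NeuralNetwork } from './NeuralNetwork';

// hypothetical single-layer net, used only to illustrate the base class contract
type TinyNetParams = { fc: FCParams }

class TinyNet extends NeuralNetwork<TinyNetParams> {
  constructor() { super('TinyNet') }

  protected getDefaultModelName(): string {
    // loadFromUri/loadFromDisk resolve 'tiny_net-weights_manifest.json' from this name
    return 'tiny_net'
  }

  protected extractParams(weights: Float32Array) {
    const paramMappings: ParamMapping[] = []
    const { extractWeights, getRemainingWeights } = extractWeightsFactory(weights)
    // one fully connected layer: 512 inputs, 2 outputs
    const fc = extractFCParamsFactory(extractWeights, paramMappings)(512, 2, 'fc')
    if (getRemainingWeights().length !== 0) {
      throw new Error(`TinyNet - unexpected remaining weights: ${getRemainingWeights().length}`)
    }
    return { params: { fc }, paramMappings }
  }

  protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap) {
    // omitted in this sketch; would mirror the ageGenderNet version further down
    throw new Error('TinyNet - sketch only')
  }
}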
import * as tf from '@tensorflow/tfjs-core';
import { NetInput, NeuralNetwork, TNetInput, toNetInput } from 'tfjs-image-recognition-base';
import { fullyConnectedLayer } from '../common/fullyConnectedLayer';
import { seperateWeightMaps } from '../faceProcessor/util';
@@ -7,6 +6,8 @@ import { TinyXception } from '../xception/TinyXception';
import { extractParams } from './extractParams';
import { extractParamsFromWeigthMap } from './extractParamsFromWeigthMap';
import { AgeAndGenderPrediction, Gender, NetOutput, NetParams } from './types';
import { NeuralNetwork } from '../NeuralNetwork';
import { NetInput, TNetInput, toNetInput } from '../dom';
export class AgeGenderNet extends NeuralNetwork<NetParams> {
......
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { extractFCParamsFactory, extractWeightsFactory, ParamMapping } from '../common';
import { NetParams } from './types';
export function extractParams(weights: Float32Array): { params: NetParams, paramMappings: TfjsImageRecognitionBase.ParamMapping[] } {
export function extractParams(weights: Float32Array): { params: NetParams, paramMappings: ParamMapping[] } {
const paramMappings: TfjsImageRecognitionBase.ParamMapping[] = []
const paramMappings: ParamMapping[] = []
const {
extractWeights,
getRemainingWeights
} = TfjsImageRecognitionBase.extractWeightsFactory(weights)
} = extractWeightsFactory(weights)
const extractFCParams = TfjsImageRecognitionBase.extractFCParamsFactory(extractWeights, paramMappings)
const extractFCParams = extractFCParamsFactory(extractWeights, paramMappings)
const age = extractFCParams(512, 1, 'fc/age')
const gender = extractFCParams(512, 2, 'fc/gender')
......
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { disposeUnusedWeightTensors, extractWeightEntryFactory, FCParams, ParamMapping } from '../common';
import { NetParams } from './types';
export function extractParamsFromWeigthMap(
weightMap: tf.NamedTensorMap
): { params: NetParams, paramMappings: TfjsImageRecognitionBase.ParamMapping[] } {
): { params: NetParams, paramMappings: ParamMapping[] } {
const paramMappings: TfjsImageRecognitionBase.ParamMapping[] = []
const paramMappings: ParamMapping[] = []
const extractWeightEntry = TfjsImageRecognitionBase.extractWeightEntryFactory(weightMap, paramMappings)
const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings)
function extractFcParams(prefix: string): TfjsImageRecognitionBase.FCParams {
function extractFcParams(prefix: string): FCParams {
const weights = extractWeightEntry<tf.Tensor2D>(`${prefix}/weights`, 2)
const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/bias`, 1)
return { weights, bias }
@@ -24,7 +24,7 @@ export function extractParamsFromWeigthMap(
}
}
TfjsImageRecognitionBase.disposeUnusedWeightTensors(weightMap, paramMappings)
disposeUnusedWeightTensors(weightMap, paramMappings)
return { params, paramMappings }
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { FCParams } from '../common';
export type AgeAndGenderPrediction = {
age: number
@@ -16,7 +17,7 @@ export type NetOutput = { age: tf.Tensor1D, gender: tf.Tensor2D }
export type NetParams = {
fc: {
age: TfjsImageRecognitionBase.FCParams
gender: TfjsImageRecognitionBase.FCParams
age: FCParams
gender: FCParams
}
}
\ No newline at end of file
import { Box } from './Box';
export interface IBoundingBox {
left: number
top: number
right: number
bottom: number
}
export class BoundingBox extends Box<BoundingBox> implements IBoundingBox {
constructor(left: number, top: number, right: number, bottom: number, allowNegativeDimensions: boolean = false) {
super({ left, top, right, bottom }, allowNegativeDimensions)
}
}
\ No newline at end of file
import { isDimensions, isValidNumber } from '../utils';
import { IBoundingBox } from './BoundingBox';
import { IDimensions } from './Dimensions';
import { Point } from './Point';
import { IRect } from './Rect';
export class Box<BoxType = any> implements IBoundingBox, IRect {
public static isRect(rect: any): boolean {
return !!rect && [rect.x, rect.y, rect.width, rect.height].every(isValidNumber)
}
public static assertIsValidBox(box: any, callee: string, allowNegativeDimensions: boolean = false) {
if (!Box.isRect(box)) {
throw new Error(`${callee} - invalid box: ${JSON.stringify(box)}, expected object with properties x, y, width, height`)
}
if (!allowNegativeDimensions && (box.width < 0 || box.height < 0)) {
throw new Error(`${callee} - width (${box.width}) and height (${box.height}) must be positive numbers`)
}
}
private _x: number
private _y: number
private _width: number
private _height: number
constructor(_box: IBoundingBox | IRect, allowNegativeDimensions: boolean = true) {
const box = (_box || {}) as any
const isBbox = [box.left, box.top, box.right, box.bottom].every(isValidNumber)
const isRect = [box.x, box.y, box.width, box.height].every(isValidNumber)
if (!isRect && !isBbox) {
throw new Error(`Box.constructor - expected box to be IBoundingBox | IRect, instead have ${JSON.stringify(box)}`)
}
const [x, y, width, height] = isRect
? [box.x, box.y, box.width, box.height]
: [box.left, box.top, box.right - box.left, box.bottom - box.top]
Box.assertIsValidBox({ x, y, width, height }, 'Box.constructor', allowNegativeDimensions)
this._x = x
this._y = y
this._width = width
this._height = height
}
public get x(): number { return this._x }
public get y(): number { return this._y }
public get width(): number { return this._width }
public get height(): number { return this._height }
public get left(): number { return this.x }
public get top(): number { return this.y }
public get right(): number { return this.x + this.width }
public get bottom(): number { return this.y + this.height }
public get area(): number { return this.width * this.height }
public get topLeft(): Point { return new Point(this.left, this.top) }
public get topRight(): Point { return new Point(this.right, this.top) }
public get bottomLeft(): Point { return new Point(this.left, this.bottom) }
public get bottomRight(): Point { return new Point(this.right, this.bottom) }
public round(): Box<BoxType> {
const [x, y, width, height] = [this.x, this.y, this.width, this.height]
.map(val => Math.round(val))
return new Box({ x, y, width, height })
}
public floor(): Box<BoxType> {
const [x, y, width, height] = [this.x, this.y, this.width, this.height]
.map(val => Math.floor(val))
return new Box({ x, y, width, height })
}
public toSquare(): Box<BoxType> {
let { x, y, width, height } = this
const diff = Math.abs(width - height)
if (width < height) {
x -= (diff / 2)
width += diff
}
if (height < width) {
y -= (diff / 2)
height += diff
}
return new Box({ x, y, width, height })
}
public rescale(s: IDimensions | number): Box<BoxType> {
const scaleX = isDimensions(s) ? (s as IDimensions).width : s as number
const scaleY = isDimensions(s) ? (s as IDimensions).height : s as number
return new Box({
x: this.x * scaleX,
y: this.y * scaleY,
width: this.width * scaleX,
height: this.height * scaleY
})
}
public pad(padX: number, padY: number): Box<BoxType> {
let [x, y, width, height] = [
this.x - (padX / 2),
this.y - (padY / 2),
this.width + padX,
this.height + padY
]
return new Box({ x, y, width, height })
}
public clipAtImageBorders(imgWidth: number, imgHeight: number): Box<BoxType> {
const { x, y, right, bottom } = this
const clippedX = Math.max(x, 0)
const clippedY = Math.max(y, 0)
const newWidth = right - clippedX
const newHeight = bottom - clippedY
const clippedWidth = Math.min(newWidth, imgWidth - clippedX)
const clippedHeight = Math.min(newHeight, imgHeight - clippedY)
return (new Box({ x: clippedX, y: clippedY, width: clippedWidth, height: clippedHeight })).floor()
}
public shift(sx: number, sy: number): Box<BoxType> {
const { width, height } = this
const x = this.x + sx
const y = this.y + sy
return new Box({ x, y, width, height })
}
public padAtBorders(imageHeight: number, imageWidth: number) {
const w = this.width + 1
const h = this.height + 1
let dx = 1
let dy = 1
let edx = w
let edy = h
let x = this.left
let y = this.top
let ex = this.right
let ey = this.bottom
if (ex > imageWidth) {
edx = -ex + imageWidth + w
ex = imageWidth
}
if (ey > imageHeight) {
edy = -ey + imageHeight + h
ey = imageHeight
}
if (x < 1) {
dx = 2 - x
x = 1
}
if (y < 1) {
dy = 2 - y
y = 1
}
return { dy, edy, dx, edx, y, ey, x, ex, w, h }
}
public calibrate(region: Box) {
return new Box({
left: this.left + (region.left * this.width),
top: this.top + (region.top * this.height),
right: this.right + (region.right * this.width),
bottom: this.bottom + (region.bottom * this.height)
}).toSquare().round()
}
}
\ No newline at end of file
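To make the geometry concrete, a small sketch using the Rect subclass defined further down (values arbitrary):

import { Rect } from './Rect'

const rect = new Rect(10, 20, 100, 50)

// toSquare() grows the minor dimension symmetrically: 100x100 at (10, -5)
const square = rect.toSquare()

// rescale() maps a relative box into absolute image coordinates
const absolute = new Rect(0.25, 0.25, 0.5, 0.5)
  .rescale({ width: 640, height: 480 }) // 320x240 at (160, 120)

// clipAtImageBorders() clamps to the image and floors: (10, 0, 100, 95)
const clipped = square.clipAtImageBorders(640, 480)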
import { isValidNumber } from '../utils';
export interface IDimensions {
width: number
height: number
}
export class Dimensions implements IDimensions {
private _width: number
private _height: number
constructor(width: number, height: number) {
if (!isValidNumber(width) || !isValidNumber(height)) {
throw new Error(`Dimensions.constructor - expected width and height to be valid numbers, instead have ${JSON.stringify({ width, height })}`)
}
this._width = width
this._height = height
}
public get width(): number { return this._width }
public get height(): number { return this._height }
public reverse(): Dimensions {
return new Dimensions(1 / this.width, 1 / this.height)
}
}
\ No newline at end of file
import { Box, IDimensions, ObjectDetection, Rect } from 'tfjs-image-recognition-base';
import { Box } from './Box';
import { IDimensions } from './Dimensions';
import { ObjectDetection } from './ObjectDetection';
import { Rect } from './Rect';
export interface IFaceDetecion {
score: number
......
import { Box, Dimensions, getCenterPoint, IBoundingBox, IDimensions, IRect, Point, Rect } from 'tfjs-image-recognition-base';
import { minBbox } from '../minBbox';
import { minBbox } from '../ops';
import { getCenterPoint } from '../utils';
import { IBoundingBox } from './BoundingBox';
import { Box } from './Box';
import { Dimensions, IDimensions } from './Dimensions';
import { FaceDetection } from './FaceDetection';
import { Point } from './Point';
import { IRect, Rect } from './Rect';
// face alignment constants
const relX = 0.5
......
import { getCenterPoint, Point } from 'tfjs-image-recognition-base';
import { getCenterPoint } from '../utils';
import { FaceLandmarks } from './FaceLandmarks';
import { Point } from './Point';
export class FaceLandmarks5 extends FaceLandmarks {
......
import { getCenterPoint, Point } from 'tfjs-image-recognition-base';
import { FaceLandmarks } from '../classes/FaceLandmarks';
import { getCenterPoint } from '../utils';
import { FaceLandmarks } from './FaceLandmarks';
import { Point } from './Point';
export class FaceLandmarks68 extends FaceLandmarks {
public getJawOutline(): Point[] {
......
import { round } from 'tfjs-image-recognition-base';
import { round } from '../utils';
export interface IFaceMatch {
label: string
......
import { isValidNumber } from '../utils';
import { IBoundingBox } from './BoundingBox';
import { Box } from './Box';
import { IRect } from './Rect';
export class LabeledBox extends Box<LabeledBox> {
public static assertIsValidLabeledBox(box: any, callee: string) {
Box.assertIsValidBox(box, callee)
if (!isValidNumber(box.label)) {
throw new Error(`${callee} - expected property label (${box.label}) to be a number`)
}
}
private _label: number
constructor(box: IBoundingBox | IRect | any, label: number) {
super(box)
this._label = label
}
public get label(): number { return this._label }
}
\ No newline at end of file
import { Box } from './Box';
import { Dimensions, IDimensions } from './Dimensions';
import { IRect, Rect } from './Rect';
export class ObjectDetection {
private _score: number
private _classScore: number
private _className: string
private _box: Rect
private _imageDims: Dimensions
constructor(
score: number,
classScore: number,
className: string,
relativeBox: IRect,
imageDims: IDimensions
) {
this._imageDims = new Dimensions(imageDims.width, imageDims.height)
this._score = score
this._classScore = classScore
this._className = className
this._box = new Box(relativeBox).rescale(this._imageDims)
}
public get score(): number { return this._score }
public get classScore(): number { return this._classScore }
public get className(): string { return this._className }
public get box(): Box { return this._box }
public get imageDims(): Dimensions { return this._imageDims }
public get imageWidth(): number { return this.imageDims.width }
public get imageHeight(): number { return this.imageDims.height }
public get relativeBox(): Box { return new Box(this._box).rescale(this.imageDims.reverse()) }
public forSize(width: number, height: number): ObjectDetection {
return new ObjectDetection(
this.score,
this.classScore,
this.className,
this.relativeBox,
{ width, height }
)
}
}
\ No newline at end of file
export interface IPoint {
x: number
y: number
}
export class Point implements IPoint {
private _x: number
private _y: number
constructor(x: number, y: number) {
this._x = x
this._y = y
}
get x(): number { return this._x }
get y(): number { return this._y }
public add(pt: IPoint): Point {
return new Point(this.x + pt.x, this.y + pt.y)
}
public sub(pt: IPoint): Point {
return new Point(this.x - pt.x, this.y - pt.y)
}
public mul(pt: IPoint): Point {
return new Point(this.x * pt.x, this.y * pt.y)
}
public div(pt: IPoint): Point {
return new Point(this.x / pt.x, this.y / pt.y)
}
public abs(): Point {
return new Point(Math.abs(this.x), Math.abs(this.y))
}
public magnitude(): number {
return Math.sqrt(Math.pow(this.x, 2) + Math.pow(this.y, 2))
}
public floor(): Point {
return new Point(Math.floor(this.x), Math.floor(this.y))
}
}
\ No newline at end of file
import { isValidProbablitiy } from '../utils';
import { IBoundingBox } from './BoundingBox';
import { LabeledBox } from './LabeledBox';
import { IRect } from './Rect';
export class PredictedBox extends LabeledBox {
public static assertIsValidPredictedBox(box: any, callee: string) {
LabeledBox.assertIsValidLabeledBox(box, callee)
if (
!isValidProbablitiy(box.score)
|| !isValidProbablitiy(box.classScore)
) {
throw new Error(`${callee} - expected properties score (${box.score}) and classScore (${box.classScore}) to be numbers between [0, 1]`)
}
}
private _score: number
private _classScore: number
constructor(box: IBoundingBox | IRect | any, label: number, score: number, classScore: number) {
super(box, label)
this._score = score
this._classScore = classScore
}
public get score(): number { return this._score }
public get classScore(): number { return this._classScore }
}
\ No newline at end of file
import { Box } from './Box';
export interface IRect {
x: number
y: number
width: number
height: number
}
export class Rect extends Box<Rect> implements IRect {
constructor(x: number, y: number, width: number, height: number, allowNegativeDimensions: boolean = false) {
super({ x, y, width, height }, allowNegativeDimensions)
}
}
\ No newline at end of file
export * from './BoundingBox'
export * from './Box'
export * from './Dimensions'
export * from './FaceDetection';
export * from './FaceLandmarks';
export * from './FaceLandmarks5';
export * from './FaceLandmarks68';
export * from './FaceMatch';
export * from './LabeledBox'
export * from './LabeledFaceDescriptors';
export * from './ObjectDetection'
export * from './Point'
export * from './PredictedBox'
export * from './Rect'
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { ConvParams } from './types';
export function convLayer(
x: tf.Tensor4D,
params: ConvParams,
padding: 'valid' | 'same' = 'same',
withRelu: boolean = false
): tf.Tensor4D {
return tf.tidy(() => {
const out = tf.add(
tf.conv2d(x, params.filters, [1, 1], padding),
params.bias
) as tf.Tensor4D
return withRelu ? tf.relu(out) : out
})
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { SeparableConvParams } from './types';
export function depthwiseSeparableConv(
x: tf.Tensor4D,
params: TfjsImageRecognitionBase.SeparableConvParams,
params: SeparableConvParams,
stride: [number, number]
): tf.Tensor4D {
return tf.tidy(() => {
......
import { ParamMapping } from './types';
export function disposeUnusedWeightTensors(weightMap: any, paramMappings: ParamMapping[]) {
Object.keys(weightMap).forEach(path => {
if (!paramMappings.some(pm => pm.originalPath === path)) {
weightMap[path].dispose()
}
})
}
import * as tf from '@tensorflow/tfjs-core';
import { ConvParams, ExtractWeightsFunction, ParamMapping } from './types';
export function extractConvParamsFactory(
extractWeights: ExtractWeightsFunction,
paramMappings: ParamMapping[]
) {
return function(
channelsIn: number,
channelsOut: number,
filterSize: number,
mappedPrefix: string
): ConvParams {
const filters = tf.tensor4d(
extractWeights(channelsIn * channelsOut * filterSize * filterSize),
[filterSize, filterSize, channelsIn, channelsOut]
)
const bias = tf.tensor1d(extractWeights(channelsOut))
paramMappings.push(
{ paramPath: `${mappedPrefix}/filters` },
{ paramPath: `${mappedPrefix}/bias` }
)
return { filters, bias }
}
}
import * as tf from '@tensorflow/tfjs-core';
import { ExtractWeightsFunction, FCParams, ParamMapping } from './types';
export function extractFCParamsFactory(
extractWeights: ExtractWeightsFunction,
paramMappings: ParamMapping[]
) {
return function(
channelsIn: number,
channelsOut: number,
mappedPrefix: string
): FCParams {
const fc_weights = tf.tensor2d(extractWeights(channelsIn * channelsOut), [channelsIn, channelsOut])
const fc_bias = tf.tensor1d(extractWeights(channelsOut))
paramMappings.push(
{ paramPath: `${mappedPrefix}/weights` },
{ paramPath: `${mappedPrefix}/bias` }
)
return {
weights: fc_weights,
bias: fc_bias
}
}
}
import * as tf from '@tensorflow/tfjs-core';
import { ExtractWeightsFunction, ParamMapping, SeparableConvParams } from './types';
export function extractSeparableConvParamsFactory(
extractWeights: ExtractWeightsFunction,
paramMappings: ParamMapping[]
) {
return function(channelsIn: number, channelsOut: number, mappedPrefix: string): SeparableConvParams {
const depthwise_filter = tf.tensor4d(extractWeights(3 * 3 * channelsIn), [3, 3, channelsIn, 1])
const pointwise_filter = tf.tensor4d(extractWeights(channelsIn * channelsOut), [1, 1, channelsIn, channelsOut])
const bias = tf.tensor1d(extractWeights(channelsOut))
paramMappings.push(
{ paramPath: `${mappedPrefix}/depthwise_filter` },
{ paramPath: `${mappedPrefix}/pointwise_filter` },
{ paramPath: `${mappedPrefix}/bias` }
)
return new SeparableConvParams(
depthwise_filter,
pointwise_filter,
bias
)
}
}
export function loadSeparableConvParamsFactory(
extractWeightEntry: <T>(originalPath: string, paramRank: number) => T
) {
return function (prefix: string): SeparableConvParams {
const depthwise_filter = extractWeightEntry<tf.Tensor4D>(`${prefix}/depthwise_filter`, 4)
const pointwise_filter = extractWeightEntry<tf.Tensor4D>(`${prefix}/pointwise_filter`, 4)
const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/bias`, 1)
return new SeparableConvParams(
depthwise_filter,
pointwise_filter,
bias
)
}
}
import { isTensor } from '../utils';
import { ParamMapping } from './types';
export function extractWeightEntryFactory(weightMap: any, paramMappings: ParamMapping[]) {
return function<T> (originalPath: string, paramRank: number, mappedPath?: string): T {
const tensor = weightMap[originalPath]
if (!isTensor(tensor, paramRank)) {
throw new Error(`expected weightMap[${originalPath}] to be a Tensor${paramRank}D, instead have ${tensor}`)
}
paramMappings.push(
{ originalPath, paramPath: mappedPath || originalPath }
)
return tensor
}
}
export function extractWeightsFactory(weights: Float32Array) {
let remainingWeights = weights
function extractWeights(numWeights: number): Float32Array {
const ret = remainingWeights.slice(0, numWeights)
remainingWeights = remainingWeights.slice(numWeights)
return ret
}
function getRemainingWeights(): Float32Array {
return remainingWeights
}
return {
extractWeights,
getRemainingWeights
}
}
\ No newline at end of file
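The extraction factories above all share one contract: a single flat Float32Array is consumed front to back, in declaration order. A sketch (the layout is hypothetical):

// weights for one fully connected layer, laid out as [weights (2*3), bias (3)]
const flat = new Float32Array(2 * 3 + 3)
const { extractWeights, getRemainingWeights } = extractWeightsFactory(flat)

const weights = extractWeights(2 * 3) // first 6 floats
const bias = extractWeights(3)        // next 3 floats

if (getRemainingWeights().length !== 0) {
  throw new Error('weight layout mismatch')
}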
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { FCParams } from './types';
export function fullyConnectedLayer(
x: tf.Tensor2D,
params: TfjsImageRecognitionBase.FCParams
params: FCParams
): tf.Tensor2D {
return tf.tidy(() =>
tf.add(
......
export function getModelUris(uri: string | undefined, defaultModelName: string) {
const defaultManifestFilename = `${defaultModelName}-weights_manifest.json`
if (!uri) {
return {
modelBaseUri: '',
manifestUri: defaultManifestFilename
}
}
if (uri === '/') {
return {
modelBaseUri: '/',
manifestUri: `/${defaultManifestFilename}`
}
}
const protocol = uri.startsWith('http://') ? 'http://' : uri.startsWith('https://') ? 'https://' : '';
uri = uri.replace(protocol, '');
const parts = uri.split('/').filter(s => s)
const manifestFile = uri.endsWith('.json')
? parts[parts.length - 1]
: defaultManifestFilename
let modelBaseUri = protocol + (uri.endsWith('.json') ? parts.slice(0, parts.length - 1) : parts).join('/')
modelBaseUri = uri.startsWith('/') ? `/${modelBaseUri}` : modelBaseUri
return {
modelBaseUri,
manifestUri: modelBaseUri === '/' ? `/${manifestFile}` : `${modelBaseUri}/${manifestFile}`
}
}
\ No newline at end of file
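Concretely, the resolution rules above give (worked examples, model name illustrative):

// getModelUris('/models', 'tiny_net')
//   -> { modelBaseUri: '/models',
//        manifestUri: '/models/tiny_net-weights_manifest.json' }

// getModelUris('https://example.com/models/custom_manifest.json', 'tiny_net')
//   -> { modelBaseUri: 'https://example.com/models',
//        manifestUri: 'https://example.com/models/custom_manifest.json' }

// getModelUris(undefined, 'tiny_net')
//   -> { modelBaseUri: '', manifestUri: 'tiny_net-weights_manifest.json' }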
export * from './convLayer'
export * from './depthwiseSeparableConv'
export * from './disposeUnusedWeightTensors'
export * from './extractConvParamsFactory'
export * from './extractFCParamsFactory'
export * from './extractSeparableConvParamsFactory'
export * from './extractWeightEntryFactory'
export * from './extractWeightsFactory'
export * from './getModelUris'
export * from './types'
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { ConvParams } from './types';
export function loadConvParamsFactory(extractWeightEntry: <T>(originalPath: string, paramRank: number) => T) {
return function(prefix: string): TfjsImageRecognitionBase.ConvParams {
return function(prefix: string): ConvParams {
const filters = extractWeightEntry<tf.Tensor4D>(`${prefix}/filters`, 4)
const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/bias`, 1)
......
import * as tf from '@tensorflow/tfjs-core';
export type ExtractWeightsFunction = (numWeights: number) => Float32Array
export type ParamMapping = {
originalPath?: string
paramPath: string
}
export type ConvParams = {
filters: tf.Tensor4D
bias: tf.Tensor1D
}
export type FCParams = {
weights: tf.Tensor2D
bias: tf.Tensor1D
}
export class SeparableConvParams {
constructor(
public depthwise_filter: tf.Tensor4D,
public pointwise_filter: tf.Tensor4D,
public bias: tf.Tensor1D
) {}
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { Dimensions } from '../classes/Dimensions';
import { env } from '../env';
import { padToSquare } from '../ops/padToSquare';
import { computeReshapedDimensions, isTensor3D, isTensor4D, range } from '../utils';
import { createCanvasFromMedia } from './createCanvas';
import { imageToSquare } from './imageToSquare';
import { TResolvedNetInput } from './types';
export class NetInput {
private _imageTensors: Array<tf.Tensor3D | tf.Tensor4D> = []
private _canvases: HTMLCanvasElement[] = []
private _batchSize: number
private _treatAsBatchInput: boolean = false
private _inputDimensions: number[][] = []
private _inputSize: number
constructor(
inputs: Array<TResolvedNetInput>,
treatAsBatchInput: boolean = false
) {
if (!Array.isArray(inputs)) {
throw new Error(`NetInput.constructor - expected inputs to be an Array of TResolvedNetInput or to be instanceof tf.Tensor4D, instead have ${inputs}`)
}
this._treatAsBatchInput = treatAsBatchInput
this._batchSize = inputs.length
inputs.forEach((input, idx) => {
if (isTensor3D(input)) {
this._imageTensors[idx] = input
this._inputDimensions[idx] = input.shape
return
}
if (isTensor4D(input)) {
const batchSize = input.shape[0]
if (batchSize !== 1) {
throw new Error(`NetInput - tf.Tensor4D with batchSize ${batchSize} passed, but not supported in input array`)
}
this._imageTensors[idx] = input
this._inputDimensions[idx] = input.shape.slice(1)
return
}
const canvas = input instanceof env.getEnv().Canvas ? input : createCanvasFromMedia(input)
this._canvases[idx] = canvas
this._inputDimensions[idx] = [canvas.height, canvas.width, 3]
})
}
public get imageTensors(): Array<tf.Tensor3D | tf.Tensor4D> {
return this._imageTensors
}
public get canvases(): HTMLCanvasElement[] {
return this._canvases
}
public get isBatchInput(): boolean {
return this.batchSize > 1 || this._treatAsBatchInput
}
public get batchSize(): number {
return this._batchSize
}
public get inputDimensions(): number[][] {
return this._inputDimensions
}
public get inputSize(): number | undefined {
return this._inputSize
}
public get reshapedInputDimensions(): Dimensions[] {
return range(this.batchSize, 0, 1).map(
(_, batchIdx) => this.getReshapedInputDimensions(batchIdx)
)
}
public getInput(batchIdx: number): tf.Tensor3D | tf.Tensor4D | HTMLCanvasElement {
return this.canvases[batchIdx] || this.imageTensors[batchIdx]
}
public getInputDimensions(batchIdx: number): number[] {
return this._inputDimensions[batchIdx]
}
public getInputHeight(batchIdx: number): number {
return this._inputDimensions[batchIdx][0]
}
public getInputWidth(batchIdx: number): number {
return this._inputDimensions[batchIdx][1]
}
public getReshapedInputDimensions(batchIdx: number): Dimensions {
if (typeof this.inputSize !== 'number') {
throw new Error('getReshapedInputDimensions - inputSize not set, toBatchTensor has not been called yet')
}
const width = this.getInputWidth(batchIdx)
const height = this.getInputHeight(batchIdx)
return computeReshapedDimensions({ width, height }, this.inputSize)
}
/**
* Create a batch tensor from all input canvases and tensors
* with size [batchSize, inputSize, inputSize, 3].
*
* @param inputSize Height and width of the tensor.
* @param isCenterInputs (optional, default: true) If true, add an equal amount of padding on
* both sides of the minor dimension of the image.
* @returns The batch tensor.
*/
public toBatchTensor(inputSize: number, isCenterInputs: boolean = true): tf.Tensor4D {
this._inputSize = inputSize
return tf.tidy(() => {
const inputTensors = range(this.batchSize, 0, 1).map(batchIdx => {
const input = this.getInput(batchIdx)
if (input instanceof tf.Tensor) {
let imgTensor = isTensor4D(input) ? input : input.expandDims<tf.Rank.R4>()
imgTensor = padToSquare(imgTensor, isCenterInputs)
if (imgTensor.shape[1] !== inputSize || imgTensor.shape[2] !== inputSize) {
imgTensor = tf.image.resizeBilinear(imgTensor, [inputSize, inputSize])
}
return imgTensor.as3D(inputSize, inputSize, 3)
}
if (input instanceof env.getEnv().Canvas) {
return tf.browser.fromPixels(imageToSquare(input, inputSize, isCenterInputs))
}
throw new Error(`toBatchTensor - at batchIdx ${batchIdx}, expected input to be instanceof tf.Tensor or instanceof HTMLCanvasElement, instead have ${input}`)
})
const batchTensor = tf.stack(inputTensors.map(t => t.toFloat())).as4D(this.batchSize, inputSize, inputSize, 3)
return batchTensor
})
}
}
\ No newline at end of file
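A short usage sketch (element id and input size of 416 are hypothetical):

import { toNetInput } from './toNetInput'

async function prepareBatch() {
  // resolves the element id, waits for the media to load, wraps it in a NetInput
  const netInput = await toNetInput('inputImg')
  // pad to square, resize and stack: shape [1, 416, 416, 3] for a single input
  const batchTensor = netInput.toBatchTensor(416, true)
  // reshapedInputDimensions now reports the post-resize size of each input,
  // which is what result boxes are rescaled against later
  const [dims] = netInput.reshapedInputDimensions
  batchTensor.dispose()
  return dims
}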
import { env } from '../env';
import { isMediaLoaded } from './isMediaLoaded';
export function awaitMediaLoaded(media: HTMLImageElement | HTMLVideoElement | HTMLCanvasElement) {
return new Promise((resolve, reject) => {
if (media instanceof env.getEnv().Canvas || isMediaLoaded(media)) {
return resolve()
}
function onLoad(e: Event) {
if (!e.currentTarget) return
e.currentTarget.removeEventListener('load', onLoad)
e.currentTarget.removeEventListener('error', onError)
resolve(e)
}
function onError(e: Event) {
if (!e.currentTarget) return
e.currentTarget.removeEventListener('load', onLoad)
e.currentTarget.removeEventListener('error', onError)
reject(e)
}
media.addEventListener('load', onLoad)
media.addEventListener('error', onError)
})
}
\ No newline at end of file
import { env } from '../env';
export function bufferToImage(buf: Blob): Promise<HTMLImageElement> {
return new Promise((resolve, reject) => {
if (!(buf instanceof Blob)) {
return reject('bufferToImage - expected buf to be of type: Blob')
}
const reader = new FileReader()
reader.onload = () => {
if (typeof reader.result !== 'string') {
return reject('bufferToImage - expected reader.result to be a string, in onload')
}
const img = env.getEnv().createImageElement()
img.onload = () => resolve(img)
img.onerror = reject
img.src = reader.result
}
reader.onerror = reject
reader.readAsDataURL(buf)
})
}
\ No newline at end of file
import { IDimensions } from '../classes/Dimensions';
import { env } from '../env';
import { getContext2dOrThrow } from './getContext2dOrThrow';
import { getMediaDimensions } from './getMediaDimensions';
import { isMediaLoaded } from './isMediaLoaded';
export function createCanvas({ width, height }: IDimensions): HTMLCanvasElement {
const { createCanvasElement } = env.getEnv()
const canvas = createCanvasElement()
canvas.width = width
canvas.height = height
return canvas
}
export function createCanvasFromMedia(media: HTMLImageElement | HTMLVideoElement | ImageData, dims?: IDimensions): HTMLCanvasElement {
const { ImageData } = env.getEnv()
if (!(media instanceof ImageData) && !isMediaLoaded(media)) {
throw new Error('createCanvasFromMedia - media has not finished loading yet')
}
const { width, height } = dims || getMediaDimensions(media)
const canvas = createCanvas({ width, height })
if (media instanceof ImageData) {
getContext2dOrThrow(canvas).putImageData(media, 0, 0)
} else {
getContext2dOrThrow(canvas).drawImage(media, 0, 0, width, height)
}
return canvas
}
\ No newline at end of file
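For instance (browser context, element id hypothetical):

const video = document.getElementById('inputVideo') as HTMLVideoElement
// snapshot at the video's native dimensions
const snapshot = createCanvasFromMedia(video)
// or draw scaled into fixed dimensions
const thumbnail = createCanvasFromMedia(video, { width: 160, height: 120 })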
import * as tf from '@tensorflow/tfjs-core';
import { isTensor4D, Rect, isTensor3D } from 'tfjs-image-recognition-base';
import { Rect } from '../classes';
import { FaceDetection } from '../classes/FaceDetection';
import { isTensor3D, isTensor4D } from '../utils';
/**
* Extracts the tensors of the image regions containing the detected faces.
......
import {
createCanvas,
env,
getContext2dOrThrow,
imageTensorToCanvas,
Rect,
TNetInput,
toNetInput,
} from 'tfjs-image-recognition-base';
import { FaceDetection } from '../classes/FaceDetection';
import { Rect } from '../classes/Rect';
import { env } from '../env';
import { createCanvas } from './createCanvas';
import { getContext2dOrThrow } from './getContext2dOrThrow';
import { imageTensorToCanvas } from './imageTensorToCanvas';
import { toNetInput } from './toNetInput';
import { TNetInput } from './types';
/**
* Extracts the image regions containing the detected faces.
......
import { bufferToImage } from './bufferToImage';
import { fetchOrThrow } from './fetchOrThrow';
export async function fetchImage(uri: string): Promise<HTMLImageElement> {
const res = await fetchOrThrow(uri)
const blob = await (res).blob()
if (!blob.type.startsWith('image/')) {
throw new Error(`fetchImage - expected blob type to be of type image/*, instead have: ${blob.type}, for url: ${res.url}`)
}
return bufferToImage(blob)
}
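Sketch of intended use (URL hypothetical):

async function loadExample(): Promise<HTMLImageElement> {
  // resolves to a fully loaded HTMLImageElement;
  // rejects if the response content type is not image/*
  return fetchImage('/images/face.jpg')
}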
import { fetchOrThrow } from './fetchOrThrow';
export async function fetchJson<T>(uri: string): Promise<T> {
return (await fetchOrThrow(uri)).json()
}
import { fetchOrThrow } from './fetchOrThrow';
export async function fetchNetWeights(uri: string): Promise<Float32Array> {
return new Float32Array(await (await fetchOrThrow(uri)).arrayBuffer())
}
import { env } from '../env';
export async function fetchOrThrow(
url: string,
init?: RequestInit
): Promise<Response> {
const fetch = env.getEnv().fetch
const res = await fetch(url, init)
if (!(res.status < 400)) {
throw new Error(`failed to fetch: (${res.status}) ${res.statusText}, from url: ${res.url}`)
}
return res
}
\ No newline at end of file
import { env } from '../env';
import { resolveInput } from './resolveInput';
export function getContext2dOrThrow(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D): CanvasRenderingContext2D {
const { Canvas, CanvasRenderingContext2D } = env.getEnv()
if (canvasArg instanceof CanvasRenderingContext2D) {
return canvasArg
}
const canvas = resolveInput(canvasArg)
if (!(canvas instanceof Canvas)) {
throw new Error('getContext2dOrThrow - expected canvas to be an instance of Canvas')
}
const ctx = canvas.getContext('2d')
if (!ctx) {
throw new Error('getContext2dOrThrow - canvas 2d context is null')
}
return ctx
}
\ No newline at end of file
import { Dimensions, IDimensions } from '../classes/Dimensions';
import { env } from '../env';
export function getMediaDimensions(input: HTMLImageElement | HTMLCanvasElement | HTMLVideoElement | IDimensions): Dimensions {
const { Image, Video } = env.getEnv()
if (input instanceof Image) {
return new Dimensions(input.naturalWidth, input.naturalHeight)
}
if (input instanceof Video) {
return new Dimensions(input.videoWidth, input.videoHeight)
}
return new Dimensions(input.width, input.height)
}
import * as tf from '@tensorflow/tfjs-core';
import { env } from '../env';
import { isTensor4D } from '../utils';
export async function imageTensorToCanvas(
imgTensor: tf.Tensor,
canvas?: HTMLCanvasElement
): Promise<HTMLCanvasElement> {
const targetCanvas = canvas || env.getEnv().createCanvasElement()
const [height, width, numChannels] = imgTensor.shape.slice(isTensor4D(imgTensor) ? 1 : 0)
const imgTensor3D = tf.tidy(() => imgTensor.as3D(height, width, numChannels).toInt())
await tf.browser.toPixels(imgTensor3D, targetCanvas)
imgTensor3D.dispose()
return targetCanvas
}
\ No newline at end of file
import { env } from '../env';
import { createCanvas, createCanvasFromMedia } from './createCanvas';
import { getContext2dOrThrow } from './getContext2dOrThrow';
import { getMediaDimensions } from './getMediaDimensions';
export function imageToSquare(input: HTMLImageElement | HTMLCanvasElement, inputSize: number, centerImage: boolean = false) {
const { Image, Canvas } = env.getEnv()
if (!(input instanceof Image || input instanceof Canvas)) {
throw new Error('imageToSquare - expected arg0 to be HTMLImageElement | HTMLCanvasElement')
}
const dims = getMediaDimensions(input)
const scale = inputSize / Math.max(dims.height, dims.width)
const width = scale * dims.width
const height = scale * dims.height
const targetCanvas = createCanvas({ width: inputSize, height: inputSize })
const inputCanvas = input instanceof Canvas ? input : createCanvasFromMedia(input)
const offset = Math.abs(width - height) / 2
const dx = centerImage && width < height ? offset : 0
const dy = centerImage && height < width ? offset : 0
getContext2dOrThrow(targetCanvas).drawImage(inputCanvas, dx, dy, width, height)
return targetCanvas
}
\ No newline at end of file
export * from './awaitMediaLoaded'
export * from './bufferToImage'
export * from './createCanvas'
export * from './extractFaces'
export * from './extractFaceTensors'
export * from './fetchImage'
export * from './fetchJson'
export * from './fetchNetWeights'
export * from './fetchOrThrow'
export * from './getContext2dOrThrow'
export * from './getMediaDimensions'
export * from './imageTensorToCanvas'
export * from './imageToSquare'
export * from './isMediaElement'
export * from './isMediaLoaded'
export * from './loadWeightMap'
export * from './matchDimensions'
export * from './NetInput'
export * from './resolveInput'
export * from './toNetInput'
export * from './types'
\ No newline at end of file
import { env } from '../env';
export function isMediaElement(input: any) {
const { Image, Canvas, Video } = env.getEnv()
return input instanceof Image
|| input instanceof Canvas
|| input instanceof Video
}
\ No newline at end of file
import { env } from '../env';
export function isMediaLoaded(media: HTMLImageElement | HTMLVideoElement) : boolean {
const { Image, Video } = env.getEnv()
return (media instanceof Image && media.complete)
|| (media instanceof Video && media.readyState >= 3)
}
import * as tf from '@tensorflow/tfjs-core';
import { getModelUris } from '../common/getModelUris';
import { fetchJson } from './fetchJson';
export async function loadWeightMap(
uri: string | undefined,
defaultModelName: string,
): Promise<tf.NamedTensorMap> {
const { manifestUri, modelBaseUri } = getModelUris(uri, defaultModelName)
const manifest = await fetchJson<tf.io.WeightsManifestConfig>(manifestUri)
return tf.io.loadWeights(manifest, modelBaseUri)
}
\ No newline at end of file
import { IDimensions } from '../classes';
import { getMediaDimensions } from './getMediaDimensions';
export function matchDimensions(input: IDimensions, reference: IDimensions, useMediaDimensions: boolean = false) {
const { width, height } = useMediaDimensions
? getMediaDimensions(reference)
: reference
input.width = width
input.height = height
return { width, height }
}
\ No newline at end of file
import { env } from '../env';
export function resolveInput(arg: string | any) {
if (!env.isNodejs() && typeof arg === 'string') {
return document.getElementById(arg)
}
return arg
}
\ No newline at end of file
import { isTensor3D, isTensor4D } from '../utils';
import { awaitMediaLoaded } from './awaitMediaLoaded';
import { isMediaElement } from './isMediaElement';
import { NetInput } from './NetInput';
import { resolveInput } from './resolveInput';
import { TNetInput } from './types';
/**
* Validates the inputs to make sure they are valid net inputs and waits for all media
* elements to finish loading.
*
* @param input The input, which can be a media element or an array of different media elements.
* @returns A NetInput instance, which can be passed into one of the neural networks.
*/
export async function toNetInput(inputs: TNetInput): Promise<NetInput> {
if (inputs instanceof NetInput) {
return inputs
}
let inputArgArray = Array.isArray(inputs)
? inputs
: [inputs]
if (!inputArgArray.length) {
throw new Error('toNetInput - empty array passed as input')
}
const getIdxHint = (idx: number) => Array.isArray(inputs) ? ` at input index ${idx}:` : ''
const inputArray = inputArgArray.map(resolveInput)
inputArray.forEach((input, i) => {
if (!isMediaElement(input) && !isTensor3D(input) && !isTensor4D(input)) {
if (typeof inputArgArray[i] === 'string') {
throw new Error(`toNetInput -${getIdxHint(i)} string passed, but could not resolve HTMLElement for element id ${inputArgArray[i]}`)
}
throw new Error(`toNetInput -${getIdxHint(i)} expected media to be of type HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | tf.Tensor3D, or to be an element id`)
}
if (isTensor4D(input)) {
// if tf.Tensor4D is passed in the input array, the batch size has to be 1
const batchSize = input.shape[0]
if (batchSize !== 1) {
throw new Error(`toNetInput -${getIdxHint(i)} tf.Tensor4D with batchSize ${batchSize} passed, but not supported in input array`)
}
}
})
// wait for all media elements to finish loading
await Promise.all(
inputArray.map(input => isMediaElement(input) && awaitMediaLoaded(input))
)
return new NetInput(inputArray, Array.isArray(inputs))
}
\ No newline at end of file
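The accepted input forms, as a sketch (img1/img2 are hypothetical loaded media elements):

import * as tf from '@tensorflow/tfjs-core'
import { toNetInput } from './toNetInput'

async function examples(img1: HTMLImageElement, img2: HTMLImageElement) {
  await toNetInput('inputImg')                              // element id (browser only)
  await toNetInput(img1)                                    // a single media element
  await toNetInput(tf.zeros([112, 112, 3]) as tf.Tensor3D)  // a tf.Tensor3D
  await toNetInput([img1, img2])                            // an array, treated as a batch
}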
import * as tf from '@tensorflow/tfjs-core';
import { NetInput } from './NetInput';
export type TMediaElement = HTMLImageElement | HTMLVideoElement | HTMLCanvasElement
export type TResolvedNetInput = TMediaElement | tf.Tensor3D | tf.Tensor4D
export type TNetInputArg = string | TResolvedNetInput
export type TNetInput = TNetInputArg | Array<TNetInputArg> | NetInput | tf.Tensor4D
\ No newline at end of file
import { Box, IBoundingBox, IRect } from '../classes';
import { getContext2dOrThrow } from '../dom/getContext2dOrThrow';
import { AnchorPosition, DrawTextField, DrawTextFieldOptions, IDrawTextFieldOptions } from './DrawTextField';
export interface IDrawBoxOptions {
boxColor?: string
lineWidth?: number
drawLabelOptions?: IDrawTextFieldOptions
label?: string
}
export class DrawBoxOptions {
public boxColor: string
public lineWidth: number
public drawLabelOptions: DrawTextFieldOptions
public label?: string
constructor(options: IDrawBoxOptions = {}) {
const { boxColor, lineWidth, label, drawLabelOptions } = options
this.boxColor = boxColor || 'rgba(0, 0, 255, 1)'
this.lineWidth = lineWidth || 2
this.label = label
const defaultDrawLabelOptions = {
anchorPosition: AnchorPosition.BOTTOM_LEFT,
backgroundColor: this.boxColor
}
this.drawLabelOptions = new DrawTextFieldOptions(Object.assign({}, defaultDrawLabelOptions, drawLabelOptions))
}
}
export class DrawBox {
public box: Box
public options: DrawBoxOptions
constructor(
box: IBoundingBox | IRect,
options: IDrawBoxOptions = {}
) {
this.box = new Box(box)
this.options = new DrawBoxOptions(options)
}
draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D) {
const ctx = getContext2dOrThrow(canvasArg)
const { boxColor, lineWidth } = this.options
const { x, y, width, height } = this.box
ctx.strokeStyle = boxColor
ctx.lineWidth = lineWidth
ctx.strokeRect(x, y, width, height)
const { label } = this.options
if (label) {
new DrawTextField([label], { x: x - (lineWidth / 2), y }, this.options.drawLabelOptions).draw(canvasArg)
}
}
}
\ No newline at end of file
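Usage sketch (coordinates and canvas id are hypothetical):

new DrawBox(
  { x: 50, y: 80, width: 120, height: 120 },
  { label: 'face (0.98)', boxColor: 'rgba(0, 255, 0, 1)' }
).draw('overlay') // accepts a canvas element id, an HTMLCanvasElement or a 2d context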
import { getContext2dOrThrow, IPoint } from 'tfjs-image-recognition-base';
import { IPoint } from '../classes';
import { FaceLandmarks } from '../classes/FaceLandmarks';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { getContext2dOrThrow } from '../dom/getContext2dOrThrow';
import { WithFaceDetection } from '../factories/WithFaceDetection';
import { isWithFaceLandmarks, WithFaceLandmarks } from '../factories/WithFaceLandmarks';
import { drawContour } from './drawContour';
......
import { IDimensions, IPoint } from '../classes';
import { getContext2dOrThrow } from '../dom/getContext2dOrThrow';
import { resolveInput } from '../dom/resolveInput';
export enum AnchorPosition {
TOP_LEFT = 'TOP_LEFT',
TOP_RIGHT = 'TOP_RIGHT',
BOTTOM_LEFT = 'BOTTOM_LEFT',
BOTTOM_RIGHT = 'BOTTOM_RIGHT'
}
export interface IDrawTextFieldOptions {
anchorPosition?: AnchorPosition
backgroundColor?: string
fontColor?: string
fontSize?: number
fontStyle?: string
padding?: number
}
export class DrawTextFieldOptions implements IDrawTextFieldOptions {
public anchorPosition: AnchorPosition
public backgroundColor: string
public fontColor: string
public fontSize: number
public fontStyle: string
public padding: number
constructor(options: IDrawTextFieldOptions = {}) {
const { anchorPosition, backgroundColor, fontColor, fontSize, fontStyle, padding } = options
this.anchorPosition = anchorPosition || AnchorPosition.TOP_LEFT
this.backgroundColor = backgroundColor || 'rgba(0, 0, 0, 0.5)'
this.fontColor = fontColor || 'rgba(255, 255, 255, 1)'
this.fontSize = fontSize || 14
this.fontStyle = fontStyle || 'Georgia'
this.padding = padding || 4
}
}
export class DrawTextField {
public text: string[]
public anchor: IPoint
public options: DrawTextFieldOptions
constructor(
text: string | string[] | DrawTextField,
anchor: IPoint,
options: IDrawTextFieldOptions = {}
) {
this.text = typeof text === 'string'
? [text]
: (text instanceof DrawTextField ? text.text : text)
this.anchor = anchor
this.options = new DrawTextFieldOptions(options)
}
measureWidth(ctx: CanvasRenderingContext2D): number {
const { padding } = this.options
return this.text.map(l => ctx.measureText(l).width).reduce((w0, w1) => w0 < w1 ? w1 : w0, 0) + (2 * padding)
}
measureHeight(): number {
const { fontSize, padding } = this.options
return this.text.length * fontSize + (2 * padding)
}
getUpperLeft(ctx: CanvasRenderingContext2D, canvasDims?: IDimensions): IPoint {
const { anchorPosition } = this.options
const isShiftLeft = anchorPosition === AnchorPosition.BOTTOM_RIGHT || anchorPosition === AnchorPosition.TOP_RIGHT
const isShiftTop = anchorPosition === AnchorPosition.BOTTOM_LEFT || anchorPosition === AnchorPosition.BOTTOM_RIGHT
const textFieldWidth = this.measureWidth(ctx)
const textFieldHeight = this.measureHeight()
const x = (isShiftLeft ? this.anchor.x - textFieldWidth : this.anchor.x)
const y = isShiftTop ? this.anchor.y - textFieldHeight : this.anchor.y
// adjust anchor if text box exceeds canvas borders
if (canvasDims) {
const { width, height } = canvasDims
const newX = Math.max(Math.min(x, width - textFieldWidth), 0)
const newY = Math.max(Math.min(y, height - textFieldHeight), 0)
return { x: newX, y: newY }
}
return { x, y }
}
draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D) {
const canvas = resolveInput(canvasArg)
const ctx = getContext2dOrThrow(canvas)
const { backgroundColor, fontColor, fontSize, fontStyle, padding } = this.options
ctx.font = `${fontSize}px ${fontStyle}`
const maxTextWidth = this.measureWidth(ctx)
const textHeight = this.measureHeight()
ctx.fillStyle = backgroundColor
const upperLeft = this.getUpperLeft(ctx, canvas)
ctx.fillRect(upperLeft.x, upperLeft.y, maxTextWidth, textHeight)
ctx.fillStyle = fontColor;
this.text.forEach((textLine, i) => {
const x = padding + upperLeft.x
const y = padding + upperLeft.y + ((i + 1) * fontSize)
ctx.fillText(textLine, x, y)
})
}
}
\ No newline at end of file
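A usage sketch of DrawTextField for context (element id and text values are placeholders):

// sketch: render a two-line text field onto a canvas; with the default
// TOP_LEFT anchor the field extends right and down from the anchor point,
// and getUpperLeft clamps it back inside the canvas if it would overflow
import { DrawTextField } from './DrawTextField'

const canvas = document.getElementById('overlay') as HTMLCanvasElement
new DrawTextField(
  ['32 years', 'male (0.97)'],
  { x: 50, y: 170 }
).draw(canvas)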
import { Point } from 'tfjs-image-recognition-base';
import { Point } from '../classes';
export function drawContour(
ctx: CanvasRenderingContext2D,
......
import { Box, draw, IBoundingBox, IRect, round } from 'tfjs-image-recognition-base';
import { Box, IBoundingBox, IRect } from '../classes';
import { FaceDetection } from '../classes/FaceDetection';
import { isWithFaceDetection, WithFaceDetection } from '../factories/WithFaceDetection';
import { round } from '../utils';
import { DrawBox } from './DrawBox';
export type TDrawDetectionsInput = IRect | IBoundingBox | FaceDetection | WithFaceDetection<{}>
......@@ -21,6 +22,6 @@ export function drawDetections(
: (isWithFaceDetection(det) ? det.detection.box : new Box(det))
const label = score ? `${round(score)}` : undefined
new draw.DrawBox(box, { label }).draw(canvasArg)
new DrawBox(box, { label }).draw(canvasArg)
})
}
\ No newline at end of file
import { draw, IPoint, Point, round } from 'tfjs-image-recognition-base';
import { IPoint, Point } from '../classes';
import { FaceExpressions } from '../faceExpressionNet';
import { isWithFaceDetection } from '../factories/WithFaceDetection';
import { isWithFaceExpressions, WithFaceExpressions } from '../factories/WithFaceExpressions';
import { round } from '../utils';
import { DrawTextField } from './DrawTextField';
export type DrawFaceExpressionsInput = FaceExpressions | WithFaceExpressions<{}>
......@@ -29,7 +30,7 @@ export function drawFaceExpressions(
? e.detection.box.bottomLeft
: (textFieldAnchor || new Point(0, 0))
const drawTextField = new draw.DrawTextField(
const drawTextField = new DrawTextField(
resultsToDisplay.map(expr => `${expr.expression} (${round(expr.probability)})`),
anchor
)
......
export * from './drawContour'
export * from './drawDetections'
export * from './drawFaceExpressions'
export * from './DrawBox'
export * from './DrawFaceLandmarks'
export * from './DrawTextField'
\ No newline at end of file
import { Environment } from './types';
export function createBrowserEnv(): Environment {
const fetch = window['fetch'] || function() {
throw new Error('fetch - missing fetch implementation for browser environment')
}
const readFile = function() {
throw new Error('readFile - filesystem not available for browser environment')
}
return {
Canvas: HTMLCanvasElement,
CanvasRenderingContext2D: CanvasRenderingContext2D,
Image: HTMLImageElement,
ImageData: ImageData,
Video: HTMLVideoElement,
createCanvasElement: () => document.createElement('canvas'),
createImageElement: () => document.createElement('img'),
fetch,
readFile
}
}
\ No newline at end of file
import { FileSystem } from './types';
export function createFileSystem(fs?: any): FileSystem {
let requireFsError = ''
if (!fs) {
try {
fs = require('fs')
} catch (err) {
requireFsError = err.toString()
}
}
const readFile = fs
? function(filePath: string) {
return new Promise<Buffer>((res, rej) => {
fs.readFile(filePath, function(err: any, buffer: Buffer) {
return err ? rej(err) : res(buffer)
})
})
}
: function() {
throw new Error(`readFile - failed to require fs in nodejs environment with error: ${requireFsError}`)
}
return {
readFile
}
}
\ No newline at end of file
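A brief sketch of how the created file system abstraction is consumed (the file path is a placeholder):

// sketch: createFileSystem() requires 'fs' lazily, so in a browser bundle
// calling readFile throws the captured require error instead of the module
// failing at import time
import { createFileSystem } from './createFileSystem'

const fileSystem = createFileSystem()
fileSystem
  .readFile('./weights/manifest.json')
  .then(buffer => console.log(`read ${buffer.length} bytes`))
  .catch(err => console.error(err))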
import { createFileSystem } from './createFileSystem';
import { Environment } from './types';
export function createNodejsEnv(): Environment {
const Canvas = global['Canvas'] || global['HTMLCanvasElement']
const Image = global['Image'] || global['HTMLImageElement']
const createCanvasElement = function() {
if (Canvas) {
return new Canvas()
}
throw new Error('createCanvasElement - missing Canvas implementation for nodejs environment')
}
const createImageElement = function() {
if (Image) {
return new Image()
}
throw new Error('createImageElement - missing Image implementation for nodejs environment')
}
const fetch = global['fetch'] || function() {
throw new Error('fetch - missing fetch implementation for nodejs environment')
}
const fileSystem = createFileSystem()
return {
Canvas: Canvas || class {},
CanvasRenderingContext2D: global['CanvasRenderingContext2D'] || class {},
Image: Image || class {},
ImageData: global['ImageData'] || class {},
Video: global['HTMLVideoElement'] || class {},
createCanvasElement,
createImageElement,
fetch,
...fileSystem
}
}
\ No newline at end of file
import { createBrowserEnv } from './createBrowserEnv';
import { createFileSystem } from './createFileSystem';
import { createNodejsEnv } from './createNodejsEnv';
import { isBrowser } from './isBrowser';
import { isNodejs } from './isNodejs';
import { Environment } from './types';
let environment: Environment | null
function getEnv(): Environment {
if (!environment) {
throw new Error('getEnv - environment is not defined, check isNodejs() and isBrowser()')
}
return environment
}
function setEnv(env: Environment) {
environment = env
}
function initialize() {
// check for isBrowser() first to prevent the electron renderer process
// from being initialized with the wrong environment, since isNodejs() also returns true there
if (isBrowser()) {
setEnv(createBrowserEnv())
}
if (isNodejs()) {
setEnv(createNodejsEnv())
}
}
function monkeyPatch(env: Partial<Environment>) {
if (!environment) {
initialize()
}
if (!environment) {
throw new Error('monkeyPatch - environment is not defined, check isNodejs() and isBrowser()')
}
const { Canvas = environment.Canvas, Image = environment.Image } = env
environment.Canvas = Canvas
environment.Image = Image
environment.createCanvasElement = env.createCanvasElement || (() => new Canvas())
environment.createImageElement = env.createImageElement || (() => new Image())
environment.ImageData = env.ImageData || environment.ImageData
environment.Video = env.Video || environment.Video
environment.fetch = env.fetch || environment.fetch
environment.readFile = env.readFile || environment.readFile
}
export const env = {
getEnv,
setEnv,
initialize,
createBrowserEnv,
createFileSystem,
createNodejsEnv,
monkeyPatch,
isBrowser,
isNodejs
}
initialize()
export * from './types'
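A sketch of the typical nodejs setup against this env module, assuming the third-party node-canvas package (the 'canvas' import and the casts are assumptions of this sketch, not part of the diff):

// sketch: patch the nodejs environment with node-canvas implementations so
// that createCanvasElement/createImageElement and image decoding work
import * as canvas from 'canvas'
import { env } from 'face-api.js'

const { Canvas, Image, ImageData } = canvas
env.monkeyPatch({
  Canvas: Canvas as any,      // node-canvas types differ from the DOM lib types
  Image: Image as any,
  ImageData: ImageData as any
})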
export function isBrowser(): boolean {
return typeof window === 'object'
&& typeof document !== 'undefined'
&& typeof HTMLImageElement !== 'undefined'
&& typeof HTMLCanvasElement !== 'undefined'
&& typeof HTMLVideoElement !== 'undefined'
&& typeof ImageData !== 'undefined'
&& typeof CanvasRenderingContext2D !== 'undefined'
}
\ No newline at end of file
export function isNodejs(): boolean {
return typeof global === 'object'
&& typeof require === 'function'
&& typeof module !== 'undefined'
// issues with gatsby.js: module.exports is undefined
// && !!module.exports
&& typeof process !== 'undefined' && !!process.version
}
\ No newline at end of file
export type FileSystem = {
readFile: (filePath: string) => Promise<Buffer>
}
export type Environment = FileSystem & {
Canvas: typeof HTMLCanvasElement
CanvasRenderingContext2D: typeof CanvasRenderingContext2D
Image: typeof HTMLImageElement
ImageData: typeof ImageData
Video: typeof HTMLVideoElement
createCanvasElement: () => HTMLCanvasElement
createImageElement: () => HTMLImageElement
fetch: (url: string, init?: RequestInit) => Promise<Response>
}
import * as tf from '@tensorflow/tfjs-core';
import { NetInput, TNetInput, toNetInput } from 'tfjs-image-recognition-base';
import { NetInput, TNetInput, toNetInput } from '../dom';
import { FaceFeatureExtractor } from '../faceFeatureExtractor/FaceFeatureExtractor';
import { FaceFeatureExtractorParams } from '../faceFeatureExtractor/types';
import { FaceProcessor } from '../faceProcessor/FaceProcessor';
......
import * as tf from '@tensorflow/tfjs-core';
import { NetInput, NeuralNetwork, normalize, TNetInput, toNetInput } from 'tfjs-image-recognition-base';
import { NetInput, TNetInput, toNetInput } from '../dom';
import { NeuralNetwork } from '../NeuralNetwork';
import { normalize } from '../ops';
import { denseBlock4 } from './denseBlock';
import { extractParams } from './extractParams';
import { extractParamsFromWeigthMap } from './extractParamsFromWeigthMap';
......
import * as tf from '@tensorflow/tfjs-core';
import { NetInput, NeuralNetwork, normalize, TNetInput, toNetInput } from 'tfjs-image-recognition-base';
import { NetInput, TNetInput, toNetInput } from '../dom';
import { NeuralNetwork } from '../NeuralNetwork';
import { normalize } from '../ops';
import { denseBlock3 } from './denseBlock';
import { extractParamsFromWeigthMapTiny } from './extractParamsFromWeigthMapTiny';
import { extractParamsTiny } from './extractParamsTiny';
......
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { ConvParams, SeparableConvParams } from '../common';
import { depthwiseSeparableConv } from '../common/depthwiseSeparableConv';
import { DenseBlock3Params, DenseBlock4Params } from './types';
......@@ -13,10 +13,10 @@ export function denseBlock3(
const out1 = tf.relu(
isFirstLayer
? tf.add(
tf.conv2d(x, (denseBlockParams.conv0 as TfjsImageRecognitionBase.ConvParams).filters, [2, 2], 'same'),
tf.conv2d(x, (denseBlockParams.conv0 as ConvParams).filters, [2, 2], 'same'),
denseBlockParams.conv0.bias
)
: depthwiseSeparableConv(x, denseBlockParams.conv0 as TfjsImageRecognitionBase.SeparableConvParams, [2, 2])
: depthwiseSeparableConv(x, denseBlockParams.conv0 as SeparableConvParams, [2, 2])
) as tf.Tensor4D
const out2 = depthwiseSeparableConv(out1, denseBlockParams.conv1, [1, 1])
......@@ -37,10 +37,10 @@ export function denseBlock4(
const out1 = tf.relu(
isFirstLayer
? tf.add(
tf.conv2d(x, (denseBlockParams.conv0 as TfjsImageRecognitionBase.ConvParams).filters, isScaleDown ? [2, 2] : [1, 1], 'same'),
tf.conv2d(x, (denseBlockParams.conv0 as ConvParams).filters, isScaleDown ? [2, 2] : [1, 1], 'same'),
denseBlockParams.conv0.bias
)
: depthwiseSeparableConv(x, denseBlockParams.conv0 as TfjsImageRecognitionBase.SeparableConvParams, isScaleDown ? [2, 2] : [1, 1])
: depthwiseSeparableConv(x, denseBlockParams.conv0 as SeparableConvParams, isScaleDown ? [2, 2] : [1, 1])
) as tf.Tensor4D
const out2 = depthwiseSeparableConv(out1, denseBlockParams.conv1, [1, 1])
......
import { extractWeightsFactory, ParamMapping } from '../common';
import { extractorsFactory } from './extractorsFactory';
import { FaceFeatureExtractorParams } from './types';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
export function extractParams(weights: Float32Array): { params: FaceFeatureExtractorParams, paramMappings: TfjsImageRecognitionBase.ParamMapping[] } {
const paramMappings: TfjsImageRecognitionBase.ParamMapping[] = []
export function extractParams(weights: Float32Array): { params: FaceFeatureExtractorParams, paramMappings: ParamMapping[] } {
const paramMappings: ParamMapping[] = []
const {
extractWeights,
getRemainingWeights
} = TfjsImageRecognitionBase.extractWeightsFactory(weights)
} = extractWeightsFactory(weights)
const {
extractDenseBlock4Params
......
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { disposeUnusedWeightTensors, ParamMapping } from '../common';
import { loadParamsFactory } from './loadParamsFactory';
import { FaceFeatureExtractorParams } from './types';
export function extractParamsFromWeigthMap(
weightMap: tf.NamedTensorMap
): { params: FaceFeatureExtractorParams, paramMappings: TfjsImageRecognitionBase.ParamMapping[] } {
): { params: FaceFeatureExtractorParams, paramMappings: ParamMapping[] } {
const paramMappings: TfjsImageRecognitionBase.ParamMapping[] = []
const paramMappings: ParamMapping[] = []
const {
extractDenseBlock4Params
......@@ -21,7 +21,7 @@ export function extractParamsFromWeigthMap(
dense3: extractDenseBlock4Params('dense3')
}
TfjsImageRecognitionBase.disposeUnusedWeightTensors(weightMap, paramMappings)
disposeUnusedWeightTensors(weightMap, paramMappings)
return { params, paramMappings }
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { disposeUnusedWeightTensors, ParamMapping } from '../common';
import { loadParamsFactory } from './loadParamsFactory';
import { TinyFaceFeatureExtractorParams } from './types';
export function extractParamsFromWeigthMapTiny(
weightMap: tf.NamedTensorMap
): { params: TinyFaceFeatureExtractorParams, paramMappings: TfjsImageRecognitionBase.ParamMapping[] } {
): { params: TinyFaceFeatureExtractorParams, paramMappings: ParamMapping[] } {
const paramMappings: TfjsImageRecognitionBase.ParamMapping[] = []
const paramMappings: ParamMapping[] = []
const {
extractDenseBlock3Params
......@@ -20,7 +20,7 @@ export function extractParamsFromWeigthMapTiny(
dense2: extractDenseBlock3Params('dense2')
}
TfjsImageRecognitionBase.disposeUnusedWeightTensors(weightMap, paramMappings)
disposeUnusedWeightTensors(weightMap, paramMappings)
return { params, paramMappings }
}
\ No newline at end of file
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { extractWeightsFactory, ParamMapping } from '../common';
import { extractorsFactory } from './extractorsFactory';
import { TinyFaceFeatureExtractorParams } from './types';
export function extractParamsTiny(weights: Float32Array): { params: TinyFaceFeatureExtractorParams, paramMappings: TfjsImageRecognitionBase.ParamMapping[] } {
const paramMappings: TfjsImageRecognitionBase.ParamMapping[] = []
export function extractParamsTiny(weights: Float32Array): { params: TinyFaceFeatureExtractorParams, paramMappings: ParamMapping[] } {
const paramMappings: ParamMapping[] = []
const {
extractWeights,
getRemainingWeights
} = TfjsImageRecognitionBase.extractWeightsFactory(weights)
} = extractWeightsFactory(weights)
const {
extractDenseBlock3Params
......
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import {
extractConvParamsFactory,
extractSeparableConvParamsFactory,
ExtractWeightsFunction,
ParamMapping,
} from '../common';
import { DenseBlock3Params, DenseBlock4Params } from './types';
export function extractorsFactory(extractWeights: TfjsImageRecognitionBase.ExtractWeightsFunction, paramMappings: TfjsImageRecognitionBase.ParamMapping[]) {
export function extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) {
const extractConvParams = TfjsImageRecognitionBase.extractConvParamsFactory(extractWeights, paramMappings)
const extractSeparableConvParams = TfjsImageRecognitionBase.extractSeparableConvParamsFactory(extractWeights, paramMappings)
const extractConvParams = extractConvParamsFactory(extractWeights, paramMappings)
const extractSeparableConvParams = extractSeparableConvParamsFactory(extractWeights, paramMappings)
function extractDenseBlock3Params(channelsIn: number, channelsOut: number, mappedPrefix: string, isFirstLayer: boolean = false): DenseBlock3Params {
......
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { extractWeightEntryFactory, loadSeparableConvParamsFactory, ParamMapping } from '../common';
import { loadConvParamsFactory } from '../common/loadConvParamsFactory';
import { DenseBlock3Params, DenseBlock4Params } from './types';
export function loadParamsFactory(weightMap: any, paramMappings: TfjsImageRecognitionBase.ParamMapping[]) {
export function loadParamsFactory(weightMap: any, paramMappings: ParamMapping[]) {
const extractWeightEntry = TfjsImageRecognitionBase.extractWeightEntryFactory(weightMap, paramMappings)
const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings)
const extractConvParams = loadConvParamsFactory(extractWeightEntry)
const extractSeparableConvParams = TfjsImageRecognitionBase.loadSeparableConvParamsFactory(extractWeightEntry)
const extractSeparableConvParams = loadSeparableConvParamsFactory(extractWeightEntry)
function extractDenseBlock3Params(prefix: string, isFirstLayer: boolean = false): DenseBlock3Params {
const conv0 = isFirstLayer
......
import * as tf from '@tensorflow/tfjs-core';
import { NetInput, NeuralNetwork, TNetInput, TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { NetInput, TNetInput } from '..';
import { ConvParams, SeparableConvParams } from '../common';
import { NeuralNetwork } from '../NeuralNetwork';
export type ConvWithBatchNormParams = BatchNormParams & {
filter: tf.Tensor4D
......@@ -18,13 +21,13 @@ export type SeparableConvWithBatchNormParams = {
}
export type DenseBlock3Params = {
conv0: TfjsImageRecognitionBase.SeparableConvParams | TfjsImageRecognitionBase.ConvParams
conv1: TfjsImageRecognitionBase.SeparableConvParams
conv2: TfjsImageRecognitionBase.SeparableConvParams
conv0: SeparableConvParams | ConvParams
conv1: SeparableConvParams
conv2: SeparableConvParams
}
export type DenseBlock4Params = DenseBlock3Params & {
conv3: TfjsImageRecognitionBase.SeparableConvParams
conv3: SeparableConvParams
}
export type TinyFaceFeatureExtractorParams = {
......
import * as tf from '@tensorflow/tfjs-core';
import { IDimensions, isEven, NetInput, Point, TNetInput, toNetInput } from 'tfjs-image-recognition-base';
import { IDimensions, Point } from '../classes';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { NetInput, TNetInput, toNetInput } from '../dom';
import { FaceFeatureExtractorParams, TinyFaceFeatureExtractorParams } from '../faceFeatureExtractor/types';
import { FaceProcessor } from '../faceProcessor/FaceProcessor';
import { isEven } from '../utils';
export abstract class FaceLandmark68NetBase<
TExtractorParams extends FaceFeatureExtractorParams | TinyFaceFeatureExtractorParams
......
import * as tf from '@tensorflow/tfjs-core';
import { NetInput, NeuralNetwork } from 'tfjs-image-recognition-base';
import { fullyConnectedLayer } from '../common/fullyConnectedLayer';
import { NetInput } from '../dom';
import {
FaceFeatureExtractorParams,
IFaceFeatureExtractor,
TinyFaceFeatureExtractorParams,
} from '../faceFeatureExtractor/types';
import { NeuralNetwork } from '../NeuralNetwork';
import { extractParams } from './extractParams';
import { extractParamsFromWeigthMap } from './extractParamsFromWeigthMap';
import { NetParams } from './types';
......
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { extractFCParamsFactory, extractWeightsFactory, ParamMapping } from '../common';
import { NetParams } from './types';
export function extractParams(weights: Float32Array, channelsIn: number, channelsOut: number): { params: NetParams, paramMappings: TfjsImageRecognitionBase.ParamMapping[] } {
export function extractParams(weights: Float32Array, channelsIn: number, channelsOut: number): { params: NetParams, paramMappings: ParamMapping[] } {
const paramMappings: TfjsImageRecognitionBase.ParamMapping[] = []
const paramMappings: ParamMapping[] = []
const {
extractWeights,
getRemainingWeights
} = TfjsImageRecognitionBase.extractWeightsFactory(weights)
} = extractWeightsFactory(weights)
const extractFCParams = TfjsImageRecognitionBase.extractFCParamsFactory(extractWeights, paramMappings)
const extractFCParams = extractFCParamsFactory(extractWeights, paramMappings)
const fc = extractFCParams(channelsIn, channelsOut, 'fc')
......
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { disposeUnusedWeightTensors, extractWeightEntryFactory, FCParams, ParamMapping } from '../common';
import { NetParams } from './types';
export function extractParamsFromWeigthMap(
weightMap: tf.NamedTensorMap
): { params: NetParams, paramMappings: TfjsImageRecognitionBase.ParamMapping[] } {
): { params: NetParams, paramMappings: ParamMapping[] } {
const paramMappings: TfjsImageRecognitionBase.ParamMapping[] = []
const paramMappings: ParamMapping[] = []
const extractWeightEntry = TfjsImageRecognitionBase.extractWeightEntryFactory(weightMap, paramMappings)
const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings)
function extractFcParams(prefix: string): TfjsImageRecognitionBase.FCParams {
function extractFcParams(prefix: string): FCParams {
const weights = extractWeightEntry<tf.Tensor2D>(`${prefix}/weights`, 2)
const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/bias`, 1)
return { weights, bias }
......@@ -21,7 +21,7 @@ export function extractParamsFromWeigthMap(
fc: extractFcParams('fc')
}
TfjsImageRecognitionBase.disposeUnusedWeightTensors(weightMap, paramMappings)
disposeUnusedWeightTensors(weightMap, paramMappings)
return { params, paramMappings }
}
\ No newline at end of file
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { FCParams } from '../common';
export type NetParams = {
fc: TfjsImageRecognitionBase.FCParams
fc: FCParams
}
import * as tf from '@tensorflow/tfjs-core';
import { NetInput, NeuralNetwork, normalize, TNetInput, toNetInput } from 'tfjs-image-recognition-base';
import { NetInput, TNetInput, toNetInput } from '../dom';
import { NeuralNetwork } from '../NeuralNetwork';
import { normalize } from '../ops';
import { convDown } from './convLayer';
import { extractParams } from './extractParams';
import { extractParamsFromWeigthMap } from './extractParamsFromWeigthMap';
......
import * as tf from '@tensorflow/tfjs-core';
import { isFloat, TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { ConvParams, extractWeightsFactory, ExtractWeightsFunction, ParamMapping } from '../common';
import { isFloat } from '../utils';
import { ConvLayerParams, NetParams, ResidualLayerParams, ScaleLayerParams } from './types';
function extractorsFactory(extractWeights: TfjsImageRecognitionBase.ExtractWeightsFunction, paramMappings: TfjsImageRecognitionBase.ParamMapping[]) {
function extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) {
function extractFilterValues(numFilterValues: number, numFilters: number, filterSize: number): tf.Tensor4D {
const weights = extractWeights(numFilterValues)
......@@ -26,7 +27,7 @@ function extractorsFactory(extractWeights: TfjsImageRecognitionBase.ExtractWeigh
numFilters: number,
filterSize: number,
mappedPrefix: string
): TfjsImageRecognitionBase.ConvParams {
): ConvParams {
const filters = extractFilterValues(numFilterValues, numFilters, filterSize)
const bias = tf.tensor1d(extractWeights(numFilters))
......@@ -89,14 +90,14 @@ function extractorsFactory(extractWeights: TfjsImageRecognitionBase.ExtractWeigh
}
export function extractParams(weights: Float32Array): { params: NetParams, paramMappings: TfjsImageRecognitionBase.ParamMapping[] } {
export function extractParams(weights: Float32Array): { params: NetParams, paramMappings: ParamMapping[] } {
const {
extractWeights,
getRemainingWeights
} = TfjsImageRecognitionBase.extractWeightsFactory(weights)
} = extractWeightsFactory(weights)
const paramMappings: TfjsImageRecognitionBase.ParamMapping[] = []
const paramMappings: ParamMapping[] = []
const {
extractConvLayerParams,
......
import * as tf from '@tensorflow/tfjs-core';
import { isTensor2D, TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { disposeUnusedWeightTensors, extractWeightEntryFactory, ParamMapping } from '../common';
import { isTensor2D } from '../utils';
import { ConvLayerParams, NetParams, ResidualLayerParams, ScaleLayerParams } from './types';
function extractorsFactory(weightMap: any, paramMappings: TfjsImageRecognitionBase.ParamMapping[]) {
function extractorsFactory(weightMap: any, paramMappings: ParamMapping[]) {
const extractWeightEntry = TfjsImageRecognitionBase.extractWeightEntryFactory(weightMap, paramMappings)
const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings)
function extractScaleLayerParams(prefix: string): ScaleLayerParams {
......@@ -40,9 +41,9 @@ function extractorsFactory(weightMap: any, paramMappings: TfjsImageRecognitionBa
export function extractParamsFromWeigthMap(
weightMap: tf.NamedTensorMap
): { params: NetParams, paramMappings: TfjsImageRecognitionBase.ParamMapping[] } {
): { params: NetParams, paramMappings: ParamMapping[] } {
const paramMappings: TfjsImageRecognitionBase.ParamMapping[] = []
const paramMappings: ParamMapping[] = []
const {
extractConvLayerParams,
......@@ -94,7 +95,7 @@ export function extractParamsFromWeigthMap(
fc
}
TfjsImageRecognitionBase.disposeUnusedWeightTensors(weightMap, paramMappings)
disposeUnusedWeightTensors(weightMap, paramMappings)
return { params, paramMappings }
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { ConvParams } from '../common';
export type ScaleLayerParams = {
weights: tf.Tensor1D
......@@ -11,7 +12,7 @@ export type ResidualLayerParams = {
}
export type ConvLayerParams = {
conv: TfjsImageRecognitionBase.ConvParams
conv: ConvParams
scale: ScaleLayerParams
}
......
import { isValidProbablitiy } from 'tfjs-image-recognition-base';
import { Gender } from '../ageGenderNet/types';
import { isValidProbablitiy } from '../utils';
export type WithGender<TSource> = TSource & {
gender: Gender
......
import { TNetInput } from 'tfjs-image-recognition-base';
import { TNetInput } from '../dom';
import { extendWithFaceDescriptor, WithFaceDescriptor } from '../factories/WithFaceDescriptor';
import { WithFaceDetection } from '../factories/WithFaceDetection';
import { WithFaceLandmarks } from '../factories/WithFaceLandmarks';
......
import * as tf from '@tensorflow/tfjs-core';
import { TNetInput } from 'tfjs-image-recognition-base';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { extractFaces, extractFaceTensors } from '../dom';
import { extractFaces, extractFaceTensors, TNetInput } from '../dom';
import { FaceLandmark68Net } from '../faceLandmarkNet/FaceLandmark68Net';
import { FaceLandmark68TinyNet } from '../faceLandmarkNet/FaceLandmark68TinyNet';
import { WithFaceDetection } from '../factories/WithFaceDetection';
......
import { TfjsImageRecognitionBase, TNetInput } from 'tfjs-image-recognition-base';
import { FaceDetection } from '../classes/FaceDetection';
import { TNetInput } from '../dom';
import { extendWithFaceDetection, WithFaceDetection } from '../factories/WithFaceDetection';
import { MtcnnOptions } from '../mtcnn/MtcnnOptions';
import { SsdMobilenetv1Options } from '../ssdMobilenetv1/SsdMobilenetv1Options';
import { TinyFaceDetectorOptions } from '../tinyFaceDetector/TinyFaceDetectorOptions';
import { TinyYolov2Options } from '../tinyYolov2';
import { ComposableTask } from './ComposableTask';
import { DetectAllFaceLandmarksTask, DetectSingleFaceLandmarksTask } from './DetectFaceLandmarksTasks';
import { nets } from './nets';
......@@ -38,7 +38,7 @@ export class DetectAllFacesTask extends DetectFacesTaskBase<FaceDetection[]> {
options instanceof SsdMobilenetv1Options
? (input: TNetInput) => nets.ssdMobilenetv1.locateFaces(input, options)
: (
options instanceof TfjsImageRecognitionBase.TinyYolov2Options
options instanceof TinyYolov2Options
? (input: TNetInput) => nets.tinyYolov2.locateFaces(input, options)
: null
)
......
import * as tf from '@tensorflow/tfjs-core';
import { TNetInput } from 'tfjs-image-recognition-base';
import { AgeAndGenderPrediction } from '../ageGenderNet/types';
import { TNetInput } from '../dom';
import { extendWithAge, WithAge } from '../factories/WithAge';
import { WithFaceDetection } from '../factories/WithFaceDetection';
import { WithFaceLandmarks } from '../factories/WithFaceLandmarks';
......
import * as tf from '@tensorflow/tfjs-core';
import { TNetInput } from 'tfjs-image-recognition-base';
import { TNetInput } from '../dom';
import { FaceExpressions } from '../faceExpressionNet/FaceExpressions';
import { WithFaceDetection } from '../factories/WithFaceDetection';
import { extendWithFaceExpressions, WithFaceExpressions } from '../factories/WithFaceExpressions';
......
import { TfjsImageRecognitionBase, TNetInput } from 'tfjs-image-recognition-base';
import { TNetInput } from '../dom';
import { WithFaceDescriptor, WithFaceDetection, WithFaceLandmarks } from '../factories';
import { IMtcnnOptions, MtcnnOptions } from '../mtcnn/MtcnnOptions';
import { SsdMobilenetv1Options } from '../ssdMobilenetv1';
import { ITinyYolov2Options, TinyYolov2Options } from '../tinyYolov2';
import { detectAllFaces } from './detectFaces';
// export allFaces API for backward compatibility
......@@ -18,9 +18,9 @@ export async function allFacesSsdMobilenetv1(
export async function allFacesTinyYolov2(
input: TNetInput,
forwardParams: TfjsImageRecognitionBase.ITinyYolov2Options = {}
forwardParams: ITinyYolov2Options = {}
): Promise<WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>[]> {
return await detectAllFaces(input, new TfjsImageRecognitionBase.TinyYolov2Options(forwardParams))
return await detectAllFaces(input, new TinyYolov2Options(forwardParams))
.withFaceLandmarks()
.withFaceDescriptors()
}
......
import { TNetInput } from 'tfjs-image-recognition-base';
import { TNetInput } from '../dom';
import { SsdMobilenetv1Options } from '../ssdMobilenetv1/SsdMobilenetv1Options';
import { DetectAllFacesTask, DetectSingleFaceTask } from './DetectFacesTasks';
import { FaceDetectionOptions } from './types';
......
import * as tf from '@tensorflow/tfjs-core';
import { TNetInput } from 'tfjs-image-recognition-base';
import { FaceDetection } from '../classes/FaceDetection';
import { extractFaces, extractFaceTensors } from '../dom';
import { extractFaces, extractFaceTensors, TNetInput } from '../dom';
import { WithFaceDetection } from '../factories/WithFaceDetection';
import { isWithFaceLandmarks, WithFaceLandmarks } from '../factories/WithFaceLandmarks';
......
import { TfjsImageRecognitionBase, TNetInput } from 'tfjs-image-recognition-base';
import { AgeGenderNet } from '../ageGenderNet/AgeGenderNet';
import { AgeAndGenderPrediction } from '../ageGenderNet/types';
import { FaceDetection } from '../classes/FaceDetection';
import { FaceLandmarks5 } from '../classes/FaceLandmarks5';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { TNetInput } from '../dom';
import { FaceExpressionNet } from '../faceExpressionNet/FaceExpressionNet';
import { FaceExpressions } from '../faceExpressionNet/FaceExpressions';
import { FaceLandmark68Net } from '../faceLandmarkNet/FaceLandmark68Net';
......@@ -18,7 +17,7 @@ import { SsdMobilenetv1 } from '../ssdMobilenetv1/SsdMobilenetv1';
import { SsdMobilenetv1Options } from '../ssdMobilenetv1/SsdMobilenetv1Options';
import { TinyFaceDetector } from '../tinyFaceDetector/TinyFaceDetector';
import { TinyFaceDetectorOptions } from '../tinyFaceDetector/TinyFaceDetectorOptions';
import { TinyYolov2 } from '../tinyYolov2';
import { ITinyYolov2Options, TinyYolov2 } from '../tinyYolov2';
export const nets = {
ssdMobilenetv1: new SsdMobilenetv1(),
......@@ -59,7 +58,7 @@ export const tinyFaceDetector = (input: TNetInput, options: TinyFaceDetectorOpti
* @param options (optional, default: see TinyYolov2Options constructor for default parameters).
* @returns Bounding box of each face with score.
*/
export const tinyYolov2 = (input: TNetInput, options: TfjsImageRecognitionBase.ITinyYolov2Options): Promise<FaceDetection[]> =>
export const tinyYolov2 = (input: TNetInput, options: ITinyYolov2Options): Promise<FaceDetection[]> =>
nets.tinyYolov2.locateFaces(input, options)
/**
......
import { TfjsImageRecognitionBase, TNetInput } from 'tfjs-image-recognition-base';
import { FaceDetection } from '../classes/FaceDetection';
import { TNetInput } from '../dom';
import { MtcnnOptions } from '../mtcnn/MtcnnOptions';
import { SsdMobilenetv1Options } from '../ssdMobilenetv1/SsdMobilenetv1Options';
import { TinyFaceDetectorOptions } from '../tinyFaceDetector/TinyFaceDetectorOptions';
import { TinyYolov2Options } from '../tinyYolov2';
export type FaceDetectionOptions = TinyFaceDetectorOptions | SsdMobilenetv1Options | MtcnnOptions | TfjsImageRecognitionBase.TinyYolov2Options
export type FaceDetectionOptions = TinyFaceDetectorOptions | SsdMobilenetv1Options | MtcnnOptions | TinyYolov2Options
export type FaceDetectionFunction = (input: TNetInput) => Promise<FaceDetection[]>
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { draw as drawBase } from 'tfjs-image-recognition-base';
import * as drawExtended from './draw';
import * as draw from './draw';
import * as utils from './utils';
export {
draw,
utils,
tf
}
export * from 'tfjs-image-recognition-base';
export * from './ageGenderNet/index';
const draw = {...drawBase, ...drawExtended }
export { draw }
export * from './classes/index';
export * from './dom/index'
export * from './env/index';
export * from './faceExpressionNet/index';
export * from './faceLandmarkNet/index';
export * from './faceRecognitionNet/index';
export * from './factories/index';
export * from './globalApi/index';
export * from './mtcnn/index';
export * from './ops/index';
export * from './ssdMobilenetv1/index';
export * from './tinyFaceDetector/index';
export * from './tinyYolov2/index';
export * from './euclideanDistance';
export * from './NeuralNetwork';
export * from './resizeResults';
\ No newline at end of file
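For consumers, a sketch of the net effect of this re-export change (values are arbitrary):

// sketch: the base library's helpers now live on the package itself, with
// round() under the utils namespace and the base and extended draw helpers
// merged into a single draw namespace
import * as faceapi from 'face-api.js'

const score = faceapi.utils.round(0.98765)   // 0.99, assuming the default precision of 2
const label = `face (${score})`
new faceapi.draw.DrawBox({ x: 0, y: 0, width: 10, height: 10 }, { label })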
import * as tf from '@tensorflow/tfjs-core';
import { NetInput, NeuralNetwork, Point, Rect, TNetInput, toNetInput } from 'tfjs-image-recognition-base';
import { Point, Rect } from '../classes';
import { FaceDetection } from '../classes/FaceDetection';
import { FaceLandmarks5 } from '../classes/FaceLandmarks5';
import { NetInput, TNetInput, toNetInput } from '../dom';
import { extendWithFaceDetection, extendWithFaceLandmarks } from '../factories';
import { NeuralNetwork } from '../NeuralNetwork';
import { bgrToRgbTensor } from './bgrToRgbTensor';
import { CELL_SIZE } from './config';
import { extractParams } from './extractParams';
......
import { Box } from 'tfjs-image-recognition-base';
import { Box } from '../classes';
export class MtcnnBox extends Box<MtcnnBox> {
constructor(left: number, top: number, right: number, bottom: number) {
......
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { convLayer } from '../common';
import { fullyConnectedLayer } from '../common/fullyConnectedLayer';
import { prelu } from './prelu';
import { sharedLayer } from './sharedLayers';
......@@ -11,7 +11,7 @@ export function ONet(x: tf.Tensor4D, params: ONetParams): { scores: tf.Tensor1D,
let out = sharedLayer(x, params)
out = tf.maxPool(out, [2, 2], [2, 2], 'same')
out = TfjsImageRecognitionBase.convLayer(out, params.conv4, 'valid')
out = convLayer(out, params.conv4, 'valid')
out = prelu<tf.Tensor4D>(out, params.prelu4_alpha)
const vectorized = tf.reshape(out, [out.shape[0], params.fc1.weights.shape[0]]) as tf.Tensor2D
......
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { convLayer } from '../common';
import { sharedLayer } from './sharedLayers';
import { PNetParams } from './types';
......@@ -8,10 +8,10 @@ export function PNet(x: tf.Tensor4D, params: PNetParams): { prob: tf.Tensor4D, r
return tf.tidy(() => {
let out = sharedLayer(x, params, true)
const conv = TfjsImageRecognitionBase.convLayer(out, params.conv4_1, 'valid')
const conv = convLayer(out, params.conv4_1, 'valid')
const max = tf.expandDims(tf.max(conv, 3), 3)
const prob = tf.softmax(tf.sub(conv, max), 3) as tf.Tensor4D
const regions = TfjsImageRecognitionBase.convLayer(out, params.conv4_2, 'valid')
const regions = convLayer(out, params.conv4_2, 'valid')
return { prob, regions }
})
......
import * as tf from '@tensorflow/tfjs-core';
import {
Box,
createCanvas,
createCanvasFromMedia,
env,
getContext2dOrThrow,
IDimensions,
} from 'tfjs-image-recognition-base';
import { Box, IDimensions } from '../classes';
import { createCanvas, createCanvasFromMedia, getContext2dOrThrow } from '../dom';
import { env } from '../env';
import { normalize } from './normalize';
export async function extractImagePatches(
......
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import {
extractConvParamsFactory,
extractFCParamsFactory,
extractWeightsFactory,
ExtractWeightsFunction,
ParamMapping,
} from '../common';
import { NetParams, ONetParams, PNetParams, RNetParams, SharedParams } from './types';
function extractorsFactory(extractWeights: TfjsImageRecognitionBase.ExtractWeightsFunction, paramMappings: TfjsImageRecognitionBase.ParamMapping[]) {
function extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) {
const extractConvParams = TfjsImageRecognitionBase.extractConvParamsFactory(extractWeights, paramMappings)
const extractFCParams = TfjsImageRecognitionBase.extractFCParamsFactory(extractWeights, paramMappings)
const extractConvParams = extractConvParamsFactory(extractWeights, paramMappings)
const extractFCParams = extractFCParamsFactory(extractWeights, paramMappings)
function extractPReluParams(size: number, paramPath: string): tf.Tensor1D {
const alpha = tf.tensor1d(extractWeights(size))
......@@ -68,14 +74,14 @@ function extractorsFactory(extractWeights: TfjsImageRecognitionBase.ExtractWeigh
}
export function extractParams(weights: Float32Array): { params: NetParams, paramMappings: TfjsImageRecognitionBase.ParamMapping[] } {
export function extractParams(weights: Float32Array): { params: NetParams, paramMappings: ParamMapping[] } {
const {
extractWeights,
getRemainingWeights
} = TfjsImageRecognitionBase.extractWeightsFactory(weights)
} = extractWeightsFactory(weights)
const paramMappings: TfjsImageRecognitionBase.ParamMapping[] = []
const paramMappings: ParamMapping[] = []
const {
extractPNetParams,
......
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { ConvParams, disposeUnusedWeightTensors, extractWeightEntryFactory, FCParams, ParamMapping } from '../common';
import { NetParams, ONetParams, PNetParams, RNetParams, SharedParams } from './types';
function extractorsFactory(weightMap: any, paramMappings: TfjsImageRecognitionBase.ParamMapping[]) {
function extractorsFactory(weightMap: any, paramMappings: ParamMapping[]) {
const extractWeightEntry = TfjsImageRecognitionBase.extractWeightEntryFactory(weightMap, paramMappings)
const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings)
function extractConvParams(prefix: string): TfjsImageRecognitionBase.ConvParams {
function extractConvParams(prefix: string): ConvParams {
const filters = extractWeightEntry<tf.Tensor4D>(`${prefix}/weights`, 4, `${prefix}/filters`)
const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/bias`, 1)
return { filters, bias }
}
function extractFCParams(prefix: string): TfjsImageRecognitionBase.FCParams {
function extractFCParams(prefix: string): FCParams {
const weights = extractWeightEntry<tf.Tensor2D>(`${prefix}/weights`, 2)
const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/bias`, 1)
......@@ -81,9 +81,9 @@ function extractorsFactory(weightMap: any, paramMappings: TfjsImageRecognitionBa
export function extractParamsFromWeigthMap(
weightMap: tf.NamedTensorMap
): { params: NetParams, paramMappings: TfjsImageRecognitionBase.ParamMapping[] } {
): { params: NetParams, paramMappings: ParamMapping[] } {
const paramMappings: TfjsImageRecognitionBase.ParamMapping[] = []
const paramMappings: ParamMapping[] = []
const {
extractPNetParams,
......@@ -95,7 +95,7 @@ export function extractParamsFromWeigthMap(
const rnet = extractRNetParams()
const onet = extractONetParams()
TfjsImageRecognitionBase.disposeUnusedWeightTensors(weightMap, paramMappings)
disposeUnusedWeightTensors(weightMap, paramMappings)
return { params: { pnet, rnet, onet }, paramMappings }
}
\ No newline at end of file
import { CELL_SIZE } from './config';
import { CELL_SIZE } from './config'
export function pyramidDown(minFaceSize: number, scaleFactor: number, dims: number[]): number[] {
......
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { convLayer } from '../common';
import { prelu } from './prelu';
import { SharedParams } from './types';
export function sharedLayer(x: tf.Tensor4D, params: SharedParams, isPnet: boolean = false) {
return tf.tidy(() => {
let out = TfjsImageRecognitionBase.convLayer(x, params.conv1, 'valid')
let out = convLayer(x, params.conv1, 'valid')
out = prelu<tf.Tensor4D>(out, params.prelu1_alpha)
out = tf.maxPool(out, isPnet ? [2, 2] : [3, 3], [2, 2], 'same')
out = TfjsImageRecognitionBase.convLayer(out, params.conv2, 'valid')
out = convLayer(out, params.conv2, 'valid')
out = prelu<tf.Tensor4D>(out, params.prelu2_alpha)
out = isPnet ? out : tf.maxPool(out, [3, 3], [2, 2], 'valid')
out = TfjsImageRecognitionBase.convLayer(out, params.conv3, 'valid')
out = convLayer(out, params.conv3, 'valid')
out = prelu<tf.Tensor4D>(out, params.prelu3_alpha)
return out
......
import * as tf from '@tensorflow/tfjs-core';
import { BoundingBox, nonMaxSuppression, Point } from 'tfjs-image-recognition-base';
import { BoundingBox, Point } from '../classes';
import { nonMaxSuppression } from '../ops';
import { CELL_SIZE, CELL_STRIDE } from './config';
import { getSizesForScale } from './getSizesForScale';
import { MtcnnBox } from './MtcnnBox';
......
import * as tf from '@tensorflow/tfjs-core';
import { Box, nonMaxSuppression } from 'tfjs-image-recognition-base';
import { Box } from '../classes';
import { nonMaxSuppression } from '../ops';
import { extractImagePatches } from './extractImagePatches';
import { MtcnnBox } from './MtcnnBox';
import { RNet } from './RNet';
......
import * as tf from '@tensorflow/tfjs-core';
import { BoundingBox, Box, nonMaxSuppression, Point } from 'tfjs-image-recognition-base';
import { BoundingBox, Box, Point } from '../classes';
import { nonMaxSuppression } from '../ops';
import { extractImagePatches } from './extractImagePatches';
import { MtcnnBox } from './MtcnnBox';
import { ONet } from './ONet';
......
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { FaceLandmarks5 } from '../classes/FaceLandmarks5';
import { ConvParams, FCParams } from '../common';
import { WithFaceDetection, WithFaceLandmarks } from '../factories';
export type SharedParams = {
conv1: TfjsImageRecognitionBase.ConvParams
conv1: ConvParams
prelu1_alpha: tf.Tensor1D
conv2: TfjsImageRecognitionBase.ConvParams
conv2: ConvParams
prelu2_alpha: tf.Tensor1D
conv3: TfjsImageRecognitionBase.ConvParams
conv3: ConvParams
prelu3_alpha: tf.Tensor1D
}
export type PNetParams = SharedParams & {
conv4_1: TfjsImageRecognitionBase.ConvParams
conv4_2: TfjsImageRecognitionBase.ConvParams
conv4_1: ConvParams
conv4_2: ConvParams
}
export type RNetParams = SharedParams & {
fc1: TfjsImageRecognitionBase.FCParams
fc1: FCParams
prelu4_alpha: tf.Tensor1D
fc2_1: TfjsImageRecognitionBase.FCParams
fc2_2: TfjsImageRecognitionBase.FCParams
fc2_1: FCParams
fc2_2: FCParams
}
export type ONetParams = SharedParams & {
conv4: TfjsImageRecognitionBase.ConvParams
conv4: ConvParams
prelu4_alpha: tf.Tensor1D
fc1: TfjsImageRecognitionBase.FCParams
fc1: FCParams
prelu5_alpha: tf.Tensor1D
fc2_1: TfjsImageRecognitionBase.FCParams
fc2_2: TfjsImageRecognitionBase.FCParams
fc2_3: TfjsImageRecognitionBase.FCParams
fc2_1: FCParams
fc2_2: FCParams
fc2_3: FCParams
}
export type NetParams = {
......
export * from './iou'
export * from './minBbox'
export * from './nonMaxSuppression'
export * from './normalize'
export * from './padToSquare'
export * from './shuffleArray'
export function sigmoid(x: number) {
return 1 / (1 + Math.exp(-x))
}
export function inverseSigmoid(x: number) {
return Math.log(x / (1 - x))
}
\ No newline at end of file
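A quick numeric check of the pair (input values arbitrary):

// sketch: inverseSigmoid is the logit function, the inverse of sigmoid
import { inverseSigmoid, sigmoid } from './index'

console.log(sigmoid(0))                    // 0.5
console.log(inverseSigmoid(sigmoid(2)))    // ~2, up to floating point error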
import { Box } from '../classes/Box';
export function iou(box1: Box, box2: Box, isIOU: boolean = true) {
const width = Math.max(0.0, Math.min(box1.right, box2.right) - Math.max(box1.left, box2.left))
const height = Math.max(0.0, Math.min(box1.bottom, box2.bottom) - Math.max(box1.top, box2.top))
const interSection = width * height
return isIOU
? interSection / (box1.area + box2.area - interSection)
: interSection / Math.min(box1.area, box2.area)
}
\ No newline at end of file
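A worked example of the two modes (box values arbitrary):

// sketch: with isIOU the denominator is the union of the two areas,
// otherwise the smaller of the two areas (overlap coefficient)
import { Box } from '../classes/Box'
import { iou } from './iou'

const a = new Box({ x: 0, y: 0, width: 10, height: 10 })
const b = new Box({ x: 5, y: 0, width: 10, height: 10 })
// intersection 50; union 100 + 100 - 50 = 150; min area 100
console.log(iou(a, b))         // ~0.33
console.log(iou(a, b, false))  // 0.5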
import { BoundingBox, IPoint } from 'tfjs-image-recognition-base';
import { BoundingBox, IPoint } from '../classes';
export function minBbox(pts: IPoint[]): BoundingBox {
const xs = pts.map(pt => pt.x)
......
import { Box } from '../classes/Box';
import { iou } from './iou';
export function nonMaxSuppression(
boxes: Box[],
scores: number[],
iouThreshold: number,
isIOU: boolean = true
): number[] {
let indicesSortedByScore = scores
.map((score, boxIndex) => ({ score, boxIndex }))
.sort((c1, c2) => c1.score - c2.score)
.map(c => c.boxIndex)
const pick: number[] = []
while(indicesSortedByScore.length > 0) {
const curr = indicesSortedByScore.pop() as number
pick.push(curr)
const indices = indicesSortedByScore
const outputs: number[] = []
for (let i = 0; i < indices.length; i++) {
const idx = indices[i]
const currBox = boxes[curr]
const idxBox = boxes[idx]
outputs.push(iou(currBox, idxBox, isIOU))
}
indicesSortedByScore = indicesSortedByScore.filter(
(_, j) => outputs[j] <= iouThreshold
)
}
return pick
}
\ No newline at end of file
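A small sketch of the suppression behavior (boxes and scores arbitrary):

// sketch: the higher scoring of two strongly overlapping boxes wins;
// the returned indices reference positions in the input array
import { Box } from '../classes/Box'
import { nonMaxSuppression } from './nonMaxSuppression'

const boxes = [
  new Box({ x: 0, y: 0, width: 10, height: 10 }),
  new Box({ x: 1, y: 1, width: 10, height: 10 })
]
// the boxes overlap with IoU ~0.68 > 0.5, so only index 0 survives
console.log(nonMaxSuppression(boxes, [0.9, 0.8], 0.5))  // [0]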
import * as tf from '@tensorflow/tfjs-core';
export function normalize(x: tf.Tensor4D, meanRgb: number[]): tf.Tensor4D {
return tf.tidy(() => {
const [r, g, b] = meanRgb
const avg_r = tf.fill([...x.shape.slice(0, 3), 1], r)
const avg_g = tf.fill([...x.shape.slice(0, 3), 1], g)
const avg_b = tf.fill([...x.shape.slice(0, 3), 1], b)
const avg_rgb = tf.concat([avg_r, avg_g, avg_b], 3)
return tf.sub(x, avg_rgb)
})
}
\ No newline at end of file
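A sketch of the channel-wise mean subtraction (the mean values are placeholders):

// sketch: subtract a per-channel RGB mean from an NHWC image batch
import * as tf from '@tensorflow/tfjs-core'
import { normalize } from './normalize'

const x = tf.ones([1, 2, 2, 3]) as tf.Tensor4D
normalize(x, [0.5, 0.5, 0.5]).print()  // every entry becomes 0.5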
import * as tf from '@tensorflow/tfjs-core';
/**
* Pads the smaller dimension of an image tensor with zeros, such that width === height.
*
* @param imgTensor The image tensor.
* @param isCenterImage (optional, default: false) If true, add an equal amount of padding on
* both sides of the minor dimension of the image.
* @returns The padded tensor with width === height.
*/
export function padToSquare(
imgTensor: tf.Tensor4D,
isCenterImage: boolean = false
): tf.Tensor4D {
return tf.tidy(() => {
const [height, width] = imgTensor.shape.slice(1)
if (height === width) {
return imgTensor
}
const dimDiff = Math.abs(height - width)
const paddingAmount = Math.round(dimDiff * (isCenterImage ? 0.5 : 1))
const paddingAxis = height > width ? 2 : 1
const createPaddingTensor = (paddingAmount: number): tf.Tensor => {
const paddingTensorShape = imgTensor.shape.slice()
paddingTensorShape[paddingAxis] = paddingAmount
return tf.fill(paddingTensorShape, 0)
}
const paddingTensorAppend = createPaddingTensor(paddingAmount)
const remainingPaddingAmount = dimDiff - (paddingTensorAppend.shape[paddingAxis] as number)
const paddingTensorPrepend = isCenterImage && remainingPaddingAmount
? createPaddingTensor(remainingPaddingAmount)
: null
const tensorsToStack = [
paddingTensorPrepend,
imgTensor,
paddingTensorAppend
]
.filter(t => !!t)
.map((t: tf.Tensor) => t.toFloat()) as tf.Tensor4D[]
return tf.concat(tensorsToStack, paddingAxis)
})
}
\ No newline at end of file
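A sketch of the padding behavior (shapes arbitrary):

// sketch: a 100x80 (h x w) image is padded to 100x100; with isCenterImage
// the 20 missing columns are split evenly onto both sides
import * as tf from '@tensorflow/tfjs-core'
import { padToSquare } from './padToSquare'

const img = tf.zeros([1, 100, 80, 3]) as tf.Tensor4D
console.log(padToSquare(img, true).shape)  // [1, 100, 100, 3]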
export function shuffleArray(inputArray: any[]) {
const array = inputArray.slice()
for (let i = array.length - 1; i > 0; i--) {
const j = Math.floor(Math.random() * (i + 1))
const x = array[i]
array[i] = array[j]
array[j] = x
}
return array
}
\ No newline at end of file
import { Dimensions, IDimensions } from 'tfjs-image-recognition-base';
import { Dimensions, IDimensions } from './classes';
import { FaceDetection } from './classes/FaceDetection';
import { FaceLandmarks } from './classes/FaceLandmarks';
import { extendWithFaceDetection, isWithFaceDetection } from './factories/WithFaceDetection';
......
import * as tf from '@tensorflow/tfjs-core';
import { NetInput, NeuralNetwork, Rect, TNetInput, toNetInput } from 'tfjs-image-recognition-base';
import { Rect } from '../classes';
import { FaceDetection } from '../classes/FaceDetection';
import { NetInput, TNetInput, toNetInput } from '../dom';
import { NeuralNetwork } from '../NeuralNetwork';
import { extractParams } from './extractParams';
import { extractParamsFromWeigthMap } from './extractParamsFromWeigthMap';
import { mobileNetV1 } from './mobileNetV1';
......
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { convLayer } from '../common';
import { BoxPredictionParams } from './types';
......@@ -13,11 +13,11 @@ export function boxPredictionLayer(
const batchSize = x.shape[0]
const boxPredictionEncoding = tf.reshape(
TfjsImageRecognitionBase.convLayer(x, params.box_encoding_predictor),
convLayer(x, params.box_encoding_predictor),
[batchSize, -1, 1, 4]
)
const classPrediction = tf.reshape(
TfjsImageRecognitionBase.convLayer(x, params.class_predictor),
convLayer(x, params.class_predictor),
[batchSize, -1, 3]
)
......
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { ExtractWeightsFunction, ParamMapping, ConvParams, extractWeightsFactory } from '../common';
import { MobileNetV1, NetParams, PointwiseConvParams, PredictionLayerParams } from './types';
function extractorsFactory(extractWeights: TfjsImageRecognitionBase.ExtractWeightsFunction, paramMappings: TfjsImageRecognitionBase.ParamMapping[]) {
function extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) {
function extractDepthwiseConvParams(numChannels: number, mappedPrefix: string): MobileNetV1.DepthwiseConvParams {
......@@ -36,7 +36,7 @@ function extractorsFactory(extractWeights: TfjsImageRecognitionBase.ExtractWeigh
filterSize: number,
mappedPrefix: string,
isPointwiseConv?: boolean
): TfjsImageRecognitionBase.ConvParams {
): ConvParams {
const filters = tf.tensor4d(
extractWeights(channelsIn * channelsOut * filterSize * filterSize),
......@@ -191,14 +191,14 @@ function extractorsFactory(extractWeights: TfjsImageRecognitionBase.ExtractWeigh
}
export function extractParams(weights: Float32Array): { params: NetParams, paramMappings: TfjsImageRecognitionBase.ParamMapping[] } {
export function extractParams(weights: Float32Array): { params: NetParams, paramMappings: ParamMapping[] } {
const paramMappings: TfjsImageRecognitionBase.ParamMapping[] = []
const paramMappings: ParamMapping[] = []
const {
extractWeights,
getRemainingWeights
} = TfjsImageRecognitionBase.extractWeightsFactory(weights)
} = extractWeightsFactory(weights)
const {
extractMobilenetV1Params,
......
import * as tf from '@tensorflow/tfjs-core';
import { isTensor3D, TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { ConvParams, disposeUnusedWeightTensors, extractWeightEntryFactory, ParamMapping } from '../common';
import { isTensor3D } from '../utils';
import { BoxPredictionParams, MobileNetV1, NetParams, PointwiseConvParams, PredictionLayerParams } from './types';
function extractorsFactory(weightMap: any, paramMappings: TfjsImageRecognitionBase.ParamMapping[]) {
function extractorsFactory(weightMap: any, paramMappings: ParamMapping[]) {
const extractWeightEntry = TfjsImageRecognitionBase.extractWeightEntryFactory(weightMap, paramMappings)
const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings)
function extractPointwiseConvParams(prefix: string, idx: number, mappedPrefix: string): PointwiseConvParams {
......@@ -59,7 +60,7 @@ function extractorsFactory(weightMap: any, paramMappings: TfjsImageRecognitionBa
}
}
function extractConvParams(prefix: string, mappedPrefix: string): TfjsImageRecognitionBase.ConvParams {
function extractConvParams(prefix: string, mappedPrefix: string): ConvParams {
const filters = extractWeightEntry<tf.Tensor4D>(`${prefix}/weights`, 4, `${mappedPrefix}/filters`)
const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/biases`, 1, `${mappedPrefix}/bias`)
......@@ -107,9 +108,9 @@ function extractorsFactory(weightMap: any, paramMappings: TfjsImageRecognitionBa
export function extractParamsFromWeigthMap(
weightMap: tf.NamedTensorMap
): { params: NetParams, paramMappings: TfjsImageRecognitionBase.ParamMapping[] } {
): { params: NetParams, paramMappings: ParamMapping[] } {
const paramMappings: TfjsImageRecognitionBase.ParamMapping[] = []
const paramMappings: ParamMapping[] = []
const {
extractMobilenetV1Params,
......@@ -131,7 +132,7 @@ export function extractParamsFromWeigthMap(
}
}
TfjsImageRecognitionBase.disposeUnusedWeightTensors(weightMap, paramMappings)
disposeUnusedWeightTensors(weightMap, paramMappings)
return { params, paramMappings }
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { ConvParams } from '../common';
export type PointwiseConvParams = {
filters: tf.Tensor4D
......@@ -41,8 +42,8 @@ export namespace MobileNetV1 {
}
export type BoxPredictionParams = {
box_encoding_predictor: TfjsImageRecognitionBase.ConvParams
class_predictor: TfjsImageRecognitionBase.ConvParams
box_encoding_predictor: ConvParams
class_predictor: ConvParams
}
export type PredictionLayerParams = {
......
import * as tf from '@tensorflow/tfjs-core';
import { Point, TfjsImageRecognitionBase, TNetInput } from 'tfjs-image-recognition-base';
import { FaceDetection } from '../classes';
import { FaceDetection, Point } from '../classes';
import { ParamMapping } from '../common';
import { TNetInput } from '../dom';
import { ITinyYolov2Options } from '../tinyYolov2';
import { TinyYolov2Base } from '../tinyYolov2/TinyYolov2Base';
import { TinyYolov2NetParams } from '../tinyYolov2/types';
import { BOX_ANCHORS, IOU_THRESHOLD, MEAN_RGB } from './const';
export class TinyFaceDetector extends TfjsImageRecognitionBase.TinyYolov2 {
export class TinyFaceDetector extends TinyYolov2Base {
constructor() {
const config = {
......@@ -24,7 +28,7 @@ export class TinyFaceDetector extends TfjsImageRecognitionBase.TinyYolov2 {
return this.config.anchors
}
public async locateFaces(input: TNetInput, forwardParams: TfjsImageRecognitionBase.ITinyYolov2Options): Promise<FaceDetection[]> {
public async locateFaces(input: TNetInput, forwardParams: ITinyYolov2Options): Promise<FaceDetection[]> {
const objectDetections = await this.detect(input, forwardParams)
return objectDetections.map(det => new FaceDetection(det.score, det.relativeBox, { width: det.imageWidth, height: det.imageHeight }))
}
......@@ -33,7 +37,7 @@ export class TinyFaceDetector extends TfjsImageRecognitionBase.TinyYolov2 {
return 'tiny_face_detector_model'
}
protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): { params: TfjsImageRecognitionBase.TinyYolov2NetParams, paramMappings: TfjsImageRecognitionBase.ParamMapping[] } {
protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): { params: TinyYolov2NetParams, paramMappings: ParamMapping[] } {
return super.extractParamsFromWeigthMap(weightMap)
}
}
\ No newline at end of file
import { TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { ITinyYolov2Options, TinyYolov2Options } from '../tinyYolov2';
export interface ITinyFaceDetectorOptions extends TfjsImageRecognitionBase.ITinyYolov2Options {}
export interface ITinyFaceDetectorOptions extends ITinyYolov2Options {}
export class TinyFaceDetectorOptions extends TfjsImageRecognitionBase.TinyYolov2Options {
export class TinyFaceDetectorOptions extends TinyYolov2Options {
protected _name: string = 'TinyFaceDetectorOptions'
}
\ No newline at end of file
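A short sketch of how these options behave; the validation comes from the TinyYolov2Options base constructor:

import { TinyFaceDetectorOptions } from './TinyFaceDetectorOptions';

// inputSize must be divisible by 32, scoreThreshold strictly between 0 and 1
const opts = new TinyFaceDetectorOptions({ inputSize: 320, scoreThreshold: 0.5 })
opts.inputSize       // 320
opts.scoreThreshold  // 0.5
// new TinyFaceDetectorOptions({ inputSize: 100 }) would throw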
import { Point } from 'tfjs-image-recognition-base';
import { Point } from '../classes'
export const IOU_THRESHOLD = 0.4
......
import * as tf from '@tensorflow/tfjs-core';
import { Point, TfjsImageRecognitionBase, TNetInput } from 'tfjs-image-recognition-base';
import { FaceDetection } from '../classes';
import { FaceDetection, Point } from '../classes';
import { ParamMapping } from '../common/types';
import { TNetInput } from '../dom/types';
import {
BOX_ANCHORS,
BOX_ANCHORS_SEPARABLE,
......@@ -10,8 +11,11 @@ import {
IOU_THRESHOLD,
MEAN_RGB_SEPARABLE,
} from './const';
import { TinyYolov2Base } from './TinyYolov2Base';
import { ITinyYolov2Options } from './TinyYolov2Options';
import { TinyYolov2NetParams } from './types';
export class TinyYolov2 extends TfjsImageRecognitionBase.TinyYolov2 {
export class TinyYolov2 extends TinyYolov2Base {
constructor(withSeparableConvs: boolean = true) {
const config = Object.assign({}, {
......@@ -41,7 +45,7 @@ export class TinyYolov2 extends TfjsImageRecognitionBase.TinyYolov2 {
return this.config.anchors
}
public async locateFaces(input: TNetInput, forwardParams: TfjsImageRecognitionBase.ITinyYolov2Options): Promise<FaceDetection[]> {
public async locateFaces(input: TNetInput, forwardParams: ITinyYolov2Options): Promise<FaceDetection[]> {
const objectDetections = await this.detect(input, forwardParams)
return objectDetections.map(det => new FaceDetection(det.score, det.relativeBox, { width: det.imageWidth, height: det.imageHeight }))
}
......@@ -50,7 +54,7 @@ export class TinyYolov2 extends TfjsImageRecognitionBase.TinyYolov2 {
return this.withSeparableConvs ? DEFAULT_MODEL_NAME_SEPARABLE_CONV : DEFAULT_MODEL_NAME
}
protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): { params: TfjsImageRecognitionBase.TinyYolov2NetParams, paramMappings: TfjsImageRecognitionBase.ParamMapping[] } {
protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): { params: TinyYolov2NetParams, paramMappings: ParamMapping[] } {
return super.extractParamsFromWeigthMap(weightMap)
}
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { BoundingBox } from '../classes/BoundingBox';
import { Dimensions } from '../classes/Dimensions';
import { ObjectDetection } from '../classes/ObjectDetection';
import { convLayer } from '../common';
import { ConvParams, SeparableConvParams } from '../common/types';
import { toNetInput } from '../dom';
import { NetInput } from '../dom/NetInput';
import { TNetInput } from '../dom/types';
import { NeuralNetwork } from '../NeuralNetwork';
import { sigmoid } from '../ops';
import { nonMaxSuppression } from '../ops/nonMaxSuppression';
import { normalize } from '../ops/normalize';
import { TinyYolov2Config, validateConfig } from './config';
import { convWithBatchNorm } from './convWithBatchNorm';
import { depthwiseSeparableConv } from './depthwiseSeparableConv';
import { extractParams } from './extractParams';
import { extractParamsFromWeigthMap } from './extractParamsFromWeigthMap';
import { leaky } from './leaky';
import { ITinyYolov2Options, TinyYolov2Options } from './TinyYolov2Options';
import { DefaultTinyYolov2NetParams, MobilenetParams, TinyYolov2NetParams } from './types';
export class TinyYolov2Base extends NeuralNetwork<TinyYolov2NetParams> {
public static DEFAULT_FILTER_SIZES = [
3, 16, 32, 64, 128, 256, 512, 1024, 1024
]
private _config: TinyYolov2Config
constructor(config: TinyYolov2Config) {
super('TinyYolov2')
validateConfig(config)
this._config = config
}
public get config(): TinyYolov2Config {
return this._config
}
public get withClassScores(): boolean {
return this.config.withClassScores || this.config.classes.length > 1
}
public get boxEncodingSize(): number {
return 5 + (this.withClassScores ? this.config.classes.length : 0)
}
public runTinyYolov2(x: tf.Tensor4D, params: DefaultTinyYolov2NetParams): tf.Tensor4D {
let out = convWithBatchNorm(x, params.conv0)
out = tf.maxPool(out, [2, 2], [2, 2], 'same')
out = convWithBatchNorm(out, params.conv1)
out = tf.maxPool(out, [2, 2], [2, 2], 'same')
out = convWithBatchNorm(out, params.conv2)
out = tf.maxPool(out, [2, 2], [2, 2], 'same')
out = convWithBatchNorm(out, params.conv3)
out = tf.maxPool(out, [2, 2], [2, 2], 'same')
out = convWithBatchNorm(out, params.conv4)
out = tf.maxPool(out, [2, 2], [2, 2], 'same')
out = convWithBatchNorm(out, params.conv5)
out = tf.maxPool(out, [2, 2], [1, 1], 'same')
out = convWithBatchNorm(out, params.conv6)
out = convWithBatchNorm(out, params.conv7)
return convLayer(out, params.conv8, 'valid', false)
}
public runMobilenet(x: tf.Tensor4D, params: MobilenetParams): tf.Tensor4D {
let out = this.config.isFirstLayerConv2d
? leaky(convLayer(x, params.conv0 as ConvParams, 'valid', false))
: depthwiseSeparableConv(x, params.conv0 as SeparableConvParams)
out = tf.maxPool(out, [2, 2], [2, 2], 'same')
out = depthwiseSeparableConv(out, params.conv1)
out = tf.maxPool(out, [2, 2], [2, 2], 'same')
out = depthwiseSeparableConv(out, params.conv2)
out = tf.maxPool(out, [2, 2], [2, 2], 'same')
out = depthwiseSeparableConv(out, params.conv3)
out = tf.maxPool(out, [2, 2], [2, 2], 'same')
out = depthwiseSeparableConv(out, params.conv4)
out = tf.maxPool(out, [2, 2], [2, 2], 'same')
out = depthwiseSeparableConv(out, params.conv5)
out = tf.maxPool(out, [2, 2], [1, 1], 'same')
out = params.conv6 ? depthwiseSeparableConv(out, params.conv6) : out
out = params.conv7 ? depthwiseSeparableConv(out, params.conv7) : out
return convLayer(out, params.conv8, 'valid', false)
}
public forwardInput(input: NetInput, inputSize: number): tf.Tensor4D {
const { params } = this
if (!params) {
throw new Error('TinyYolov2 - load model before inference')
}
return tf.tidy(() => {
let batchTensor = input.toBatchTensor(inputSize, false).toFloat()
batchTensor = this.config.meanRgb
? normalize(batchTensor, this.config.meanRgb)
: batchTensor
batchTensor = batchTensor.div(tf.scalar(256)) as tf.Tensor4D
return this.config.withSeparableConvs
? this.runMobilenet(batchTensor, params as MobilenetParams)
: this.runTinyYolov2(batchTensor, params as DefaultTinyYolov2NetParams)
})
}
public async forward(input: TNetInput, inputSize: number): Promise<tf.Tensor4D> {
return await this.forwardInput(await toNetInput(input), inputSize)
}
public async detect(input: TNetInput, forwardParams: ITinyYolov2Options = {}): Promise<ObjectDetection[]> {
const { inputSize, scoreThreshold } = new TinyYolov2Options(forwardParams)
const netInput = await toNetInput(input)
const out = await this.forwardInput(netInput, inputSize)
const out0 = tf.tidy(() => tf.unstack(out)[0].expandDims()) as tf.Tensor4D
const inputDimensions = {
width: netInput.getInputWidth(0),
height: netInput.getInputHeight(0)
}
const results = await this.extractBoxes(out0, netInput.getReshapedInputDimensions(0), scoreThreshold)
out.dispose()
out0.dispose()
const boxes = results.map(res => res.box)
const scores = results.map(res => res.score)
const classScores = results.map(res => res.classScore)
const classNames = results.map(res => this.config.classes[res.label])
const indices = nonMaxSuppression(
boxes.map(box => box.rescale(inputSize)),
scores,
this.config.iouThreshold,
true
)
const detections = indices.map(idx =>
new ObjectDetection(
scores[idx],
classScores[idx],
classNames[idx],
boxes[idx],
inputDimensions
)
)
return detections
}
protected getDefaultModelName(): string {
return ''
}
protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap) {
return extractParamsFromWeigthMap(weightMap, this.config)
}
protected extractParams(weights: Float32Array) {
const filterSizes = this.config.filterSizes || TinyYolov2Base.DEFAULT_FILTER_SIZES
const numFilters = filterSizes ? filterSizes.length : undefined
if (numFilters !== 7 && numFilters !== 8 && numFilters !== 9) {
throw new Error(`TinyYolov2 - expected 7 | 8 | 9 convolutional filters, but found ${numFilters} filterSizes in config`)
}
return extractParams(weights, this.config, this.boxEncodingSize, filterSizes)
}
protected async extractBoxes(
outputTensor: tf.Tensor4D,
inputBlobDimensions: Dimensions,
scoreThreshold?: number
) {
const { width, height } = inputBlobDimensions
const inputSize = Math.max(width, height)
const correctionFactorX = inputSize / width
const correctionFactorY = inputSize / height
const numCells = outputTensor.shape[1]
const numBoxes = this.config.anchors.length
const [boxesTensor, scoresTensor, classScoresTensor] = tf.tidy(() => {
const reshaped = outputTensor.reshape([numCells, numCells, numBoxes, this.boxEncodingSize])
const boxes = reshaped.slice([0, 0, 0, 0], [numCells, numCells, numBoxes, 4])
const scores = reshaped.slice([0, 0, 0, 4], [numCells, numCells, numBoxes, 1])
const classScores = this.withClassScores
? tf.softmax(reshaped.slice([0, 0, 0, 5], [numCells, numCells, numBoxes, this.config.classes.length]), 3)
: tf.scalar(0)
return [boxes, scores, classScores]
})
const results = []
const scoresData = await scoresTensor.array()
const boxesData = await boxesTensor.array()
for (let row = 0; row < numCells; row ++) {
for (let col = 0; col < numCells; col ++) {
for (let anchor = 0; anchor < numBoxes; anchor ++) {
const score = sigmoid(scoresData[row][col][anchor][0]);
if (!scoreThreshold || score > scoreThreshold) {
const ctX = ((col + sigmoid(boxesData[row][col][anchor][0])) / numCells) * correctionFactorX
const ctY = ((row + sigmoid(boxesData[row][col][anchor][1])) / numCells) * correctionFactorY
const width = ((Math.exp(boxesData[row][col][anchor][2]) * this.config.anchors[anchor].x) / numCells) * correctionFactorX
const height = ((Math.exp(boxesData[row][col][anchor][3]) * this.config.anchors[anchor].y) / numCells) * correctionFactorY
const x = (ctX - (width / 2))
const y = (ctY - (height / 2))
const pos = { row, col, anchor }
const { classScore, label } = this.withClassScores
? await this.extractPredictedClass(classScoresTensor as tf.Tensor4D, pos)
: { classScore: 1, label: 0 }
results.push({
box: new BoundingBox(x, y, x + width, y + height),
score: score,
classScore: score * classScore,
label,
...pos
})
}
}
}
}
boxesTensor.dispose()
scoresTensor.dispose()
classScoresTensor.dispose()
return results
}
private async extractPredictedClass(classesTensor: tf.Tensor4D, pos: { row: number, col: number, anchor: number }) {
const { row, col, anchor } = pos
const classesData = await classesTensor.array()
return Array(this.config.classes.length).fill(0)
.map((_, i) => classesData[row][col][anchor][i])
.map((classScore, label) => ({
classScore,
label
}))
.reduce((max, curr) => max.classScore > curr.classScore ? max : curr)
}
}
\ No newline at end of file
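For readers tracing the math: the anchor decoding inside extractBoxes is the standard YOLOv2 parameterization. Restated in isolation below (nothing new, just the loop body above; the correction factors undo the square letterboxing of non-square inputs):

import { sigmoid } from '../ops';

// tx, ty, tw, th: raw network outputs for one (row, col, anchor) triple;
// anchors are grid-relative sizes, outputs decode to relative coords in [0, 1]
function decodeBox(
  tx: number, ty: number, tw: number, th: number,
  row: number, col: number, anchor: { x: number, y: number },
  numCells: number, correctionFactorX: number, correctionFactorY: number
) {
  const ctX = ((col + sigmoid(tx)) / numCells) * correctionFactorX
  const ctY = ((row + sigmoid(ty)) / numCells) * correctionFactorY
  const width = ((Math.exp(tw) * anchor.x) / numCells) * correctionFactorX
  const height = ((Math.exp(th) * anchor.y) / numCells) * correctionFactorY
  return { x: ctX - width / 2, y: ctY - height / 2, width, height }
}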
export enum TinyYolov2SizeType {
XS = 224,
SM = 320,
MD = 416,
LG = 608
}
export interface ITinyYolov2Options {
inputSize?: number
scoreThreshold?: number
}
export class TinyYolov2Options {
protected _name: string = 'TinyYolov2Options'
private _inputSize: number
private _scoreThreshold: number
constructor({ inputSize, scoreThreshold }: ITinyYolov2Options = {}) {
this._inputSize = inputSize || 416
this._scoreThreshold = scoreThreshold || 0.5
if (typeof this._inputSize !== 'number' || this._inputSize % 32 !== 0) {
throw new Error(`${this._name} - expected inputSize to be a number divisible by 32`)
}
if (typeof this._scoreThreshold !== 'number' || this._scoreThreshold <= 0 || this._scoreThreshold >= 1) {
throw new Error(`${this._name} - expected scoreThreshold to be a number between 0 and 1`)
}
}
get inputSize(): number { return this._inputSize }
get scoreThreshold(): number { return this._scoreThreshold }
}
\ No newline at end of file
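A brief sketch of the defaults and size presets above:

import { TinyYolov2Options, TinyYolov2SizeType } from './TinyYolov2Options';

new TinyYolov2Options()                                      // defaults: inputSize 416, scoreThreshold 0.5
new TinyYolov2Options({ inputSize: TinyYolov2SizeType.SM })  // inputSize 320
// new TinyYolov2Options({ scoreThreshold: 1 })              // throws: must be strictly between 0 and 1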
import { Point } from '../classes/Point';
export type TinyYolov2Config = {
withSeparableConvs: boolean
iouThreshold: number
anchors: Point[]
classes: string[]
meanRgb?: [number, number, number]
withClassScores?: boolean,
filterSizes?: number[]
isFirstLayerConv2d?: boolean
}
const isNumber = (arg: any) => typeof arg === 'number'
export function validateConfig(config: any) {
if (!config) {
throw new Error(`invalid config: ${config}`)
}
if (typeof config.withSeparableConvs !== 'boolean') {
throw new Error(`config.withSeparableConvs has to be a boolean, have: ${config.withSeparableConvs}`)
}
if (!isNumber(config.iouThreshold) || config.iouThreshold < 0 || config.iouThreshold > 1.0) {
throw new Error(`config.iouThreshold has to be a number between [0, 1], have: ${config.iouThreshold}`)
}
if (
!Array.isArray(config.classes)
|| !config.classes.length
|| !config.classes.every((c: any) => typeof c === 'string')
) {
throw new Error(`config.classes has to be an array of class names: string[], have: ${JSON.stringify(config.classes)}`)
}
if (
!Array.isArray(config.anchors)
|| !config.anchors.length
|| !config.anchors.map((a: any) => a || {}).every((a: any) => isNumber(a.x) && isNumber(a.y))
) {
throw new Error(`config.anchors has to be an array of { x: number, y: number }, have: ${JSON.stringify(config.anchors)}`)
}
if (config.meanRgb && (
!Array.isArray(config.meanRgb)
|| config.meanRgb.length !== 3
|| !config.meanRgb.every(isNumber)
)) {
throw new Error(`config.meanRgb has to be an array of shape [number, number, number], have: ${JSON.stringify(config.meanRgb)}`)
}
}
\ No newline at end of file
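For concreteness, a config shape that passes validateConfig; all values are illustrative placeholders, not the shipped tiny face detector config:

import { Point } from '../classes';
import { TinyYolov2Config, validateConfig } from './config';

const config: TinyYolov2Config = {
  withSeparableConvs: true,
  iouThreshold: 0.4,
  classes: ['face'],
  anchors: [new Point(1.6, 2.1), new Point(6.0, 7.1)],  // grid-relative anchor sizes
  meanRgb: [117, 115, 97]                               // optional per-channel mean
}
validateConfig(config)  // does not throw for this shape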
import { Point } from 'tfjs-image-recognition-base';
import { Point } from '../classes';
export const IOU_THRESHOLD = 0.4
......
import * as tf from '@tensorflow/tfjs-core';
import { leaky } from './leaky';
import { ConvWithBatchNorm } from './types';
export function convWithBatchNorm(x: tf.Tensor4D, params: ConvWithBatchNorm): tf.Tensor4D {
return tf.tidy(() => {
let out = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]]) as tf.Tensor4D
out = tf.conv2d(out, params.conv.filters, [1, 1], 'valid')
out = tf.sub(out, params.bn.sub)
out = tf.mul(out, params.bn.truediv)
out = tf.add(out, params.conv.bias)
return leaky(out)
})
}
\ No newline at end of file
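The sub/truediv pair stores batch norm in pre-folded form: at inference the whole normalization collapses to one subtraction and one multiplication, y = leaky((conv(x) - sub) * truediv + bias). A sketch of where such tensors could come from, assuming the shift term beta is absorbed into the convolution bias; this folding happens at export time and is not part of this repo:

import * as tf from '@tensorflow/tfjs-core';
import { BatchNorm } from './types';

// hypothetical exporter-side folding:
// bn(z) = gamma * (z - mean) / sqrt(variance + eps)  ==  (z - sub) * truediv
function foldBatchNorm(mean: tf.Tensor1D, variance: tf.Tensor1D, gamma: tf.Tensor1D, eps = 1e-5): BatchNorm {
  return {
    sub: mean,
    truediv: tf.div(gamma, tf.sqrt(tf.add(variance, tf.scalar(eps)))) as tf.Tensor1D
  }
}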
import * as tf from '@tensorflow/tfjs-core';
import { SeparableConvParams } from '../common/types';
import { leaky } from './leaky';
export function depthwiseSeparableConv(x: tf.Tensor4D, params: SeparableConvParams): tf.Tensor4D {
return tf.tidy(() => {
let out = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]]) as tf.Tensor4D
out = tf.separableConv2d(out, params.depthwise_filter, params.pointwise_filter, [1, 1], 'valid')
out = tf.add(out, params.bias)
return leaky(out)
})
}
\ No newline at end of file
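Regarding shapes: with a channel multiplier of 1, the depthwise filter is [3, 3, channelsIn, 1], the pointwise filter [1, 1, channelsIn, channelsOut], and the bias [channelsOut]. A zero-initialized sketch; the field names are taken from the accesses above, while the shapes follow the usual tf.separableConv2d convention and should be read as an assumption:

import * as tf from '@tensorflow/tfjs-core';
import { SeparableConvParams } from '../common/types';

// illustrative zero weights matching the shapes used above (NHWC layout)
function zeroSeparableConvParams(channelsIn: number, channelsOut: number): SeparableConvParams {
  return {
    depthwise_filter: tf.zeros([3, 3, channelsIn, 1]) as tf.Tensor4D,
    pointwise_filter: tf.zeros([1, 1, channelsIn, channelsOut]) as tf.Tensor4D,
    bias: tf.zeros([channelsOut]) as tf.Tensor1D
  }
}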
import * as tf from '@tensorflow/tfjs-core';
import { extractConvParamsFactory } from '../common';
import { extractSeparableConvParamsFactory } from '../common/extractSeparableConvParamsFactory';
import { extractWeightsFactory } from '../common/extractWeightsFactory';
import { ExtractWeightsFunction, ParamMapping } from '../common/types';
import { TinyYolov2Config } from './config';
import { BatchNorm, ConvWithBatchNorm, TinyYolov2NetParams } from './types';
function extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) {
const extractConvParams = extractConvParamsFactory(extractWeights, paramMappings)
function extractBatchNormParams(size: number, mappedPrefix: string): BatchNorm {
const sub = tf.tensor1d(extractWeights(size))
const truediv = tf.tensor1d(extractWeights(size))
paramMappings.push(
{ paramPath: `${mappedPrefix}/sub` },
{ paramPath: `${mappedPrefix}/truediv` }
)
return { sub, truediv }
}
function extractConvWithBatchNormParams(channelsIn: number, channelsOut: number, mappedPrefix: string): ConvWithBatchNorm {
const conv = extractConvParams(channelsIn, channelsOut, 3, `${mappedPrefix}/conv`)
const bn = extractBatchNormParams(channelsOut, `${mappedPrefix}/bn`)
return { conv, bn }
}
const extractSeparableConvParams = extractSeparableConvParamsFactory(extractWeights, paramMappings)
return {
extractConvParams,
extractConvWithBatchNormParams,
extractSeparableConvParams
}
}
export function extractParams(
weights: Float32Array,
config: TinyYolov2Config,
boxEncodingSize: number,
filterSizes: number[]
): { params: TinyYolov2NetParams, paramMappings: ParamMapping[] } {
const {
extractWeights,
getRemainingWeights
} = extractWeightsFactory(weights)
const paramMappings: ParamMapping[] = []
const {
extractConvParams,
extractConvWithBatchNormParams,
extractSeparableConvParams
} = extractorsFactory(extractWeights, paramMappings)
let params: TinyYolov2NetParams
if (config.withSeparableConvs) {
const [s0, s1, s2, s3, s4, s5, s6, s7, s8] = filterSizes
const conv0 = config.isFirstLayerConv2d
? extractConvParams(s0, s1, 3, 'conv0')
: extractSeparableConvParams(s0, s1, 'conv0')
const conv1 = extractSeparableConvParams(s1, s2, 'conv1')
const conv2 = extractSeparableConvParams(s2, s3, 'conv2')
const conv3 = extractSeparableConvParams(s3, s4, 'conv3')
const conv4 = extractSeparableConvParams(s4, s5, 'conv4')
const conv5 = extractSeparableConvParams(s5, s6, 'conv5')
const conv6 = s7 ? extractSeparableConvParams(s6, s7, 'conv6') : undefined
const conv7 = s8 ? extractSeparableConvParams(s7, s8, 'conv7') : undefined
const conv8 = extractConvParams(s8 || s7 || s6, 5 * boxEncodingSize, 1, 'conv8')
params = { conv0, conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8 }
} else {
const [s0, s1, s2, s3, s4, s5, s6, s7, s8] = filterSizes
const conv0 = extractConvWithBatchNormParams(s0, s1, 'conv0')
const conv1 = extractConvWithBatchNormParams(s1, s2, 'conv1')
const conv2 = extractConvWithBatchNormParams(s2, s3, 'conv2')
const conv3 = extractConvWithBatchNormParams(s3, s4, 'conv3')
const conv4 = extractConvWithBatchNormParams(s4, s5, 'conv4')
const conv5 = extractConvWithBatchNormParams(s5, s6, 'conv5')
const conv6 = extractConvWithBatchNormParams(s6, s7, 'conv6')
const conv7 = extractConvWithBatchNormParams(s7, s8, 'conv7')
const conv8 = extractConvParams(s8, 5 * boxEncodingSize, 1, 'conv8')
params = { conv0, conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8 }
}
if (getRemainingWeights().length !== 0) {
throw new Error(`weights remaining after extract: ${getRemainingWeights().length}`)
}
return { params, paramMappings }
}
\ No newline at end of file
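The extraction above walks a simple cursor over the flat weight buffer. The contract assumed of extractWeightsFactory, sketched:

import { extractWeightsFactory } from '../common/extractWeightsFactory';

// assumed contract: extractWeights(n) returns the next n floats and advances a cursor
const { extractWeights, getRemainingWeights } = extractWeightsFactory(new Float32Array([1, 2, 3, 4]))
extractWeights(3)      // Float32Array [1, 2, 3]
getRemainingWeights()  // Float32Array [4]; must be empty once extraction is complete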
import * as tf from '@tensorflow/tfjs-core';
import { ConvParams } from '../common';
import { disposeUnusedWeightTensors } from '../common/disposeUnusedWeightTensors';
import { loadSeparableConvParamsFactory } from '../common/extractSeparableConvParamsFactory';
import { extractWeightEntryFactory } from '../common/extractWeightEntryFactory';
import { ParamMapping } from '../common/types';
import { TinyYolov2Config } from './config';
import { BatchNorm, ConvWithBatchNorm, TinyYolov2NetParams } from './types';
function extractorsFactory(weightMap: any, paramMappings: ParamMapping[]) {
const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings)
function extractBatchNormParams(prefix: string): BatchNorm {
const sub = extractWeightEntry<tf.Tensor1D>(`${prefix}/sub`, 1)
const truediv = extractWeightEntry<tf.Tensor1D>(`${prefix}/truediv`, 1)
return { sub, truediv }
}
function extractConvParams(prefix: string): ConvParams {
const filters = extractWeightEntry<tf.Tensor4D>(`${prefix}/filters`, 4)
const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/bias`, 1)
return { filters, bias }
}
function extractConvWithBatchNormParams(prefix: string): ConvWithBatchNorm {
const conv = extractConvParams(`${prefix}/conv`)
const bn = extractBatchNormParams(`${prefix}/bn`)
return { conv, bn }
}
const extractSeparableConvParams = loadSeparableConvParamsFactory(extractWeightEntry)
return {
extractConvParams,
extractConvWithBatchNormParams,
extractSeparableConvParams
}
}
export function extractParamsFromWeigthMap(
weightMap: tf.NamedTensorMap,
config: TinyYolov2Config
): { params: TinyYolov2NetParams, paramMappings: ParamMapping[] } {
const paramMappings: ParamMapping[] = []
const {
extractConvParams,
extractConvWithBatchNormParams,
extractSeparableConvParams
} = extractorsFactory(weightMap, paramMappings)
let params: TinyYolov2NetParams
if (config.withSeparableConvs) {
const numFilters = (config.filterSizes && config.filterSizes.length || 9)
params = {
conv0: config.isFirstLayerConv2d ? extractConvParams('conv0') : extractSeparableConvParams('conv0'),
conv1: extractSeparableConvParams('conv1'),
conv2: extractSeparableConvParams('conv2'),
conv3: extractSeparableConvParams('conv3'),
conv4: extractSeparableConvParams('conv4'),
conv5: extractSeparableConvParams('conv5'),
conv6: numFilters > 7 ? extractSeparableConvParams('conv6') : undefined,
conv7: numFilters > 8 ? extractSeparableConvParams('conv7') : undefined,
conv8: extractConvParams('conv8')
}
} else {
params = {
conv0: extractConvWithBatchNormParams('conv0'),
conv1: extractConvWithBatchNormParams('conv1'),
conv2: extractConvWithBatchNormParams('conv2'),
conv3: extractConvWithBatchNormParams('conv3'),
conv4: extractConvWithBatchNormParams('conv4'),
conv5: extractConvWithBatchNormParams('conv5'),
conv6: extractConvWithBatchNormParams('conv6'),
conv7: extractConvWithBatchNormParams('conv7'),
conv8: extractConvParams('conv8')
}
}
disposeUnusedWeightTensors(weightMap, paramMappings)
return { params, paramMappings }
}
\ No newline at end of file
import { TinyYolov2 } from './TinyYolov2';
export * from './TinyYolov2Options';
export * from './config'
export * from './types'
export { TinyYolov2 }
export function createTinyYolov2(weights: Float32Array, withSeparableConvs: boolean = true) {
......
import * as tf from '@tensorflow/tfjs-core';
export function leaky(x: tf.Tensor4D): tf.Tensor4D {
return tf.tidy(() => {
const min = tf.mul(x, tf.scalar(0.10000000149011612))
return tf.add(tf.relu(tf.sub(x, min)), min)
//return tf.maximum(x, min)
})
}
\ No newline at end of file
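A note on the constant: 0.10000000149011612 is simply 0.1 rounded to float32, and relu(x - min) + min equals max(x, min) = max(x, 0.1 * x), i.e. leaky ReLU with alpha 0.1. A behaviorally equivalent formulation using the built-in activation:

import * as tf from '@tensorflow/tfjs-core';

// behaviorally equivalent to leaky() above
function leakyAlt(x: tf.Tensor4D): tf.Tensor4D {
  return tf.tidy(() => tf.leakyRelu(x, 0.1))
}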
import * as tf from '@tensorflow/tfjs-core';
import { ConvParams } from '../common';
import { SeparableConvParams } from '../common/types';
export type BatchNorm = {
sub: tf.Tensor1D
truediv: tf.Tensor1D
}
export type ConvWithBatchNorm = {
conv: ConvParams
bn: BatchNorm
}
export type MobilenetParams = {
conv0: SeparableConvParams | ConvParams
conv1: SeparableConvParams
conv2: SeparableConvParams
conv3: SeparableConvParams
conv4: SeparableConvParams
conv5: SeparableConvParams
conv6?: SeparableConvParams
conv7?: SeparableConvParams
conv8: ConvParams
}
export type DefaultTinyYolov2NetParams = {
conv0: ConvWithBatchNorm
conv1: ConvWithBatchNorm
conv2: ConvWithBatchNorm
conv3: ConvWithBatchNorm
conv4: ConvWithBatchNorm
conv5: ConvWithBatchNorm
conv6: ConvWithBatchNorm
conv7: ConvWithBatchNorm
conv8: ConvParams
}
export type TinyYolov2NetParams = DefaultTinyYolov2NetParams | MobilenetParams
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { Point } from '../classes';
import { Dimensions, IDimensions } from '../classes/Dimensions';
export function isTensor(tensor: any, dim: number) {
return tensor instanceof tf.Tensor && tensor.shape.length === dim
}
export function isTensor1D(tensor: any): tensor is tf.Tensor1D {
return isTensor(tensor, 1)
}
export function isTensor2D(tensor: any): tensor is tf.Tensor2D {
return isTensor(tensor, 2)
}
export function isTensor3D(tensor: any): tensor is tf.Tensor3D {
return isTensor(tensor, 3)
}
export function isTensor4D(tensor: any): tensor is tf.Tensor4D {
return isTensor(tensor, 4)
}
export function isFloat(num: number) {
return num % 1 !== 0
}
export function isEven(num: number) {
return num % 2 === 0
}
export function round(num: number, prec: number = 2) {
const f = Math.pow(10, prec)
return Math.floor(num * f) / f
}
export function isDimensions(obj: any): boolean {
return obj && obj.width && obj.height
}
export function computeReshapedDimensions({ width, height }: IDimensions, inputSize: number) {
const scale = inputSize / Math.max(height, width)
return new Dimensions(Math.round(width * scale), Math.round(height * scale))
}
export function getCenterPoint(pts: Point[]): Point {
return pts.reduce((sum, pt) => sum.add(pt), new Point(0, 0))
.div(new Point(pts.length, pts.length))
}
export function range(num: number, start: number, step: number): number[] {
return Array(num).fill(0).map((_, i) => start + (i * step))
}
export function isValidNumber(num: any) {
return !!num && num !== Infinity && num !== -Infinity && !isNaN(num) || num === 0
}
export function isValidProbablitiy(num: any) {
return isValidNumber(num) && 0 <= num && num <= 1.0
}
\ No newline at end of file
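One caveat for the helpers above: round uses Math.floor, so it truncates toward negative infinity rather than rounding to nearest at the given precision. For example:

import { round } from '../utils';

round(1.299)      // 1.29 (default precision 2; floor, not nearest)
round(1.299, 0)   // 1
round(1000 / 32)  // 31.25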
import * as tf from '@tensorflow/tfjs-core';
import {
NetInput,
NeuralNetwork,
normalize,
range,
TfjsImageRecognitionBase,
TNetInput,
toNetInput,
} from 'tfjs-image-recognition-base';
import { depthwiseSeparableConv } from '../common/depthwiseSeparableConv';
import { bgrToRgbTensor } from '../mtcnn/bgrToRgbTensor';
import { ConvParams, depthwiseSeparableConv } from '../common';
import { NetInput, TNetInput, toNetInput } from '../dom';
import { NeuralNetwork } from '../NeuralNetwork';
import { normalize } from '../ops';
import { range } from '../utils';
import { extractParams } from './extractParams';
import { extractParamsFromWeigthMap } from './extractParamsFromWeigthMap';
import { MainBlockParams, ReductionBlockParams, TinyXceptionParams } from './types';
function conv(x: tf.Tensor4D, params: TfjsImageRecognitionBase.ConvParams, stride: [number, number]): tf.Tensor4D {
function conv(x: tf.Tensor4D, params: ConvParams, stride: [number, number]): tf.Tensor4D {
return tf.add(tf.conv2d(x, params.filters, stride, 'same'), params.bias)
}
......
import { range, TfjsImageRecognitionBase } from 'tfjs-image-recognition-base';
import { extractConvParamsFactory, extractSeparableConvParamsFactory, extractWeightsFactory } from '../common';
import { ExtractWeightsFunction, ParamMapping } from '../common/types';
import { range } from '../utils';
import { MainBlockParams, ReductionBlockParams, TinyXceptionParams } from './types';
function extractorsFactory(extractWeights: TfjsImageRecognitionBase.ExtractWeightsFunction, paramMappings: TfjsImageRecognitionBase.ParamMapping[]) {
function extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) {
const extractConvParams = TfjsImageRecognitionBase.extractConvParamsFactory(extractWeights, paramMappings)
const extractSeparableConvParams = TfjsImageRecognitionBase.extractSeparableConvParamsFactory(extractWeights, paramMappings)
const extractConvParams = extractConvParamsFactory(extractWeights, paramMappings)
const extractSeparableConvParams = extractSeparableConvParamsFactory(extractWeights, paramMappings)
function extractReductionBlockParams(channelsIn: number, channelsOut: number, mappedPrefix: string): ReductionBlockParams {
......@@ -34,14 +35,14 @@ function extractorsFactory(extractWeights: TfjsImageRecognitionBase.ExtractWeigh
}
export function extractParams(weights: Float32Array, numMainBlocks: number): { params: TinyXceptionParams, paramMappings: TfjsImageRecognitionBase.ParamMapping[] } {
export function extractParams(weights: Float32Array, numMainBlocks: number): { params: TinyXceptionParams, paramMappings: ParamMapping[] } {
const paramMappings: TfjsImageRecognitionBase.ParamMapping[] = []
const paramMappings: ParamMapping[] = []
const {
extractWeights,
getRemainingWeights
} = TfjsImageRecognitionBase.extractWeightsFactory(weights)
} = extractWeightsFactory(weights)
const {
extractConvParams,
......
import * as tf from '@tensorflow/tfjs-core';
import { TfjsImageRecognitionBase, range } from 'tfjs-image-recognition-base';
import {
disposeUnusedWeightTensors,
extractWeightEntryFactory,
loadSeparableConvParamsFactory,
ParamMapping,
} from '../common';
import { loadConvParamsFactory } from '../common/loadConvParamsFactory';
import { range } from '../utils';
import { MainBlockParams, ReductionBlockParams, TinyXceptionParams } from './types';
function loadParamsFactory(weightMap: any, paramMappings: TfjsImageRecognitionBase.ParamMapping[]) {
function loadParamsFactory(weightMap: any, paramMappings: ParamMapping[]) {
const extractWeightEntry = TfjsImageRecognitionBase.extractWeightEntryFactory(weightMap, paramMappings)
const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings)
const extractConvParams = loadConvParamsFactory(extractWeightEntry)
const extractSeparableConvParams = TfjsImageRecognitionBase.loadSeparableConvParamsFactory(extractWeightEntry)
const extractSeparableConvParams = loadSeparableConvParamsFactory(extractWeightEntry)
function extractReductionBlockParams(mappedPrefix: string): ReductionBlockParams {
......@@ -40,9 +46,9 @@ function loadParamsFactory(weightMap: any, paramMappings: TfjsImageRecognitionBa
export function extractParamsFromWeigthMap(
weightMap: tf.NamedTensorMap,
numMainBlocks: number
): { params: TinyXceptionParams, paramMappings: TfjsImageRecognitionBase.ParamMapping[] } {
): { params: TinyXceptionParams, paramMappings: ParamMapping[] } {
const paramMappings: TfjsImageRecognitionBase.ParamMapping[] = []
const paramMappings: ParamMapping[] = []
const {
extractConvParams,
......@@ -74,7 +80,7 @@ export function extractParamsFromWeigthMap(
separable_conv: exit_flow_separable_conv
}
TfjsImageRecognitionBase.disposeUnusedWeightTensors(weightMap, paramMappings)
disposeUnusedWeightTensors(weightMap, paramMappings)
return { params: { entry_flow, middle_flow, exit_flow }, paramMappings }
}
\ No newline at end of file
import { TfjsImageRecognitionBase } from "tfjs-image-recognition-base";
import { ConvParams, SeparableConvParams } from '../common';
export type ReductionBlockParams = {
separable_conv0: TfjsImageRecognitionBase.SeparableConvParams
separable_conv1: TfjsImageRecognitionBase.SeparableConvParams
expansion_conv: TfjsImageRecognitionBase.ConvParams
separable_conv0: SeparableConvParams
separable_conv1: SeparableConvParams
expansion_conv: ConvParams
}
export type MainBlockParams = {
separable_conv0: TfjsImageRecognitionBase.SeparableConvParams
separable_conv1: TfjsImageRecognitionBase.SeparableConvParams
separable_conv2: TfjsImageRecognitionBase.SeparableConvParams
separable_conv0: SeparableConvParams
separable_conv1: SeparableConvParams
separable_conv2: SeparableConvParams
}
export type TinyXceptionParams = {
entry_flow: {
conv_in: TfjsImageRecognitionBase.ConvParams
conv_in: ConvParams
reduction_block_0: ReductionBlockParams
reduction_block_1: ReductionBlockParams
}
middle_flow: any,
exit_flow: {
reduction_block: ReductionBlockParams
separable_conv: TfjsImageRecognitionBase.SeparableConvParams
separable_conv: SeparableConvParams
}
}
\ No newline at end of file
import { NeuralNetwork } from 'tfjs-image-recognition-base';
import { NeuralNetwork } from '../src';
export type TestEnv = {
loadImage: (uri: string) => Promise<HTMLImageElement>
......
import * as tf from '@tensorflow/tfjs-core';
import { NeuralNetwork } from '../src';
class FakeNeuralNetwork extends NeuralNetwork<any> {
constructor(
convFilter: tf.Tensor = tf.tensor(0),
convBias: tf.Tensor = tf.tensor(0),
fcWeights: tf.Tensor = tf.tensor(0)
) {
super('FakeNeuralNetwork')
this._params = {
conv: {
filter: convFilter,
bias: convBias,
},
fc: fcWeights
}
this._paramMappings = [
{ originalPath: 'conv2d/filter', paramPath: 'conv/filter' },
{ originalPath: 'conv2d/bias', paramPath: 'conv/bias' },
{ originalPath: 'dense/weights', paramPath: 'fc' }
]
}
protected getDefaultModelName(): any {
throw new Error('FakeNeuralNetwork - getDefaultModelName not implemented')
}
protected extractParams(_: any): any {
throw new Error('FakeNeuralNetwork - extractParams not implemented')
}
protected extractParamsFromWeigthMap(_: any): any {
throw new Error('FakeNeuralNetwork - extractParamsFromWeigthMap not implemented')
}
}
describe('NeuralNetwork', () => {
describe('getParamFromPath', () => {
it('returns correct params', () => tf.tidy(() => {
const convFilter = tf.tensor(0)
const convBias = tf.tensor(0)
const fcWeights = tf.tensor(0)
const net = new FakeNeuralNetwork(convFilter, convBias, fcWeights)
expect(net.getParamFromPath('conv/filter')).toEqual(convFilter)
expect(net.getParamFromPath('conv/bias')).toEqual(convBias)
expect(net.getParamFromPath('fc')).toEqual(fcWeights)
}))
it('throws if param is not a tensor', () => tf.tidy(() => {
const net = new FakeNeuralNetwork(null as any)
const fakePath = 'conv/filter'
expect(
() => net.getParamFromPath(fakePath)
).toThrowError(`traversePropertyPath - parameter is not a tensor, for path ${fakePath}`)
}))
it('throws if key path invalid', () => tf.tidy(() => {
const net = new FakeNeuralNetwork()
const fakePath = 'conv2d/foo'
expect(
() => net.getParamFromPath(fakePath)
).toThrowError(`traversePropertyPath - object does not have property conv2d, for path ${fakePath}`)
}))
})
describe('reassignParamFromPath', () => {
it('sets correct params', () => tf.tidy(() => {
const net = new FakeNeuralNetwork()
const convFilter = tf.tensor(0)
const convBias = tf.tensor(0)
const fcWeights = tf.tensor(0)
net.reassignParamFromPath('conv/filter', convFilter)
net.reassignParamFromPath('conv/bias', convBias)
net.reassignParamFromPath('fc', fcWeights)
expect(net.params.conv.filter).toEqual(convFilter)
expect(net.params.conv.bias).toEqual(convBias)
expect(net.params.fc).toEqual(fcWeights)
}))
it('throws if param is not a tensor', () => tf.tidy(() => {
const net = new FakeNeuralNetwork(null as any)
const fakePath = 'conv/filter'
expect(
() => net.reassignParamFromPath(fakePath, tf.tensor(0))
).toThrowError(`traversePropertyPath - parameter is not a tensor, for path ${fakePath}`)
}))
it('throws if key path invalid', () => tf.tidy(() => {
const net = new FakeNeuralNetwork()
const fakePath = 'conv2d/foo'
expect(
() => net.reassignParamFromPath(fakePath, tf.tensor(0))
).toThrowError(`traversePropertyPath - object does not have property conv2d, for path ${fakePath}`)
}))
})
describe('getParamList', () => {
it('returns param tensors with path', () => tf.tidy(() => {
const convFilter = tf.tensor(0)
const convBias = tf.tensor(0)
const fcWeights = tf.tensor(0)
const net = new FakeNeuralNetwork(convFilter, convBias, fcWeights)
const paramList = net.getParamList()
expect(paramList.length).toEqual(3)
expect(paramList[0].path).toEqual('conv/filter')
expect(paramList[1].path).toEqual('conv/bias')
expect(paramList[2].path).toEqual('fc')
expect(paramList[0].tensor).toEqual(convFilter)
expect(paramList[1].tensor).toEqual(convBias)
expect(paramList[2].tensor).toEqual(fcWeights)
}))
})
describe('getFrozenParams', () => {
it('returns all frozen params', () => tf.tidy(() => {
const convFilter = tf.tensor(0)
const convBias = tf.tensor(0)
const fcWeights = tf.variable(tf.scalar(0))
const net = new FakeNeuralNetwork(convFilter, convBias, fcWeights)
const frozenParams = net.getFrozenParams()
expect(frozenParams.length).toEqual(2)
expect(frozenParams[0].path).toEqual('conv/filter')
expect(frozenParams[1].path).toEqual('conv/bias')
expect(frozenParams[0].tensor).toEqual(convFilter)
expect(frozenParams[1].tensor).toEqual(convBias)
}))
})
describe('getTrainableParams', () => {
it('returns all trainable params', () => tf.tidy(() => {
const convFilter = tf.variable(tf.scalar(0))
const convBias = tf.variable(tf.scalar(0))
const fcWeights = tf.tensor(0)
const net = new FakeNeuralNetwork(convFilter, convBias, fcWeights)
const trainableParams = net.getTrainableParams()
expect(trainableParams.length).toEqual(2)
expect(trainableParams[0].path).toEqual('conv/filter')
expect(trainableParams[1].path).toEqual('conv/bias')
expect(trainableParams[0].tensor).toEqual(convFilter)
expect(trainableParams[1].tensor).toEqual(convBias)
}))
})
describe('dispose', () => {
it('disposes all param tensors', () => tf.tidy(() => {
const numTensors = tf.memory().numTensors
const net = new FakeNeuralNetwork()
net.dispose()
expect(net.params).toBe(undefined)
expect(tf.memory().numTensors - numTensors).toEqual(0)
}))
})
describe('variable', () => {
it('makes all param tensors trainable', () => tf.tidy(() => {
const net = new FakeNeuralNetwork()
net.variable()
expect(net.params.conv.filter instanceof tf.Variable).toBe(true)
expect(net.params.conv.bias instanceof tf.Variable).toBe(true)
expect(net.params.fc instanceof tf.Variable).toBe(true)
}))
it('disposes old tensors', () => tf.tidy(() => {
const net = new FakeNeuralNetwork()
const numTensors = tf.memory().numTensors
net.variable()
expect(tf.memory().numTensors - numTensors).toEqual(0)
}))
})
describe('freeze', () => {
it('freezes all param variables', () => tf.tidy(() => {
const net = new FakeNeuralNetwork(
tf.variable(tf.scalar(0)),
tf.variable(tf.scalar(0)),
tf.variable(tf.scalar(0))
)
net.freeze()
expect(net.params.conv.filter instanceof tf.Variable).toBe(false)
expect(net.params.conv.bias instanceof tf.Variable).toBe(false)
expect(net.params.fc instanceof tf.Variable).toBe(false)
}))
it('disposes old tensors', () => tf.tidy(() => {
const net = new FakeNeuralNetwork(
tf.variable(tf.scalar(0)),
tf.variable(tf.scalar(0)),
tf.variable(tf.scalar(0))
)
const numTensors = tf.memory().numTensors
net.freeze()
expect(tf.memory().numTensors - numTensors).toEqual(0)
}))
})
})
[{"predictions":[],"groundTruth":[{"label":14,"x":174,"y":101,"width":175,"height":250}]},{"predictions":[{"score":0.7691803822132828,"classScore":0.7687403453616095,"label":14,"x":174.80739067078562,"y":101.24652901136822,"width":107.21253846292248,"height":130.36960304401345},{"score":0.6650128354611389,"classScore":0.6648250709475522,"label":0,"x":88.00403034948206,"y":57.28796517004654,"width":346.8964870013924,"height":154.64824963698928}],"groundTruth":[{"label":0,"x":104,"y":78,"width":271,"height":105},{"label":0,"x":133,"y":88,"width":64,"height":35},{"label":14,"x":195,"y":180,"width":18,"height":49},{"label":14,"x":26,"y":189.00000000000003,"width":18,"height":49}]},{"predictions":[{"score":0.9796686439333566,"classScore":0.9796404986030244,"label":0,"x":0.7092259472062379,"y":112.93828229104882,"width":503.36772784059957,"height":125.40083611003898}],"groundTruth":[{"label":0,"x":9,"y":107,"width":490,"height":156},{"label":0,"x":421,"y":200.00000000000003,"width":61,"height":26},{"label":0,"x":325,"y":188,"width":86,"height":35}]},{"predictions":[{"score":0.6555995462593681,"classScore":0.6555995462593681,"label":19,"x":168.43528196184994,"y":103.87677334009256,"width":142.1917672089718,"height":152.21249718337248}],"groundTruth":[{"label":19,"x":156,"y":89,"width":188,"height":190.00000000000003}]},{"predictions":[{"score":0.7654244759562995,"classScore":0.7653893919815798,"label":18,"x":4.657064294604191,"y":19.396089415586324,"width":244.37305501026242,"height":295.7467446833704}],"groundTruth":[{"label":18,"x":263,"y":32,"width":237,"height":263},{"label":18,"x":1,"y":36,"width":234,"height":263}]},{"predictions":[],"groundTruth":[{"label":3,"x":274,"y":11,"width":163,"height":268},{"label":3,"x":184,"y":213.99999999999997,"width":97,"height":38}]},{"predictions":[{"score":0.6628550520040408,"classScore":0.6247242104538263,"label":11,"x":72.80442223810171,"y":96.69588943582755,"width":348.97090277779495,"height":201.39196990473874}],"groundTruth":[{"label":11,"x":123,"y":114.99999999999999,"width":256,"height":160},{"label":8,"x":75,"y":1,"width":353,"height":374}]},{"predictions":[{"score":0.9813790697369239,"classScore":0.9785855359209782,"label":2,"x":27.170210865162982,"y":71.72488948863362,"width":236.38816975654763,"height":288.48520937333933}],"groundTruth":[{"label":2,"x":27,"y":45,"width":239,"height":330}]},{"predictions":[{"score":0.994037798212397,"classScore":0.994037798212397,"label":19,"x":221.0525346736894,"y":17.11757711236289,"width":270.7030429591586,"height":238.46462518346138},{"score":0.9744214784574556,"classScore":0.9744214784574556,"label":19,"x":-8.843471823254735,"y":26.29916942997549,"width":300.1775416643035,"height":221.028915747058}],"groundTruth":[{"label":19,"x":251,"y":28.000000000000004,"width":224,"height":239},{"label":19,"x":22,"y":28.000000000000004,"width":229,"height":245}]},{"predictions":[{"score":0.9887495371422556,"classScore":0.6017762085053843,"label":16,"x":-19.909558311815623,"y":21.542763412106453,"width":414.6293719402734,"height":308.439681183784}],"groundTruth":[{"label":18,"x":1,"y":26,"width":357,"height":314}]},{"predictions":[{"score":0.9098243761845755,"classScore":0.9012128531842766,"label":14,"x":71.38750749942446,"y":54.7591392923992,"width":223.77602020255526,"height":366.18752833394205}],"groundTruth":[{"label":1,"x":70,"y":202,"width":185,"height":298},{"label":1,"x":251,"y":242,"width":83,"height":258},{"label":1,"x":1,"y":144,"width":66,"height":292},{"label":14,"x":1,"y":1,"width":65,"height":362},{"label":1
4,"x":74,"y":1,"width":198,"height":461},{"label":14,"x":251.99999999999997,"y":19,"width":82,"height":468}]},{"predictions":[],"groundTruth":[{"label":4,"x":87,"y":100,"width":22,"height":65},{"label":4,"x":41,"y":114,"width":32,"height":67},{"label":4,"x":324,"y":148,"width":28,"height":58},{"label":4,"x":426,"y":157,"width":17,"height":38},{"label":14,"x":3,"y":91,"width":40,"height":114.99999999999999},{"label":14,"x":4,"y":28.000000000000004,"width":457,"height":344}]},{"predictions":[{"score":0.7958273091892446,"classScore":0.7818440392104067,"label":16,"x":56.79348195528594,"y":31.70232255780979,"width":313.6508630022377,"height":233.41861230559402}],"groundTruth":[{"label":16,"x":25,"y":34,"width":394,"height":237}]},{"predictions":[{"score":0.9201249561107422,"classScore":0.9190199099731305,"label":19,"x":-26.03511346499099,"y":86.38443500732924,"width":297.6374106029245,"height":254.9645026012354}],"groundTruth":[{"label":19,"x":1,"y":95.00000000000001,"width":239,"height":241.00000000000003}]},{"predictions":[{"score":0.9161253701392962,"classScore":0.9058719094189636,"label":3,"x":32.61301936266575,"y":115.64641272085254,"width":124.58349953920403,"height":101.73433847921152},{"score":0.8166243906870652,"classScore":0.8145709063786849,"label":3,"x":377.0253934850286,"y":187.73553354763786,"width":117.8902182556456,"height":98.97330181906452},{"score":0.682690236232842,"classScore":0.6699430513504846,"label":3,"x":235.7334992854692,"y":132.159729119493,"width":107.03585321829007,"height":91.19282688695355}],"groundTruth":[{"label":3,"x":356,"y":183,"width":144,"height":97},{"label":3,"x":60,"y":109,"width":82,"height":104},{"label":3,"x":246,"y":134,"width":102,"height":83}]},{"predictions":[],"groundTruth":[{"label":0,"x":181,"y":127,"width":93,"height":66}]},{"predictions":[{"score":0.7002533672629911,"classScore":0.6986950241075993,"label":10,"x":15.059672065713514,"y":183.7041276920172,"width":442.0153237833172,"height":187.85830177400535}],"groundTruth":[{"label":10,"x":1,"y":170,"width":473,"height":205},{"label":4,"x":97,"y":124,"width":53,"height":173}]},{"predictions":[{"score":0.8785236648425084,"classScore":0.8785169098747734,"label":0,"x":5.851446853403164,"y":97.19943552245653,"width":498.761858292814,"height":123.84420987488453},{"score":0.6115907530866461,"classScore":0.50117202877969,"label":13,"x":53.19739845463284,"y":133.81894255992148,"width":226.98909377605403,"height":110.1798238654668}],"groundTruth":[{"label":0,"x":8,"y":96,"width":483,"height":136}]},{"predictions":[{"score":0.944397664196853,"classScore":0.944397664196853,"label":14,"x":-4.598343170909137,"y":82.8255836803386,"width":335.55829964238654,"height":426.5030936299306}],"groundTruth":[{"label":14,"x":25,"y":71,"width":279,"height":429}]},{"predictions":[{"score":0.8280960338115692,"classScore":0.8280960338115692,"label":14,"x":-1.5531990164389664,"y":-25.619537343989954,"width":343.5896574164529,"height":418.021221131633}],"groundTruth":[{"label":14,"x":277,"y":3,"width":223,"height":372},{"label":14,"x":12,"y":3,"width":293,"height":372}]},{"predictions":[],"groundTruth":[{"label":12,"x":54,"y":50,"width":231,"height":212}]},{"predictions":[],"groundTruth":[{"label":18,"x":1,"y":39,"width":366,"height":231}]},{"predictions":[{"score":0.9923042836360139,"classScore":0.9923042836360139,"label":14,"x":142.7915727970977,"y":103.88205108871067,"width":355.23004763212964,"height":246.3305211907232}],"groundTruth":[{"label":4,"x":124,"y":107,"width":106,"height":236},{"label":14,"x":137,"y":78,"widt
h":360,"height":297},{"label":14,"x":89,"y":201.99999999999997,"width":40,"height":45},{"label":14,"x":72,"y":209,"width":39,"height":50}]},{"predictions":[{"score":0.7524108551868512,"classScore":0.7511045464768792,"label":2,"x":118.02073153439419,"y":100.075300105594,"width":422.1093645713623,"height":236.96557132518132},{"score":0.6675556654678318,"classScore":0.6547138694454316,"label":2,"x":73.15242855329525,"y":27.560020743027827,"width":138.40089468837368,"height":233.12522715598948}],"groundTruth":[{"label":2,"x":59,"y":15,"width":161,"height":224.00000000000003},{"label":2,"x":161,"y":122,"width":339,"height":211}]},{"predictions":[{"score":0.8762162584051255,"classScore":0.8753776565500138,"label":13,"x":316.9200075142487,"y":15.978265485680515,"width":176.55547521701214,"height":162.79950103493826},{"score":0.7377645066274695,"classScore":0.7377447182413641,"label":13,"x":4.093778050646285,"y":53.134905261691884,"width":484.3047072124648,"height":242.59882876631897}],"groundTruth":[{"label":13,"x":54,"y":25,"width":400,"height":290},{"label":13,"x":318,"y":37,"width":171,"height":124},{"label":14,"x":369,"y":1,"width":89,"height":129}]},{"predictions":[{"score":0.6987292311899743,"classScore":0.6981430425202332,"label":14,"x":190.34686802768005,"y":49.30728986502912,"width":125.24766886822394,"height":156.9966166652352}],"groundTruth":[{"label":12,"x":100,"y":96,"width":255,"height":228},{"label":14,"x":198,"y":58,"width":88,"height":139}]},{"predictions":[{"score":0.7222071563927183,"classScore":0.722190583335829,"label":14,"x":334.27473386380956,"y":84.92576867213481,"width":130.96630892426847,"height":266.5457370602929}],"groundTruth":[{"label":14,"x":51,"y":80,"width":71,"height":186},{"label":14,"x":367,"y":92,"width":94,"height":223}]},{"predictions":[],"groundTruth":[{"label":17,"x":50,"y":121,"width":245,"height":217},{"label":8,"x":474,"y":140,"width":26,"height":144}]},{"predictions":[{"score":0.9539370238688553,"classScore":0.9261299783258337,"label":9,"x":53.1153804004625,"y":182.89761062792732,"width":186.2471532372765,"height":135.7749268658459}],"groundTruth":[{"label":9,"x":71,"y":252,"width":145,"height":62},{"label":9,"x":58,"y":202,"width":183,"height":93}]},{"predictions":[{"score":0.9517353634593023,"classScore":0.9517353634593023,"label":14,"x":295.17106719541056,"y":154.82150461830355,"width":110.50320081452242,"height":211.90106600291168},{"score":0.8956736871398788,"classScore":0.858500424420289,"label":0,"x":37.63960073319886,"y":77.60772153913085,"width":428.89316111622963,"height":269.4121395866779},{"score":0.8835170300270249,"classScore":0.815483197617464,"label":14,"x":2.93943475182442,"y":180.41620151640234,"width":112.98117869748658,"height":203.70600878430784}],"groundTruth":[{"label":14,"x":293,"y":162,"width":126,"height":212.99999999999997},{"label":14,"x":114,"y":165,"width":114,"height":208},{"label":14,"x":5,"y":172,"width":111,"height":201},{"label":0,"x":2,"y":68,"width":484,"height":257}]},{"predictions":[{"score":0.9625422337065817,"classScore":0.5203173443928317,"label":5,"x":-21.917914358316192,"y":20.243043364143507,"width":496.5538125661707,"height":282.63866737628933}],"groundTruth":[{"label":9,"x":2,"y":2,"width":457,"height":332}]},{"predictions":[],"groundTruth":[{"label":9,"x":341,"y":102,"width":159,"height":304},{"label":14,"x":36,"y":34,"width":43,"height":89},{"label":14,"x":2,"y":44,"width":34,"height":76},{"label":14,"x":29,"y":42,"width":12,"height":22}]},{"predictions":[],"groundTruth":[{"label":1,"x":1,"y":198.99999999
999997,"width":39,"height":94},{"label":14,"x":1,"y":85,"width":28,"height":194},{"label":6,"x":64,"y":164,"width":87,"height":46},{"label":6,"x":147,"y":167,"width":39,"height":33}]},{"predictions":[],"groundTruth":[{"label":7,"x":124,"y":68,"width":195,"height":242}]},{"predictions":[],"groundTruth":[{"label":3,"x":290,"y":143,"width":210,"height":107},{"label":6,"x":422,"y":237,"width":39,"height":18}]},{"predictions":[{"score":0.8962182918520621,"classScore":0.8930069154800067,"label":7,"x":-4.008327108320088,"y":70.37482224480168,"width":352.62230878734033,"height":434.67563823380846}],"groundTruth":[{"label":7,"x":1,"y":49,"width":340,"height":450}]},{"predictions":[{"score":0.8076964825223881,"classScore":0.8067911153833874,"label":19,"x":94.5857889149978,"y":15.249397515597346,"width":312.17182522265784,"height":232.76671025556075}],"groundTruth":[{"label":4,"x":36,"y":250,"width":43,"height":104},{"label":19,"x":160,"y":26,"width":211,"height":214.99999999999997}]},{"predictions":[],"groundTruth":[{"label":1,"x":1,"y":178,"width":234,"height":153},{"label":1,"x":8,"y":134,"width":218,"height":163}]},{"predictions":[{"score":0.8167527436184676,"classScore":0.8150299759023477,"label":17,"x":324.627645002174,"y":209.56248991054514,"width":178.6013940037733,"height":160.7920031534356}],"groundTruth":[{"label":1,"x":31,"y":74,"width":338,"height":272},{"label":17,"x":344,"y":207.00000000000003,"width":156,"height":167}]},{"predictions":[{"score":0.9407357523667703,"classScore":0.8181894101476949,"label":3,"x":-12.598615939355684,"y":77.28791949345168,"width":451.4040682919014,"height":104.75131312062308}],"groundTruth":[{"label":18,"x":34,"y":84,"width":466,"height":100},{"label":14,"x":87,"y":100,"width":20,"height":24}]},{"predictions":[{"score":0.752703742634889,"classScore":0.7477982878493062,"label":18,"x":81.62897083839832,"y":109.91140563880278,"width":329.63940263951787,"height":207.33306830787868}],"groundTruth":[{"label":18,"x":79,"y":96,"width":313,"height":204}]},{"predictions":[{"score":0.8644956045627069,"classScore":0.8612763441448485,"label":2,"x":132.49570312762185,"y":53.63054832818552,"width":361.1467077508502,"height":308.41826405754813}],"groundTruth":[{"label":2,"x":135,"y":46,"width":365,"height":328},{"label":2,"x":124,"y":146,"width":241,"height":229}]},{"predictions":[{"score":0.7878781057167802,"classScore":0.7875985926864165,"label":5,"x":-52.21908643538992,"y":93.02825869092997,"width":459.81048644960373,"height":189.74556017463232}],"groundTruth":[{"label":14,"x":394,"y":199,"width":10,"height":24},{"label":14,"x":424,"y":199,"width":12,"height":20.999999999999996},{"label":14,"x":434,"y":195.99999999999997,"width":10,"height":24},{"label":14,"x":443,"y":195,"width":9,"height":25},{"label":5,"x":29,"y":113,"width":324,"height":153},{"label":0,"x":328,"y":86,"width":146,"height":106},{"label":6,"x":2,"y":213,"width":26,"height":22}]},{"predictions":[{"score":0.743069405764908,"classScore":0.6373241114093381,"label":15,"x":78.30857626719687,"y":92.88842351228324,"width":90.06631442691248,"height":108.2613080721939},{"score":0.6118246612799929,"classScore":0.5526098988725323,"label":17,"x":123.27749832194552,"y":160.7244957117589,"width":181.02068890434398,"height":101.80487324073387}],"groundTruth":[{"label":17,"x":119,"y":177,"width":212,"height":100.99999999999999},{"label":15,"x":65,"y":90,"width":105,"height":107},{"label":8,"x":133,"y":159,"width":46,"height":38}]},{"predictions":[{"score":0.8145260519407391,"classScore":0.81435355543938,"label":5,"x":7.
672527282127808,"y":32.34257681806332,"width":382.86829973169773,"height":419.57299155829344}],"groundTruth":[{"label":5,"x":12,"y":2,"width":405,"height":465},{"label":6,"x":7.000000000000001,"y":39,"width":56.00000000000001,"height":55},{"label":6,"x":362,"y":24,"width":60,"height":29},{"label":6,"x":376,"y":36,"width":46,"height":45},{"label":6,"x":373,"y":68,"width":49,"height":40},{"label":6,"x":376,"y":98,"width":46,"height":112}]},{"predictions":[{"score":0.9424702777106074,"classScore":0.9424702777106074,"label":14,"x":36.45312644547885,"y":6.678077418736085,"width":290.71862354973774,"height":363.4338941971187}],"groundTruth":[{"label":14,"x":51,"y":2,"width":274,"height":373}]},{"predictions":[],"groundTruth":[{"label":16,"x":59,"y":94,"width":413,"height":176}]},{"predictions":[{"score":0.573654529177009,"classScore":0.3838500178452209,"label":3,"x":69.91134561978498,"y":181.02671387178248,"width":207.20328569794563,"height":137.88072756263017}],"groundTruth":[{"label":3,"x":80,"y":168,"width":174,"height":144},{"label":3,"x":184,"y":160,"width":288,"height":109}]},{"predictions":[{"score":0.5588748494638404,"classScore":0.5467246329459337,"label":11,"x":201.5844166952682,"y":114.26291552404027,"width":127.83844565704325,"height":148.51310204029576}],"groundTruth":[{"label":11,"x":217,"y":118,"width":88,"height":145}]},{"predictions":[{"score":0.7826680552768471,"classScore":0.3132984973801781,"label":8,"x":110.13095572354015,"y":296.0810272956989,"width":136.69938049361562,"height":149.26902152141102}],"groundTruth":[{"label":5,"x":89,"y":323,"width":184,"height":113},{"label":6,"x":272,"y":369,"width":51.00000000000001,"height":19},{"label":6,"x":2,"y":380,"width":87,"height":55},{"label":14,"x":333,"y":356,"width":25,"height":82},{"label":14,"x":319,"y":357,"width":22,"height":67},{"label":14,"x":354,"y":363,"width":21,"height":51},{"label":6,"x":48.99999999999999,"y":367,"width":41,"height":22},{"label":6,"x":113,"y":138,"width":47,"height":26}]}]
\ No newline at end of file
import * as fs from 'fs';
import * as path from 'path';
import { NeuralNetwork } from 'tfjs-image-recognition-base';
import { env } from '../src';
import { env, NeuralNetwork } from '../src';
import { TestEnv } from './Environment';
require('@tensorflow/tfjs-node')
......
import * as tf from '@tensorflow/tfjs-core';
import { fetchNetWeights, NeuralNetwork } from 'tfjs-image-recognition-base';
import { fetchImage, fetchJson } from '../src';
import { fetchImage, fetchJson, fetchNetWeights, NeuralNetwork } from '../src';
import { TestEnv } from './Environment';
jasmine.DEFAULT_TIMEOUT_INTERVAL = 60000
......
import { Point } from '../../../src';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { getTestEnv } from '../../env';
import { describeWithBackend, describeWithNets, expectPointClose } from '../../utils';
import { FaceLandmarks68, Point } from '../../src';
import { getTestEnv } from '../env';
import { describeWithBackend, describeWithNets, expectPointClose } from '../utils';
describeWithBackend('faceLandmark68Net, uncompressed', () => {
......
import { Point } from '../../../src';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { getTestEnv } from '../../env';
import { describeWithBackend, describeWithNets, expectPointClose } from '../../utils';
import { Point } from '../../src';
import { FaceLandmarks68 } from '../../src/classes/FaceLandmarks68';
import { getTestEnv } from '../env';
import { describeWithBackend, describeWithNets, expectPointClose } from '../utils';
describeWithBackend('faceLandmark68TinyNet, uncompressed', () => {
......
import { createCanvasFromMedia } from '../../../src';
import { euclideanDistance } from '../../../src/euclideanDistance';
import { getTestEnv } from '../../env';
import { describeWithBackend, describeWithNets } from '../../utils';
import { createCanvasFromMedia, euclideanDistance } from '../../src';
import { getTestEnv } from '../env';
import { describeWithBackend, describeWithNets } from '../utils';
describeWithBackend('faceRecognitionNet, uncompressed', () => {
......
import * as faceapi from '../../../src';
import { getTestEnv } from '../../env';
import { expectFaceDetections } from '../../expectFaceDetections';
import { describeWithBackend, describeWithNets } from '../../utils';
import { expectedSsdBoxes } from './expectedBoxes';
import * as faceapi from '../../src';
import { getTestEnv } from '../env';
import { expectFaceDetections } from '../expectFaceDetections';
import { describeWithBackend, describeWithNets } from '../utils';
import { expectedSsdBoxes } from '../tests/ssdMobilenetv1/expectedBoxes';
describeWithBackend('ssdMobilenetv1.locateFaces, uncompressed', () => {
......
import { BoundingBox } from '../../../src/classes/BoundingBox';
describe('BoundingBox', () => {
describe('constructor', () => {
it('properties', () => {
const box = new BoundingBox(5, 10, 15, 20)
expect(box.left).toEqual(5)
expect(box.x).toEqual(5)
expect(box.top).toEqual(10)
expect(box.y).toEqual(10)
expect(box.right).toEqual(15)
expect(box.bottom).toEqual(20)
expect(box.width).toEqual(10)
expect(box.height).toEqual(10)
expect(box.area).toEqual(100)
})
})
})
\ No newline at end of file
import { Box } from '../../../src/classes/Box';
describe('Box', () => {
describe('constructor', () => {
describe('from IBoundingBox', () => {
it('properties', () => {
const box = new Box({ left: 5, top: 10, right: 15, bottom: 20 })
expect(box.left).toEqual(5)
expect(box.x).toEqual(5)
expect(box.top).toEqual(10)
expect(box.y).toEqual(10)
expect(box.right).toEqual(15)
expect(box.bottom).toEqual(20)
expect(box.width).toEqual(10)
expect(box.height).toEqual(10)
expect(box.area).toEqual(100)
})
})
describe('from IRect', () => {
it('properties', () => {
const box = new Box({ x: 5, y: 10, width: 15, height: 20 })
expect(box.left).toEqual(5)
expect(box.x).toEqual(5)
expect(box.top).toEqual(10)
expect(box.y).toEqual(10)
expect(box.right).toEqual(20)
expect(box.bottom).toEqual(30)
expect(box.width).toEqual(15)
expect(box.height).toEqual(20)
expect(box.area).toEqual(300)
})
})
})
describe('rescale', () => {
it('scale down by factor 0.5', () => {
const box = new Box({ x: 10, y: 20, width: 20, height: 40 })
const rescaled = box.rescale(0.5)
expect(rescaled.x).toEqual(5)
expect(rescaled.y).toEqual(10)
expect(rescaled.width).toEqual(10)
expect(rescaled.height).toEqual(20)
})
it('scale up by factor 2', () => {
const box = new Box({ x: 10, y: 20, width: 20, height: 40 })
const rescaled = box.rescale(2)
expect(rescaled.x).toEqual(20)
expect(rescaled.y).toEqual(40)
expect(rescaled.width).toEqual(40)
expect(rescaled.height).toEqual(80)
})
it('scale to dimensions', () => {
const box = new Box({ x: 0.1, y: 0.2, width: 0.2, height: 0.4 })
const rescaled = box.rescale({ width: 100, height: 200 })
expect(rescaled.x).toEqual(10)
expect(rescaled.y).toEqual(40)
expect(rescaled.width).toEqual(20)
expect(rescaled.height).toEqual(80)
})
})
describe('shift', () => {
it('should shift box by x, y', () => {
const box = new Box({ x: 10, y: 20, width: 20, height: 40 })
const shifted = box.shift(20, 40)
expect(shifted.x).toEqual(30)
expect(shifted.y).toEqual(60)
expect(shifted.width).toEqual(20)
expect(shifted.height).toEqual(40)
})
})
})
\ No newline at end of file
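The rescale and shift cases above fully determine the behavior; a minimal sketch written against them (hypothetical helpers, not the actual src/classes/Box implementation):

// Sketch: a numeric argument scales both axes uniformly, a dimensions object
// scales x/width by its width and y/height by its height.
type IDimensionsSketch = { width: number, height: number }
type IRectSketch = { x: number, y: number, width: number, height: number }

function rescaleSketch(box: IRectSketch, s: number | IDimensionsSketch): IRectSketch {
  const sx = typeof s === 'number' ? s : s.width
  const sy = typeof s === 'number' ? s : s.height
  return { x: box.x * sx, y: box.y * sy, width: box.width * sx, height: box.height * sy }
}

function shiftSketch(box: IRectSketch, dx: number, dy: number): IRectSketch {
  // shifting moves the origin and leaves the dimensions untouched
  return { x: box.x + dx, y: box.y + dy, width: box.width, height: box.height }
}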
import { Box } from '../../../src/classes/Box';
import { Rect } from '../../../src/classes/Rect';
describe('Rect', () => {
describe('constructor', () => {
it('can be created', () => {
const rect = new Rect(0, 10, 20, 30)
expect(rect instanceof Rect).toBe(true)
expect(rect instanceof Box).toBe(true)
expect(rect.x).toEqual(0)
expect(rect.y).toEqual(10)
expect(rect.width).toEqual(20)
expect(rect.height).toEqual(30)
})
it('throws if coordinates are invalid', () => {
const expectConstructorToThrow = (x: any, y: any, width: any, height: any) => {
expect(() => new Rect(x, y, width, height)).toThrowError(`Box.constructor - expected box to be IBoundingBox | IRect, instead have ${JSON.stringify({ x, y, width, height })}`)
}
expectConstructorToThrow(NaN, 10, 20, 30)
expectConstructorToThrow(0, Infinity, 20, 30)
expectConstructorToThrow(0, 10, -Infinity, 30)
expectConstructorToThrow(0, 10, 20, null)
expectConstructorToThrow(NaN, Infinity, undefined, null)
expectConstructorToThrow(undefined, undefined, undefined, undefined)
})
it('throws if height or width invalid', () => {
expect(() => new Rect(0, 10, -20, 30, false)).toThrowError('Box.constructor - width (-20) and height (30) must be positive numbers')
expect(() => new Rect(0, 10, 20, -30, false)).toThrowError('Box.constructor - width (20) and height (-30) must be positive numbers')
})
it('properties', () => {
const rect = new Rect(5, 10, 15, 20)
expect(rect.left).toEqual(5)
expect(rect.x).toEqual(5)
expect(rect.top).toEqual(10)
expect(rect.y).toEqual(10)
expect(rect.right).toEqual(20)
expect(rect.bottom).toEqual(30)
expect(rect.width).toEqual(15)
expect(rect.height).toEqual(20)
expect(rect.area).toEqual(300)
})
})
})
\ No newline at end of file
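The two error messages asserted above pin down the constructor validation; a minimal sketch of it, assuming an isValidNumber-style finiteness check like the one tested in the utils spec further below (hypothetical helper, not the Box source):

// Sketch of the validation implied by the assertions above.
function validateBoxSketch(
  { x, y, width, height }: { x: any, y: any, width: any, height: any },
  allowNegativeDimensions: boolean = true
): void {
  const isValidNumber = (n: any) => typeof n === 'number' && Number.isFinite(n)
  if (![x, y, width, height].every(isValidNumber)) {
    throw new Error(`Box.constructor - expected box to be IBoundingBox | IRect, instead have ${JSON.stringify({ x, y, width, height })}`)
  }
  if (!allowNegativeDimensions && (width < 0 || height < 0)) {
    throw new Error(`Box.constructor - width (${width}) and height (${height}) must be positive numbers`)
  }
}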
import { getModelUris } from '../../../src/common/getModelUris';
const FAKE_DEFAULT_MODEL_NAME = 'fake_model_name'
describe('getModelUris', () => {
it('returns uris from relative url if no argument passed', () => {
const result = getModelUris(undefined, FAKE_DEFAULT_MODEL_NAME)
expect(result.manifestUri).toEqual(`${FAKE_DEFAULT_MODEL_NAME}-weights_manifest.json`)
expect(result.modelBaseUri).toEqual('')
})
it('returns uris from relative url for empty string', () => {
const result = getModelUris('', FAKE_DEFAULT_MODEL_NAME)
expect(result.manifestUri).toEqual(`${FAKE_DEFAULT_MODEL_NAME}-weights_manifest.json`)
expect(result.modelBaseUri).toEqual('')
})
it('returns uris for top level url, leading slash preserved', () => {
const result = getModelUris('/', FAKE_DEFAULT_MODEL_NAME)
expect(result.manifestUri).toEqual(`/${FAKE_DEFAULT_MODEL_NAME}-weights_manifest.json`)
expect(result.modelBaseUri).toEqual('/')
})
it('returns uris, given url path', () => {
const uri = 'path/to/modelfiles'
const result = getModelUris(uri, FAKE_DEFAULT_MODEL_NAME)
expect(result.manifestUri).toEqual(`${uri}/${FAKE_DEFAULT_MODEL_NAME}-weights_manifest.json`)
expect(result.modelBaseUri).toEqual(uri)
})
it('returns uris, given url path, leading slash preserved', () => {
const uri = '/path/to/modelfiles'
const result = getModelUris(uri, FAKE_DEFAULT_MODEL_NAME)
expect(result.manifestUri).toEqual(`${uri}/${FAKE_DEFAULT_MODEL_NAME}-weights_manifest.json`)
expect(result.modelBaseUri).toEqual(uri)
})
it('returns uris, given manifest uri', () => {
const uri = 'path/to/modelfiles/model-weights_manifest.json'
const result = getModelUris(uri, FAKE_DEFAULT_MODEL_NAME)
expect(result.manifestUri).toEqual(uri)
expect(result.modelBaseUri).toEqual('path/to/modelfiles')
})
it('returns uris, given manifest uri, leading slash preserved', () => {
const uri = '/path/to/modelfiles/model-weights_manifest.json'
const result = getModelUris(uri, FAKE_DEFAULT_MODEL_NAME)
expect(result.manifestUri).toEqual(uri)
expect(result.modelBaseUri).toEqual('/path/to/modelfiles')
})
it('returns correct uris, given external path', () => {
const uri = 'https://example.com/path/to/modelfiles';
const result = getModelUris(uri, FAKE_DEFAULT_MODEL_NAME)
expect(result.manifestUri).toEqual(`${uri}/${FAKE_DEFAULT_MODEL_NAME}-weights_manifest.json`)
expect(result.modelBaseUri).toEqual(uri)
})
})
\ No newline at end of file
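Taken together, these cases determine the resolution rules; a sketch re-derived from the expectations (illustrative only, not the verbatim src/common/getModelUris source):

// Sketch: a path ending in a manifest filename is used as-is, anything else
// is treated as a base directory for the default manifest name.
function getModelUrisSketch(uri: string | undefined, defaultModelName: string) {
  const defaultManifestFilename = `${defaultModelName}-weights_manifest.json`
  if (!uri) {
    return { modelBaseUri: '', manifestUri: defaultManifestFilename }
  }
  if (uri === '/') {
    return { modelBaseUri: '/', manifestUri: `/${defaultManifestFilename}` }
  }
  if (uri.endsWith('-weights_manifest.json')) {
    // manifest uri passed directly: base is the containing directory
    const modelBaseUri = uri.substring(0, uri.lastIndexOf('/'))
    return { modelBaseUri, manifestUri: uri }
  }
  const modelBaseUri = uri.replace(/\/$/, '')
  return { modelBaseUri, manifestUri: `${modelBaseUri}/${defaultManifestFilename}` }
}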
import * as tf from '@tensorflow/tfjs-core';
import { NetInput } from '../../../src';
import { getTestEnv } from '../../env';
import { expectAllTensorsReleased, fakeTensor3d } from '../../utils';
describe('NetInput', () => {
let imgEl: HTMLImageElement
beforeAll(async () => {
imgEl = await getTestEnv().loadImage('test/images/white.png')
})
describe('toBatchTensor', () => {
it('HTMLImageElement, batchSize === 1', () => tf.tidy(() => {
const netInput = new NetInput([imgEl])
const batchTensor = netInput.toBatchTensor(100)
expect(batchTensor.shape).toEqual([1, 100, 100, 3])
}))
it('tf.Tensor3D, batchSize === 1', () => tf.tidy(() => {
const tensor = tf.zeros<tf.Rank.R3>([200, 200, 3], 'int32')
const netInput = new NetInput([tensor])
const batchTensor = netInput.toBatchTensor(100)
expect(batchTensor.shape).toEqual([1, 100, 100, 3])
}))
it('HTMLImageElements, batchSize === 4', () => tf.tidy(() => {
const netInput = new NetInput([imgEl, imgEl, imgEl, imgEl])
const batchTensor = netInput.toBatchTensor(100)
expect(batchTensor.shape).toEqual([4, 100, 100, 3])
}))
it('tf.Tensor3Ds, batchSize === 4', () => tf.tidy(() => {
const tensor = tf.zeros<tf.Rank.R3>([200, 200, 3], 'int32')
const netInput = new NetInput([tensor, tensor, tensor, tensor])
const batchTensor = netInput.toBatchTensor(100)
expect(batchTensor.shape).toEqual([4, 100, 100, 3])
}))
it('tf.Tensor3Ds and HTMLImageElements, batchSize === 4', () => tf.tidy(() => {
const tensor = tf.zeros<tf.Rank.R3>([200, 200, 3], 'int32')
const netInput = new NetInput([tensor, tensor, imgEl, imgEl])
const batchTensor = netInput.toBatchTensor(100)
expect(batchTensor.shape).toEqual([4, 100, 100, 3])
}))
})
describe('no memory leaks', () => {
it('constructor', async () => {
const tensors = [fakeTensor3d(), fakeTensor3d(), fakeTensor3d()]
await expectAllTensorsReleased(() => {
new NetInput([imgEl])
new NetInput([imgEl, imgEl, imgEl])
new NetInput([tensors[0]])
new NetInput(tensors)
})
tensors.forEach(t => t.dispose())
})
describe('toBatchTensor', () => {
it('single image element', async () => {
await expectAllTensorsReleased(() => {
const batchTensor = new NetInput([imgEl]).toBatchTensor(100, false)
batchTensor.dispose()
})
})
it('multiple image elements', async () => {
await expectAllTensorsReleased(() => {
const batchTensor = new NetInput([imgEl, imgEl, imgEl]).toBatchTensor(100, false)
batchTensor.dispose()
})
})
})
})
})
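A rough mental model for toBatchTensor, consistent with the shapes asserted above (a sketch under assumptions; the real NetInput also pads to square and manages tensor lifetimes, which is what the no-memory-leaks specs just above guard):

import * as tf from '@tensorflow/tfjs-core'

// Sketch: convert each input to a float tensor, resize to inputSize x inputSize,
// and stack along a new batch axis, yielding [batchSize, inputSize, inputSize, 3].
function toBatchTensorSketch(
  inputs: Array<tf.Tensor3D | HTMLImageElement | HTMLCanvasElement>,
  inputSize: number
): tf.Tensor4D {
  return tf.tidy(() => {
    const resized = inputs.map(input => {
      const imgTensor = input instanceof tf.Tensor
        ? input.toFloat()
        : tf.browser.fromPixels(input).toFloat()
      return tf.image.resizeBilinear(imgTensor, [inputSize, inputSize])
    })
    return tf.stack(resized) as tf.Tensor4D
  })
}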
import { fetchImage } from '../../../src';
describe('fetchImage', () => {
it('invalid mime type', async () => {
const url = 'base/test/data/boxes.json'
let err = ''
try {
await fetchImage(url)
} catch (e) {
err = e.toString()
}
expect(err).toContain('fetchImage - expected blob type to be of type image/*, instead have: application/json')
expect(err).toContain(url)
})
it('fetches image', async () => {
const url = 'base/test/images/white.png'
const img = await fetchImage(url)
expect(img instanceof HTMLImageElement).toBe(true)
})
})
import { fetchJson } from '../../../src';
describe('fetchJson', () => {
it('fetches json', async () => {
const url = 'base/test/data/boxes.json'
const json = await fetchJson(url)
expect(json).toBeDefined()
})
})
import { fetchNetWeights } from '../../../src';
describe('fetchNetWeights', () => {
it('fetches .weights file', async () => {
const url = 'base/test/data/dummy.weights'
const weights = await fetchNetWeights(url)
expect(weights instanceof Float32Array).toBe(true)
})
})
import { fetchOrThrow } from '../../../src';
describe('fetchOrThrow', () => {
it('404, throws', async () => {
const url = '/does/not/exist'
let err = ''
try {
await fetchOrThrow(url)
} catch (e) {
err = e.toString()
}
expect(err).toContain('failed to fetch: (404)')
expect(err).toContain(url)
})
})
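The three fetch helpers exercised above are thin wrappers around window.fetch; sketched here under the assumption that fetchJson and fetchNetWeights delegate to the same throw-on-error wrapper (the error message format is taken from the assertions, the rest is illustrative):

async function fetchOrThrowSketch(url: string, init?: RequestInit): Promise<Response> {
  const res = await fetch(url, init)
  if (!res.ok) {
    // message format matching the '(404)' and url assertions above
    throw new Error(`failed to fetch: (${res.status}) ${res.statusText}, from url: ${url}`)
  }
  return res
}

async function fetchJsonSketch<T>(url: string): Promise<T> {
  return (await fetchOrThrowSketch(url)).json()
}

async function fetchNetWeightsSketch(url: string): Promise<Float32Array> {
  return new Float32Array(await (await fetchOrThrowSketch(url)).arrayBuffer())
}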
import * as tf from '@tensorflow/tfjs-core';
import { createCanvasFromMedia, env, NetInput, toNetInput } from '../../../src';
import { getTestEnv } from '../../env';
import { expectAllTensorsReleased } from '../../utils';
describe('toNetInput', () => {
let imgEl: HTMLImageElement, canvasEl: HTMLCanvasElement
beforeAll(async () => {
imgEl = await getTestEnv().loadImage('test/images/white.png')
canvasEl = createCanvasFromMedia(imgEl)
})
describe('valid args', () => {
it('from HTMLImageElement', async () => {
const netInput = await toNetInput(imgEl)
expect(netInput instanceof NetInput).toBe(true)
expect(netInput.batchSize).toEqual(1)
})
it('from HTMLCanvasElement', async () => {
const netInput = await toNetInput(canvasEl)
expect(netInput instanceof NetInput).toBe(true)
expect(netInput.batchSize).toEqual(1)
})
it('from HTMLImageElement array', async () => {
const netInput = await toNetInput([
imgEl,
imgEl
])
expect(netInput instanceof NetInput).toBe(true)
expect(netInput.batchSize).toEqual(2)
})
it('from HTMLCanvasElement array', async () => {
const netInput = await toNetInput([
canvasEl,
canvasEl
])
expect(netInput instanceof NetInput).toBe(true)
expect(netInput.batchSize).toEqual(2)
})
it('from mixed media array', async () => {
const netInput = await toNetInput([
imgEl,
canvasEl,
canvasEl
])
expect(netInput instanceof NetInput).toBe(true)
expect(netInput.batchSize).toEqual(3)
})
})
describe('invalid args', () => {
it('undefined', async () => {
let errorMessage
try {
await toNetInput(undefined as any)
} catch (error) {
errorMessage = error.message;
}
expect(errorMessage).toBe('toNetInput - expected media to be of type HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | tf.Tensor3D, or to be an element id')
})
it('empty array', async () => {
let errorMessage
try {
await toNetInput([])
} catch (error) {
errorMessage = error.message;
}
expect(errorMessage).toBe('toNetInput - empty array passed as input')
})
it('undefined at input index 1', async () => {
let errorMessage
try {
await toNetInput([env.getEnv().createImageElement(), undefined] as any)
} catch (error) {
errorMessage = error.message;
}
expect(errorMessage).toBe('toNetInput - at input index 1: expected media to be of type HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | tf.Tensor3D, or to be an element id')
})
})
describe('no memory leaks', () => {
it('constructor', async () => {
const tensors = [imgEl, imgEl, imgEl].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
const tensor4ds = tensors.map(t => t.expandDims<tf.Rank.R4>())
await expectAllTensorsReleased(async () => {
await toNetInput(imgEl)
await toNetInput([imgEl, imgEl, imgEl])
await toNetInput(tensors[0])
await toNetInput(tensors)
await toNetInput(tensor4ds[0])
await toNetInput(tensor4ds)
})
tensors.forEach(t => t.dispose())
tensor4ds.forEach(t => t.dispose())
})
})
})
import * as tf from '@tensorflow/tfjs-core';
import { createCanvasFromMedia, IDimensions, isTensor3D, NetInput, Point, TMediaElement, toNetInput } from '../../../src';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { createCanvasFromMedia, FaceLandmarks68, IDimensions, NetInput, Point, TMediaElement, toNetInput, utils } from '../../../src';
import { getTestEnv } from '../../env';
import {
describeWithBackend,
......@@ -13,7 +12,7 @@ import {
function getInputDims (input: tf.Tensor | TMediaElement): IDimensions {
if (input instanceof tf.Tensor) {
const [height, width] = input.shape.slice(isTensor3D(input) ? 0 : 1)
const [height, width] = input.shape.slice(utils.isTensor3D(input) ? 0 : 1)
return { width, height }
}
return input
......
import * as tf from '@tensorflow/tfjs-core';
import { createCanvasFromMedia, IDimensions, isTensor3D, NetInput, Point, TMediaElement, toNetInput } from '../../../src';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { createCanvasFromMedia, FaceLandmarks68, IDimensions, NetInput, Point, TMediaElement, toNetInput, utils } from '../../../src';
import { getTestEnv } from '../../env';
import { describeWithBackend, describeWithNets, expectAllTensorsReleased, expectPointClose } from '../../utils';
function getInputDims (input: tf.Tensor | TMediaElement): IDimensions {
if (input instanceof tf.Tensor) {
const [height, width] = input.shape.slice(isTensor3D(input) ? 0 : 1)
const [height, width] = input.shape.slice(utils.isTensor3D(input) ? 0 : 1)
return { width, height }
}
return input
......
import * as tf from '@tensorflow/tfjs-core';
import { createCanvasFromMedia, NetInput, toNetInput } from '../../../src';
import { euclideanDistance } from '../../../src/euclideanDistance';
import { createCanvasFromMedia, euclideanDistance, NetInput, toNetInput } from '../../../src';
import { getTestEnv } from '../../env';
import { describeWithBackend, describeWithNets, expectAllTensorsReleased } from '../../utils';
......
import { Rect } from '../../../src';
import { FaceDetection } from '../../../src/classes/FaceDetection';
import { extendWithFaceDetection } from '../../../src/factories/WithFaceDetection';
import { extendWithFaceDetection, FaceDetection, Rect } from '../../../src';
const detection = new FaceDetection(1.0, new Rect(0, 0, 0.5, 0.5), { width: 100, height: 100 })
......
import { Point, Rect } from '../../../src';
import { FaceDetection } from '../../../src/classes/FaceDetection';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { extendWithFaceDetection } from '../../../src/factories/WithFaceDetection';
import { extendWithFaceLandmarks } from '../../../src/factories/WithFaceLandmarks';
import { extendWithFaceDetection, extendWithFaceLandmarks, FaceDetection, FaceLandmarks68, Point, Rect } from '../../../src';
const detection = new FaceDetection(1.0, new Rect(0.5, 0.5, 0.5, 0.5), { width: 100, height: 100 })
const unshiftedLandmarks = new FaceLandmarks68(Array(68).fill(0).map((_, i) => new Point(i / 100, i / 100)), { width: 100, height: 100 })
......
import { FaceMatcher } from '../../../src/globalApi/FaceMatcher';
import { FaceMatcher, LabeledFaceDescriptors } from '../../../src';
describe('globalApi', () => {
......
import * as faceapi from '../../../src';
import { WithAge } from '../../../src/factories/WithAge';
import { WithFaceDetection } from '../../../src/factories/WithFaceDetection';
import { WithFaceExpressions } from '../../../src/factories/WithFaceExpressions';
import { WithGender } from '../../../src/factories/WithGender';
import { detectAllFaces, WithAge, WithFaceDetection, WithFaceExpressions, WithGender } from '../../../src';
import { getTestEnv } from '../../env';
import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
......@@ -58,8 +54,7 @@ describeWithBackend('globalApi', () => {
describe('without face alignment', () => {
it('detectAllFaces.withFaceExpressions()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
const results = await detectAllFaces(imgEl, faceDetectorOptions)
.withFaceExpressions()
expect(results.length).toEqual(6)
......@@ -67,8 +62,7 @@ describeWithBackend('globalApi', () => {
})
it('detectAllFaces.withAgeAndGender()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
const results = await detectAllFaces(imgEl, faceDetectorOptions)
.withAgeAndGender()
expect(results.length).toEqual(6)
......@@ -76,8 +70,7 @@ describeWithBackend('globalApi', () => {
})
it('detectAllFaces.withFaceExpressions().withAgeAndGender()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
const results = await detectAllFaces(imgEl, faceDetectorOptions)
.withFaceExpressions()
.withAgeAndGender()
......@@ -87,8 +80,7 @@ describeWithBackend('globalApi', () => {
})
it('detectAllFaces.withAgeAndGender().withFaceExpressions()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
const results = await detectAllFaces(imgEl, faceDetectorOptions)
.withAgeAndGender()
.withFaceExpressions()
......@@ -102,8 +94,7 @@ describeWithBackend('globalApi', () => {
describe('with face alignment', () => {
it('detectAllFaces.withFaceLandmarks().withFaceExpressions()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
const results = await detectAllFaces(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceExpressions()
......@@ -113,8 +104,7 @@ describeWithBackend('globalApi', () => {
})
it('detectAllFaces.withFaceLandmarks().withAgeAndGender()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
const results = await detectAllFaces(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withAgeAndGender()
......@@ -124,8 +114,7 @@ describeWithBackend('globalApi', () => {
})
it('detectAllFaces.withFaceLandmarks().withFaceDescriptors()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
const results = await detectAllFaces(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceDescriptors()
......@@ -134,8 +123,7 @@ describeWithBackend('globalApi', () => {
})
it('detectAllFaces.withFaceLandmarks().withFaceExpressions().withAgeAndGender()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
const results = await detectAllFaces(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceExpressions()
.withAgeAndGender()
......@@ -147,8 +135,7 @@ describeWithBackend('globalApi', () => {
})
it('detectAllFaces.withFaceLandmarks().withAgeAndGender().withFaceExpressions()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
const results = await detectAllFaces(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withAgeAndGender()
.withFaceExpressions()
......@@ -160,8 +147,7 @@ describeWithBackend('globalApi', () => {
})
it('detectAllFaces.withFaceLandmarks().withFaceExpressions().withFaceDescriptors()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
const results = await detectAllFaces(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceExpressions()
.withFaceDescriptors()
......@@ -172,8 +158,7 @@ describeWithBackend('globalApi', () => {
})
it('detectAllFaces.withFaceLandmarks().withAgeAndGender().withFaceDescriptors()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
const results = await detectAllFaces(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withAgeAndGender()
.withFaceDescriptors()
......@@ -184,8 +169,7 @@ describeWithBackend('globalApi', () => {
})
it('detectAllFaces.withFaceLandmarks().withFaceExpressions().withAgeAndGender().withFaceDescriptors()', async () => {
const results = await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
const results = await detectAllFaces(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceExpressions()
.withAgeAndGender()
......@@ -203,8 +187,7 @@ describeWithBackend('globalApi', () => {
it('detectAllFaces.withFaceLandmarks().withFaceDescriptors()', async () => {
await expectAllTensorsReleased(async () => {
await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
await detectAllFaces(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceDescriptors()
})
......@@ -212,8 +195,7 @@ describeWithBackend('globalApi', () => {
it('detectAllFaces.withFaceLandmarks().withFaceExpressions().withAgeAndGender().withFaceDescriptors()', async () => {
await expectAllTensorsReleased(async () => {
await faceapi
.detectAllFaces(imgEl, faceDetectorOptions)
await detectAllFaces(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceExpressions()
.withAgeAndGender()
......
import * as faceapi from '../../../src';
import { WithAge } from '../../../src/factories/WithAge';
import { WithFaceExpressions } from '../../../src/factories/WithFaceExpressions';
import { WithGender } from '../../../src/factories/WithGender';
import { getTestEnv } from '../../env';
import {
detectSingleFace,
WithAge,
WithFaceDescriptor,
WithFaceDetection,
WithFaceExpressions,
WithFaceLandmarks,
WithGender,
} from '../../../src';
import { expectedTinyFaceDetectorBoxes } from '../../expectedTinyFaceDetectorBoxes';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
......@@ -14,6 +18,7 @@ import {
ExpectedFullFaceDescription,
} from '../../utils';
import { deltas, expectedScores, faceDetectorOptions, withNetArgs } from './consts';
import { getTestEnv } from '../../env';
function expectFaceExpressions(result: WithFaceExpressions<{}> | undefined) {
......@@ -44,7 +49,7 @@ describeWithBackend('globalApi', () => {
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes)
})
function expectFaceDetectionWithLandmarks(result: faceapi.WithFaceLandmarks<faceapi.WithFaceDetection<{}>> | undefined) {
function expectFaceDetectionWithLandmarks(result: WithFaceLandmarks<WithFaceDetection<{}>> | undefined) {
expect(!!result).toBeTruthy()
if (result) {
expectFaceDetectionsWithLandmarks(
......@@ -56,7 +61,7 @@ describeWithBackend('globalApi', () => {
}
}
function expectFullFaceDescription(result: faceapi.WithFaceDescriptor<faceapi.WithFaceLandmarks<faceapi.WithFaceDetection<{}>>> | undefined) {
function expectFullFaceDescription(result: WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>> | undefined) {
expect(!!result).toBeTruthy()
if (result) {
expectFullFaceDescriptions(
......@@ -73,24 +78,21 @@ describeWithBackend('globalApi', () => {
describe('without face alignment', () => {
it('detectSingleFace.withFaceExpressions()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
const result = await detectSingleFace(imgEl, faceDetectorOptions)
.withFaceExpressions()
expectFaceExpressions(result)
})
it('detectSingleFace.withAgeAndGender()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
const result = await detectSingleFace(imgEl, faceDetectorOptions)
.withAgeAndGender()
expectAgeAndGender(result, false)
})
it('detectSingleFace.withFaceExpressions().withAgeAndGender()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
const result = await detectSingleFace(imgEl, faceDetectorOptions)
.withFaceExpressions()
.withAgeAndGender()
......@@ -99,8 +101,7 @@ describeWithBackend('globalApi', () => {
})
it('detectSingleFace.withAgeAndGender().withFaceExpressions()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
const result = await detectSingleFace(imgEl, faceDetectorOptions)
.withAgeAndGender()
.withFaceExpressions()
......@@ -113,8 +114,7 @@ describeWithBackend('globalApi', () => {
describe('with face alignment', () => {
it('detectSingleFace.withFaceLandmarks().withFaceExpressions()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
const result = await detectSingleFace(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceExpressions()
......@@ -123,8 +123,7 @@ describeWithBackend('globalApi', () => {
})
it('detectSingleFace.withFaceLandmarks().withAgeAndGender()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
const result = await detectSingleFace(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withAgeAndGender()
......@@ -133,8 +132,7 @@ describeWithBackend('globalApi', () => {
})
it('detectSingleFace.withFaceLandmarks().withFaceDescriptor()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
const result = await detectSingleFace(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceDescriptor()
......@@ -142,8 +140,7 @@ describeWithBackend('globalApi', () => {
})
it('detectSingleFace.withFaceLandmarks().withFaceExpressions().withAgeAndGender()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
const result = await detectSingleFace(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceExpressions()
.withAgeAndGender()
......@@ -154,8 +151,7 @@ describeWithBackend('globalApi', () => {
})
it('detectSingleFace.withFaceLandmarks().withAgeAndGender().withFaceExpressions()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
const result = await detectSingleFace(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withAgeAndGender()
.withFaceExpressions()
......@@ -166,8 +162,7 @@ describeWithBackend('globalApi', () => {
})
it('detectSingleFace.withFaceLandmarks().withFaceExpressions().withFaceDescriptor()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
const result = await detectSingleFace(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceExpressions()
.withFaceDescriptor()
......@@ -177,8 +172,7 @@ describeWithBackend('globalApi', () => {
})
it('detectSingleFace.withFaceLandmarks().withAgeAndGender().withFaceDescriptor()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
const result = await detectSingleFace(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withAgeAndGender()
.withFaceDescriptor()
......@@ -188,8 +182,7 @@ describeWithBackend('globalApi', () => {
})
it('detectSingleFace.withFaceLandmarks().withFaceExpressions().withAgeAndGender().withFaceDescriptor()', async () => {
const result = await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
const result = await detectSingleFace(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceExpressions()
.withAgeAndGender()
......@@ -206,8 +199,7 @@ describeWithBackend('globalApi', () => {
it('detectSingleFace.withFaceLandmarks().withFaceDescriptor()', async () => {
await expectAllTensorsReleased(async () => {
await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
await detectSingleFace(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceDescriptor()
})
......@@ -215,8 +207,7 @@ describeWithBackend('globalApi', () => {
it('detectSingleFace.withFaceLandmarks().withFaceExpressions().withAgeAndGender().withFaceDescriptor()', async () => {
await expectAllTensorsReleased(async () => {
await faceapi
.detectSingleFace(imgEl, faceDetectorOptions)
await detectSingleFace(imgEl, faceDetectorOptions)
.withFaceLandmarks()
.withFaceExpressions()
.withAgeAndGender()
......
import * as tf from '@tensorflow/tfjs-core';
import { iou, Rect } from '../../../src';
describe('iou', () => {
it('should be 1.0', () => tf.tidy(() => {
const box = new Rect(0, 0, 20, 20)
expect(iou(box, box)).toEqual(1)
}))
it('should be 0', () => tf.tidy(() => {
const box1 = new Rect(0, 0, 20, 20)
const box2 = new Rect(20, 20, 20, 20)
expect(iou(box1, box2)).toEqual(0)
}))
it('should be 0.5, half overlap in width', () => tf.tidy(() => {
const box1 = new Rect(0, 0, 20, 20)
const box2 = new Rect(0, 0, 10, 20)
expect(iou(box1, box2)).toEqual(0.5)
}))
it('should be 0.5, half overlap in height', () => tf.tidy(() => {
const box1 = new Rect(0, 0, 20, 20)
const box2 = new Rect(0, 10, 20, 10)
expect(iou(box1, box2)).toEqual(0.5)
}))
})
\ No newline at end of file
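The expected values follow directly from the standard definition; a straightforward re-derivation (not the library source) over boxes exposing left/top/right/bottom/area accessors:

// Sketch of intersection-over-union for axis-aligned boxes.
interface IBoxSketch { left: number, top: number, right: number, bottom: number, area: number }

function iouSketch(box1: IBoxSketch, box2: IBoxSketch): number {
  const width = Math.max(0, Math.min(box1.right, box2.right) - Math.max(box1.left, box2.left))
  const height = Math.max(0, Math.min(box1.bottom, box2.bottom) - Math.max(box1.top, box2.top))
  const intersection = width * height
  const union = box1.area + box2.area - intersection
  return union > 0 ? intersection / union : 0
}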
import * as tf from '@tensorflow/tfjs-core';
import { padToSquare } from '../../../src';
import { ones, zeros } from '../../utils';
describe('padToSquare', () => {
describe('even size', () => {
it('is padded to square by 2 columns', () => tf.tidy(() => {
const imgTensor = tf.tensor4d(Array(24).fill(1), [1, 4, 2, 3])
const result = padToSquare(imgTensor)
expect(result.shape).toEqual([1, 4, 4, 3])
const paddedCols = tf.unstack(result, 2)
expect(paddedCols.length).toEqual(4)
expect(paddedCols[0].dataSync()).toEqual(ones(12))
expect(paddedCols[1].dataSync()).toEqual(ones(12))
expect(paddedCols[2].dataSync()).toEqual(zeros(12))
expect(paddedCols[3].dataSync()).toEqual(zeros(12))
}))
it('is padded to square by 2 columns and centered', () => tf.tidy(() => {
const imgTensor = tf.tensor4d(Array(24).fill(1), [1, 4, 2, 3])
const result = padToSquare(imgTensor, true)
expect(result.shape).toEqual([1, 4, 4, 3])
const paddedCols = tf.unstack(result, 2)
expect(paddedCols.length).toEqual(4)
expect(paddedCols[0].dataSync()).toEqual(zeros(12))
expect(paddedCols[1].dataSync()).toEqual(ones(12))
expect(paddedCols[2].dataSync()).toEqual(ones(12))
expect(paddedCols[3].dataSync()).toEqual(zeros(12))
}))
it('is padded to square by 1 column', () => tf.tidy(() => {
const imgTensor = tf.tensor4d(Array(36).fill(1), [1, 4, 3, 3])
const result = padToSquare(imgTensor)
expect(result.shape).toEqual([1, 4, 4, 3])
const paddedCols = tf.unstack(result, 2)
expect(paddedCols.length).toEqual(4)
expect(paddedCols[0].dataSync()).toEqual(ones(12))
expect(paddedCols[1].dataSync()).toEqual(ones(12))
expect(paddedCols[2].dataSync()).toEqual(ones(12))
expect(paddedCols[3].dataSync()).toEqual(zeros(12))
}))
it('is padded to square by 1 column and centered', () => tf.tidy(() => {
const imgTensor = tf.tensor4d(Array(36).fill(1), [1, 4, 3, 3])
const result = padToSquare(imgTensor, true)
expect(result.shape).toEqual([1, 4, 4, 3])
const paddedCols = tf.unstack(result, 2)
expect(paddedCols.length).toEqual(4)
expect(paddedCols[0].dataSync()).toEqual(ones(12))
expect(paddedCols[1].dataSync()).toEqual(ones(12))
expect(paddedCols[2].dataSync()).toEqual(ones(12))
expect(paddedCols[3].dataSync()).toEqual(zeros(12))
}))
})
describe('uneven size', () => {
it('is padded to square by 3 columns', () => tf.tidy(() => {
const imgTensor = tf.tensor4d(Array(30).fill(1), [1, 5, 2, 3])
const result = padToSquare(imgTensor)
expect(result.shape).toEqual([1, 5, 5, 3])
const paddedCols = tf.unstack(result, 2)
expect(paddedCols.length).toEqual(5)
expect(paddedCols[0].dataSync()).toEqual(ones(15))
expect(paddedCols[1].dataSync()).toEqual(ones(15))
expect(paddedCols[2].dataSync()).toEqual(zeros(15))
expect(paddedCols[3].dataSync()).toEqual(zeros(15))
expect(paddedCols[4].dataSync()).toEqual(zeros(15))
}))
it('is padded to square by 3 columns and centered', () => tf.tidy(() => {
const imgTensor = tf.tensor4d(Array(30).fill(1), [1, 5, 2, 3])
const result = padToSquare(imgTensor, true)
expect(result.shape).toEqual([1, 5, 5, 3])
const paddedCols = tf.unstack(result, 2)
expect(paddedCols.length).toEqual(5)
expect(paddedCols[0].dataSync()).toEqual(zeros(15))
expect(paddedCols[1].dataSync()).toEqual(ones(15))
expect(paddedCols[2].dataSync()).toEqual(ones(15))
expect(paddedCols[3].dataSync()).toEqual(zeros(15))
expect(paddedCols[4].dataSync()).toEqual(zeros(15))
}))
it('is padded to square by 1 column', () => tf.tidy(() => {
const imgTensor = tf.tensor4d(Array(60).fill(1), [1, 5, 4, 3])
const result = padToSquare(imgTensor)
expect(result.shape).toEqual([1, 5, 5, 3])
const paddedCols = tf.unstack(result, 2)
expect(paddedCols.length).toEqual(5)
expect(paddedCols[0].dataSync()).toEqual(ones(15))
expect(paddedCols[1].dataSync()).toEqual(ones(15))
expect(paddedCols[2].dataSync()).toEqual(ones(15))
expect(paddedCols[3].dataSync()).toEqual(ones(15))
expect(paddedCols[4].dataSync()).toEqual(zeros(15))
}))
it('is padded to square by 1 column and centered', () => tf.tidy(() => {
const imgTensor = tf.tensor4d(Array(60).fill(1), [1, 5, 4, 3])
const result = padToSquare(imgTensor, true)
expect(result.shape).toEqual([1, 5, 5, 3])
const paddedCols = tf.unstack(result, 2)
expect(paddedCols.length).toEqual(5)
expect(paddedCols[0].dataSync()).toEqual(ones(15))
expect(paddedCols[1].dataSync()).toEqual(ones(15))
expect(paddedCols[2].dataSync()).toEqual(ones(15))
expect(paddedCols[3].dataSync()).toEqual(ones(15))
expect(paddedCols[4].dataSync()).toEqual(zeros(15))
}))
})
})
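The column patterns asserted above determine the padding rule; a sketch of it over NHWC batches (illustrative, not the padToSquare source; note that for an odd difference the centered variant puts the extra padding after, per the 'padded to square by 1 column and centered' cases):

import * as tf from '@tensorflow/tfjs-core'

// Sketch: zero-pad the shorter spatial axis of an NHWC batch up to the longer
// one; with center === true the padding is split before/after.
function padToSquareSketch(imgTensor: tf.Tensor4D, center: boolean = false): tf.Tensor4D {
  return tf.tidy(() => {
    const [, height, width] = imgTensor.shape
    const diff = Math.abs(height - width)
    const before = center ? Math.floor(diff / 2) : 0
    const after = diff - before
    const heightPad: [number, number] = height < width ? [before, after] : [0, 0]
    const widthPad: [number, number] = width < height ? [before, after] : [0, 0]
    const paddings: Array<[number, number]> = [[0, 0], heightPad, widthPad, [0, 0]]
    return tf.pad(imgTensor, paddings)
  })
}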
import { Point } from 'tfjs-image-recognition-base';
import { Rect } from '../../src';
import { FaceDetection } from '../../src/classes/FaceDetection';
import { FaceLandmarks68 } from '../../src/classes/FaceLandmarks68';
import { extendWithFaceDetection } from '../../src/factories/WithFaceDetection';
import { extendWithFaceLandmarks } from '../../src/factories/WithFaceLandmarks';
import { resizeResults } from '../../src/resizeResults';
import {
extendWithFaceDetection,
extendWithFaceLandmarks,
FaceDetection,
FaceLandmarks68,
Point,
Rect,
resizeResults,
} from '../../src';
import { expectPointsClose, expectRectClose } from '../utils';
const detection = new FaceDetection(1.0, new Rect(0, 0, 0.5, 0.5), { width: 100, height: 100 })
......
import * as tf from '@tensorflow/tfjs-core';
import * as faceapi from '../src';
import { FaceRecognitionNet, IPoint, IRect, Mtcnn, TinyYolov2 } from '../src/';
import { AgeGenderNet } from '../src/ageGenderNet/AgeGenderNet';
import { FaceDetection } from '../src/classes/FaceDetection';
import { FaceLandmarks } from '../src/classes/FaceLandmarks';
import { FaceExpressionNet } from '../src/faceExpressionNet/FaceExpressionNet';
import { FaceLandmark68Net } from '../src/faceLandmarkNet/FaceLandmark68Net';
import { FaceLandmark68TinyNet } from '../src/faceLandmarkNet/FaceLandmark68TinyNet';
import { SsdMobilenetv1 } from '../src/ssdMobilenetv1/SsdMobilenetv1';
import { TinyFaceDetector } from '../src/tinyFaceDetector/TinyFaceDetector';
import {
AgeGenderNet,
FaceDetection,
FaceExpressionNet,
FaceLandmark68Net,
FaceLandmark68TinyNet,
FaceLandmarks,
FaceRecognitionNet,
IPoint,
IRect,
LabeledBox,
Mtcnn,
nets,
PredictedBox,
SsdMobilenetv1,
TinyFaceDetector,
TinyYolov2,
} from '../src';
import { getTestEnv } from './env';
export function expectMaxDelta(val1: number, val2: number, maxDelta: number) {
......@@ -80,6 +88,41 @@ export function sortByFaceDetection<T extends { detection: FaceDetection }>(objs
return sortByDistanceToOrigin(objs, d => d.detection.box)
}
export function fakeTensor3d(dtype: tf.DataType = 'int32') {
return tf.tensor3d([0], [1, 1, 1], dtype)
}
export function zeros(length: number): Float32Array {
return new Float32Array(length)
}
export function ones(length: number): Float32Array {
return new Float32Array(length).fill(1)
}
export function createLabeledBox(
x: number,
y: number,
width: number,
height: number,
classLabel: number = 0
): LabeledBox {
return new LabeledBox({ x, y, width, height }, classLabel)
}
export function createPredictedBox(
x: number,
y: number,
width: number,
height: number,
classLabel: number = 0,
score: number = 1.0,
classScore: number = 1.0
): PredictedBox {
return new PredictedBox({ x, y, width, height }, classLabel, score, classScore)
}
export type ExpectedFaceDetectionWithLandmarks = {
detection: IRect
landmarks: IPoint[]
......@@ -191,7 +234,7 @@ export function describeWithNets(
faceExpressionNet,
ageGenderNet,
tinyYolov2
} = faceapi.nets
} = nets
beforeAll(async () => {
const {
......
import { utils } from '../../src';
describe('utils', () => {
describe('isValidNumber', () => {
it('0 is valid', () => {
expect(utils.isValidNumber(0)).toBe(true)
})
it('1 is valid', () => {
expect(utils.isValidNumber(1)).toBe(true)
})
it('-1 is valid', () => {
expect(utils.isValidNumber(-1)).toBe(true)
})
it('NaN is invalid', () => {
expect(utils.isValidNumber(NaN)).toBe(false)
})
it('Infinity is invalid', () => {
expect(utils.isValidNumber(Infinity)).toBe(false)
})
it('-Infinity is invalid', () => {
expect(utils.isValidNumber(-Infinity)).toBe(false)
})
it('null is invalid', () => {
expect(utils.isValidNumber(null)).toBe(false)
})
it('undefined is invalid', () => {
expect(utils.isValidNumber(undefined)).toBe(false)
})
})
})
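Given these cases, the check presumably reduces to finiteness (a sketch, not the exported utils source):

// Accepts exactly the finite numbers: NaN, ±Infinity, null and undefined all fail.
function isValidNumberSketch(num: any): boolean {
  return typeof num === 'number' && Number.isFinite(num)
}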