Commit 03ed2598 by vincent

use tfjs-tiny-yolov2 codebase

parent b6baf8c0
...@@ -5843,6 +5843,15 @@ ...@@ -5843,6 +5843,15 @@
"@tensorflow/tfjs-core": "0.12.14" "@tensorflow/tfjs-core": "0.12.14"
} }
}, },
"tfjs-tiny-yolov2": {
"version": "0.0.2",
"resolved": "https://registry.npmjs.org/tfjs-tiny-yolov2/-/tfjs-tiny-yolov2-0.0.2.tgz",
"integrity": "sha512-NtiPErN2tIP9EkZA7rGjW5wF7iN4JAjI0LwoC1HvMfd4oPpmqwFSbXxwDvFbZY3a5mPUyW6E4/AqoXjSg4w3yA==",
"requires": {
"@tensorflow/tfjs-core": "0.12.14",
"tfjs-image-recognition-base": "git+https://github.com/justadudewhohacks/tfjs-image-recognition-base.git#2f2072f883dd098bc539e2e89a61878720e400a1"
}
},
"through2": { "through2": {
"version": "2.0.1", "version": "2.0.1",
"resolved": "https://registry.npmjs.org/through2/-/through2-2.0.1.tgz", "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.1.tgz",
...@@ -6003,9 +6012,9 @@ ...@@ -6003,9 +6012,9 @@
} }
}, },
"typescript": { "typescript": {
"version": "2.9.2", "version": "2.8.4",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-2.9.2.tgz", "resolved": "https://registry.npmjs.org/typescript/-/typescript-2.8.4.tgz",
"integrity": "sha512-Gr4p6nFNaoufRIY4NMdpQRNmgxVIGMs4Fcu/ujdYk3nAZqk7supzBE9idmvfZIlH/Cuj//dvi+019qEue9lV0w==", "integrity": "sha512-IIU5cN1mR5J3z9jjdESJbnxikTrEz3lzAw/D0Tf45jHpBp55nY31UkUvmVHoffCfKHTqJs3fCLPDxknQTTFegQ==",
"dev": true "dev": true
}, },
"uglify-js": { "uglify-js": {
......
...@@ -7,7 +7,8 @@ ...@@ -7,7 +7,8 @@
"scripts": { "scripts": {
"rollup-min": "rollup -c rollup.config.js --environment minify:true", "rollup-min": "rollup -c rollup.config.js --environment minify:true",
"rollup": "rollup -c rollup.config.js", "rollup": "rollup -c rollup.config.js",
"build": "npm run rollup && npm run rollup-min && tsc", "build-tsc": "tsc",
"build": "npm run rollup && npm run rollup-min && npm run build-tsc",
"test": "karma start" "test": "karma start"
}, },
"keywords": [ "keywords": [
...@@ -21,7 +22,8 @@ ...@@ -21,7 +22,8 @@
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@tensorflow/tfjs-core": "^0.12.14", "@tensorflow/tfjs-core": "^0.12.14",
"tfjs-image-recognition-base": "git+https://github.com/justadudewhohacks/tfjs-image-recognition-base.git" "tfjs-image-recognition-base": "^0.0.0",
"tfjs-tiny-yolov2": "0.0.2"
}, },
"devDependencies": { "devDependencies": {
"@types/jasmine": "^2.8.8", "@types/jasmine": "^2.8.8",
...@@ -38,6 +40,6 @@ ...@@ -38,6 +40,6 @@
"rollup-plugin-typescript2": "^0.16.1", "rollup-plugin-typescript2": "^0.16.1",
"rollup-plugin-uglify": "^4.0.0", "rollup-plugin-uglify": "^4.0.0",
"tslib": "^1.9.3", "tslib": "^1.9.3",
"typescript": "^2.9.2" "typescript": "2.8.4"
} }
} }
import { Point, Rect, TNetInput } from 'tfjs-image-recognition-base'; import { Point, Rect, TNetInput } from 'tfjs-image-recognition-base';
import { TinyYolov2Types } from 'tfjs-tiny-yolov2';
import { TinyYolov2 } from '.'; import { TinyYolov2 } from '.';
import { FaceDetection } from './classes/FaceDetection'; import { FaceDetection } from './classes/FaceDetection';
...@@ -10,7 +11,6 @@ import { FaceLandmarkNet } from './faceLandmarkNet/FaceLandmarkNet'; ...@@ -10,7 +11,6 @@ import { FaceLandmarkNet } from './faceLandmarkNet/FaceLandmarkNet';
import { FaceRecognitionNet } from './faceRecognitionNet/FaceRecognitionNet'; import { FaceRecognitionNet } from './faceRecognitionNet/FaceRecognitionNet';
import { Mtcnn } from './mtcnn/Mtcnn'; import { Mtcnn } from './mtcnn/Mtcnn';
import { MtcnnForwardParams } from './mtcnn/types'; import { MtcnnForwardParams } from './mtcnn/types';
import { TinyYolov2ForwardParams } from './tinyYolov2/types';
function computeDescriptorsFactory( function computeDescriptorsFactory(
recognitionNet: FaceRecognitionNet recognitionNet: FaceRecognitionNet
...@@ -95,7 +95,7 @@ export function allFacesTinyYolov2Factory( ...@@ -95,7 +95,7 @@ export function allFacesTinyYolov2Factory(
) { ) {
return async function( return async function(
input: TNetInput, input: TNetInput,
forwardParams: TinyYolov2ForwardParams = {}, forwardParams: TinyYolov2Types.TinyYolov2ForwardParams = {},
useBatchProcessing: boolean = false useBatchProcessing: boolean = false
): Promise<FullFaceDescription[]> { ): Promise<FullFaceDescription[]> {
const detectFaces = (input: TNetInput) => tinyYolov2.locateFaces(input, forwardParams) const detectFaces = (input: TNetInput) => tinyYolov2.locateFaces(input, forwardParams)
......
import * as tf from '@tensorflow/tfjs-core';
import { ConvParams } from './types';
/**
 * Applies a 2D convolution with bias to the input, optionally followed by a
 * ReLU activation. All intermediate tensors are disposed via tf.tidy.
 *
 * @param x Input tensor (4D, NHWC).
 * @param params Convolution filter weights and bias.
 * @param padding Padding mode forwarded to tf.conv2d ('same' by default).
 * @param withRelu Whether to apply ReLU to the biased output.
 * @returns The convolved (and optionally activated) 4D tensor.
 */
export function convLayer(
  x: tf.Tensor4D,
  params: ConvParams,
  padding: 'valid' | 'same' = 'same',
  withRelu: boolean = false
): tf.Tensor4D {
  return tf.tidy(() => {
    const convolved = tf.conv2d(x, params.filters, [1, 1], padding)
    const biased = tf.add(convolved, params.bias) as tf.Tensor4D
    return withRelu ? tf.relu(biased) : biased
  })
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { ExtractWeightsFunction, ParamMapping } from 'tfjs-image-recognition-base';
import { ConvParams } from './types';
/**
 * Returns an extractor that consumes convolution weights from the flat weight
 * array behind `extractWeights` and records the parameter paths it creates in
 * `paramMappings`.
 *
 * The returned function reads filter weights first, then bias weights, and
 * builds a ConvParams with filters of shape
 * [filterSize, filterSize, channelsIn, channelsOut].
 */
export function extractConvParamsFactory(
  extractWeights: ExtractWeightsFunction,
  paramMappings: ParamMapping[]
) {
  return function(
    channelsIn: number,
    channelsOut: number,
    filterSize: number,
    mappedPrefix: string
  ): ConvParams {
    const numFilterWeights = channelsIn * channelsOut * filterSize * filterSize
    const filters = tf.tensor4d(
      extractWeights(numFilterWeights),
      [filterSize, filterSize, channelsIn, channelsOut]
    )
    const bias = tf.tensor1d(extractWeights(channelsOut))

    paramMappings.push(
      { paramPath: `${mappedPrefix}/filters` },
      { paramPath: `${mappedPrefix}/bias` }
    )

    return { filters, bias }
  }
}
import * as tf from '@tensorflow/tfjs-core';
import { ExtractWeightsFunction, ParamMapping } from 'tfjs-image-recognition-base';
import { FCParams } from './types';
/**
 * Returns an extractor that consumes fully-connected-layer weights from the
 * flat weight array behind `extractWeights` and records the parameter paths
 * it creates in `paramMappings`.
 *
 * The returned function reads the weight matrix first
 * ([channelsIn, channelsOut]), then the bias vector.
 */
export function extractFCParamsFactory(
  extractWeights: ExtractWeightsFunction,
  paramMappings: ParamMapping[]
) {
  return function(
    channelsIn: number,
    channelsOut: number,
    mappedPrefix: string
  ): FCParams {
    const weights = tf.tensor2d(
      extractWeights(channelsIn * channelsOut),
      [channelsIn, channelsOut]
    )
    const bias = tf.tensor1d(extractWeights(channelsOut))

    paramMappings.push(
      { paramPath: `${mappedPrefix}/weights` },
      { paramPath: `${mappedPrefix}/bias` }
    )

    return { weights, bias }
  }
}
import * as tf from '@tensorflow/tfjs-core';
// Weights of a plain 2D convolution layer (consumed by convLayer).
export type ConvParams = {
  filters: tf.Tensor4D
  bias: tf.Tensor1D
}
// Weights of a fully connected layer (see extractFCParamsFactory).
export type FCParams = {
  weights: tf.Tensor2D
  bias: tf.Tensor1D
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core'; import * as tf from '@tensorflow/tfjs-core';
import { convLayer } from 'tfjs-tiny-yolov2';
import { convLayer } from '../commons/convLayer';
import { BoxPredictionParams } from './types'; import { BoxPredictionParams } from './types';
......
import * as tf from '@tensorflow/tfjs-core'; import * as tf from '@tensorflow/tfjs-core';
import { extractWeightsFactory, ExtractWeightsFunction, ParamMapping } from 'tfjs-image-recognition-base'; import { extractWeightsFactory, ExtractWeightsFunction, ParamMapping } from 'tfjs-image-recognition-base';
import { ConvParams } from 'tfjs-tiny-yolov2';
import { ConvParams } from '../commons/types';
import { MobileNetV1, NetParams, PointwiseConvParams, PredictionLayerParams } from './types'; import { MobileNetV1, NetParams, PointwiseConvParams, PredictionLayerParams } from './types';
function extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) { function extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) {
......
...@@ -6,8 +6,8 @@ import { ...@@ -6,8 +6,8 @@ import {
loadWeightMap, loadWeightMap,
ParamMapping, ParamMapping,
} from 'tfjs-image-recognition-base'; } from 'tfjs-image-recognition-base';
import { ConvParams } from 'tfjs-tiny-yolov2';
import { ConvParams } from '../commons/types';
import { BoxPredictionParams, MobileNetV1, NetParams, PointwiseConvParams, PredictionLayerParams } from './types'; import { BoxPredictionParams, MobileNetV1, NetParams, PointwiseConvParams, PredictionLayerParams } from './types';
const DEFAULT_MODEL_NAME = 'ssd_mobilenetv1_model' const DEFAULT_MODEL_NAME = 'ssd_mobilenetv1_model'
......
import * as tf from '@tensorflow/tfjs-core'; import * as tf from '@tensorflow/tfjs-core';
import { ConvParams } from 'tfjs-tiny-yolov2';
import { ConvParams } from '../commons/types';
export type PointwiseConvParams = { export type PointwiseConvParams = {
filters: tf.Tensor4D filters: tf.Tensor4D
......
import * as tf from '@tensorflow/tfjs-core'; import * as tf from '@tensorflow/tfjs-core';
import { isEven, NetInput, NeuralNetwork, Point, TNetInput, toNetInput } from 'tfjs-image-recognition-base'; import { isEven, NetInput, NeuralNetwork, Point, TNetInput, toNetInput } from 'tfjs-image-recognition-base';
import { convLayer, ConvParams } from 'tfjs-tiny-yolov2';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68'; import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { convLayer } from '../commons/convLayer';
import { ConvParams } from '../commons/types';
import { extractParams } from './extractParams'; import { extractParams } from './extractParams';
import { fullyConnectedLayer } from './fullyConnectedLayer'; import { fullyConnectedLayer } from './fullyConnectedLayer';
import { loadQuantizedParams } from './loadQuantizedParams'; import { loadQuantizedParams } from './loadQuantizedParams';
......
import { extractWeightsFactory, ParamMapping } from 'tfjs-image-recognition-base'; import { extractWeightsFactory, ParamMapping } from 'tfjs-image-recognition-base';
import { extractConvParamsFactory, extractFCParamsFactory } from 'tfjs-tiny-yolov2';
import { extractConvParamsFactory } from '../commons/extractConvParamsFactory';
import { extractFCParamsFactory } from '../commons/extractFCParamsFactory';
import { NetParams } from './types'; import { NetParams } from './types';
export function extractParams(weights: Float32Array): { params: NetParams, paramMappings: ParamMapping[] } { export function extractParams(weights: Float32Array): { params: NetParams, paramMappings: ParamMapping[] } {
......
import * as tf from '@tensorflow/tfjs-core'; import * as tf from '@tensorflow/tfjs-core';
import { FCParams } from 'tfjs-tiny-yolov2';
import { FCParams } from '../commons/types';
export function fullyConnectedLayer( export function fullyConnectedLayer(
x: tf.Tensor2D, x: tf.Tensor2D,
......
...@@ -5,8 +5,8 @@ import { ...@@ -5,8 +5,8 @@ import {
loadWeightMap, loadWeightMap,
ParamMapping, ParamMapping,
} from 'tfjs-image-recognition-base'; } from 'tfjs-image-recognition-base';
import { ConvParams, FCParams } from 'tfjs-tiny-yolov2';
import { ConvParams, FCParams } from '../commons/types';
import { NetParams } from './types'; import { NetParams } from './types';
const DEFAULT_MODEL_NAME = 'face_landmark_68_model' const DEFAULT_MODEL_NAME = 'face_landmark_68_model'
......
import { ConvParams, FCParams } from '../commons/types'; import { ConvParams, FCParams } from 'tfjs-tiny-yolov2';
export type NetParams = { export type NetParams = {
conv0: ConvParams conv0: ConvParams
......
import * as tf from '@tensorflow/tfjs-core'; import * as tf from '@tensorflow/tfjs-core';
import { extractWeightsFactory, ExtractWeightsFunction, isFloat, ParamMapping } from 'tfjs-image-recognition-base'; import { extractWeightsFactory, ExtractWeightsFunction, isFloat, ParamMapping } from 'tfjs-image-recognition-base';
import { ConvParams } from 'tfjs-tiny-yolov2';
import { ConvParams } from '../commons/types';
import { ConvLayerParams, NetParams, ResidualLayerParams, ScaleLayerParams } from './types'; import { ConvLayerParams, NetParams, ResidualLayerParams, ScaleLayerParams } from './types';
function extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) { function extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) {
......
import * as tf from '@tensorflow/tfjs-core'; import * as tf from '@tensorflow/tfjs-core';
import { ConvParams } from 'tfjs-tiny-yolov2';
import { ConvParams } from '../commons/types';
export type ScaleLayerParams = { export type ScaleLayerParams = {
weights: tf.Tensor1D weights: tf.Tensor1D
......
import * as tf from '@tensorflow/tfjs-core'; import * as tf from '@tensorflow/tfjs-core';
import { NetInput, TNetInput } from 'tfjs-image-recognition-base'; import { NetInput, TNetInput } from 'tfjs-image-recognition-base';
import { TinyYolov2Types } from 'tfjs-tiny-yolov2';
import { allFacesMtcnnFactory, allFacesSsdMobilenetv1Factory, allFacesTinyYolov2Factory } from './allFacesFactory'; import { allFacesMtcnnFactory, allFacesSsdMobilenetv1Factory, allFacesTinyYolov2Factory } from './allFacesFactory';
import { FaceDetection } from './classes/FaceDetection'; import { FaceDetection } from './classes/FaceDetection';
...@@ -11,7 +12,6 @@ import { FaceRecognitionNet } from './faceRecognitionNet/FaceRecognitionNet'; ...@@ -11,7 +12,6 @@ import { FaceRecognitionNet } from './faceRecognitionNet/FaceRecognitionNet';
import { Mtcnn } from './mtcnn/Mtcnn'; import { Mtcnn } from './mtcnn/Mtcnn';
import { MtcnnForwardParams, MtcnnResult } from './mtcnn/types'; import { MtcnnForwardParams, MtcnnResult } from './mtcnn/types';
import { TinyYolov2 } from './tinyYolov2/TinyYolov2'; import { TinyYolov2 } from './tinyYolov2/TinyYolov2';
import { TinyYolov2ForwardParams } from './tinyYolov2/types';
export const detectionNet = new FaceDetectionNet() export const detectionNet = new FaceDetectionNet()
export const landmarkNet = new FaceLandmarkNet() export const landmarkNet = new FaceLandmarkNet()
...@@ -90,7 +90,7 @@ export function mtcnn( ...@@ -90,7 +90,7 @@ export function mtcnn(
export function tinyYolov2( export function tinyYolov2(
input: TNetInput, input: TNetInput,
forwardParams: TinyYolov2ForwardParams forwardParams: TinyYolov2Types.TinyYolov2ForwardParams
): Promise<FaceDetection[]> { ): Promise<FaceDetection[]> {
return nets.tinyYolov2.locateFaces(input, forwardParams) return nets.tinyYolov2.locateFaces(input, forwardParams)
} }
...@@ -109,7 +109,7 @@ export const allFacesSsdMobilenetv1: allFacesSsdMobilenetv1Function = allFacesSs ...@@ -109,7 +109,7 @@ export const allFacesSsdMobilenetv1: allFacesSsdMobilenetv1Function = allFacesSs
export type allFacesTinyYolov2Function = ( export type allFacesTinyYolov2Function = (
input: tf.Tensor | NetInput | TNetInput, input: tf.Tensor | NetInput | TNetInput,
forwardParams?: TinyYolov2ForwardParams, forwardParams?: TinyYolov2Types.TinyYolov2ForwardParams,
useBatchProcessing?: boolean useBatchProcessing?: boolean
) => Promise<FullFaceDescription[]> ) => Promise<FullFaceDescription[]>
......
import * as tf from '@tensorflow/tfjs-core'; import * as tf from '@tensorflow/tfjs-core';
import { convLayer } from 'tfjs-tiny-yolov2';
import { convLayer } from '../commons/convLayer';
import { fullyConnectedLayer } from '../faceLandmarkNet/fullyConnectedLayer'; import { fullyConnectedLayer } from '../faceLandmarkNet/fullyConnectedLayer';
import { prelu } from './prelu'; import { prelu } from './prelu';
import { sharedLayer } from './sharedLayers'; import { sharedLayer } from './sharedLayers';
......
import * as tf from '@tensorflow/tfjs-core'; import * as tf from '@tensorflow/tfjs-core';
import { convLayer } from 'tfjs-tiny-yolov2';
import { convLayer } from '../commons/convLayer';
import { sharedLayer } from './sharedLayers'; import { sharedLayer } from './sharedLayers';
import { PNetParams } from './types'; import { PNetParams } from './types';
......
import * as tf from '@tensorflow/tfjs-core'; import * as tf from '@tensorflow/tfjs-core';
import { extractWeightsFactory, ExtractWeightsFunction, ParamMapping } from 'tfjs-image-recognition-base'; import { extractWeightsFactory, ExtractWeightsFunction, ParamMapping } from 'tfjs-image-recognition-base';
import { extractConvParamsFactory, extractFCParamsFactory } from 'tfjs-tiny-yolov2';
import { extractConvParamsFactory } from '../commons/extractConvParamsFactory';
import { extractFCParamsFactory } from '../commons/extractFCParamsFactory';
import { NetParams, ONetParams, PNetParams, RNetParams, SharedParams } from './types'; import { NetParams, ONetParams, PNetParams, RNetParams, SharedParams } from './types';
function extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) { function extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) {
......
...@@ -5,8 +5,8 @@ import { ...@@ -5,8 +5,8 @@ import {
loadWeightMap, loadWeightMap,
ParamMapping, ParamMapping,
} from 'tfjs-image-recognition-base'; } from 'tfjs-image-recognition-base';
import { ConvParams, FCParams } from 'tfjs-tiny-yolov2';
import { ConvParams, FCParams } from '../commons/types';
import { NetParams, ONetParams, PNetParams, RNetParams, SharedParams } from './types'; import { NetParams, ONetParams, PNetParams, RNetParams, SharedParams } from './types';
const DEFAULT_MODEL_NAME = 'mtcnn_model' const DEFAULT_MODEL_NAME = 'mtcnn_model'
......
import * as tf from '@tensorflow/tfjs-core'; import * as tf from '@tensorflow/tfjs-core';
import { convLayer } from 'tfjs-tiny-yolov2';
import { convLayer } from '../commons/convLayer';
import { prelu } from './prelu'; import { prelu } from './prelu';
import { SharedParams } from './types'; import { SharedParams } from './types';
......
import * as tf from '@tensorflow/tfjs-core'; import * as tf from '@tensorflow/tfjs-core';
import { ConvParams, FCParams } from 'tfjs-tiny-yolov2';
import { FaceDetection } from '../classes/FaceDetection'; import { FaceDetection } from '../classes/FaceDetection';
import { FaceLandmarks5 } from '../classes/FaceLandmarks5'; import { FaceLandmarks5 } from '../classes/FaceLandmarks5';
import { ConvParams, FCParams } from '../commons/types';
export type SharedParams = { export type SharedParams = {
conv1: ConvParams conv1: ConvParams
......
import * as tf from '@tensorflow/tfjs-core'; import { Point, TNetInput } from 'tfjs-image-recognition-base';
import { import { TinyYolov2 as TinyYolov2Base, TinyYolov2Types } from 'tfjs-tiny-yolov2';
BoundingBox,
NetInput,
NeuralNetwork,
nonMaxSuppression,
normalize,
Point,
sigmoid,
TNetInput,
toNetInput,
} from 'tfjs-image-recognition-base';
import { FaceDetection } from '../classes/FaceDetection';
import { convLayer } from '../commons/convLayer';
import { BOX_ANCHORS, BOX_ANCHORS_SEPARABLE, INPUT_SIZES, IOU_THRESHOLD, MEAN_RGB, NUM_BOXES } from './config';
import { convWithBatchNorm } from './convWithBatchNorm';
import { extractParams } from './extractParams';
import { getDefaultParams } from './getDefaultParams';
import { loadQuantizedParams } from './loadQuantizedParams';
import { NetParams, PostProcessingParams, TinyYolov2ForwardParams } from './types';
export class TinyYolov2 extends NeuralNetwork<NetParams> { import { FaceDetection } from '../classes';
import {
BOX_ANCHORS,
BOX_ANCHORS_SEPARABLE,
DEFAULT_MODEL_NAME,
DEFAULT_MODEL_NAME_SEPARABLE_CONV,
IOU_THRESHOLD,
MEAN_RGB_SEPARABLE,
} from './const';
private _withSeparableConvs: boolean export class TinyYolov2 extends TinyYolov2Base {
private _anchors: Point[]
constructor(withSeparableConvs: boolean = true) { constructor(withSeparableConvs: boolean = true) {
super('TinyYolov2') const config = Object.assign({}, {
this._withSeparableConvs = withSeparableConvs withSeparableConvs,
this._anchors = withSeparableConvs ? BOX_ANCHORS_SEPARABLE : BOX_ANCHORS iouThreshold: IOU_THRESHOLD,
classes: ['face']
},
withSeparableConvs
? {
anchors: BOX_ANCHORS_SEPARABLE,
meanRgb: MEAN_RGB_SEPARABLE
}
: {
anchors: BOX_ANCHORS,
withClassScores: true
}
)
super(config)
} }
public get withSeparableConvs(): boolean { public get withSeparableConvs(): boolean {
return this._withSeparableConvs return this.config.withSeparableConvs
} }
public get anchors(): Point[] { public get anchors(): Point[] {
return this._anchors return this.config.anchors
}
public forwardInput(input: NetInput, inputSize: number): tf.Tensor4D {
const { params } = this
if (!params) {
throw new Error('TinyYolov2 - load model before inference')
}
const out = tf.tidy(() => {
let batchTensor = input.toBatchTensor(inputSize, false)
batchTensor = this.withSeparableConvs
? normalize(batchTensor, MEAN_RGB)
: batchTensor
batchTensor = batchTensor.div(tf.scalar(256)) as tf.Tensor4D
let out = convWithBatchNorm(batchTensor, params.conv0)
out = tf.maxPool(out, [2, 2], [2, 2], 'same')
out = convWithBatchNorm(out, params.conv1)
out = tf.maxPool(out, [2, 2], [2, 2], 'same')
out = convWithBatchNorm(out, params.conv2)
out = tf.maxPool(out, [2, 2], [2, 2], 'same')
out = convWithBatchNorm(out, params.conv3)
out = tf.maxPool(out, [2, 2], [2, 2], 'same')
out = convWithBatchNorm(out, params.conv4)
out = tf.maxPool(out, [2, 2], [2, 2], 'same')
out = convWithBatchNorm(out, params.conv5)
out = tf.maxPool(out, [2, 2], [1, 1], 'same')
out = convWithBatchNorm(out, params.conv6)
out = convWithBatchNorm(out, params.conv7)
out = convLayer(out, params.conv8, 'valid', false)
return out
})
return out
}
public async forward(input: TNetInput, inputSize: number): Promise<tf.Tensor4D> {
return await this.forwardInput(await toNetInput(input, true, true), inputSize)
}
public async locateFaces(input: TNetInput, forwardParams: TinyYolov2ForwardParams = {}): Promise<FaceDetection[]> {
const { inputSize: _inputSize, scoreThreshold } = getDefaultParams(forwardParams)
const inputSize = typeof _inputSize === 'string'
? INPUT_SIZES[_inputSize]
: _inputSize
if (typeof inputSize !== 'number') {
throw new Error(`TinyYolov2 - unknown inputSize: ${inputSize}, expected number or one of xs | sm | md | lg`)
}
const netInput = await toNetInput(input, true)
const out = await this.forwardInput(netInput, inputSize)
const out0 = tf.tidy(() => tf.unstack(out)[0].expandDims()) as tf.Tensor4D
const inputDimensions = {
width: netInput.getInputWidth(0),
height: netInput.getInputHeight(0)
}
const paddings = netInput.getRelativePaddings(0)
const results = this.postProcess(out0, { scoreThreshold, paddings })
const boxes = results.map(res => res.box)
const scores = results.map(res => res.score)
out.dispose()
out0.dispose()
const indices = nonMaxSuppression(
boxes.map(box => box.rescale(inputSize)),
scores,
IOU_THRESHOLD,
true
)
const detections = indices.map(idx =>
new FaceDetection(
scores[idx],
boxes[idx].toRect(),
inputDimensions
)
)
return detections
}
public postProcess(outputTensor: tf.Tensor4D, { scoreThreshold, paddings }: PostProcessingParams) {
const numCells = outputTensor.shape[1]
const [boxesTensor, scoresTensor] = tf.tidy(() => {
const reshaped = outputTensor.reshape([numCells, numCells, NUM_BOXES, this.withSeparableConvs ? 5 : 6])
const boxes = reshaped.slice([0, 0, 0, 0], [numCells, numCells, NUM_BOXES, 4])
const scores = reshaped.slice([0, 0, 0, 4], [numCells, numCells, NUM_BOXES, 1])
return [boxes, scores]
})
const results = []
for (let row = 0; row < numCells; row ++) {
for (let col = 0; col < numCells; col ++) {
for (let anchor = 0; anchor < NUM_BOXES; anchor ++) {
const score = sigmoid(scoresTensor.get(row, col, anchor, 0))
if (!scoreThreshold || score > scoreThreshold) {
const ctX = ((col + sigmoid(boxesTensor.get(row, col, anchor, 0))) / numCells) * paddings.x
const ctY = ((row + sigmoid(boxesTensor.get(row, col, anchor, 1))) / numCells) * paddings.y
const width = ((Math.exp(boxesTensor.get(row, col, anchor, 2)) * this.anchors[anchor].x) / numCells) * paddings.x
const height = ((Math.exp(boxesTensor.get(row, col, anchor, 3)) * this.anchors[anchor].y) / numCells) * paddings.y
const x = (ctX - (width / 2))
const y = (ctY - (height / 2))
results.push({
box: new BoundingBox(x, y, x + width, y + height),
score,
row,
col,
anchor
})
}
}
}
}
boxesTensor.dispose()
scoresTensor.dispose()
return results
} }
protected loadQuantizedParams(uri: string | undefined) { public async locateFaces(input: TNetInput, forwardParams: TinyYolov2Types.TinyYolov2ForwardParams): Promise<FaceDetection[]> {
return loadQuantizedParams(uri, this.withSeparableConvs) const objectDetections = await this.detect(input, forwardParams)
return objectDetections.map(det => new FaceDetection(det.score, det.relativeBox, { width: det.imageWidth, height: det.imageHeight }))
} }
protected extractParams(weights: Float32Array) { protected loadQuantizedParams(modelUri: string | undefined) {
return extractParams(weights, this.withSeparableConvs) const defaultModelName = this.withSeparableConvs ? DEFAULT_MODEL_NAME_SEPARABLE_CONV : DEFAULT_MODEL_NAME
return super.loadQuantizedParams(modelUri, defaultModelName) as any
} }
} }
\ No newline at end of file
import { Point } from 'tfjs-image-recognition-base'; import { Point } from 'tfjs-image-recognition-base';
export const INPUT_SIZES = { xs: 224, sm: 320, md: 416, lg: 608 }
export const NUM_BOXES = 5
export const IOU_THRESHOLD = 0.4 export const IOU_THRESHOLD = 0.4
export const BOX_ANCHORS = [ export const BOX_ANCHORS = [
...@@ -20,4 +18,7 @@ export const BOX_ANCHORS_SEPARABLE = [ ...@@ -20,4 +18,7 @@ export const BOX_ANCHORS_SEPARABLE = [
new Point(9.041765, 10.66308) new Point(9.041765, 10.66308)
] ]
export const MEAN_RGB = [117.001, 114.697, 97.404] export const MEAN_RGB_SEPARABLE: [number, number, number] = [117.001, 114.697, 97.404]
\ No newline at end of file
export const DEFAULT_MODEL_NAME = 'tiny_yolov2_model'
export const DEFAULT_MODEL_NAME_SEPARABLE_CONV = 'tiny_yolov2_separable_conv_model'
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { leaky } from './leaky';
import { ConvWithBatchNorm, SeparableConvParams } from './types';
/**
 * Runs one Tiny YOLOv2 conv block: zero-pads the input by one pixel on each
 * spatial side, applies either a separable convolution plus bias or a regular
 * convolution followed by the folded batch-norm terms (subtract `sub`, then
 * multiply by `truediv`, then add the conv bias), and finishes with the leaky
 * activation.
 */
export function convWithBatchNorm(x: tf.Tensor4D, params: ConvWithBatchNorm | SeparableConvParams): tf.Tensor4D {
  return tf.tidy(() => {
    const padded = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]]) as tf.Tensor4D

    let preActivation: tf.Tensor4D
    if (params instanceof SeparableConvParams) {
      const conv = tf.separableConv2d(padded, params.depthwise_filter, params.pointwise_filter, [1, 1], 'valid')
      preActivation = tf.add(conv, params.bias)
    } else {
      const conv = tf.conv2d(padded, params.conv.filters, [1, 1], 'valid')
      const normalized: tf.Tensor4D = tf.mul(tf.sub(conv, params.bn.sub), params.bn.truediv)
      preActivation = tf.add(normalized, params.conv.bias)
    }

    return leaky(preActivation)
  })
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { extractWeightsFactory, ExtractWeightsFunction, ParamMapping } from 'tfjs-image-recognition-base';
import { extractConvParamsFactory } from '../commons/extractConvParamsFactory';
import { BatchNorm, ConvWithBatchNorm, NetParams, SeparableConvParams } from './types';
// Builds the three weight extractors used by extractParams. All extractors
// consume from the same flat weight array (via extractWeights), so the order
// of the extractWeights calls below defines the expected weight-file layout.
function extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) {
  const extractConvParams = extractConvParamsFactory(extractWeights, paramMappings)
  // Reads the two folded batch-norm vectors (sub first, then truediv) and
  // records their param paths.
  function extractBatchNormParams(size: number, mappedPrefix: string): BatchNorm {
    const sub = tf.tensor1d(extractWeights(size))
    const truediv = tf.tensor1d(extractWeights(size))
    paramMappings.push(
      { paramPath: `${mappedPrefix}/sub` },
      { paramPath: `${mappedPrefix}/truediv` }
    )
    return { sub, truediv }
  }
  // A 3x3 convolution followed by its batch-norm parameters.
  function extractConvWithBatchNormParams(channelsIn: number, channelsOut: number, mappedPrefix: string): ConvWithBatchNorm {
    const conv = extractConvParams(channelsIn, channelsOut, 3, `${mappedPrefix}/conv`)
    const bn = extractBatchNormParams(channelsOut, `${mappedPrefix}/bn`)
    return { conv, bn }
  }
  // Depthwise-separable convolution: 3x3 depthwise filter, 1x1 pointwise
  // filter, then bias. Returned as a SeparableConvParams class instance so
  // convWithBatchNorm can dispatch on it with instanceof.
  function extractSeparableConvParams(channelsIn: number, channelsOut: number, mappedPrefix: string): SeparableConvParams {
    const depthwise_filter = tf.tensor4d(extractWeights(3 * 3 * channelsIn), [3, 3, channelsIn, 1])
    const pointwise_filter = tf.tensor4d(extractWeights(channelsIn * channelsOut), [1, 1, channelsIn, channelsOut])
    const bias = tf.tensor1d(extractWeights(channelsOut))
    paramMappings.push(
      { paramPath: `${mappedPrefix}/depthwise_filter` },
      { paramPath: `${mappedPrefix}/pointwise_filter` },
      { paramPath: `${mappedPrefix}/bias` }
    )
    return new SeparableConvParams(
      depthwise_filter,
      pointwise_filter,
      bias
    )
  }
  return {
    extractConvParams,
    extractConvWithBatchNormParams,
    extractSeparableConvParams
  }
}
/**
 * Extracts the Tiny YOLOv2 network parameters (conv0..conv8) from a flat
 * Float32Array of weights.
 *
 * @param weights The flat weight array; must be consumed exactly.
 * @param withSeparableConvs When true, conv0..conv7 are depthwise-separable
 *   convolutions and each anchor encodes 5 values; otherwise they are
 *   conv+batch-norm blocks and each anchor encodes 6 values (the extra one
 *   presumably a class score — see the reshape in postProcess).
 * @returns The extracted params together with the param-path mappings
 *   recorded during extraction.
 * @throws Error if any weights remain unconsumed after extraction.
 */
export function extractParams(weights: Float32Array, withSeparableConvs: boolean): { params: NetParams, paramMappings: ParamMapping[] } {
  const {
    extractWeights,
    getRemainingWeights
  } = extractWeightsFactory(weights)

  const paramMappings: ParamMapping[] = []

  const {
    extractConvParams,
    extractConvWithBatchNormParams,
    extractSeparableConvParams
  } = extractorsFactory(extractWeights, paramMappings)

  const extractConvFn = withSeparableConvs ? extractSeparableConvParams : extractConvWithBatchNormParams
  const numAnchorEncodings = withSeparableConvs ? 5 : 6

  const conv0 = extractConvFn(3, 16, 'conv0')
  const conv1 = extractConvFn(16, 32, 'conv1')
  const conv2 = extractConvFn(32, 64, 'conv2')
  const conv3 = extractConvFn(64, 128, 'conv3')
  const conv4 = extractConvFn(128, 256, 'conv4')
  const conv5 = extractConvFn(256, 512, 'conv5')
  const conv6 = extractConvFn(512, 1024, 'conv6')
  const conv7 = extractConvFn(1024, 1024, 'conv7')
  // Final 1x1 conv maps to 5 anchor boxes x numAnchorEncodings outputs.
  const conv8 = extractConvParams(1024, 5 * numAnchorEncodings, 1, 'conv8')

  if (getRemainingWeights().length !== 0) {
    throw new Error(`weights remaining after extract: ${getRemainingWeights().length}`)
  }

  const params = { conv0, conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8 }

  return { params, paramMappings }
}
\ No newline at end of file
import { SizeType, TinyYolov2ForwardParams } from './types';
/**
 * Merges caller-supplied forward params over the defaults
 * (inputSize: SizeType.MD, scoreThreshold: 0.5); any property the caller
 * specifies wins.
 */
export function getDefaultParams(params: TinyYolov2ForwardParams) {
  const defaults = {
    inputSize: SizeType.MD,
    scoreThreshold: 0.5
  }
  return { ...defaults, ...params }
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
// Leaky activation applied after each conv block (see convWithBatchNorm).
// With m = 0.1 * x (the constant is 0.1 rounded to float32),
// relu(x - m) + m equals max(x, m): identity for positive inputs, slope
// ~0.1 for negative ones — the same result as the commented-out
// tf.maximum form below.
export function leaky(x: tf.Tensor4D): tf.Tensor4D {
  return tf.tidy(() => {
    const min = tf.mul(x, tf.scalar(0.10000000149011612))
    return tf.add(tf.relu(tf.sub(x, min)), min)
    //return tf.maximum(x, min)
  })
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import {
disposeUnusedWeightTensors,
extractWeightEntryFactory,
loadWeightMap,
ParamMapping,
} from 'tfjs-image-recognition-base';
import { ConvParams } from '../commons/types';
import { BatchNorm, ConvWithBatchNorm, NetParams, SeparableConvParams } from './types';
const DEFAULT_MODEL_NAME = 'tiny_yolov2_model'
const DEFAULT_MODEL_NAME_SEPARABLE_CONV = 'tiny_yolov2_separable_conv_model'
// Builds the three extractors used by loadQuantizedParams. Unlike the flat
// weight-array extractors in extractParams, these look tensors up by name in
// the loaded weight map and record each resolved path in paramMappings.
function extractorsFactory(weightMap: any, paramMappings: ParamMapping[]) {
  const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings)
  // Folded batch-norm vectors; the trailing number is the expected rank.
  function extractBatchNormParams(prefix: string): BatchNorm {
    const sub = extractWeightEntry<tf.Tensor1D>(`${prefix}/sub`, 1)
    const truediv = extractWeightEntry<tf.Tensor1D>(`${prefix}/truediv`, 1)
    return { sub, truediv }
  }
  function extractConvParams(prefix: string): ConvParams {
    const filters = extractWeightEntry<tf.Tensor4D>(`${prefix}/filters`, 4)
    const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/bias`, 1)
    return { filters, bias }
  }
  function extractConvWithBatchNormParams(prefix: string): ConvWithBatchNorm {
    const conv = extractConvParams(`${prefix}/conv`)
    const bn = extractBatchNormParams(`${prefix}/bn`)
    return { conv, bn }
  }
  // Returned as a SeparableConvParams class instance so convWithBatchNorm
  // can dispatch on it with instanceof.
  function extractSeparableConvParams(prefix: string): SeparableConvParams {
    const depthwise_filter = extractWeightEntry<tf.Tensor4D>(`${prefix}/depthwise_filter`, 4)
    const pointwise_filter = extractWeightEntry<tf.Tensor4D>(`${prefix}/pointwise_filter`, 4)
    const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/bias`, 1)
    return new SeparableConvParams(
      depthwise_filter,
      pointwise_filter,
      bias
    )
  }
  return {
    extractConvParams,
    extractConvWithBatchNormParams,
    extractSeparableConvParams
  }
}
/**
 * Loads the quantized Tiny YOLOv2 weights and assembles them into NetParams.
 *
 * @param uri Optional location of the weight files (resolution handled by loadWeightMap).
 * @param withSeparableConvs Whether to load the separable-conv model variant.
 * @returns The extracted network parameters together with the recorded param mappings.
 */
export async function loadQuantizedParams(
  uri: string | undefined,
  withSeparableConvs: boolean
): Promise<{ params: NetParams, paramMappings: ParamMapping[] }> {
  const modelName = withSeparableConvs ? DEFAULT_MODEL_NAME_SEPARABLE_CONV : DEFAULT_MODEL_NAME
  const weightMap = await loadWeightMap(uri, modelName)
  const paramMappings: ParamMapping[] = []

  const extractors = extractorsFactory(weightMap, paramMappings)

  // conv0..conv7 are conv+batch-norm or separable-conv depending on the variant,
  // the final layer (conv8) is always a plain convolution
  const extractConvFn = withSeparableConvs
    ? extractors.extractSeparableConvParams
    : extractors.extractConvWithBatchNormParams

  const params: NetParams = {
    conv0: extractConvFn('conv0'),
    conv1: extractConvFn('conv1'),
    conv2: extractConvFn('conv2'),
    conv3: extractConvFn('conv3'),
    conv4: extractConvFn('conv4'),
    conv5: extractConvFn('conv5'),
    conv6: extractConvFn('conv6'),
    conv7: extractConvFn('conv7'),
    conv8: extractors.extractConvParams('conv8')
  }

  // free any tensors in the weight map that no extractor consumed
  disposeUnusedWeightTensors(weightMap, paramMappings)

  return { params, paramMappings }
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { Point } from 'tfjs-image-recognition-base';
import { ConvParams } from '../commons/types';
// Folded batch-norm parameters for a single layer.
// NOTE(review): field names suggest normalization is applied as
// (x - sub) / truediv — confirm against the forward-pass implementation.
export type BatchNorm = {
  sub: tf.Tensor1D
  truediv: tf.Tensor1D
}
/**
 * Parameter container for a depthwise-separable convolution layer.
 * NOTE(review): declared as a class rather than a type alias — presumably so
 * variants can be distinguished at runtime (e.g. via instanceof); confirm.
 */
export class SeparableConvParams {
  public depthwise_filter: tf.Tensor4D
  public pointwise_filter: tf.Tensor4D
  public bias: tf.Tensor1D

  constructor(
    depthwise_filter: tf.Tensor4D,
    pointwise_filter: tf.Tensor4D,
    bias: tf.Tensor1D
  ) {
    this.depthwise_filter = depthwise_filter
    this.pointwise_filter = pointwise_filter
    this.bias = bias
  }
}
// A convolution layer paired with its batch-norm parameters.
export type ConvWithBatchNorm = {
  conv: ConvParams
  bn: BatchNorm
}
// Complete Tiny YOLOv2 weight set: layers conv0..conv7 are either
// conv+batch-norm or separable-conv (depending on which model variant was
// loaded); the final layer conv8 is always a plain convolution.
export type NetParams = {
  conv0: ConvWithBatchNorm | SeparableConvParams
  conv1: ConvWithBatchNorm | SeparableConvParams
  conv2: ConvWithBatchNorm | SeparableConvParams
  conv3: ConvWithBatchNorm | SeparableConvParams
  conv4: ConvWithBatchNorm | SeparableConvParams
  conv5: ConvWithBatchNorm | SeparableConvParams
  conv6: ConvWithBatchNorm | SeparableConvParams
  conv7: ConvWithBatchNorm | SeparableConvParams
  conv8: ConvParams
}
// Symbolic input sizes that can be passed as TinyYolov2ForwardParams.inputSize
// instead of a concrete pixel dimension.
export enum SizeType {
  XS = 'xs',
  SM = 'sm',
  MD = 'md',
  LG = 'lg'
}
// Options accepted by the Tiny YOLOv2 forward pass / detection entry points.
export type TinyYolov2ForwardParams = {
  inputSize?: SizeType | number  // symbolic size or an explicit pixel dimension
  scoreThreshold?: number  // presumably the minimum detection score — see PostProcessingParams
}
// Parameters used when decoding the raw network output into boxes.
export type PostProcessingParams = {
  scoreThreshold?: number
  paddings: Point  // NOTE(review): looks like padding/scale applied to map boxes back to the input image — confirm against caller
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core'; import * as tf from '@tensorflow/tfjs-core';
import { TinyYolov2Types } from 'tfjs-tiny-yolov2';
import { bufferToImage, NetInput, Point, toNetInput } from '../../../src'; import { bufferToImage, NetInput, Point, toNetInput } from '../../../src';
import { SizeType } from '../../../src/tinyYolov2/types';
import { describeWithNets, expectAllTensorsReleased, expectMaxDelta, expectPointClose, expectRectClose } from '../../utils'; import { describeWithNets, expectAllTensorsReleased, expectMaxDelta, expectPointClose, expectRectClose } from '../../utils';
import { expectedTinyYolov2SeparableConvBoxes } from './expectedResults'; import { expectedTinyYolov2SeparableConvBoxes } from './expectedResults';
...@@ -21,13 +21,13 @@ describe('allFacesTinyYolov2', () => { ...@@ -21,13 +21,13 @@ describe('allFacesTinyYolov2', () => {
describeWithNets('computes full face descriptions', { withAllFacesTinyYolov2: true }, ({ allFacesTinyYolov2 }) => { describeWithNets('computes full face descriptions', { withAllFacesTinyYolov2: true }, ({ allFacesTinyYolov2 }) => {
it('SizeType.LG', async () => { it('TinyYolov2Types.SizeType.LG', async () => {
const expectedScores = [0.9, 0.9, 0.89, 0.85, 0.85, 0.85] const expectedScores = [0.9, 0.9, 0.89, 0.85, 0.85, 0.85]
const maxBoxDelta = 5 const maxBoxDelta = 5
const maxLandmarkPointsDelta = 10 const maxLandmarkPointsDelta = 10
const maxDescriptorDelta = 0.06 const maxDescriptorDelta = 0.06
const results = await allFacesTinyYolov2(imgEl, { inputSize: SizeType.LG }) const results = await allFacesTinyYolov2(imgEl, { inputSize: TinyYolov2Types.SizeType.LG })
const detectionOrder = [0, 2, 3, 4, 1, 5] const detectionOrder = [0, 2, 3, 4, 1, 5]
expect(results.length).toEqual(6) expect(results.length).toEqual(6)
...@@ -41,13 +41,13 @@ describe('allFacesTinyYolov2', () => { ...@@ -41,13 +41,13 @@ describe('allFacesTinyYolov2', () => {
}) })
}) })
it('SizeType.MD', async () => { it('TinyYolov2Types.SizeType.MD', async () => {
const expectedScores = [0.85, 0.85, 0.84, 0.83, 0.8, 0.8] const expectedScores = [0.85, 0.85, 0.84, 0.83, 0.8, 0.8]
const maxBoxDelta = 17 const maxBoxDelta = 17
const maxLandmarkPointsDelta = 16 const maxLandmarkPointsDelta = 16
const maxDescriptorDelta = 0.05 const maxDescriptorDelta = 0.05
const results = await allFacesTinyYolov2(imgEl, { inputSize: SizeType.MD }) const results = await allFacesTinyYolov2(imgEl, { inputSize: TinyYolov2Types.SizeType.MD })
const boxOrder = [5, 1, 4, 3, 2, 0] const boxOrder = [5, 1, 4, 3, 2, 0]
const detectionOrder = [5, 2, 1, 4, 3, 0] const detectionOrder = [5, 2, 1, 4, 3, 0]
......
import * as faceapi from '../../../src'; import { TinyYolov2Types } from 'tfjs-tiny-yolov2';
import { SizeType } from '../../../src/tinyYolov2/types';
import { bufferToImage, createTinyYolov2, TinyYolov2 } from '../../../src';
import { describeWithNets, expectAllTensorsReleased, expectRectClose } from '../../utils'; import { describeWithNets, expectAllTensorsReleased, expectRectClose } from '../../utils';
import { expectedTinyYolov2Boxes, expectedTinyYolov2SeparableConvBoxes } from './expectedResults'; import { expectedTinyYolov2Boxes } from './expectedResults';
describe('tinyYolov2', () => { describe('tinyYolov2', () => {
...@@ -9,13 +10,13 @@ describe('tinyYolov2', () => { ...@@ -9,13 +10,13 @@ describe('tinyYolov2', () => {
beforeAll(async () => { beforeAll(async () => {
const img = await (await fetch('base/test/images/faces.jpg')).blob() const img = await (await fetch('base/test/images/faces.jpg')).blob()
imgEl = await faceapi.bufferToImage(img) imgEl = await bufferToImage(img)
}) })
describeWithNets('quantized weights', { withTinyYolov2: { quantized: true, withSeparableConv: false } }, ({ tinyYolov2 }) => { describeWithNets('quantized weights', { withTinyYolov2: { quantized: true, withSeparableConv: false } }, ({ tinyYolov2 }) => {
it('inputSize lg, finds all faces', async () => { it('inputSize lg, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: SizeType.LG }) const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2Types.SizeType.LG })
const expectedScores = [0.86, 0.86, 0.85, 0.83, 0.81, 0.81] const expectedScores = [0.86, 0.86, 0.85, 0.83, 0.81, 0.81]
const maxBoxDelta = 3 const maxBoxDelta = 3
...@@ -29,7 +30,7 @@ describe('tinyYolov2', () => { ...@@ -29,7 +30,7 @@ describe('tinyYolov2', () => {
}) })
it('inputSize md, finds all faces', async () => { it('inputSize md, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: SizeType.MD }) const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2Types.SizeType.MD })
const expectedScores = [0.89, 0.87, 0.83, 0.82, 0.81, 0.72] const expectedScores = [0.89, 0.87, 0.83, 0.82, 0.81, 0.72]
const maxBoxDelta = 16 const maxBoxDelta = 16
...@@ -61,7 +62,7 @@ describe('tinyYolov2', () => { ...@@ -61,7 +62,7 @@ describe('tinyYolov2', () => {
describeWithNets('uncompressed weights', { withTinyYolov2: { quantized: false, withSeparableConv: false } }, ({ tinyYolov2 }) => { describeWithNets('uncompressed weights', { withTinyYolov2: { quantized: false, withSeparableConv: false } }, ({ tinyYolov2 }) => {
it('inputSize lg, finds all faces', async () => { it('inputSize lg, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: SizeType.LG }) const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2Types.SizeType.LG })
const expectedScores = [0.86, 0.86, 0.85, 0.83, 0.81, 0.81] const expectedScores = [0.86, 0.86, 0.85, 0.83, 0.81, 0.81]
const maxBoxDelta = 1 const maxBoxDelta = 1
...@@ -75,7 +76,7 @@ describe('tinyYolov2', () => { ...@@ -75,7 +76,7 @@ describe('tinyYolov2', () => {
}) })
it('inputSize md, finds all faces', async () => { it('inputSize md, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: SizeType.MD }) const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2Types.SizeType.MD })
const expectedScores = [0.89, 0.87, 0.83, 0.83, 0.81, 0.73] const expectedScores = [0.89, 0.87, 0.83, 0.83, 0.81, 0.73]
const maxBoxDelta = 14 const maxBoxDelta = 14
...@@ -112,7 +113,7 @@ describe('tinyYolov2', () => { ...@@ -112,7 +113,7 @@ describe('tinyYolov2', () => {
await expectAllTensorsReleased(async () => { await expectAllTensorsReleased(async () => {
const res = await fetch('base/weights_uncompressed/tiny_yolov2_model.weights') const res = await fetch('base/weights_uncompressed/tiny_yolov2_model.weights')
const weights = new Float32Array(await res.arrayBuffer()) const weights = new Float32Array(await res.arrayBuffer())
const net = faceapi.createTinyYolov2(weights, false) const net = createTinyYolov2(weights, false)
net.dispose() net.dispose()
}) })
}) })
...@@ -123,7 +124,7 @@ describe('tinyYolov2', () => { ...@@ -123,7 +124,7 @@ describe('tinyYolov2', () => {
it('disposes all param tensors', async () => { it('disposes all param tensors', async () => {
await expectAllTensorsReleased(async () => { await expectAllTensorsReleased(async () => {
const net = new faceapi.TinyYolov2(false) const net = new TinyYolov2(false)
await net.load('base/weights_unused') await net.load('base/weights_unused')
net.dispose() net.dispose()
}) })
......
import * as faceapi from '../../../src'; import { TinyYolov2Types } from 'tfjs-tiny-yolov2';
import { SizeType } from '../../../src/tinyYolov2/types';
import { bufferToImage, createTinyYolov2, TinyYolov2 } from '../../../src';
import { describeWithNets, expectAllTensorsReleased, expectRectClose } from '../../utils'; import { describeWithNets, expectAllTensorsReleased, expectRectClose } from '../../utils';
import { expectedTinyYolov2SeparableConvBoxes } from './expectedResults'; import { expectedTinyYolov2SeparableConvBoxes } from './expectedResults';
...@@ -9,13 +10,13 @@ describe('tinyYolov2, with separable convolutions', () => { ...@@ -9,13 +10,13 @@ describe('tinyYolov2, with separable convolutions', () => {
beforeAll(async () => { beforeAll(async () => {
const img = await (await fetch('base/test/images/faces.jpg')).blob() const img = await (await fetch('base/test/images/faces.jpg')).blob()
imgEl = await faceapi.bufferToImage(img) imgEl = await bufferToImage(img)
}) })
describeWithNets('quantized weights', { withTinyYolov2: { quantized: true } }, ({ tinyYolov2 }) => { describeWithNets('quantized weights', { withTinyYolov2: { quantized: true } }, ({ tinyYolov2 }) => {
it('inputSize lg, finds all faces', async () => { it('inputSize lg, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: SizeType.LG }) const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2Types.SizeType.LG })
const expectedScores = [0.9, 0.9, 0.89, 0.85, 0.85, 0.85] const expectedScores = [0.9, 0.9, 0.89, 0.85, 0.85, 0.85]
const maxBoxDelta = 1 const maxBoxDelta = 1
...@@ -29,7 +30,7 @@ describe('tinyYolov2, with separable convolutions', () => { ...@@ -29,7 +30,7 @@ describe('tinyYolov2, with separable convolutions', () => {
}) })
it('inputSize md, finds all faces', async () => { it('inputSize md, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: SizeType.MD }) const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2Types.SizeType.MD })
const expectedScores = [0.85, 0.85, 0.84, 0.83, 0.8, 0.8] const expectedScores = [0.85, 0.85, 0.84, 0.83, 0.8, 0.8]
const maxBoxDelta = 17 const maxBoxDelta = 17
...@@ -66,7 +67,7 @@ describe('tinyYolov2, with separable convolutions', () => { ...@@ -66,7 +67,7 @@ describe('tinyYolov2, with separable convolutions', () => {
await expectAllTensorsReleased(async () => { await expectAllTensorsReleased(async () => {
const res = await fetch('base/weights_uncompressed/tiny_yolov2_separable_conv_model.weights') const res = await fetch('base/weights_uncompressed/tiny_yolov2_separable_conv_model.weights')
const weights = new Float32Array(await res.arrayBuffer()) const weights = new Float32Array(await res.arrayBuffer())
const net = faceapi.createTinyYolov2(weights) const net = createTinyYolov2(weights)
net.dispose() net.dispose()
}) })
}) })
...@@ -77,7 +78,7 @@ describe('tinyYolov2, with separable convolutions', () => { ...@@ -77,7 +78,7 @@ describe('tinyYolov2, with separable convolutions', () => {
it('disposes all param tensors', async () => { it('disposes all param tensors', async () => {
await expectAllTensorsReleased(async () => { await expectAllTensorsReleased(async () => {
const net = new faceapi.TinyYolov2() const net = new TinyYolov2()
await net.load('base/weights') await net.load('base/weights')
net.dispose() net.dispose()
}) })
......
import * as tf from '@tensorflow/tfjs-core'; import * as tf from '@tensorflow/tfjs-core';
import * as faceapi from '../src/'; import {
import { IPoint } from '../src/'; FaceDetectionNet,
FaceLandmarkNet,
FaceRecognitionNet,
IPoint,
IRect,
Mtcnn,
NeuralNetwork,
TinyYolov2,
} from '../src/';
import { allFacesMtcnnFactory, allFacesSsdMobilenetv1Factory, allFacesTinyYolov2Factory } from '../src/allFacesFactory'; import { allFacesMtcnnFactory, allFacesSsdMobilenetv1Factory, allFacesTinyYolov2Factory } from '../src/allFacesFactory';
import { allFacesMtcnnFunction, allFacesSsdMobilenetv1Function, allFacesTinyYolov2Function } from '../src/globalApi'; import { allFacesMtcnnFunction, allFacesSsdMobilenetv1Function, allFacesTinyYolov2Function } from '../src/globalApi';
import { NeuralNetwork, IRect } from 'tfjs-image-recognition-base';
jasmine.DEFAULT_TIMEOUT_INTERVAL = 60000 jasmine.DEFAULT_TIMEOUT_INTERVAL = 60000
export function expectMaxDelta(val1: number, val2: number, maxDelta: number) { export function expectMaxDelta(val1: number, val2: number, maxDelta: number) {
...@@ -52,11 +58,11 @@ export type InjectNetArgs = { ...@@ -52,11 +58,11 @@ export type InjectNetArgs = {
allFacesSsdMobilenetv1: allFacesSsdMobilenetv1Function allFacesSsdMobilenetv1: allFacesSsdMobilenetv1Function
allFacesTinyYolov2: allFacesTinyYolov2Function allFacesTinyYolov2: allFacesTinyYolov2Function
allFacesMtcnn: allFacesMtcnnFunction allFacesMtcnn: allFacesMtcnnFunction
faceDetectionNet: faceapi.FaceDetectionNet faceDetectionNet: FaceDetectionNet
faceLandmarkNet: faceapi.FaceLandmarkNet faceLandmarkNet: FaceLandmarkNet
faceRecognitionNet: faceapi.FaceRecognitionNet faceRecognitionNet: FaceRecognitionNet
mtcnn: faceapi.Mtcnn mtcnn: Mtcnn
tinyYolov2: faceapi.TinyYolov2 tinyYolov2: TinyYolov2
} }
...@@ -94,11 +100,11 @@ export function describeWithNets( ...@@ -94,11 +100,11 @@ export function describeWithNets(
) { ) {
describe(description, () => { describe(description, () => {
let faceDetectionNet: faceapi.FaceDetectionNet = new faceapi.FaceDetectionNet() let faceDetectionNet: FaceDetectionNet = new FaceDetectionNet()
let faceLandmarkNet: faceapi.FaceLandmarkNet = new faceapi.FaceLandmarkNet() let faceLandmarkNet: FaceLandmarkNet = new FaceLandmarkNet()
let faceRecognitionNet: faceapi.FaceRecognitionNet = new faceapi.FaceRecognitionNet() let faceRecognitionNet: FaceRecognitionNet = new FaceRecognitionNet()
let mtcnn: faceapi.Mtcnn = new faceapi.Mtcnn() let mtcnn: Mtcnn = new Mtcnn()
let tinyYolov2: faceapi.TinyYolov2 = new faceapi.TinyYolov2(options.withTinyYolov2 && options.withTinyYolov2.withSeparableConv) let tinyYolov2: TinyYolov2 = new TinyYolov2(options.withTinyYolov2 && options.withTinyYolov2.withSeparableConv)
let allFacesSsdMobilenetv1 = allFacesSsdMobilenetv1Factory(faceDetectionNet, faceLandmarkNet, faceRecognitionNet) let allFacesSsdMobilenetv1 = allFacesSsdMobilenetv1Factory(faceDetectionNet, faceLandmarkNet, faceRecognitionNet)
let allFacesTinyYolov2 = allFacesTinyYolov2Factory(tinyYolov2, faceLandmarkNet, faceRecognitionNet) let allFacesTinyYolov2 = allFacesTinyYolov2Factory(tinyYolov2, faceLandmarkNet, faceRecognitionNet)
let allFacesMtcnn = allFacesMtcnnFactory(mtcnn, faceRecognitionNet) let allFacesMtcnn = allFacesMtcnnFactory(mtcnn, faceRecognitionNet)
...@@ -116,21 +122,21 @@ export function describeWithNets( ...@@ -116,21 +122,21 @@ export function describeWithNets(
} = options } = options
if (withFaceDetectionNet || withAllFacesSsdMobilenetv1) { if (withFaceDetectionNet || withAllFacesSsdMobilenetv1) {
await initNet<faceapi.FaceDetectionNet>( await initNet<FaceDetectionNet>(
faceDetectionNet, faceDetectionNet,
!!withFaceDetectionNet && !withFaceDetectionNet.quantized && 'ssd_mobilenetv1_model.weights' !!withFaceDetectionNet && !withFaceDetectionNet.quantized && 'ssd_mobilenetv1_model.weights'
) )
} }
if (withFaceLandmarkNet || withAllFacesSsdMobilenetv1 || withAllFacesTinyYolov2) { if (withFaceLandmarkNet || withAllFacesSsdMobilenetv1 || withAllFacesTinyYolov2) {
await initNet<faceapi.FaceLandmarkNet>( await initNet<FaceLandmarkNet>(
faceLandmarkNet, faceLandmarkNet,
!!withFaceLandmarkNet && !withFaceLandmarkNet.quantized && 'face_landmark_68_model.weights' !!withFaceLandmarkNet && !withFaceLandmarkNet.quantized && 'face_landmark_68_model.weights'
) )
} }
if (withFaceRecognitionNet || withAllFacesSsdMobilenetv1 || withAllFacesMtcnn || withAllFacesTinyYolov2) { if (withFaceRecognitionNet || withAllFacesSsdMobilenetv1 || withAllFacesMtcnn || withAllFacesTinyYolov2) {
await initNet<faceapi.FaceRecognitionNet>( await initNet<FaceRecognitionNet>(
faceRecognitionNet, faceRecognitionNet,
// TODO: figure out why quantized weights results in NaNs in testcases // TODO: figure out why quantized weights results in NaNs in testcases
'face_recognition_model.weights' 'face_recognition_model.weights'
...@@ -138,14 +144,14 @@ export function describeWithNets( ...@@ -138,14 +144,14 @@ export function describeWithNets(
} }
if (withMtcnn || withAllFacesMtcnn) { if (withMtcnn || withAllFacesMtcnn) {
await initNet<faceapi.Mtcnn>( await initNet<Mtcnn>(
mtcnn, mtcnn,
!!withMtcnn && !withMtcnn.quantized && 'mtcnn_model.weights' !!withMtcnn && !withMtcnn.quantized && 'mtcnn_model.weights'
) )
} }
if (withTinyYolov2 || withAllFacesTinyYolov2) { if (withTinyYolov2 || withAllFacesTinyYolov2) {
await initNet<faceapi.TinyYolov2>( await initNet<TinyYolov2>(
tinyYolov2, tinyYolov2,
!!withTinyYolov2 && !withTinyYolov2.quantized && 'tiny_yolov2_model.weights', !!withTinyYolov2 && !withTinyYolov2.quantized && 'tiny_yolov2_model.weights',
withTinyYolov2 && withTinyYolov2.withSeparableConv === false withTinyYolov2 && withTinyYolov2.withSeparableConv === false
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
"suppressImplicitAnyIndexErrors": true, "suppressImplicitAnyIndexErrors": true,
"strictNullChecks": true, "strictNullChecks": true,
"importHelpers": true, "importHelpers": true,
"module": "commonjs", "module": "es6",
"moduleResolution": "node", "moduleResolution": "node",
"target": "es5", "target": "es5",
"outDir": "build", "outDir": "build",
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment