Commit 5442a71d by vincent

rearranged tests

parent 7396324d
......@@ -2,7 +2,14 @@ import { FaceDetection } from './FaceDetection';
import { FaceLandmarks } from './FaceLandmarks';
import { FaceLandmarks68 } from './FaceLandmarks68';
export class FaceDetectionWithLandmarks<TFaceLandmarks extends FaceLandmarks = FaceLandmarks68> {
export interface IFaceDetectionWithLandmarks<TFaceLandmarks extends FaceLandmarks = FaceLandmarks68> {
detection: FaceDetection,
landmarks: TFaceLandmarks
}
export class FaceDetectionWithLandmarks<TFaceLandmarks extends FaceLandmarks = FaceLandmarks68>
implements IFaceDetectionWithLandmarks<TFaceLandmarks> {
private _detection: FaceDetection
private _unshiftedLandmarks: TFaceLandmarks
......
import { FaceDetection } from './FaceDetection';
import { FaceDetectionWithLandmarks } from './FaceDetectionWithLandmarks';
import { FaceDetectionWithLandmarks, IFaceDetectionWithLandmarks } from './FaceDetectionWithLandmarks';
import { FaceLandmarks } from './FaceLandmarks';
import { FaceLandmarks68 } from './FaceLandmarks68';
export class FullFaceDescription<TFaceLandmarks extends FaceLandmarks = FaceLandmarks68> extends FaceDetectionWithLandmarks<TFaceLandmarks> {
export interface IFullFaceDescription<TFaceLandmarks extends FaceLandmarks = FaceLandmarks68>
extends IFaceDetectionWithLandmarks<TFaceLandmarks>{
detection: FaceDetection,
landmarks: TFaceLandmarks,
descriptor: Float32Array
}
export class FullFaceDescription<TFaceLandmarks extends FaceLandmarks = FaceLandmarks68>
extends FaceDetectionWithLandmarks<TFaceLandmarks>
implements IFullFaceDescription<TFaceLandmarks> {
private _descriptor: Float32Array
constructor(
......
......@@ -3,6 +3,7 @@ import { ITinyYolov2Options } from 'tfjs-tiny-yolov2';
import { FaceDetection } from '../classes/FaceDetection';
import { FaceDetectionWithLandmarks } from '../classes/FaceDetectionWithLandmarks';
import { FaceLandmarks5 } from '../classes/FaceLandmarks5';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { FaceLandmark68Net } from '../faceLandmarkNet/FaceLandmark68Net';
import { FaceLandmark68TinyNet } from '../faceLandmarkNet/FaceLandmark68TinyNet';
......@@ -63,7 +64,7 @@ export const tinyYolov2 = (input: TNetInput, options: ITinyYolov2Options): Promi
* @param options (optional, default: see MtcnnOptions constructor for default parameters).
* @returns Bounding box of each face with score and 5 point face landmarks.
*/
export const mtcnn = (input: TNetInput, options: MtcnnOptions): Promise<FaceDetectionWithLandmarks[]> =>
export const mtcnn = (input: TNetInput, options: MtcnnOptions): Promise<FaceDetectionWithLandmarks<FaceLandmarks5>[]> =>
nets.mtcnn.forward(input, options)
/**
......
......@@ -2,6 +2,7 @@ import * as tf from '@tensorflow/tfjs-core';
import { NetInput, NeuralNetwork, Point, Rect, TNetInput, toNetInput } from 'tfjs-image-recognition-base';
import { FaceDetection } from '../classes/FaceDetection';
import { FaceDetectionWithLandmarks } from '../classes/FaceDetectionWithLandmarks';
import { FaceLandmarks5 } from '../classes/FaceLandmarks5';
import { bgrToRgbTensor } from './bgrToRgbTensor';
import { CELL_SIZE } from './config';
......@@ -14,7 +15,6 @@ import { stage1 } from './stage1';
import { stage2 } from './stage2';
import { stage3 } from './stage3';
import { NetParams } from './types';
import { FaceDetectionWithLandmarks } from '../classes/FaceDetectionWithLandmarks';
export class Mtcnn extends NeuralNetwork<NetParams> {
......@@ -25,7 +25,7 @@ export class Mtcnn extends NeuralNetwork<NetParams> {
public async forwardInput(
input: NetInput,
forwardParams: IMtcnnOptions = {}
): Promise<{ results: FaceDetectionWithLandmarks[], stats: any }> {
): Promise<{ results: FaceDetectionWithLandmarks<FaceLandmarks5>[], stats: any }> {
const { params } = this
......@@ -101,7 +101,7 @@ export class Mtcnn extends NeuralNetwork<NetParams> {
const out3 = await stage3(inputCanvas, out2.boxes, scoreThresholds[2], params.onet, stats)
stats.total_stage3 = Date.now() - ts
const results = out3.boxes.map((box, idx) => new FaceDetectionWithLandmarks(
const results = out3.boxes.map((box, idx) => new FaceDetectionWithLandmarks<FaceLandmarks5>(
new FaceDetection(
out3.scores[idx],
new Rect(
......@@ -127,7 +127,7 @@ export class Mtcnn extends NeuralNetwork<NetParams> {
public async forward(
input: TNetInput,
forwardParams: IMtcnnOptions = {}
): Promise<FaceDetectionWithLandmarks[]> {
): Promise<FaceDetectionWithLandmarks<FaceLandmarks5>[]> {
return (
await this.forwardInput(
await toNetInput(input),
......@@ -139,7 +139,7 @@ export class Mtcnn extends NeuralNetwork<NetParams> {
public async forwardWithStats(
input: TNetInput,
forwardParams: IMtcnnOptions = {}
): Promise<{ results: FaceDetectionWithLandmarks[], stats: any }> {
): Promise<{ results: FaceDetectionWithLandmarks<FaceLandmarks5>[], stats: any }> {
return this.forwardInput(
await toNetInput(input),
forwardParams
......
[[{"x":117.85171800851822,"y":58.91067498922348},{"x":157.70139408111572,"y":64.48519098758698},{"x":142.3133249282837,"y":88.54253697395325},{"x":110.16610914468765,"y":99.86233913898468},{"x":149.25052666664124,"y":106.37608766555786}], [{"x":260.46802616119385,"y":82.86598587036133},{"x":305.55760955810547,"y":83.54110813140869},{"x":281.4357223510742,"y":113.98349380493164},{"x":257.06039476394653,"y":125.50608730316164},{"x":306.0191822052002,"y":127.20984458923341}], [{"x":82.91613873839378,"y":292.6100924015045},{"x":133.91112035512924,"y":304.814593821764},{"x":104.43486452102661,"y":330.3951778411865},{"x":72.6984107196331,"y":342.633121073246},{"x":120.51901644468307,"y":354.2677878141403}], [{"x":278.20400857925415,"y":273.83238887786865},{"x":318.7582621574402,"y":273.39686036109924},{"x":295.54277753829956,"y":300.43398427963257},{"x":279.5109224319458,"y":311.497838973999},{"x":317.0187101364136,"y":313.05305886268616}], [{"x":489.58824399113655,"y":224.56882098317146},{"x":534.514480471611,"y":223.28146517276764},{"x":507.2082565128803,"y":250.17186474800113},{"x":493.0139665305615,"y":271.0716395378113},{"x":530.7517347931862,"y":270.4143014550209}], [{"x":606.397784024477,"y":105.43332603573799},{"x":645.2468676567078,"y":111.50095802545547},{"x":625.1735819578171,"y":133.40740483999252},{"x":598.8033188581467,"y":141.26283955574036},{"x":637.2144679427147,"y":147.32198816537857}]]
\ No newline at end of file
[[{"x":117.85171800851822,"y":58.91067159175873},{"x":157.70139408111572,"y":64.48519098758698},{"x":142.3133249282837,"y":88.54254376888275},{"x":110.1661057472229,"y":99.86233913898468},{"x":149.25052666664124,"y":106.37608766555786}],[{"x":82.91613873839378,"y":292.6100924015045},{"x":133.91112035512924,"y":304.814593821764},{"x":104.43486452102661,"y":330.3951778411865},{"x":72.6984107196331,"y":342.63312900066376},{"x":120.51901644468307,"y":354.2677878141403}],[{"x":278.20400857925415,"y":273.8323953151703},{"x":318.7582621574402,"y":273.39686357975006},{"x":295.5427807569504,"y":300.43398427963257},{"x":279.5109224319458,"y":311.497838973999},{"x":317.0187101364136,"y":313.05305886268616}],[{"x":260.46802616119385,"y":82.86598253250122},{"x":305.55760955810547,"y":83.54110813140869},{"x":281.43571567535395,"y":113.98349380493164},{"x":257.0603914260864,"y":125.50608730316162},{"x":306.01917552948,"y":127.2098445892334}],[{"x":489.5882513225079,"y":224.56882098317146},{"x":534.514480471611,"y":223.28146517276764},{"x":507.20826017856604,"y":250.1718647480011},{"x":493.0139665305615,"y":271.0716395378113},{"x":530.7517347931862,"y":270.4143014550209}],[{"x":606.397784024477,"y":105.43332290649414},{"x":645.2468676567078,"y":111.50095802545547},{"x":625.1735819578171,"y":133.40740483999252},{"x":598.8033188581467,"y":141.26284581422806},{"x":637.2144679427147,"y":147.32198816537857}]]
\ No newline at end of file
import { IRect } from '../src';
import { FaceDetection } from '../src/classes/FaceDetection';
import { expectRectClose, sortBoxes, sortFaceDetections } from './utils';
export function expectDetectionResults(
results: FaceDetection[],
allExpectedFaceDetections: IRect[],
expectedScores: number[],
maxBoxDelta: number
) {
const expectedDetections = expectedScores
.map((score, i) => ({
score,
...allExpectedFaceDetections[i]
}))
.filter(expected => expected.score !== -1)
const sortedResults = sortFaceDetections(results)
expectedDetections.forEach((expectedDetection, i) => {
const det = sortedResults[i]
expect(det.score).toBeCloseTo(expectedDetection.score, 2)
expectRectClose(det.box, expectedDetection, maxBoxDelta)
})
}
\ No newline at end of file
import { FaceDetectionWithLandmarks } from '../src/classes/FaceDetectionWithLandmarks';
import { FaceLandmarks } from '../src/classes/FaceLandmarks';
import { FaceLandmarks68 } from '../src/classes/FaceLandmarks68';
import { ExpectedFaceDetectionWithLandmarks, expectPointClose, expectRectClose, sortByFaceDetection } from './utils';
// Tolerances used when comparing detection results against ground truth.
export type BoxAndLandmarksDeltas = {
// max per-coordinate deviation of a bounding box
maxBoxDelta: number
// max per-coordinate deviation of a landmark point
maxLandmarksDelta: number
}
/**
 * Asserts that detections with landmarks match the expected boxes, scores and
 * landmark points. Expected entries whose score is -1 are skipped. Results are
 * sorted via sortByFaceDetection so the comparison is order-independent.
 *
 * @param results Detections with landmarks produced by the network under test.
 * @param allExpectedFullFaceDescriptions Ground truth, index-aligned with expectedScores.
 * @param expectedScores Expected confidence per face; -1 marks a skipped entry.
 * @param deltas Box and landmark tolerances.
 */
export function expectFaceDetectionsWithLandmarks<TFaceLandmarks extends FaceLandmarks = FaceLandmarks68>(
  results: FaceDetectionWithLandmarks<TFaceLandmarks>[],
  allExpectedFullFaceDescriptions: ExpectedFaceDetectionWithLandmarks[],
  expectedScores: number[],
  deltas: BoxAndLandmarksDeltas
) {
  // pair each score with its ground truth, dropping entries flagged -1
  const expectedFullFaceDescriptions = expectedScores
    .map((score, idx) => ({ score, ...allExpectedFullFaceDescriptions[idx] }))
    .filter(({ score }) => score !== -1)

  const sortedResults = sortByFaceDetection(results)

  for (let idx = 0; idx < expectedFullFaceDescriptions.length; idx++) {
    const expected = expectedFullFaceDescriptions[idx]
    const { detection, landmarks } = sortedResults[idx]
    expect(detection.score).toBeCloseTo(expected.score, 2)
    expectRectClose(detection.box, expected.detection, deltas.maxBoxDelta)
    landmarks.positions.forEach((pt, ptIdx) =>
      expectPointClose(pt, expected.landmarks[ptIdx], deltas.maxLandmarksDelta)
    )
  }
}
\ No newline at end of file
import { FullFaceDescription } from '../src/classes/FullFaceDescription';
import { euclideanDistance } from '../src/euclideanDistance';
import { BoxAndLandmarksDeltas } from './expectFaceDetectionsWithLandmarks';
import { ExpectedFullFaceDescription, expectPointClose, expectRectClose, sortByFaceDetection } from './utils';
// Box/landmark tolerances plus the max allowed euclidean distance between face descriptors.
export type FullFaceDescriptionDeltas = BoxAndLandmarksDeltas & {
maxDescriptorDelta: number
}
/**
 * Asserts that full face descriptions (box + landmarks + descriptor) match the
 * expected ground truth. Expected entries whose score is -1 are skipped.
 * Results are sorted via sortByFaceDetection so the comparison is
 * order-independent.
 *
 * @param results Full face descriptions produced by the pipeline under test.
 * @param allExpectedFullFaceDescriptions Ground truth, index-aligned with expectedScores.
 * @param expectedScores Expected confidence per face; -1 marks a skipped entry.
 * @param deltas Box, landmark and descriptor tolerances.
 */
export function expectFullFaceDescriptions(
  results: FullFaceDescription[],
  allExpectedFullFaceDescriptions: ExpectedFullFaceDescription[],
  expectedScores: number[],
  deltas: FullFaceDescriptionDeltas
) {
  // pair each score with its ground truth, dropping entries flagged -1
  const expectedFullFaceDescriptions = expectedScores
    .map((score, idx) => ({ score, ...allExpectedFullFaceDescriptions[idx] }))
    .filter(({ score }) => score !== -1)

  const sortedResults = sortByFaceDetection(results)

  for (let idx = 0; idx < expectedFullFaceDescriptions.length; idx++) {
    const expected = expectedFullFaceDescriptions[idx]
    const { detection, landmarks, descriptor } = sortedResults[idx]
    expect(detection.score).toBeCloseTo(expected.score, 2)
    expectRectClose(detection.box, expected.detection, deltas.maxBoxDelta)
    landmarks.positions.forEach((pt, ptIdx) =>
      expectPointClose(pt, expected.landmarks[ptIdx], deltas.maxLandmarksDelta)
    )
    // descriptors are compared by euclidean distance, not element-wise
    expect(euclideanDistance(descriptor, expected.descriptor)).toBeLessThan(deltas.maxDescriptorDelta)
  }
}
\ No newline at end of file
import { bufferToImage } from 'tfjs-image-recognition-base';
import {
assembleExpectedFullFaceDescriptions,
describeWithNets,
expectAllTensorsReleased,
ExpectedFullFaceDescription,
} from '../../utils';
import { expectAllFacesResults, expectedMtcnnBoxes } from './expectedResults';
// Integration tests for the allFacesMtcnn pipeline (MTCNN detection +
// landmarks + face descriptors) against the shared reference image.
describe('allFacesMtcnn', () => {
let imgEl: HTMLImageElement
let expectedFullFaceDescriptions: ExpectedFullFaceDescription[]
beforeAll(async () => {
// load the reference image and assemble ground truth from the
// MTCNN-specific boxes and landmark-position fixture
const img = await (await fetch('base/test/images/faces.jpg')).blob()
imgEl = await bufferToImage(img)
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedMtcnnBoxes, 'mtcnnFaceLandmarkPositions.json')
})
describeWithNets('computes full face descriptions', { withAllFacesMtcnn: true }, ({ allFacesMtcnn }) => {
it('minFaceSize = 20', async () => {
const forwardParams = {
minFaceSize: 20
}
const results = await allFacesMtcnn(imgEl, forwardParams)
// the reference image contains 6 faces
expect(results.length).toEqual(6)
const expectedScores = [1, 1, 1, 1, 0.99, 0.99]
const deltas = {
maxBoxDelta: 2,
maxLandmarksDelta: 1,
maxDescriptorDelta: 0.4
}
expectAllFacesResults(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
})
describeWithNets('no memory leaks', { withAllFacesMtcnn: true }, ({ allFacesMtcnn }) => {
it('single image element', async () => {
// the whole pipeline run must not leave any tensors allocated
await expectAllTensorsReleased(async () => {
await allFacesMtcnn(imgEl)
})
})
})
})
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { bufferToImage } from '../../../src';
import {
assembleExpectedFullFaceDescriptions,
describeWithNets,
expectAllTensorsReleased,
ExpectedFullFaceDescription,
} from '../../utils';
import { expectAllFacesResults, expectedSsdBoxes } from './expectedResults';
// Integration tests for the allFacesSsdMobilenetv1 pipeline (SSD Mobilenet v1
// detection + landmarks + face descriptors) against the shared reference image.
describe('allFacesSsdMobilenetv1', () => {
let imgEl: HTMLImageElement
let expectedFullFaceDescriptions: ExpectedFullFaceDescription[]
beforeAll(async () => {
// load the reference image and assemble ground truth from the SSD boxes
const img = await (await fetch('base/test/images/faces.jpg')).blob()
imgEl = await bufferToImage(img)
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedSsdBoxes)
})
describeWithNets('computes full face descriptions', { withAllFacesSsdMobilenetv1: true }, ({ allFacesSsdMobilenetv1 }) => {
it('scores > 0.8', async () => {
// with a high score threshold only 4 of the 6 faces are detected;
// -1 entries mark faces expected to be filtered out
const results = await allFacesSsdMobilenetv1(imgEl, 0.8)
expect(results.length).toEqual(4)
const expectedScores = [-1, 0.81, 0.97, 0.88, 0.84, -1]
const deltas = {
maxBoxDelta: 5,
maxLandmarksDelta: 4,
maxDescriptorDelta: 0.01
}
expectAllFacesResults(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
it('scores > 0.5', async () => {
// with a lower threshold all 6 faces are detected
const results = await allFacesSsdMobilenetv1(imgEl, 0.5)
expect(results.length).toEqual(6)
const expectedScores = [0.54, 0.81, 0.97, 0.88, 0.84, 0.61]
const deltas = {
maxBoxDelta: 5,
maxLandmarksDelta: 4,
maxDescriptorDelta: 0.01
}
expectAllFacesResults(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
})
describeWithNets('no memory leaks', { withAllFacesSsdMobilenetv1: true }, ({ allFacesSsdMobilenetv1 }) => {
it('single image element', async () => {
await expectAllTensorsReleased(async () => {
await allFacesSsdMobilenetv1(imgEl)
})
})
it('single tf.Tensor3D', async () => {
// tensor created outside the leak check; disposed explicitly below
const tensor = tf.fromPixels(imgEl)
await expectAllTensorsReleased(async () => {
await allFacesSsdMobilenetv1(tensor)
})
tensor.dispose()
})
it('single batch size 1 tf.Tensor4Ds', async () => {
const tensor = tf.tidy(() => tf.fromPixels(imgEl).expandDims()) as tf.Tensor4D
await expectAllTensorsReleased(async () => {
await allFacesSsdMobilenetv1(tensor)
})
tensor.dispose()
})
})
})
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { TinyYolov2Types } from 'tfjs-tiny-yolov2';
import { bufferToImage } from '../../../src';
import {
assembleExpectedFullFaceDescriptions,
describeWithNets,
expectAllTensorsReleased,
ExpectedFullFaceDescription,
} from '../../utils';
import { expectAllFacesResults, expectedTinyYolov2Boxes } from './expectedResults';
// Integration tests for the allFacesTinyYolov2 pipeline (Tiny YOLOv2 detection
// + landmarks + face descriptors) against the shared reference image.
describe('allFacesTinyYolov2', () => {
let imgEl: HTMLImageElement
let expectedFullFaceDescriptions: ExpectedFullFaceDescription[]
beforeAll(async () => {
// load the reference image and assemble ground truth from the Tiny YOLOv2 boxes
const img = await (await fetch('base/test/images/faces.jpg')).blob()
imgEl = await bufferToImage(img)
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyYolov2Boxes)
})
describeWithNets('computes full face descriptions', { withAllFacesTinyYolov2: true }, ({ allFacesTinyYolov2 }) => {
it('TinyYolov2Types.SizeType.LG', async () => {
// large input size: tighter tolerances than the MD run below
const results = await allFacesTinyYolov2(imgEl, { inputSize: TinyYolov2Types.SizeType.LG })
expect(results.length).toEqual(6)
const expectedScores = [0.85, 0.88, 0.9, 0.86, 0.9, 0.85]
const deltas = {
maxBoxDelta: 25,
maxLandmarksDelta: 10,
maxDescriptorDelta: 0.24
}
expectAllFacesResults(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
it('TinyYolov2Types.SizeType.MD', async () => {
// medium input size: coarser detection, hence larger deltas
const results = await allFacesTinyYolov2(imgEl, { inputSize: TinyYolov2Types.SizeType.MD })
expect(results.length).toEqual(6)
const expectedScores = [0.85, 0.8, 0.8, 0.85, 0.85, 0.82]
const deltas = {
maxBoxDelta: 34,
maxLandmarksDelta: 18,
maxDescriptorDelta: 0.2
}
expectAllFacesResults(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
})
describeWithNets('no memory leaks', { withAllFacesTinyYolov2: true }, ({ allFacesTinyYolov2 }) => {
it('single image element', async () => {
await expectAllTensorsReleased(async () => {
await allFacesTinyYolov2(imgEl)
})
})
it('single tf.Tensor3D', async () => {
// tensor created outside the leak check; disposed explicitly below
const tensor = tf.fromPixels(imgEl)
await expectAllTensorsReleased(async () => {
await allFacesTinyYolov2(tensor)
})
tensor.dispose()
})
it('single batch size 1 tf.Tensor4Ds', async () => {
const tensor = tf.tidy(() => tf.fromPixels(imgEl).expandDims()) as tf.Tensor4D
await expectAllTensorsReleased(async () => {
await allFacesTinyYolov2(tensor)
})
tensor.dispose()
})
})
})
\ No newline at end of file
import * as faceapi from '../../../src';
import { FullFaceDescription } from '../../../src/classes/FullFaceDescription';
import { euclideanDistance } from '../../../src/euclideanDistance';
import {
ExpectedFullFaceDescription,
expectMaxDelta,
expectPointClose,
expectRectClose,
sortBoxes,
sortByDistanceToOrigin,
sortFullFaceDescriptions,
} from '../../utils';
import { IPoint, IRect } from '../../../src';
import { FaceDetection } from '../../../src/classes/FaceDetection';
import { sortFaceDetections } from '../../utils';
// Tolerances used when comparing detection results against ground truth.
export type BoxAndLandmarksDeltas = {
// max per-coordinate deviation of a bounding box
maxBoxDelta: number
// max per-coordinate deviation of a landmark point
maxLandmarksDelta: number
}
// Adds the max allowed euclidean distance between face descriptors.
export type AllFacesDeltas = BoxAndLandmarksDeltas & {
maxDescriptorDelta: number
}
// Ground-truth bounding boxes for the 6 faces in test/images/faces.jpg,
// one table per detector. Each table is passed through sortBoxes so that
// comparisons against (also sorted) results are order-independent.
export const expectedSsdBoxes = sortBoxes([
{ x: 48, y: 253, width: 104, height: 129 },
{ x: 260, y: 227, width: 76, height: 117 },
{ x: 466, y: 165, width: 88, height: 130 },
{ x: 234, y: 36, width: 84, height: 119 },
{ x: 577, y: 65, width: 84, height: 105 },
{ x: 84, y: 14, width: 79, height: 132 }
])
export const expectedTinyYolov2Boxes = sortBoxes([
{ x: 52, y: 263, width: 106, height: 102 },
{ x: 455, y: 191, width: 103, height: 97 },
{ x: 236, y: 57, width: 90, height: 85 },
{ x: 257, y: 243, width: 86, height: 95 },
{ x: 578, y: 76, width: 86, height: 91 },
{ x: 87, y: 30, width: 92, height: 93 }
])
export const expectedTinyYolov2SeparableConvBoxes = sortBoxes([
{ x: 42, y: 257, width: 111, height: 121 },
{ x: 454, y: 175, width: 104, height: 121 },
{ x: 230, y: 45, width: 94, height: 104 },
{ x: 574, y: 62, width: 88, height: 113 },
{ x: 260, y: 233, width: 82, height: 104 },
{ x: 83, y: 24, width: 85, height: 111 }
])
export const expectedMtcnnBoxes = sortBoxes([
{ x: 70, y: 21, width: 112, height: 112 },
{ x: 36, y: 250, width: 133, height: 132 },
{ x: 221, y: 43, width: 112, height: 111 },
{ x: 247, y: 231, width: 106, height: 107 },
{ x: 566, y: 67, width: 104, height: 104 },
{ x: 451, y: 176, width: 122, height: 122 }
])
/**
 * Asserts MTCNN results (detection + 5 point landmarks) against the expected
 * boxes and landmark positions. Results are sorted by box distance to the
 * origin so the comparison does not depend on detector output order.
 *
 * NOTE(review): this helper uses the getBox()/getPositions()/getScore()
 * accessor style while other helpers in this file set use .box/.positions —
 * confirm which API version this file targets.
 *
 * @param results Per-face detection and 5 point landmarks pairs.
 * @param expectedMtcnnFaceLandmarks Expected landmark points per face, aligned with expectedMtcnnBoxes.
 * @param deltas Box and landmark tolerances.
 */
export function expectMtcnnResults(
  results: { faceDetection: faceapi.FaceDetection, faceLandmarks: faceapi.FaceLandmarks5 }[],
  expectedMtcnnFaceLandmarks: IPoint[][],
  deltas: BoxAndLandmarksDeltas
) {
  const sorted = sortByDistanceToOrigin(results, res => res.faceDetection.box)
  for (const [i, { faceDetection, faceLandmarks }] of sorted.entries()) {
    // sanity-check the runtime types of the returned objects
    expect(faceDetection instanceof faceapi.FaceDetection).toBe(true)
    expect(faceLandmarks instanceof faceapi.FaceLandmarks5).toBe(true)
    expectRectClose(faceDetection.getBox(), expectedMtcnnBoxes[i], deltas.maxBoxDelta)
    const points = faceLandmarks.getPositions()
    points.forEach((pt, j) => expectPointClose(pt, expectedMtcnnFaceLandmarks[i][j], deltas.maxLandmarksDelta))
    // every face in the reference image is expected at near-perfect confidence
    expectMaxDelta(faceDetection.getScore(), 0.99, 0.01)
  }
}
export function expectDetectionResults(results: FaceDetection[], allExpectedFaceDetections: IRect[], expectedScores: number[], maxBoxDelta: number) {
const expectedDetections = expectedScores
.map((score, i) => ({
score,
...allExpectedFaceDetections[i]
}))
.filter(expected => expected.score !== -1)
const sortedResults = sortFaceDetections(results)
expectedDetections.forEach((expectedDetection, i) => {
const det = sortedResults[i]
expect(det.score).toBeCloseTo(expectedDetection.score, 2)
expectRectClose(det.box, expectedDetection, maxBoxDelta)
})
}
/**
 * Asserts that full face descriptions (box + landmarks + descriptor) produced
 * by an allFaces* pipeline match the expected ground truth. Expected entries
 * whose score is -1 are skipped. Results are sorted via
 * sortFullFaceDescriptions so the comparison is order-independent.
 *
 * NOTE(review): uses landmarks.getPositions() while sibling helpers in this
 * change use landmarks.positions — confirm which API version this file targets.
 */
export function expectAllFacesResults(results: FullFaceDescription[], allExpectedFullFaceDescriptions: ExpectedFullFaceDescription[], expectedScores: number[], deltas: AllFacesDeltas) {
  // collect expected entries, skipping those flagged -1
  const expectedFullFaceDescriptions: Array<ExpectedFullFaceDescription & { score: number }> = []
  expectedScores.forEach((score, idx) => {
    if (score === -1) {
      return
    }
    expectedFullFaceDescriptions.push({ score, ...allExpectedFullFaceDescriptions[idx] })
  })

  const sortedResults = sortFullFaceDescriptions(results)

  for (const [idx, expected] of expectedFullFaceDescriptions.entries()) {
    const { detection, landmarks, descriptor } = sortedResults[idx]
    expect(detection.score).toBeCloseTo(expected.score, 2)
    expectRectClose(detection.box, expected.detection, deltas.maxBoxDelta)
    landmarks.getPositions().forEach((pt, ptIdx) => expectPointClose(pt, expected.landmarks[ptIdx], deltas.maxLandmarksDelta))
    // descriptors are compared by euclidean distance, not element-wise
    expect(euclideanDistance(descriptor, expected.descriptor)).toBeLessThan(deltas.maxDescriptorDelta)
  }
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { bufferToImage, Dimensions, isTensor3D, NetInput, Point, TMediaElement, toNetInput } from '../../../src';
import { fetchImage, fetchJson, IDimensions, isTensor3D, NetInput, Point, TMediaElement, toNetInput } from '../../../src';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { createFaceLandmarkNet } from '../../../src/faceLandmarkNet';
import { FaceLandmark68Net } from '../../../src/faceLandmarkNet/FaceLandmark68Net';
import { describeWithNets, expectAllTensorsReleased, expectMaxDelta, expectPointClose } from '../../utils';
function getInputDims (input: tf.Tensor | TMediaElement): Dimensions {
function getInputDims (input: tf.Tensor | TMediaElement): IDimensions {
if (input instanceof tf.Tensor) {
const [height, width] = input.shape.slice(isTensor3D(input) ? 0 : 1)
return { width, height }
......@@ -24,15 +24,12 @@ describe('faceLandmark68Net', () => {
let faceLandmarkPositionsRect: Point[]
beforeAll(async () => {
const img1 = await (await fetch('base/test/images/face1.png')).blob()
imgEl1 = await bufferToImage(img1)
const img2 = await (await fetch('base/test/images/face2.png')).blob()
imgEl2 = await bufferToImage(img2)
const imgRect = await (await fetch('base/test/images/face_rectangular.png')).blob()
imgElRect = await bufferToImage(imgRect)
faceLandmarkPositions1 = await (await fetch('base/test/data/faceLandmarkPositions1.json')).json()
faceLandmarkPositions2 = await (await fetch('base/test/data/faceLandmarkPositions2.json')).json()
faceLandmarkPositionsRect = await (await fetch('base/test/data/faceLandmarkPositionsRect.json')).json()
imgEl1 = await fetchImage('base/test/images/face1.png')
imgEl2 = await fetchImage('base/test/images/face2.png')
imgElRect = await fetchImage('base/test/images/face_rectangular.png')
faceLandmarkPositions1 = await fetchJson<Point[]>('base/test/data/faceLandmarkPositions1.json')
faceLandmarkPositions2 = await fetchJson<Point[]>('base/test/data/faceLandmarkPositions2.json')
faceLandmarkPositionsRect = await fetchJson<Point[]>('base/test/data/faceLandmarkPositionsRect.json')
})
describeWithNets('uncompressed weights', { withFaceLandmark68Net: { quantized: false } }, ({ faceLandmark68Net }) => {
......@@ -41,11 +38,11 @@ describe('faceLandmark68Net', () => {
const { width, height } = imgEl1
const result = await faceLandmark68Net.detectLandmarks(imgEl1) as FaceLandmarks68
expect(result.getImageWidth()).toEqual(width)
expect(result.getImageHeight()).toEqual(height)
expect(result.getShift().x).toEqual(0)
expect(result.getShift().y).toEqual(0)
result.getPositions().forEach((pt, i) => {
expect(result.imageWidth).toEqual(width)
expect(result.imageHeight).toEqual(height)
expect(result.shift.x).toEqual(0)
expect(result.shift.y).toEqual(0)
result.positions.forEach((pt, i) => {
const { x, y } = faceLandmarkPositions1[i]
expectPointClose(pt, { x, y }, 1)
})
......@@ -55,11 +52,11 @@ describe('faceLandmark68Net', () => {
const { width, height } = imgElRect
const result = await faceLandmark68Net.detectLandmarks(imgElRect) as FaceLandmarks68
expect(result.getImageWidth()).toEqual(width)
expect(result.getImageHeight()).toEqual(height)
expect(result.getShift().x).toEqual(0)
expect(result.getShift().y).toEqual(0)
result.getPositions().forEach((pt, i) => {
expect(result.imageWidth).toEqual(width)
expect(result.imageHeight).toEqual(height)
expect(result.shift.x).toEqual(0)
expect(result.shift.y).toEqual(0)
result.positions.forEach((pt, i) => {
const { x, y } = faceLandmarkPositionsRect[i]
expectPointClose(pt, { x, y }, 2)
})
......@@ -73,11 +70,11 @@ describe('faceLandmark68Net', () => {
const { width, height } = imgEl1
const result = await faceLandmark68Net.detectLandmarks(imgEl1) as FaceLandmarks68
expect(result.getImageWidth()).toEqual(width)
expect(result.getImageHeight()).toEqual(height)
expect(result.getShift().x).toEqual(0)
expect(result.getShift().y).toEqual(0)
result.getPositions().forEach((pt, i) => {
expect(result.imageWidth).toEqual(width)
expect(result.imageHeight).toEqual(height)
expect(result.shift.x).toEqual(0)
expect(result.shift.y).toEqual(0)
result.positions.forEach((pt, i) => {
const { x, y } = faceLandmarkPositions1[i]
expectPointClose(pt, { x, y }, 2)
})
......@@ -87,11 +84,11 @@ describe('faceLandmark68Net', () => {
const { width, height } = imgElRect
const result = await faceLandmark68Net.detectLandmarks(imgElRect) as FaceLandmarks68
expect(result.getImageWidth()).toEqual(width)
expect(result.getImageHeight()).toEqual(height)
expect(result.getShift().x).toEqual(0)
expect(result.getShift().y).toEqual(0)
result.getPositions().forEach((pt, i) => {
expect(result.imageWidth).toEqual(width)
expect(result.imageHeight).toEqual(height)
expect(result.shift.x).toEqual(0)
expect(result.shift.y).toEqual(0)
result.positions.forEach((pt, i) => {
const { x, y } = faceLandmarkPositionsRect[i]
expectPointClose(pt, { x, y }, 6)
})
......@@ -115,11 +112,11 @@ describe('faceLandmark68Net', () => {
expect(results.length).toEqual(3)
results.forEach((result, batchIdx) => {
const { width, height } = getInputDims(inputs[batchIdx])
expect(result.getImageWidth()).toEqual(width)
expect(result.getImageHeight()).toEqual(height)
expect(result.getShift().x).toEqual(0)
expect(result.getShift().y).toEqual(0)
result.getPositions().forEach(({ x, y }, i) => {
expect(result.imageWidth).toEqual(width)
expect(result.imageHeight).toEqual(height)
expect(result.shift.x).toEqual(0)
expect(result.shift.y).toEqual(0)
result.positions.forEach(({ x, y }, i) => {
expectMaxDelta(x, faceLandmarkPositions[batchIdx][i].x, 2)
expectMaxDelta(y, faceLandmarkPositions[batchIdx][i].y, 2)
})
......@@ -140,11 +137,11 @@ describe('faceLandmark68Net', () => {
expect(results.length).toEqual(3)
results.forEach((result, batchIdx) => {
const { width, height } = getInputDims(inputs[batchIdx])
expect(result.getImageWidth()).toEqual(width)
expect(result.getImageHeight()).toEqual(height)
expect(result.getShift().x).toEqual(0)
expect(result.getShift().y).toEqual(0)
result.getPositions().forEach(({ x, y }, i) => {
expect(result.imageWidth).toEqual(width)
expect(result.imageHeight).toEqual(height)
expect(result.shift.x).toEqual(0)
expect(result.shift.y).toEqual(0)
result.positions.forEach(({ x, y }, i) => {
expectMaxDelta(x, faceLandmarkPositions[batchIdx][i].x, 3)
expectMaxDelta(y, faceLandmarkPositions[batchIdx][i].y, 3)
})
......@@ -165,11 +162,11 @@ describe('faceLandmark68Net', () => {
expect(results.length).toEqual(3)
results.forEach((result, batchIdx) => {
const { width, height } = getInputDims(inputs[batchIdx])
expect(result.getImageWidth()).toEqual(width)
expect(result.getImageHeight()).toEqual(height)
expect(result.getShift().x).toEqual(0)
expect(result.getShift().y).toEqual(0)
result.getPositions().forEach(({ x, y }, i) => {
expect(result.imageWidth).toEqual(width)
expect(result.imageHeight).toEqual(height)
expect(result.shift.x).toEqual(0)
expect(result.shift.y).toEqual(0)
result.positions.forEach(({ x, y }, i) => {
expectMaxDelta(x, faceLandmarkPositions[batchIdx][i].x, 3)
expectMaxDelta(y, faceLandmarkPositions[batchIdx][i].y, 3)
})
......
import * as tf from '@tensorflow/tfjs-core';
import { bufferToImage, Dimensions, isTensor3D, NetInput, Point, TMediaElement, toNetInput } from '../../../src';
import { fetchImage, fetchJson, IDimensions, isTensor3D, NetInput, Point, TMediaElement, toNetInput } from '../../../src';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { createFaceLandmarkNet } from '../../../src/faceLandmarkNet';
import { FaceLandmark68TinyNet } from '../../../src/faceLandmarkNet/FaceLandmark68TinyNet';
import { describeWithNets, expectAllTensorsReleased, expectMaxDelta, expectPointClose } from '../../utils';
import { describeWithNets, expectAllTensorsReleased, expectPointClose } from '../../utils';
function getInputDims (input: tf.Tensor | TMediaElement): Dimensions {
function getInputDims (input: tf.Tensor | TMediaElement): IDimensions {
if (input instanceof tf.Tensor) {
const [height, width] = input.shape.slice(isTensor3D(input) ? 0 : 1)
return { width, height }
......@@ -24,15 +24,12 @@ describe('faceLandmark68TinyNet', () => {
let faceLandmarkPositionsRect: Point[]
beforeAll(async () => {
const img1 = await (await fetch('base/test/images/face1.png')).blob()
imgEl1 = await bufferToImage(img1)
const img2 = await (await fetch('base/test/images/face2.png')).blob()
imgEl2 = await bufferToImage(img2)
const imgRect = await (await fetch('base/test/images/face_rectangular.png')).blob()
imgElRect = await bufferToImage(imgRect)
faceLandmarkPositions1 = await (await fetch('base/test/data/faceLandmarkPositions1Tiny.json')).json()
faceLandmarkPositions2 = await (await fetch('base/test/data/faceLandmarkPositions2Tiny.json')).json()
faceLandmarkPositionsRect = await (await fetch('base/test/data/faceLandmarkPositionsRectTiny.json')).json()
imgEl1 = await fetchImage('base/test/images/face1.png')
imgEl2 = await fetchImage('base/test/images/face2.png')
imgElRect = await fetchImage('base/test/images/face_rectangular.png')
faceLandmarkPositions1 = await fetchJson<Point[]>('base/test/data/faceLandmarkPositions1Tiny.json')
faceLandmarkPositions2 = await fetchJson<Point[]>('base/test/data/faceLandmarkPositions2Tiny.json')
faceLandmarkPositionsRect = await fetchJson<Point[]>('base/test/data/faceLandmarkPositionsRectTiny.json')
})
describeWithNets('uncompressed weights', { withFaceLandmark68TinyNet: { quantized: false } }, ({ faceLandmark68TinyNet }) => {
......@@ -41,11 +38,11 @@ describe('faceLandmark68TinyNet', () => {
const { width, height } = imgEl1
const result = await faceLandmark68TinyNet.detectLandmarks(imgEl1) as FaceLandmarks68
expect(result.getImageWidth()).toEqual(width)
expect(result.getImageHeight()).toEqual(height)
expect(result.getShift().x).toEqual(0)
expect(result.getShift().y).toEqual(0)
result.getPositions().forEach((pt, i) => {
expect(result.imageWidth).toEqual(width)
expect(result.imageHeight).toEqual(height)
expect(result.shift.x).toEqual(0)
expect(result.shift.y).toEqual(0)
result.positions.forEach((pt, i) => {
const { x, y } = faceLandmarkPositions1[i]
expectPointClose(pt, { x, y }, 5)
})
......@@ -55,11 +52,11 @@ describe('faceLandmark68TinyNet', () => {
const { width, height } = imgElRect
const result = await faceLandmark68TinyNet.detectLandmarks(imgElRect) as FaceLandmarks68
expect(result.getImageWidth()).toEqual(width)
expect(result.getImageHeight()).toEqual(height)
expect(result.getShift().x).toEqual(0)
expect(result.getShift().y).toEqual(0)
result.getPositions().forEach((pt, i) => {
expect(result.imageWidth).toEqual(width)
expect(result.imageHeight).toEqual(height)
expect(result.shift.x).toEqual(0)
expect(result.shift.y).toEqual(0)
result.positions.forEach((pt, i) => {
const { x, y } = faceLandmarkPositionsRect[i]
expectPointClose(pt, { x, y }, 5)
})
......@@ -73,11 +70,11 @@ describe('faceLandmark68TinyNet', () => {
const { width, height } = imgEl1
const result = await faceLandmark68TinyNet.detectLandmarks(imgEl1) as FaceLandmarks68
expect(result.getImageWidth()).toEqual(width)
expect(result.getImageHeight()).toEqual(height)
expect(result.getShift().x).toEqual(0)
expect(result.getShift().y).toEqual(0)
result.getPositions().forEach((pt, i) => {
expect(result.imageWidth).toEqual(width)
expect(result.imageHeight).toEqual(height)
expect(result.shift.x).toEqual(0)
expect(result.shift.y).toEqual(0)
result.positions.forEach((pt, i) => {
const { x, y } = faceLandmarkPositions1[i]
expectPointClose(pt, { x, y }, 5)
})
......@@ -87,11 +84,11 @@ describe('faceLandmark68TinyNet', () => {
const { width, height } = imgElRect
const result = await faceLandmark68TinyNet.detectLandmarks(imgElRect) as FaceLandmarks68
expect(result.getImageWidth()).toEqual(width)
expect(result.getImageHeight()).toEqual(height)
expect(result.getShift().x).toEqual(0)
expect(result.getShift().y).toEqual(0)
result.getPositions().forEach((pt, i) => {
expect(result.imageWidth).toEqual(width)
expect(result.imageHeight).toEqual(height)
expect(result.shift.x).toEqual(0)
expect(result.shift.y).toEqual(0)
result.positions.forEach((pt, i) => {
const { x, y } = faceLandmarkPositionsRect[i]
expectPointClose(pt, { x, y }, 5)
})
......@@ -115,11 +112,11 @@ describe('faceLandmark68TinyNet', () => {
expect(results.length).toEqual(3)
results.forEach((result, batchIdx) => {
const { width, height } = getInputDims(inputs[batchIdx])
expect(result.getImageWidth()).toEqual(width)
expect(result.getImageHeight()).toEqual(height)
expect(result.getShift().x).toEqual(0)
expect(result.getShift().y).toEqual(0)
result.getPositions().forEach((pt, i) => {
expect(result.imageWidth).toEqual(width)
expect(result.imageHeight).toEqual(height)
expect(result.shift.x).toEqual(0)
expect(result.shift.y).toEqual(0)
result.positions.forEach((pt, i) => {
const { x, y } = faceLandmarkPositions[batchIdx][i]
expectPointClose(pt, { x, y }, 5)
})
......@@ -140,11 +137,11 @@ describe('faceLandmark68TinyNet', () => {
expect(results.length).toEqual(3)
results.forEach((result, batchIdx) => {
const { width, height } = getInputDims(inputs[batchIdx])
expect(result.getImageWidth()).toEqual(width)
expect(result.getImageHeight()).toEqual(height)
expect(result.getShift().x).toEqual(0)
expect(result.getShift().y).toEqual(0)
result.getPositions().forEach((pt, i) => {
expect(result.imageWidth).toEqual(width)
expect(result.imageHeight).toEqual(height)
expect(result.shift.x).toEqual(0)
expect(result.shift.y).toEqual(0)
result.positions.forEach((pt, i) => {
const { x, y } = faceLandmarkPositions[batchIdx][i]
expectPointClose(pt, { x, y }, 3)
})
......@@ -165,11 +162,11 @@ describe('faceLandmark68TinyNet', () => {
expect(results.length).toEqual(3)
results.forEach((result, batchIdx) => {
const { width, height } = getInputDims(inputs[batchIdx])
expect(result.getImageWidth()).toEqual(width)
expect(result.getImageHeight()).toEqual(height)
expect(result.getShift().x).toEqual(0)
expect(result.getShift().y).toEqual(0)
result.getPositions().forEach((pt, i) => {
expect(result.imageWidth).toEqual(width)
expect(result.imageHeight).toEqual(height)
expect(result.shift.x).toEqual(0)
expect(result.shift.y).toEqual(0)
result.positions.forEach((pt, i) => {
const { x, y } = faceLandmarkPositions[batchIdx][i]
expectPointClose(pt, { x, y }, 3)
})
......
import * as tf from '@tensorflow/tfjs-core';
import { bufferToImage, FaceRecognitionNet, NetInput, toNetInput } from '../../../src';
import { FaceRecognitionNet, fetchImage, fetchJson, NetInput, toNetInput } from '../../../src';
import { euclideanDistance } from '../../../src/euclideanDistance';
import { createFaceRecognitionNet } from '../../../src/faceRecognitionNet';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
......@@ -15,15 +15,12 @@ describe('faceRecognitionNet', () => {
let faceDescriptorRect: number[]
beforeAll(async () => {
  // Load test fixtures once for all specs. The stale pre-refactor
  // fetch/bufferToImage duplicates are removed: they redid the same network
  // work and were immediately overwritten by the fetchImage/fetchJson calls.
  imgEl1 = await fetchImage('base/test/images/face1.png')
  imgEl2 = await fetchImage('base/test/images/face2.png')
  imgElRect = await fetchImage('base/test/images/face_rectangular.png')
  faceDescriptor1 = await fetchJson<number[]>('base/test/data/faceDescriptor1.json')
  faceDescriptor2 = await fetchJson<number[]>('base/test/data/faceDescriptor2.json')
  faceDescriptorRect = await fetchJson<number[]>('base/test/data/faceDescriptorRect.json')
})
describeWithNets('uncompressed weights', { withFaceRecognitionNet: { quantized: false } }, ({ faceRecognitionNet }) => {
......
import { IPoint, IRect } from '../../../src';
import { FaceDetectionWithLandmarks } from '../../../src/classes/FaceDetectionWithLandmarks';
import { FaceLandmarks5 } from '../../../src/classes/FaceLandmarks5';
import { BoxAndLandmarksDeltas, expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { sortBoxes, sortByDistanceToOrigin } from '../../utils';
// Ground-truth bounding boxes for the six faces in base/test/images/faces.jpg,
// as produced by MTCNN. Sorted with sortBoxes so they line up index-by-index
// with detection results sorted the same way.
export const expectedMtcnnBoxes: IRect[] = sortBoxes([
  { x: 70, y: 21, width: 112, height: 112 },
  { x: 36, y: 250, width: 133, height: 132 },
  { x: 221, y: 43, width: 112, height: 111 },
  { x: 247, y: 231, width: 106, height: 107 },
  { x: 566, y: 67, width: 104, height: 104 },
  { x: 451, y: 176, width: 122, height: 122 }
])
/**
 * Asserts that MTCNN detection results match the expected boxes and landmark
 * positions for the shared test image, within the given box/landmark deltas.
 */
export function expectMtcnnResults(
  results: FaceDetectionWithLandmarks<FaceLandmarks5>[],
  expectedMtcnnFaceLandmarks: IPoint[][],
  deltas: BoxAndLandmarksDeltas
) {
  // Sort the expected landmark sets the same way expectedMtcnnBoxes is sorted,
  // so the i-th landmarks entry corresponds to the i-th box.
  const sortedLandmarks = sortByDistanceToOrigin(expectedMtcnnFaceLandmarks, landmarks => landmarks[0])
  const expectedResults = expectedMtcnnBoxes.map((detection, i) => ({
    detection,
    landmarks: sortedLandmarks[i]
  }))
  // one expected score of 1.0 per detected face
  const expectedScores = results.map(() => 1.0)
  return expectFaceDetectionsWithLandmarks<FaceLandmarks5>(results, expectedResults, expectedScores, deltas)
}
\ No newline at end of file
import * as faceapi from '../../../src';
import { describeWithNets, expectAllTensorsReleased, sortByDistanceToOrigin } from '../../utils';
import { expectMtcnnResults } from './expectedResults';
import { IPoint } from '../../../src';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { expectMtcnnResults } from './expectMtcnnResults';
import { IPoint, fetchImage, fetchJson } from '../../../src';
describe('mtcnn', () => {
......@@ -10,9 +9,8 @@ describe('mtcnn', () => {
let expectedMtcnnLandmarks: IPoint[][]
beforeAll(async () => {
const img = await (await fetch('base/test/images/faces.jpg')).blob()
imgEl = await faceapi.bufferToImage(img)
expectedMtcnnLandmarks = await (await fetch('base/test/data/mtcnnFaceLandmarkPositions.json')).json()
imgEl = await fetchImage('base/test/images/faces.jpg')
expectedMtcnnLandmarks = await fetchJson<IPoint[][]>('base/test/data/mtcnnFaceLandmarkPositions.json')
})
describeWithNets('uncompressed weights', { withMtcnn: { quantized: false } }, ({ mtcnn }) => {
......
import { IRect } from '../../../src';
import { sortBoxes } from '../../utils';
// Ground-truth bounding boxes for the six faces in base/test/images/faces.jpg,
// as produced by the SSD Mobilenet v1 face detector. Sorted with sortBoxes so
// they can be compared index-by-index with sorted detection results.
export const expectedSsdBoxes: IRect[] = sortBoxes([
  { x: 48, y: 253, width: 104, height: 129 },
  { x: 260, y: 227, width: 76, height: 117 },
  { x: 466, y: 165, width: 88, height: 130 },
  { x: 234, y: 36, width: 84, height: 119 },
  { x: 577, y: 65, width: 84, height: 105 },
  { x: 84, y: 14, width: 79, height: 132 }
])
\ No newline at end of file
import * as faceapi from '../../../src';
import { describeWithNets, expectAllTensorsReleased, expectRectClose } from '../../utils';
import { expectedSsdBoxes, expectDetectionResults } from './expectedResults';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { expectDetectionResults } from '../../expectDetectionResults';
import { fetchImage } from '../../../src';
import { expectedSsdBoxes } from './expectedBoxes';
describe('faceDetectionNet', () => {
let imgEl: HTMLImageElement
beforeAll(async () => {
const img = await (await fetch('base/test/images/faces.jpg')).blob()
imgEl = await faceapi.bufferToImage(img)
imgEl = await fetchImage('base/test/images/faces.jpg')
})
describeWithNets('uncompressed weights', { withFaceDetectionNet: { quantized: false } }, ({ faceDetectionNet }) => {
it('scores > 0.8', async () => {
const detections = await faceDetectionNet.locateFaces(imgEl) as faceapi.FaceDetection[]
const detections = await faceDetectionNet.locateFaces(imgEl, { minConfidence: 0.8 }) as faceapi.FaceDetection[]
expect(detections.length).toEqual(3)
......@@ -25,7 +26,7 @@ describe('faceDetectionNet', () => {
})
it('scores > 0.5', async () => {
const detections = await faceDetectionNet.locateFaces(imgEl, 0.5) as faceapi.FaceDetection[]
const detections = await faceDetectionNet.locateFaces(imgEl, { minConfidence: 0.5 }) as faceapi.FaceDetection[]
expect(detections.length).toEqual(6)
......@@ -40,7 +41,7 @@ describe('faceDetectionNet', () => {
describeWithNets('quantized weights', { withFaceDetectionNet: { quantized: true } }, ({ faceDetectionNet }) => {
it('scores > 0.8', async () => {
const detections = await faceDetectionNet.locateFaces(imgEl) as faceapi.FaceDetection[]
const detections = await faceDetectionNet.locateFaces(imgEl, { minConfidence: 0.8 }) as faceapi.FaceDetection[]
expect(detections.length).toEqual(4)
......@@ -51,7 +52,7 @@ describe('faceDetectionNet', () => {
})
it('scores > 0.5', async () => {
const detections = await faceDetectionNet.locateFaces(imgEl, 0.5) as faceapi.FaceDetection[]
const detections = await faceDetectionNet.locateFaces(imgEl, { minConfidence: 0.5 }) as faceapi.FaceDetection[]
expect(detections.length).toEqual(6)
......
import { IRect } from '../../../src';
import { sortBoxes } from '../../utils';
// Ground-truth bounding boxes for the six faces in base/test/images/faces.jpg,
// as produced by Tiny YOLO v2 with regular convolutions. Sorted with sortBoxes
// for index-wise comparison against sorted detection results.
export const expectedTinyYolov2Boxes: IRect[] = sortBoxes([
  { x: 52, y: 263, width: 106, height: 102 },
  { x: 455, y: 191, width: 103, height: 97 },
  { x: 236, y: 57, width: 90, height: 85 },
  { x: 257, y: 243, width: 86, height: 95 },
  { x: 578, y: 76, width: 86, height: 91 },
  { x: 87, y: 30, width: 92, height: 93 }
])
// Ground-truth boxes for the same test image when the Tiny YOLO v2 variant
// with depthwise-separable convolutions is used (slightly different geometry
// than the regular-convolution model above).
export const expectedTinyYolov2SeparableConvBoxes: IRect[] = sortBoxes([
  { x: 42, y: 257, width: 111, height: 121 },
  { x: 454, y: 175, width: 104, height: 121 },
  { x: 230, y: 45, width: 94, height: 104 },
  { x: 574, y: 62, width: 88, height: 113 },
  { x: 260, y: 233, width: 82, height: 104 },
  { x: 83, y: 24, width: 85, height: 111 }
])
\ No newline at end of file
import { TinyYolov2Types } from 'tfjs-tiny-yolov2';
import { TinyYolov2SizeType } from 'tfjs-tiny-yolov2';
import { bufferToImage, createTinyYolov2, TinyYolov2 } from '../../../src';
import { createTinyYolov2, fetchImage, TinyYolov2 } from '../../../src';
import { expectDetectionResults } from '../../expectDetectionResults';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { expectDetectionResults, expectedTinyYolov2Boxes } from './expectedResults';
import { expectedTinyYolov2Boxes } from './expectedBoxes';
describe('tinyYolov2', () => {
let imgEl: HTMLImageElement
beforeAll(async () => {
const img = await (await fetch('base/test/images/faces.jpg')).blob()
imgEl = await bufferToImage(img)
imgEl = await fetchImage('base/test/images/faces.jpg')
})
describeWithNets('quantized weights', { withTinyYolov2: { quantized: true, withSeparableConv: false } }, ({ tinyYolov2 }) => {
it('inputSize lg, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2Types.SizeType.LG })
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2SizeType.LG })
const expectedScores = [0.8, 0.85, 0.86, 0.83, 0.86, 0.81]
const maxBoxDelta = 4
......@@ -26,7 +26,7 @@ describe('tinyYolov2', () => {
})
it('inputSize md, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2Types.SizeType.MD })
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2SizeType.MD })
const expectedScores = [0.89, 0.81, 0.82, 0.72, 0.81, 0.86]
const maxBoxDelta = 27
......@@ -50,7 +50,7 @@ describe('tinyYolov2', () => {
describeWithNets('uncompressed weights', { withTinyYolov2: { quantized: false, withSeparableConv: false } }, ({ tinyYolov2 }) => {
it('inputSize lg, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2Types.SizeType.LG })
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2SizeType.LG })
const expectedScores = [0.81, 0.85, 0.86, 0.83, 0.86, 0.81]
const maxBoxDelta = 1
......@@ -60,7 +60,7 @@ describe('tinyYolov2', () => {
})
it('inputSize md, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2Types.SizeType.MD })
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2SizeType.MD })
const expectedScores = [0.89, 0.82, 0.82, 0.72, 0.81, 0.86]
const maxBoxDelta = 24
......
import { TinyYolov2Types } from 'tfjs-tiny-yolov2';
import { TinyYolov2SizeType } from 'tfjs-tiny-yolov2';
import { bufferToImage, createTinyYolov2, TinyYolov2 } from '../../../src';
import { describeWithNets, expectAllTensorsReleased, expectRectClose } from '../../utils';
import { expectedTinyYolov2SeparableConvBoxes, expectDetectionResults, expectedTinyYolov2Boxes } from './expectedResults';
import { createTinyYolov2, fetchImage, TinyYolov2 } from '../../../src';
import { expectDetectionResults } from '../../expectDetectionResults';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
import { expectedTinyYolov2Boxes } from './expectedBoxes';
describe('tinyYolov2, with separable convolutions', () => {
let imgEl: HTMLImageElement
beforeAll(async () => {
const img = await (await fetch('base/test/images/faces.jpg')).blob()
imgEl = await bufferToImage(img)
imgEl = await fetchImage('base/test/images/faces.jpg')
})
describeWithNets('quantized weights', { withTinyYolov2: { quantized: true } }, ({ tinyYolov2 }) => {
it('inputSize lg, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2Types.SizeType.LG })
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2SizeType.LG })
const expectedScores = [0.85, 0.88, 0.9, 0.85, 0.9, 0.85]
const maxBoxDelta = 25
......@@ -26,7 +26,7 @@ describe('tinyYolov2, with separable convolutions', () => {
})
it('inputSize md, finds all faces', async () => {
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2Types.SizeType.MD })
const detections = await tinyYolov2.locateFaces(imgEl, { inputSize: TinyYolov2SizeType.MD })
const expectedScores = [0.85, 0.8, 0.8, 0.85, 0.85, 0.83]
const maxBoxDelta = 34
......
import * as tf from '@tensorflow/tfjs-core';
import { FaceDetectionNet, FaceRecognitionNet, IPoint, IRect, Mtcnn, NeuralNetwork, TinyYolov2 } from '../src/';
import { allFacesMtcnnFactory, allFacesSsdMobilenetv1Factory, allFacesTinyYolov2Factory } from '../src/allFacesFactory';
import { FaceDetection } from '../src/classes/FaceDetection';
import { FaceDetectionWithLandmarks } from '../src/classes/FaceDetectionWithLandmarks';
import { FaceLandmarks } from '../src/classes/FaceLandmarks';
import { FullFaceDescription } from '../src/classes/FullFaceDescription';
import { FaceLandmark68Net } from '../src/faceLandmarkNet/FaceLandmark68Net';
import { FaceLandmark68TinyNet } from '../src/faceLandmarkNet/FaceLandmark68TinyNet';
import { allFacesMtcnnFunction, allFacesSsdMobilenetv1Function, allFacesTinyYolov2Function } from '../src/globalApi';
jasmine.DEFAULT_TIMEOUT_INTERVAL = 60000
......@@ -59,16 +57,19 @@ export function sortFaceDetections(boxes: FaceDetection[]) {
}
// Sorts landmark sets by the distance of their first point to the origin.
// The unreachable duplicate return (using the removed getPositions() API)
// left over from the refactor is dropped; `positions` is the current getter.
export function sortLandmarks(landmarks: FaceLandmarks[]) {
  return sortByDistanceToOrigin(landmarks, l => l.positions[0])
}
export function sortFullFaceDescriptions(descs: FullFaceDescription[]) {
export function sortByFaceDetection<T extends { detection: FaceDetection }>(descs: T[]) {
return sortByDistanceToOrigin(descs, d => d.detection.box)
}
export type ExpectedFullFaceDescription = {
export type ExpectedFaceDetectionWithLandmarks = {
detection: IRect
landmarks: IPoint[]
}
export type ExpectedFullFaceDescription = ExpectedFaceDetectionWithLandmarks & {
descriptor: Float32Array
}
......@@ -95,9 +96,6 @@ export type WithTinyYolov2Options = WithNetOptions & {
}
export type InjectNetArgs = {
allFacesSsdMobilenetv1: allFacesSsdMobilenetv1Function
allFacesTinyYolov2: allFacesTinyYolov2Function
allFacesMtcnn: allFacesMtcnnFunction
faceDetectionNet: FaceDetectionNet
faceLandmark68Net: FaceLandmark68Net
faceLandmark68TinyNet: FaceLandmark68TinyNet
......@@ -148,9 +146,6 @@ export function describeWithNets(
let faceRecognitionNet: FaceRecognitionNet = new FaceRecognitionNet()
let mtcnn: Mtcnn = new Mtcnn()
let tinyYolov2: TinyYolov2 = new TinyYolov2(options.withTinyYolov2 && options.withTinyYolov2.withSeparableConv)
let allFacesSsdMobilenetv1 = allFacesSsdMobilenetv1Factory(faceDetectionNet, faceLandmark68Net, faceRecognitionNet)
let allFacesTinyYolov2 = allFacesTinyYolov2Factory(tinyYolov2, faceLandmark68Net, faceRecognitionNet)
let allFacesMtcnn = allFacesMtcnnFactory(mtcnn, faceRecognitionNet)
beforeAll(async () => {
const {
......@@ -219,9 +214,6 @@ export function describeWithNets(
})
specDefinitions({
allFacesSsdMobilenetv1,
allFacesTinyYolov2,
allFacesMtcnn,
faceDetectionNet,
faceLandmark68Net,
faceLandmark68TinyNet,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment