Commit e3571360 by vincent

support tensor inputs in the high-level API

parent 958ca95f
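As a quick illustration of what this commit enables, here is a minimal sketch of passing a tensor directly into the high-level detection chain. It assumes the required nets have already been loaded and that the image is available as a canvas; the 'face-api.js' import path is an assumption for illustration, while the calls themselves mirror the tests added in this commit.

import * as tf from '@tensorflow/tfjs-core';
import * as faceapi from 'face-api.js'; // assumed package import

async function describeFaces(canvas: HTMLCanvasElement) {
  // the high level API now accepts a tf.Tensor3D in addition to canvas/image/video inputs
  const imgTensor: tf.Tensor3D = tf.fromPixels(canvas)

  const results = await faceapi
    .detectAllFaces(imgTensor, new faceapi.SsdMobilenetv1Options({ minConfidence: 0.5 }))
    .withFaceLandmarks()
    .withFaceDescriptors()

  // intermediate face crops created by the tasks are disposed internally,
  // the input tensor remains owned by the caller
  imgTensor.dispose()
  return results
}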
const spec_files = ['**/*.test.ts'].concat(
let spec_files = ['**/*.test.ts'].concat(
process.env.EXCLUDE_UNCOMPRESSED
? ['!**/*.uncompressed.test.ts']
: []
)
// exclude browser tests
spec_files = spec_files.concat(['!**/*.browser.test.ts'])
module.exports = {
spec_dir: 'test',
spec_files,
......
@@ -36,6 +36,10 @@ exclude = exclude.concat(
: []
)
// exclude nodejs tests
exclude = exclude.concat(['**/*.node.test.ts'])
module.exports = function(config) {
const args = []
if (process.env.BACKEND_CPU) {
......
import * as tf from '@tensorflow/tfjs-core';
import { isTensor4D, Rect } from 'tfjs-image-recognition-base';
import { isTensor4D, Rect, isTensor3D } from 'tfjs-image-recognition-base';
import { FaceDetection } from '../classes/FaceDetection';
@@ -18,6 +18,10 @@ export async function extractFaceTensors(
detections: Array<FaceDetection | Rect>
): Promise<tf.Tensor3D[]> {
if (!isTensor3D(imageTensor) && !isTensor4D(imageTensor)) {
throw new Error('extractFaceTensors - expected image tensor to be 3D or 4D')
}
if (isTensor4D(imageTensor) && imageTensor.shape[0] > 1) {
throw new Error('extractFaceTensors - batchSize > 1 not supported')
}
......
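For reference, a hedged sketch of calling the tensor-based extraction directly; the 'face-api.js' import path and the box values are illustrative assumptions.

import * as tf from '@tensorflow/tfjs-core';
import { Rect } from 'tfjs-image-recognition-base';
import { extractFaceTensors } from 'face-api.js'; // assumed package import

async function cropFace(imageTensor: tf.Tensor3D) {
  // the input must be a 3D tensor or a 4D tensor with batch size 1,
  // otherwise extractFaceTensors throws, as shown in the checks above
  const faceTensors = await extractFaceTensors(imageTensor, [new Rect(50, 40, 120, 120)])
  // ... run a net on faceTensors[0], then release the crops
  faceTensors.forEach(t => t.dispose())
}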
import {
createCanvas,
env,
getContext2dOrThrow,
imageTensorToCanvas,
Rect,
@@ -8,7 +9,6 @@ import {
} from 'tfjs-image-recognition-base';
import { FaceDetection } from '../classes/FaceDetection';
import { env } from 'tfjs-image-recognition-base';
/**
* Extracts the image regions containing the detected faces.
......
import * as tf from '@tensorflow/tfjs-core';
import { TNetInput } from 'tfjs-image-recognition-base';
import { FaceDetectionWithLandmarks } from '../classes/FaceDetectionWithLandmarks';
import { FullFaceDescription } from '../classes/FullFaceDescription';
import { extractFaces } from '../dom';
import { extractFaces, extractFaceTensors } from '../dom';
import { ComposableTask } from './ComposableTask';
import { nets } from './nets';
@@ -20,15 +21,20 @@ export class ComputeAllFaceDescriptorsTask extends ComputeFaceDescriptorsTaskBas
public async run(): Promise<FullFaceDescription[]> {
const facesWithLandmarks = await this.detectFaceLandmarksTask
const alignedFaceCanvases = await extractFaces(
this.input,
facesWithLandmarks.map(({ landmarks }) => landmarks.align())
)
return await Promise.all(facesWithLandmarks.map(async ({ detection, landmarks }, i) => {
const descriptor = await nets.faceRecognitionNet.computeFaceDescriptor(alignedFaceCanvases[i]) as Float32Array
const alignedRects = facesWithLandmarks.map(({ alignedRect }) => alignedRect)
const alignedFaces: Array<HTMLCanvasElement | tf.Tensor3D> = this.input instanceof tf.Tensor
? await extractFaceTensors(this.input, alignedRects)
: await extractFaces(this.input, alignedRects)
const fullFaceDescriptions = await Promise.all(facesWithLandmarks.map(async ({ detection, landmarks }, i) => {
const descriptor = await nets.faceRecognitionNet.computeFaceDescriptor(alignedFaces[i]) as Float32Array
return new FullFaceDescription(detection, landmarks, descriptor)
}))
alignedFaces.forEach(f => f instanceof tf.Tensor && f.dispose())
return fullFaceDescriptions
}
}
@@ -42,8 +48,12 @@ export class ComputeSingleFaceDescriptorTask extends ComputeFaceDescriptorsTaskB
}
const { detection, landmarks, alignedRect } = detectionWithLandmarks
const alignedFaceCanvas = (await extractFaces(this.input, [alignedRect]))[0]
const descriptor = await nets.faceRecognitionNet.computeFaceDescriptor(alignedFaceCanvas) as Float32Array
const alignedFaces: Array<HTMLCanvasElement | tf.Tensor3D> = this.input instanceof tf.Tensor
? await extractFaceTensors(this.input, [alignedRect])
: await extractFaces(this.input, [alignedRect])
const descriptor = await nets.faceRecognitionNet.computeFaceDescriptor(alignedFaces[0]) as Float32Array
alignedFaces.forEach(f => f instanceof tf.Tensor && f.dispose())
return new FullFaceDescription(detection, landmarks, descriptor)
}
......
import * as tf from '@tensorflow/tfjs-core';
import { TNetInput } from 'tfjs-image-recognition-base';
import { FaceDetection } from '../classes/FaceDetection';
import { FaceDetectionWithLandmarks } from '../classes/FaceDetectionWithLandmarks';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { extractFaces } from '../dom';
import { extractFaces, extractFaceTensors } from '../dom';
import { FaceLandmark68Net } from '../faceLandmarkNet/FaceLandmark68Net';
import { FaceLandmark68TinyNet } from '../faceLandmarkNet/FaceLandmark68TinyNet';
import { ComposableTask } from './ComposableTask';
@@ -31,12 +32,17 @@ export class DetectAllFaceLandmarksTask extends DetectFaceLandmarksTaskBase<Face
public async run(): Promise<FaceDetectionWithLandmarks[]> {
const detections = await this.detectFacesTask
const faceCanvases = await extractFaces(this.input, detections)
const faceLandmarksByFace = await Promise.all(faceCanvases.map(
canvas => this.landmarkNet.detectLandmarks(canvas)
const faces: Array<HTMLCanvasElement | tf.Tensor3D> = this.input instanceof tf.Tensor
? await extractFaceTensors(this.input, detections)
: await extractFaces(this.input, detections)
const faceLandmarksByFace = await Promise.all(faces.map(
face => this.landmarkNet.detectLandmarks(face)
)) as FaceLandmarks68[]
faces.forEach(f => f instanceof tf.Tensor && f.dispose())
return detections.map((detection, i) =>
new FaceDetectionWithLandmarks(detection, faceLandmarksByFace[i])
)
@@ -56,10 +62,18 @@ export class DetectSingleFaceLandmarksTask extends DetectFaceLandmarksTaskBase<F
return
}
const faceCanvas = (await extractFaces(this.input, [detection]))[0]
const faces: Array<HTMLCanvasElement | tf.Tensor3D> = this.input instanceof tf.Tensor
? await extractFaceTensors(this.input, [detection])
: await extractFaces(this.input, [detection])
const landmarks = await this.landmarkNet.detectLandmarks(faces[0]) as FaceLandmarks68
faces.forEach(f => f instanceof tf.Tensor && f.dispose())
return new FaceDetectionWithLandmarks(
detection,
await this.landmarkNet.detectLandmarks(faceCanvas) as FaceLandmarks68
landmarks
)
}
......
@@ -23,7 +23,7 @@ describe('mtcnn', () => {
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedMtcnnBoxes)
})
describeWithNets('detectAllFaces', { withAllFacesMtcnn: true }, () => {
describeWithNets('globalApi', { withAllFacesMtcnn: true }, () => {
it('detectAllFaces', async () => {
const options = new MtcnnOptions({
......
import * as faceapi from '../../../src';
import { describeWithNets, expectAllTensorsReleased, assembleExpectedFullFaceDescriptions, ExpectedFullFaceDescription } from '../../utils';
import { SsdMobilenetv1Options, createCanvasFromMedia } from '../../../src';
import { expectFaceDetections } from '../../expectFaceDetections';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectedSsdBoxes } from './expectedBoxes';
import { loadImage } from '../../env';
import * as tf from '@tensorflow/tfjs-core';
describe('ssdMobilenetv1 - node', () => {
let imgTensor: faceapi.tf.Tensor3D
let expectedFullFaceDescriptions: ExpectedFullFaceDescription[]
const expectedScores = [0.54, 0.81, 0.97, 0.88, 0.84, 0.61]
beforeAll(async () => {
imgTensor = tf.fromPixels(createCanvasFromMedia(await loadImage('test/images/faces.jpg')))
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedSsdBoxes)
})
describeWithNets('globalApi, tensor inputs', { withAllFacesSsdMobilenetv1: true }, () => {
it('detectAllFaces', async () => {
const options = new SsdMobilenetv1Options({
minConfidence: 0.5
})
const results = await faceapi.detectAllFaces(imgTensor, options)
const maxScoreDelta = 0.05
const maxBoxDelta = 5
expect(results.length).toEqual(6)
expectFaceDetections(results, expectedSsdBoxes, expectedScores, maxScoreDelta, maxBoxDelta)
})
it('detectAllFaces.withFaceLandmarks()', async () => {
const options = new SsdMobilenetv1Options({
minConfidence: 0.5
})
const results = await faceapi
.detectAllFaces(imgTensor, options)
.withFaceLandmarks()
const deltas = {
maxScoreDelta: 0.05,
maxBoxDelta: 5,
maxLandmarksDelta: 4
}
expect(results.length).toEqual(6)
expectFaceDetectionsWithLandmarks(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
it('detectAllFaces.withFaceLandmarks().withFaceDescriptors()', async () => {
const options = new SsdMobilenetv1Options({
minConfidence: 0.5
})
const results = await faceapi
.detectAllFaces(imgTensor, options)
.withFaceLandmarks()
.withFaceDescriptors()
const deltas = {
maxScoreDelta: 0.05,
maxBoxDelta: 5,
maxLandmarksDelta: 4,
maxDescriptorDelta: 0.2
}
expect(results.length).toEqual(6)
expectFullFaceDescriptions(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
it('no memory leaks', async () => {
await expectAllTensorsReleased(async () => {
await faceapi
.detectAllFaces(imgTensor, new SsdMobilenetv1Options())
.withFaceLandmarks()
.withFaceDescriptors()
})
})
})
})
\ No newline at end of file
import * as faceapi from '../../../src';
import { describeWithNets, expectAllTensorsReleased, assembleExpectedFullFaceDescriptions, ExpectedFullFaceDescription } from '../../utils';
import { TinyFaceDetectorOptions, createCanvasFromMedia } from '../../../src';
import { expectFaceDetections } from '../../expectFaceDetections';
import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectedTinyFaceDetectorBoxes } from './expectedBoxes';
import { loadImage } from '../../env';
import * as tf from '@tensorflow/tfjs-core';
describe('tinyFaceDetector - node', () => {
let imgTensor: faceapi.tf.Tensor3D
let expectedFullFaceDescriptions: ExpectedFullFaceDescription[]
const expectedScores = [0.7, 0.82, 0.93, 0.86, 0.79, 0.84]
beforeAll(async () => {
imgTensor = tf.fromPixels(createCanvasFromMedia(await loadImage('test/images/faces.jpg')))
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes)
})
describeWithNets('globalApi, tensor inputs', { withAllFacesTinyFaceDetector: true }, () => {
it('detectAllFaces', async () => {
const options = new TinyFaceDetectorOptions({
inputSize: 416
})
const results = await faceapi.detectAllFaces(imgTensor, options)
const maxScoreDelta = 0.05
const maxBoxDelta = 5
expect(results.length).toEqual(6)
expectFaceDetections(results, expectedTinyFaceDetectorBoxes, expectedScores, maxScoreDelta, maxBoxDelta)
})
it('detectAllFaces.withFaceLandmarks()', async () => {
const options = new TinyFaceDetectorOptions({
inputSize: 416
})
const results = await faceapi
.detectAllFaces(imgTensor, options)
.withFaceLandmarks()
const deltas = {
maxScoreDelta: 0.05,
maxBoxDelta: 5,
maxLandmarksDelta: 10
}
expect(results.length).toEqual(6)
expectFaceDetectionsWithLandmarks(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
it('detectAllFaces.withFaceLandmarks().withFaceDescriptors()', async () => {
const options = new TinyFaceDetectorOptions({
inputSize: 416
})
const results = await faceapi
.detectAllFaces(imgTensor, options)
.withFaceLandmarks()
.withFaceDescriptors()
const deltas = {
maxScoreDelta: 0.05,
maxBoxDelta: 5,
maxLandmarksDelta: 10,
maxDescriptorDelta: 0.2
}
expect(results.length).toEqual(6)
expectFullFaceDescriptions(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
it('no memory leaks', async () => {
await expectAllTensorsReleased(async () => {
await faceapi
.detectAllFaces(imgTensor, new TinyFaceDetectorOptions())
.withFaceLandmarks()
.withFaceDescriptors()
})
})
})
})
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { getContext2dOrThrow } from 'tfjs-image-recognition-base';
import * as faceapi from '../src';
import { FaceRecognitionNet, IPoint, IRect, Mtcnn, TinyYolov2 } from '../src/';
import { createCanvasFromMedia, FaceRecognitionNet, IPoint, IRect, Mtcnn, TinyYolov2 } from '../src/';
import { FaceDetection } from '../src/classes/FaceDetection';
import { FaceLandmarks } from '../src/classes/FaceLandmarks';
import { FaceLandmark68Net } from '../src/faceLandmarkNet/FaceLandmark68Net';
......