Commit f36fb464 by vincent

Some adjustments to the new tfjs-image-recognition-base utility

parent c44a4c12
import { Dimensions, ObjectDetection, Rect } from 'tfjs-image-recognition-base';
import { IDimensions, ObjectDetection, Rect } from 'tfjs-image-recognition-base';
export class FaceDetection extends ObjectDetection {
constructor(
score: number,
relativeBox: Rect,
imageDims: Dimensions
imageDims: IDimensions
) {
super(score, score, '', relativeBox, imageDims)
}
......
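With the switch from the Dimensions class to the IDimensions interface, callers of the FaceDetection constructor only need a plain { width, height } literal. A minimal usage sketch, not part of this commit; the variable name and all numeric values are made up:

import { Rect } from 'tfjs-image-recognition-base'
import { FaceDetection } from './FaceDetection'

// relative box coordinates in [0, 1], image size as a plain IDimensions literal
const detection = new FaceDetection(0.98, new Rect(0.1, 0.2, 0.4, 0.5), { width: 640, height: 480 })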
......@@ -14,18 +14,22 @@ export class FaceDetectionWithLandmarks {
}
public get detection(): FaceDetection { return this._detection }
public get relativeLandmarks(): FaceLandmarks { return this._relativeLandmarks }
public get alignedRect(): FaceDetection {
const rect = this.landmarks.align()
const { imageDims } = this.detection
return new FaceDetection(this._detection.score, rect.rescale(imageDims.reverse()), imageDims)
}
public get landmarks(): FaceLandmarks {
const { x, y } = this.detection.box
return this._relativeLandmarks.shift(x, y)
}
public forSize(width: number, height: number): FaceDetectionWithLandmarks {
return new FaceDetectionWithLandmarks(
this._detection.forSize(width, height),
this._relativeLandmarks.forSize(width, height)
)
const resizedDetection = this._detection.forSize(width, height)
const resizedLandmarks = this._relativeLandmarks.forSize(resizedDetection.box.width, resizedDetection.box.height)
return new FaceDetectionWithLandmarks(resizedDetection, resizedLandmarks)
}
}
\ No newline at end of file
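A hedged usage sketch for the two changes above: forSize now derives the resized landmarks from the resized detection box, and the new alignedRect getter exposes the aligned box as a FaceDetection directly. Variable names below (detectionWithLandmarks, canvas) are assumptions; extractFaces is the helper changed later in this commit:

const resized = detectionWithLandmarks.forSize(canvas.width, canvas.height)
const alignedBox = resized.alignedRect              // a FaceDetection over the aligned face region
const faceCanvases = await extractFaces(canvas, [alignedBox])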
import { Dimensions, getCenterPoint, Point, Rect } from 'tfjs-image-recognition-base';
import { Dimensions, getCenterPoint, IDimensions, Point, Rect } from 'tfjs-image-recognition-base';
import { FaceDetection } from './FaceDetection';
......@@ -8,21 +8,19 @@ const relY = 0.43
const relScale = 0.45
export class FaceLandmarks {
protected _imageWidth: number
protected _imageHeight: number
protected _shift: Point
protected _faceLandmarks: Point[]
protected _positions: Point[]
protected _imgDims: Dimensions
constructor(
relativeFaceLandmarkPositions: Point[],
imageDims: Dimensions,
imgDims: IDimensions,
shift: Point = new Point(0, 0)
) {
const { width, height } = imageDims
this._imageWidth = width
this._imageHeight = height
const { width, height } = imgDims
this._imgDims = new Dimensions(width, height)
this._shift = shift
this._faceLandmarks = relativeFaceLandmarkPositions.map(
this._positions = relativeFaceLandmarkPositions.map(
pt => pt.mul(new Point(width, height)).add(shift)
)
}
......@@ -31,35 +29,26 @@ export class FaceLandmarks {
return new Point(this._shift.x, this._shift.y)
}
public getImageWidth(): number {
return this._imageWidth
}
public getImageHeight(): number {
return this._imageHeight
}
public getPositions(): Point[] {
return this._faceLandmarks
}
public getRelativePositions(): Point[] {
return this._faceLandmarks.map(
pt => pt.sub(this._shift).div(new Point(this._imageWidth, this._imageHeight))
public get imageWidth(): number { return this._imgDims.width }
public get imageHeight(): number { return this._imgDims.height }
public get positions(): Point[] { return this._positions }
public get relativePositions(): Point[] {
return this._positions.map(
pt => pt.sub(this._shift).div(new Point(this.imageWidth, this.imageHeight))
)
}
public forSize<T extends FaceLandmarks>(width: number, height: number): T {
return new (this.constructor as any)(
this.getRelativePositions(),
this.relativePositions,
{ width, height }
)
}
public shift<T extends FaceLandmarks>(x: number, y: number): T {
return new (this.constructor as any)(
this.getRelativePositions(),
{ width: this._imageWidth, height: this._imageHeight },
this.relativePositions,
this._imgDims,
new Point(x, y)
)
}
......@@ -84,7 +73,7 @@ export class FaceLandmarks {
): Rect {
if (detection) {
const box = detection instanceof FaceDetection
? detection.getBox().floor()
? detection.box.floor()
: detection
return this.shift(box.x, box.y).align()
......@@ -103,7 +92,7 @@ export class FaceLandmarks {
const x = Math.floor(Math.max(0, refPoint.x - (relX * size)))
const y = Math.floor(Math.max(0, refPoint.y - (relY * size)))
return new Rect(x, y, Math.min(size, this._imageWidth + x), Math.min(size, this._imageHeight + y))
return new Rect(x, y, Math.min(size, this.imageWidth + x), Math.min(size, this.imageHeight + y))
}
protected getRefPointsForAlignment(): Point[] {
......
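For consumers of FaceLandmarks, the accessor methods above become plain getters. A migration sketch, assuming an existing FaceLandmarks instance named landmarks:

const pts = landmarks.positions            // was: landmarks.getPositions()
const rel = landmarks.relativePositions    // was: landmarks.getRelativePositions()
const w = landmarks.imageWidth             // was: landmarks.getImageWidth()
const h = landmarks.imageHeight            // was: landmarks.getImageHeight()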
......@@ -5,7 +5,7 @@ import { FaceLandmarks } from './FaceLandmarks';
export class FaceLandmarks5 extends FaceLandmarks {
protected getRefPointsForAlignment(): Point[] {
const pts = this.getPositions()
const pts = this.positions
return [
pts[0],
pts[1],
......
......@@ -2,34 +2,33 @@ import { getCenterPoint, Point } from 'tfjs-image-recognition-base';
import { FaceLandmarks } from '../classes/FaceLandmarks';
export class FaceLandmarks68 extends FaceLandmarks {
public getJawOutline(): Point[] {
return this._faceLandmarks.slice(0, 17)
return this.positions.slice(0, 17)
}
public getLeftEyeBrow(): Point[] {
return this._faceLandmarks.slice(17, 22)
return this.positions.slice(17, 22)
}
public getRightEyeBrow(): Point[] {
return this._faceLandmarks.slice(22, 27)
return this.positions.slice(22, 27)
}
public getNose(): Point[] {
return this._faceLandmarks.slice(27, 36)
return this.positions.slice(27, 36)
}
public getLeftEye(): Point[] {
return this._faceLandmarks.slice(36, 42)
return this.positions.slice(36, 42)
}
public getRightEye(): Point[] {
return this._faceLandmarks.slice(42, 48)
return this.positions.slice(42, 48)
}
public getMouth(): Point[] {
return this._faceLandmarks.slice(48, 68)
return this.positions.slice(48, 68)
}
protected getRefPointsForAlignment(): Point[] {
......
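The public region getters keep their signatures and only switch to the positions getter internally, so existing call sites like the following sketch should be unaffected (the landmarks68 variable is assumed):

const leftEye = landmarks68.getLeftEye()   // Point[] of length 6 (indices 36-41)
const mouth = landmarks68.getMouth()       // Point[] of length 20 (indices 48-67)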
......@@ -44,6 +44,6 @@ export function drawLandmarks(
// else draw points
const ptOffset = lineWidth / 2
ctx.fillStyle = color
landmarks.getPositions().forEach(pt => ctx.fillRect(pt.x - ptOffset, pt.y - ptOffset, lineWidth, lineWidth))
landmarks.positions.forEach(pt => ctx.fillRect(pt.x - ptOffset, pt.y - ptOffset, lineWidth, lineWidth))
})
}
\ No newline at end of file
......@@ -27,7 +27,7 @@ export async function extractFaceTensors(
const boxes = detections.map(
det => det instanceof FaceDetection
? det.forSize(imgWidth, imgHeight).getBox()
? det.forSize(imgWidth, imgHeight).box
: det
)
.map(box => box.clipAtImageBorders(imgWidth, imgHeight))
......
......@@ -39,7 +39,7 @@ export async function extractFaces(
const ctx = getContext2dOrThrow(canvas)
const boxes = detections.map(
det => det instanceof FaceDetection
? det.forSize(canvas.width, canvas.height).getBox().floor()
? det.forSize(canvas.width, canvas.height).box.floor()
: det
)
.map(box => box.clipAtImageBorders(canvas.width, canvas.height))
......
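Both extraction helpers above reflect the same rename on FaceDetection: getBox() becomes the box getter. A migration sketch for call sites (detection and canvas are assumed names):

const box = detection.forSize(canvas.width, canvas.height).box.floor()   // was: .getBox().floor()
const clipped = box.clipAtImageBorders(canvas.width, canvas.height)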
import * as tf from '@tensorflow/tfjs-core';
import { isEven, NetInput, NeuralNetwork, Point, TNetInput, toNetInput, Dimensions } from 'tfjs-image-recognition-base';
import { IDimensions, isEven, NetInput, NeuralNetwork, Point, TNetInput, toNetInput } from 'tfjs-image-recognition-base';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
......@@ -17,7 +17,7 @@ export class FaceLandmark68NetBase<NetParams> extends NeuralNetwork<NetParams> {
throw new Error(`${this.__name} - runNet not implemented`)
}
public postProcess(output: tf.Tensor2D, inputSize: number, originalDimensions: Dimensions[]): tf.Tensor2D {
public postProcess(output: tf.Tensor2D, inputSize: number, originalDimensions: IDimensions[]): tf.Tensor2D {
const inputDimensions = originalDimensions.map(({ width, height }) => {
const scale = inputSize / Math.max(height, width)
......
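Since postProcess now types its third argument as IDimensions[], plain size literals are enough at the call site. A hypothetical call on a concrete subclass; the net and rawOutput variables, the input size of 112, and the dimensions are made-up values:

const landmarksTensor = net.postProcess(rawOutput, 112, [{ width: 640, height: 480 }])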
......@@ -41,8 +41,8 @@ export class ComputeSingleFaceDescriptorTask extends ComputeFaceDescriptorsTaskB
return
}
const { detection, landmarks } = detectionWithLandmarks
const alignedFaceCanvas = (await extractFaces(this.input, [landmarks.align()]))[0]
const { detection, landmarks, alignedRect } = detectionWithLandmarks
const alignedFaceCanvas = (await extractFaces(this.input, [alignedRect]))[0]
const descriptor = await nets.faceRecognitionNet.computeFaceDescriptor(alignedFaceCanvas) as Float32Array
return new FullFaceDescription(detection, landmarks, descriptor)
......
......@@ -6,7 +6,7 @@ import { MtcnnOptions } from '../mtcnn/MtcnnOptions';
import { SsdMobilenetv1Options } from '../ssdMobilenetv1/SsdMobilenetv1Options';
import { TinyFaceDetectorOptions } from '../tinyFaceDetector/TinyFaceDetectorOptions';
import { ComposableTask } from './ComposableTask';
import { DetectAllFaceLandmarksTask, DetectSingleFaceLandmarksTask } from './DetectFacesLandmarksTasks';
import { DetectAllFaceLandmarksTask, DetectSingleFaceLandmarksTask } from './DetectFaceLandmarksTasks';
import { nets } from './nets';
import { FaceDetectionOptions } from './types';
......
......@@ -2,7 +2,7 @@ export * from './allFaces'
export * from './ComposableTask'
export * from './ComputeFaceDescriptorsTasks'
export * from './DetectFacesTasks'
export * from './DetectFacesLandmarksTasks'
export * from './DetectFaceLandmarksTasks'
export * from './nets'
export * from './types'
......@@ -73,15 +73,6 @@ export class Mtcnn extends NeuralNetwork<NetParams> {
})
.slice(0, maxNumScales)
console.log({
minFaceSize,
scaleFactor,
maxNumScales,
scoreThresholds,
scales
})
stats.scales = scales
stats.pyramid = scales.map(scale => getSizesForScale(scale, [height, width]))
......
......@@ -43,8 +43,8 @@ export class MtcnnOptions {
}
if (
!Array.isArray(this._scaleSteps)
|| this._scaleSteps.some(th => typeof th !== 'number')
this._scaleSteps
&& (!Array.isArray(this._scaleSteps) || this._scaleSteps.some(th => typeof th !== 'number'))
) {
throw new Error(`${this._name} - expected scaleSteps to be an array of numbers`)
}
......
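The relaxed check above only validates scaleSteps when it is actually provided, instead of always throwing when it is undefined. A sketch of the resulting behavior; the constructor options shape is assumed from the surrounding class, not shown in this diff:

new MtcnnOptions({})                               // ok: scaleSteps left undefined
new MtcnnOptions({ scaleSteps: [0.6, 0.4, 0.2] })  // ok: an array of numbers
new MtcnnOptions({ scaleSteps: ['0.6'] as any })   // throws: expected scaleSteps to be an array of numbers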
import * as tf from '@tensorflow/tfjs-core';
import { Box, createCanvas, Dimensions, getContext2dOrThrow } from 'tfjs-image-recognition-base';
import { Box, createCanvas, getContext2dOrThrow, IDimensions } from 'tfjs-image-recognition-base';
import { normalize } from './normalize';
export async function extractImagePatches(
img: HTMLCanvasElement,
boxes: Box[],
{ width, height }: Dimensions
{ width, height }: IDimensions
): Promise<tf.Tensor4D[]> {
......
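With the third parameter typed as IDimensions, the patch size can likewise be passed as a plain literal. An assumed call site; inputCanvas, boxes, and the 24x24 patch size are illustrative values, not taken from this diff:

const patches = await extractImagePatches(inputCanvas, boxes, { width: 24, height: 24 })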