Commit d3ddbb5d by vincent

Add a batch face recognition example; batch processing in allFaces can now be enabled via a flag

parent 1e2d2616
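
A minimal usage sketch of the new flag (sketch only, not part of the commit; assumes the models are already loaded and `inputImgEl` is a hypothetical image element):

    // enable the new batched code path via the third argument
    const useBatchProcessing = true
    const fullFaceDescriptions = await faceapi.allFaces(inputImgEl, 0.7, useBatchProcessing)
    // omitting the flag (or passing false) keeps the previous per-face behavior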
@@ -117,6 +117,10 @@ function renderNavBar(navbarId, exampleUri) {
     {
       uri: 'batch_face_landmarks',
       name: 'Batch Face Landmarks'
+    },
+    {
+      uri: 'batch_face_recognition',
+      name: 'Batch Face Recognition'
     }
   ]
...
@@ -49,6 +49,10 @@
   margin: 20px;
 }
+.button-sm {
+  padding: 0 10px !important;
+}
 #github-link {
   display: flex !important;
   justify-content: center;
...
@@ -25,8 +25,7 @@ app.get('/detect_and_draw_landmarks', (req, res) => res.sendFile(path.join(views
 app.get('/face_alignment', (req, res) => res.sendFile(path.join(viewsDir, 'faceAlignment.html')))
 app.get('/detect_and_recognize_faces', (req, res) => res.sendFile(path.join(viewsDir, 'detectAndRecognizeFaces.html')))
 app.get('/batch_face_landmarks', (req, res) => res.sendFile(path.join(viewsDir, 'batchFaceLandmarks.html')))
+app.get('/batch_face_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'batchFaceRecognition.html')))
 app.post('/fetch_external_image', async (req, res) => {
   const { imageUrl } = req.body
...
<!DOCTYPE html>
<html>
<head>
  <script src="face-api.js"></script>
  <script src="commons.js"></script>
  <link rel="stylesheet" href="styles.css">
  <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
  <script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
  <script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
  <div id="navbar"></div>
  <div class="center-content page-container">
    <div>
      <div class="progress" id="loader">
        <div class="indeterminate"></div>
      </div>
      <div class="row side-by-side">
        <div class="row">
          <label for="timeNoBatch">Time for processing each face separately:</label>
          <input disabled value="-" id="timeNoBatch" type="text" class="bold"/>
        </div>
        <div class="row">
          <label for="timeBatch">Time for processing in batch:</label>
          <input disabled value="-" id="timeBatch" type="text" class="bold"/>
        </div>
      </div>
      <div class="row side-by-side">
        <div>
          <label for="numImages">Num Images:</label>
          <input id="numImages" type="text" class="bold" value="32"/>
        </div>
        <button
          class="waves-effect waves-light btn"
          onclick="measureTimingsAndDisplay()"
        >
          Ok
        </button>
      </div>
      <div class="row side-by-side">
        <div class="center-content">
          <div id="faceContainer"></div>
        </div>
      </div>
    </div>
  </div>
  <script>
    let images = []
    let trainDescriptorsByClass = []
    let descriptorsByFace = []
    let numImages = 32
    let maxDistance = 0.6
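
    // clamp the requested image count to the range of preloaded test images (0-32)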
    function onNumImagesChanged(e) {
      const val = parseInt(e.target.value) || 32
      numImages = Math.min(Math.max(val, 0), 32)
      e.target.value = numImages
    }
    function displayTimeStats(timeNoBatch, timeBatch) {
      $('#timeNoBatch').val(`${timeNoBatch} ms`)
      $('#timeBatch').val(`${timeBatch} ms`)
    }

    function drawFaceRecognitionCanvas(img, descriptor) {
      const canvas = faceapi.createCanvasFromMedia(img)
      $('#faceContainer').append(canvas)

      const bestMatch = getBestMatch(trainDescriptorsByClass, descriptor)
      const text = `${bestMatch.distance < maxDistance ? bestMatch.className : 'unknown'} (${bestMatch.distance})`
      const x = 20, y = canvas.height - 20
      faceapi.drawText(
        canvas.getContext('2d'),
        x,
        y,
        text,
        Object.assign(faceapi.getDefaultDrawOptions(), { color: 'red', fontSize: 16 })
      )
    }
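
    // compute descriptors for the first numImages images, either in a single
    // batched forward pass (array input) or sequentially, one image at a time;
    // returns the elapsed wall-clock time in ms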
    async function runComputeFaceDescriptors(useBatchInput) {
      const ts = Date.now()
      descriptorsByFace = useBatchInput
        ? await faceapi.computeFaceDescriptor(images.slice(0, numImages))
        : await Promise.all(images.slice(0, numImages).map(img => faceapi.computeFaceDescriptor(img)))
      const time = Date.now() - ts
      return time
    }

    async function measureTimings() {
      const timeNoBatch = await runComputeFaceDescriptors(false)
      const timeBatch = await runComputeFaceDescriptors(true)
      return { timeNoBatch, timeBatch }
    }

    async function measureTimingsAndDisplay() {
      const { timeNoBatch, timeBatch } = await measureTimings()
      displayTimeStats(timeNoBatch, timeBatch)
      $('#faceContainer').empty()
      descriptorsByFace.forEach((descriptor, i) => drawFaceRecognitionCanvas(images[i], descriptor))
    }
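
    // load the recognition model, compute the reference descriptors and fetch
    // the test images, then do a warmup pass before measuring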
    async function run() {
      await faceapi.loadFaceRecognitionModel('/')
      trainDescriptorsByClass = await initTrainDescriptorsByClass(faceapi.recognitionNet, 1)
      $('#loader').hide()

      const imgUris = classes
        // skip images with idx 1, as they are used as reference data
        .map(clazz => Array.from(Array(4), (_, idx) => getFaceImageUri(clazz, idx + 2)))
        .reduce((flat, arr) => flat.concat(arr))

      images = await Promise.all(imgUris.map(
        async uri => faceapi.bufferToImage(await fetchImage(uri))
      ))

      // warmup
      await measureTimings()
      // run
      measureTimingsAndDisplay()
    }

    $(document).ready(function() {
      $('#numImages').on('change', onNumImagesChanged)
      renderNavBar('#navbar', 'batch_face_recognition')
      run()
    })
  </script>
</body>
</html>
\ No newline at end of file
@@ -30,6 +30,10 @@
         >
           Ok
         </button>
+        <p>
+          <input type="checkbox" id="useBatchProcessing" onchange="onChangeUseBatchProcessing(event)" />
+          <label for="useBatchProcessing">Use Batch Processing</label>
+        </p>
       </div>
       <div class="row side-by-side">
         <div class="row">
@@ -37,13 +41,13 @@
           <input disabled value="0.7" id="minConfidence" type="text" class="bold">
         </div>
         <button
-          class="waves-effect waves-light btn"
+          class="waves-effect waves-light btn button-sm"
           onclick="onDecreaseMinConfidence()"
         >
           <i class="material-icons left">-</i>
         </button>
         <button
-          class="waves-effect waves-light btn"
+          class="waves-effect waves-light btn button-sm"
           onclick="onIncreaseMinConfidence()"
         >
           <i class="material-icons left">+</i>
@@ -53,13 +57,13 @@
           <input disabled value="0.6" id="maxDistance" type="text" class="bold">
         </div>
         <button
-          class="waves-effect waves-light btn"
+          class="waves-effect waves-light btn button-sm"
           onclick="onDecreaseMaxDistance()"
         >
           <i class="material-icons left">-</i>
         </button>
         <button
-          class="waves-effect waves-light btn"
+          class="waves-effect waves-light btn button-sm"
           onclick="onIncreaseMaxDistance()"
         >
           <i class="material-icons left">+</i>
@@ -70,9 +74,14 @@
   <script>
     let maxDistance = 0.6
     let minConfidence = 0.7
+    let useBatchProcessing = false
     let detectionNet, recognitionNet, landmarkNet
     let trainDescriptorsByClass = []
 
+    function onChangeUseBatchProcessing(e) {
+      useBatchProcessing = $(e.target).prop('checked')
+    }
+
     function onIncreaseMinConfidence() {
       minConfidence = Math.min(faceapi.round(minConfidence + 0.1), 1.0)
       $('#minConfidence').val(minConfidence)
@@ -110,7 +119,7 @@
       canvas.width = width
       canvas.height = height
 
-      const fullFaceDescriptions = (await faceapi.allFaces(inputImgEl, minConfidence))
+      const fullFaceDescriptions = (await faceapi.allFaces(inputImgEl, minConfidence, useBatchProcessing))
         .map(fd => fd.forSize(width, height))
 
       fullFaceDescriptions.forEach(({ detection, descriptor }) => {
...
@@ -13,18 +13,19 @@ export function allFacesFactory(
 ) {
   return async function(
     input: TNetInput,
-    minConfidence: number
+    minConfidence: number,
+    useBatchProcessing: boolean = false
   ): Promise<FullFaceDescription[]> {
     const detections = await detectionNet.locateFaces(input, minConfidence)
     const faceTensors = await extractFaceTensors(input, detections)
 
-    /**
-    const faceLandmarksByFace = await Promise.all(faceTensors.map(
-      faceTensor => landmarkNet.detectLandmarks(faceTensor)
-    )) as FaceLandmarks[]
-    */
-    const faceLandmarksByFace = await landmarkNet.detectLandmarks(faceTensors) as FaceLandmarks[]
+    const faceLandmarksByFace = useBatchProcessing
+      ? await landmarkNet.detectLandmarks(faceTensors) as FaceLandmarks[]
+      : await Promise.all(faceTensors.map(
+          faceTensor => landmarkNet.detectLandmarks(faceTensor)
+        )) as FaceLandmarks[]
     faceTensors.forEach(t => t.dispose())
@@ -33,9 +34,12 @@ export function allFacesFactory(
     )
     const alignedFaceTensors = await extractFaceTensors(input, alignedFaceBoxes)
 
-    const descriptors = await Promise.all(alignedFaceTensors.map(
-      faceTensor => recognitionNet.computeFaceDescriptor(faceTensor)
-    )) as Float32Array[]
+    const descriptors = useBatchProcessing
+      ? await recognitionNet.computeFaceDescriptor(alignedFaceTensors) as Float32Array[]
+      : await Promise.all(alignedFaceTensors.map(
+          faceTensor => recognitionNet.computeFaceDescriptor(faceTensor)
+        )) as Float32Array[]
     alignedFaceTensors.forEach(t => t.dispose())
 
     return detections.map((detection, i) =>
...
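
Both ternaries above share the same shape; a minimal sketch of the pattern as a hypothetical generic helper (not part of this commit, assuming the @tensorflow/tfjs-core types the library already depends on):

    import * as tf from '@tensorflow/tfjs-core'

    // hypothetical helper: run a net over all face tensors in one batched
    // forward pass, or over each tensor separately
    async function runBatchedOrPerFace<T>(
      tensors: tf.Tensor[],
      runBatch: (batch: tf.Tensor[]) => Promise<T[]>,
      runSingle: (tensor: tf.Tensor) => Promise<T>,
      useBatchProcessing: boolean
    ): Promise<T[]> {
      return useBatchProcessing
        ? runBatch(tensors)
        : Promise.all(tensors.map(runSingle))
    }

Here runBatch would wrap the array-input call (e.g. landmarkNet.detectLandmarks(faceTensors)) and runSingle the per-tensor call.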
@@ -56,7 +56,8 @@ export function computeFaceDescriptor(
 export const allFaces: (
   input: tf.Tensor | NetInput | TNetInput,
-  minConfidence: number
+  minConfidence: number,
+  useBatchProcessing?: boolean
 ) => Promise<FullFaceDescription[]> = allFacesFactory(
   detectionNet,
   landmarkNet,
...