Commit e0b53014 by vincent

examples for face extraction

parent cdd2c49d
......@@ -24,6 +24,35 @@ async function initFaceRecognitionNet() {
return facerecognition.faceRecognitionNet(weights)
}
// fetch first image of each class and compute their descriptors
// Load the first reference image of every class and precompute its face
// descriptor with the given recognition net. Resolves to an array of
// { descriptor, className } entries used for nearest-neighbour matching.
async function initTrainDescriptorsByClass(net) {
  const descriptorPromises = classes.map(async (className) => {
    const buffer = await fetchImage(getFaceImageUri(className, 1))
    const img = await facerecognition.bufferToImage(buffer)
    return {
      descriptor: await net.computeFaceDescriptor(img),
      className
    }
  })
  return Promise.all(descriptorPromises)
}
// Compare the query descriptor against every known descriptor and return
// the { distance, className } entry with the smallest (rounded) euclidean
// distance. Throws on an empty descriptor list (reduce with no seed).
function getBestMatch(allDescriptors, queryDescriptor) {
  const toCandidate = ({ descriptor, className }) => ({
    distance: facerecognition.round(
      facerecognition.euclideanDistance(descriptor, queryDescriptor)
    ),
    className
  })
  // Keep `best` only when strictly closer, so ties resolve to the later entry
  // exactly as the original reduce did.
  const closer = (best, curr) => (best.distance < curr.distance ? best : curr)
  return allDescriptors.map(toCandidate).reduce(closer)
}
function renderNavBar(navbarId, exampleUri) {
const examples = [
{
......@@ -41,6 +70,14 @@ function renderNavBar(navbarId, exampleUri) {
{
uri: 'face_similarity',
name: 'Face Similarity'
},
{
uri: 'detect_and_draw_faces',
name: 'Detect and Draw Faces'
},
{
uri: 'detect_and_recognize_faces',
name: 'Detect and Recognize Faces'
}
]
......
......@@ -37,4 +37,8 @@
position: absolute;
top: 0;
left: 0;
}
/* Spacing between the extracted face thumbnails appended to #facesContainer. */
#facesContainer canvas {
margin: 10px;
}
\ No newline at end of file
......@@ -15,5 +15,7 @@ app.get('/face_detection', (req, res) => res.sendFile(path.join(viewsDir, 'faceD
// Map each example page route to its static HTML view.
app.get('/face_detection_video', (req, res) => res.sendFile(path.join(viewsDir, 'faceDetectionVideo.html')))
app.get('/face_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'faceRecognition.html')))
app.get('/face_similarity', (req, res) => res.sendFile(path.join(viewsDir, 'faceSimilarity.html')))
app.get('/detect_and_draw_faces', (req, res) => res.sendFile(path.join(viewsDir, 'detectAndDrawFaces.html')))
app.get('/detect_and_recognize_faces', (req, res) => res.sendFile(path.join(viewsDir, 'detectAndRecognizeFaces.html')))
// Start the example server.
app.listen(3000, () => console.log('Listening on port 3000!'))
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-recognition.js"></script>
<script src="axios.min.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div class="center-content page-container">
<div id="navbar"></div>
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div id="facesContainer"></div>
<div class="row side-by-side">
<div id="selectList"></div>
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.7" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</div>
<script>
let minConfidence = 0.7
let net
// Raise the detection confidence threshold by 0.1 (capped at 1.0),
// mirror it in the input field and re-run detection.
function onIncreaseThreshold() {
  const raised = facerecognition.round(minConfidence + 0.1)
  minConfidence = raised < 1.0 ? raised : 1.0
  $('#minConfidence').val(minConfidence)
  updateResults()
}
// Lower the detection confidence threshold by 0.1 (floored at 0.1),
// mirror it in the input field and re-run detection.
function onDecreaseThreshold() {
  const lowered = facerecognition.round(minConfidence - 0.1)
  minConfidence = lowered > 0.1 ? lowered : 0.1
  $('#minConfidence').val(minConfidence)
  updateResults()
}
// Run face detection on the currently displayed image, draw the bounding
// boxes onto the overlay canvas and append a cropped canvas for every
// detected face to #facesContainer.
async function updateResults() {
  const imgEl = $('#inputImg').get(0)
  const { width, height } = imgEl

  // Resize the overlay so detections line up with the displayed image.
  const overlay = $('#overlay').get(0)
  overlay.width = width
  overlay.height = height

  const input = new facerecognition.NetInput(imgEl)
  const detections = await net.locateFaces(input, minConfidence)
  const boxes = detections.map(det => det.forSize(width, height))
  facerecognition.drawDetection('overlay', boxes)

  // Crop each detected face out of the preprocessed input canvas.
  const faceImages = await facerecognition.extractFaces(input.canvases[0], detections)
  const container = $('#facesContainer')
  container.empty()
  for (const faceCanvas of faceImages) {
    container.append(faceCanvas)
  }
}
// Fetch the selected image, swap it into the <img> element and re-run detection.
async function onSelectionChanged(uri) {
  const buffer = await fetchImage(uri)
  const image = await facerecognition.bufferToImage(buffer)
  $('#inputImg').get(0).src = image.src
  updateResults()
}
// Load the face detection net, hide the progress bar and kick off
// detection on the initially selected image.
async function run() {
  net = await initFaceDetectionNet()
  $('#loader').hide()
  const initialUri = $('#selectList select').val()
  onSelectionChanged(initialUri)
}
// Wire up the page once the DOM is ready: render the navbar and the image
// select list, then start loading the nets.
$(document).ready(() => {
  renderNavBar('#navbar', 'detect_and_draw_faces')
  const onImageChosen = async (uri) => {
    await onSelectionChanged(uri)
  }
  renderImageSelectList('#selectList', onImageChosen, 'bbt1.jpg')
  run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-recognition.js"></script>
<script src="axios.min.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div class="center-content page-container">
<div id="navbar"></div>
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div id="selectList"></div>
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.7" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</div>
<script>
const threshold = 0.6
let minConfidence = 0.7
let detectionNet, recognitionNet
let trainDescriptorsByClass = []
// Raise the detection confidence threshold by 0.1 (capped at 1.0),
// mirror it in the input field and re-run detection + recognition.
function onIncreaseThreshold() {
  const raised = facerecognition.round(minConfidence + 0.1)
  minConfidence = raised < 1.0 ? raised : 1.0
  $('#minConfidence').val(minConfidence)
  updateResults()
}
// Lower the detection confidence threshold by 0.1 (floored at 0.1),
// mirror it in the input field and re-run detection + recognition.
function onDecreaseThreshold() {
  const lowered = facerecognition.round(minConfidence - 0.1)
  minConfidence = lowered > 0.1 ? lowered : 0.1
  $('#minConfidence').val(minConfidence)
  updateResults()
}
// Detect faces in the currently displayed image, draw the bounding boxes
// onto the overlay canvas and label each face with its best matching
// training class, or 'unknown' when the distance exceeds the threshold.
async function updateResults() {
  const inputImgEl = $('#inputImg').get(0)
  const { width, height } = inputImgEl
  // Resize the overlay so drawn boxes line up with the displayed image.
  const canvas = $('#overlay').get(0)
  canvas.width = width
  canvas.height = height
  const input = new facerecognition.NetInput(inputImgEl)
  const detections = await detectionNet.locateFaces(input, minConfidence)
  const detectionsForSize = detections.map(det => det.forSize(width, height))
  facerecognition.drawDetection('overlay', detectionsForSize, { withScore: false })
  const faceTensors = await facerecognition.extractFaceTensors(input, detections)
  const descriptors = await Promise.all(faceTensors.map(t => recognitionNet.computeFaceDescriptor(t)))
  // free memory for face image tensors after we computed their descriptors
  faceTensors.forEach(t => t.dispose())
  descriptors.forEach((descriptor, i) => {
    const bestMatch = getBestMatch(trainDescriptorsByClass, descriptor)
    // bug fix: label was previously misspelled 'unkown'
    const label = bestMatch.distance < threshold ? bestMatch.className : 'unknown'
    const text = `${label} (${bestMatch.distance})`
    const { x, y } = detectionsForSize[i].box
    facerecognition.drawText(canvas.getContext('2d'), x, y, text, facerecognition.getDefaultDrawOptions())
  })
}
// Fetch the selected image, swap it into the <img> element and re-run
// detection + recognition.
async function onSelectionChanged(uri) {
  const buffer = await fetchImage(uri)
  const image = await facerecognition.bufferToImage(buffer)
  $('#inputImg').get(0).src = image.src
  updateResults()
}
// Load both nets, precompute the training descriptors, then hide the
// loader and process the initially selected image.
async function run() {
  // The two nets are independent, so load them in parallel instead of
  // awaiting them one after the other.
  [detectionNet, recognitionNet] = await Promise.all([
    initFaceDetectionNet(),
    initFaceRecognitionNet()
  ])
  // Descriptors require the recognition net, so this stays sequential.
  trainDescriptorsByClass = await initTrainDescriptorsByClass(recognitionNet)
  $('#loader').hide()
  onSelectionChanged($('#selectList select').val())
}
// Wire up the page once the DOM is ready: render the navbar and the image
// select list, then start loading the nets.
$(document).ready(() => {
  renderNavBar('#navbar', 'detect_and_recognize_faces')
  const onImageChosen = async (uri) => {
    await onSelectionChanged(uri)
  }
  renderImageSelectList('#selectList', onImageChosen, 'bbt1.jpg')
  run()
})
</script>
</body>
</html>
\ No newline at end of file
......@@ -57,12 +57,13 @@
}
async function updateResults() {
const input = new facerecognition.NetInput('inputImg')
const { width, height } = input
const inputImgEl = $('#inputImg').get(0)
const { width, height } = inputImgEl
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const input = new facerecognition.NetInput(inputImgEl)
result = await net.locateFaces(input, minConfidence)
facerecognition.drawDetection('overlay', result.map(det => det.forSize(width, height)))
}
......
......@@ -108,30 +108,6 @@
getImg().src = src
}
// Fetch and decode the first image of every class; resolves to an array
// of { img, className } pairs.
async function loadTrainingImages() {
  const loadOne = async (className) => {
    const buffer = await fetchImage(getFaceImageUri(className, 1))
    const img = await facerecognition.bufferToImage(buffer)
    return { img, className }
  }
  return Promise.all(classes.map(loadOne))
}
// Find the entry in the global trainDescriptorsByClass list whose descriptor
// has the smallest (rounded) euclidean distance to the query descriptor.
// Throws on an empty list (reduce with no seed).
function getBestMatch(queryDescriptor) {
  const toCandidate = ({ descriptor, className }) => ({
    distance: facerecognition.round(
      facerecognition.euclideanDistance(descriptor, queryDescriptor)
    ),
    className
  })
  // Keep `best` only when strictly closer, so ties resolve to the later entry
  // exactly as the original reduce did.
  const closer = (best, curr) => (best.distance < curr.distance ? best : curr)
  return trainDescriptorsByClass.map(toCandidate).reduce(closer)
}
async function runFaceRecognition() {
async function next() {
const imgBuf = await fetchImage(getFaceImageUri(classes[currClassIdx], currImageIdx))
......@@ -143,7 +119,7 @@
const descriptor = await net.computeFaceDescriptor(input)
displayTimeStats(Date.now() - ts)
const bestMatch = getBestMatch(descriptor)
const bestMatch = getBestMatch(trainDescriptorsByClass, descriptor)
$('#prediction').val(`${bestMatch.distance < threshold ? bestMatch.className : 'unkown'} (${bestMatch.distance})`)
currImageIdx = currClassIdx === (classes.length - 1)
......@@ -164,13 +140,7 @@
net = await initFaceRecognitionNet()
setStatusText('computing initial descriptors...')
const trainImgs = await loadTrainingImages()
trainDescriptorsByClass = await Promise.all(trainImgs.map(
async ({ className, img }) => ({
descriptor: await net.computeFaceDescriptor(img),
className
})
))
trainDescriptorsByClass = await initTrainDescriptorsByClass(net)
$('#loader').hide()
runFaceRecognition()
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment