Unverified commit 5a066131 by justadudewhohacks, committed by GitHub

Merge pull request #109 from justadudewhohacks/v-0_15

v0.15
parents 2145e28f 2d92c084
@@ -4,4 +4,5 @@ node_modules
tmp
proto
weights_uncompressed
weights_unused
\ No newline at end of file
weights_unused
docs
\ No newline at end of file
@@ -7,4 +7,5 @@ weights
weights_uncompressed
weights_unused
test
tools
\ No newline at end of file
tools
docs
\ No newline at end of file
const classes = ['amy', 'bernadette', 'howard', 'leonard', 'penny', 'raj', 'sheldon', 'stuart']
function getImageUri(imageName) {
return `images/${imageName}`
}
function getFaceImageUri(className, idx) {
return `images/${className}/${className}${idx}.png`
}
async function fetchImage(uri) {
return (await fetch(uri)).blob()
}
async function requestExternalImage(imageUrl) {
const res = await fetch('fetch_external_image', {
method: 'post',
headers: {
'content-type': 'application/json'
},
body: JSON.stringify({ imageUrl })
})
if (res.status >= 400) {
console.error(res.status + ' : ' + await res.text())
throw new Error('failed to fetch image from url: ' + imageUrl)
}
let blob
try {
blob = await res.blob()
return await faceapi.bufferToImage(blob)
} catch (e) {
console.error('received blob:', blob)
console.error('error:', e)
throw new Error('failed to load image from url: ' + imageUrl)
}
}
// fetch first image of each class and compute their descriptors
async function initTrainDescriptorsByClass(net, numImagesForTraining = 1) {
const maxAvailableImagesPerClass = 5
numImagesForTraining = Math.min(numImagesForTraining, maxAvailableImagesPerClass)
return Promise.all(classes.map(
async className => {
const descriptors = []
for (let i = 1; i < (numImagesForTraining + 1); i++) {
const img = await faceapi.bufferToImage(
await fetchImage(getFaceImageUri(className, i))
)
descriptors.push(await net.computeFaceDescriptor(img))
}
return {
descriptors,
className
}
}
))
}
function getBestMatch(descriptorsByClass, queryDescriptor) {
function computeMeanDistance(descriptorsOfClass) {
return faceapi.round(
descriptorsOfClass
.map(d => faceapi.euclideanDistance(d, queryDescriptor))
.reduce((d1, d2) => d1 + d2, 0)
/ (descriptorsOfClass.length || 1)
)
}
return descriptorsByClass
.map(
({ descriptors, className }) => ({
distance: computeMeanDistance(descriptors),
className
})
)
.reduce((best, curr) => best.distance < curr.distance ? best : curr)
}
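// A minimal usage sketch of the two helpers above, assuming the face
// recognition model is already loaded; `queryImg` is a hypothetical
// HTMLImageElement, not defined in this file:
//
//   const trainDescriptorsByClass = await initTrainDescriptorsByClass(faceapi.recognitionNet, 1)
//   const queryDescriptor = await faceapi.recognitionNet.computeFaceDescriptor(queryImg)
//   const bestMatch = getBestMatch(trainDescriptorsByClass, queryDescriptor)
//   console.log(`${bestMatch.className} (${bestMatch.distance})`) // e.g. "sheldon (0.42)"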
function renderNavBar(navbarId, exampleUri) {
const examples = [
{
uri: 'face_detection',
name: 'Face Detection'
},
{
uri: 'face_detection_video',
name: 'Face Detection Video'
},
{
uri: 'face_recognition',
name: 'Face Recognition'
},
{
uri: 'face_similarity',
name: 'Face Similarity'
},
{
uri: 'face_landmarks',
name: 'Face Landmarks'
},
{
uri: 'detect_and_draw_landmarks',
name: 'Detect and Draw Landmarks'
},
{
uri: 'detect_and_draw_faces',
name: 'Detect and Draw Faces'
},
{
uri: 'face_alignment',
name: 'Face Alignment'
},
{
uri: 'detect_and_recognize_faces',
name: 'Detect and Recognize Faces'
},
{
uri: 'mtcnn_face_detection',
name: 'MTCNN Face Detection'
},
{
uri: 'mtcnn_face_detection_video',
name: 'MTCNN Face Detection Video'
},
{
uri: 'mtcnn_face_detection_webcam',
name: 'MTCNN Face Detection Webcam'
},
{
uri: 'mtcnn_face_recognition',
name: 'MTCNN Face Recognition'
},
{
uri: 'mtcnn_face_recognition_webcam',
name: 'MTCNN Face Recognition Webcam'
},
{
uri: 'tiny_yolov2_face_detection',
name: 'Tiny Yolov2 Face Detection'
},
{
uri: 'tiny_yolov2_face_detection_video',
name: 'Tiny Yolov2 Face Detection Video'
},
{
uri: 'tiny_yolov2_face_detection_webcam',
name: 'Tiny Yolov2 Face Detection Webcam'
},
{
uri: 'tiny_yolov2_face_recognition',
name: 'Tiny Yolov2 Face Recognition'
},
{
uri: 'batch_face_landmarks',
name: 'Batch Face Landmarks'
},
{
uri: 'batch_face_recognition',
name: 'Batch Face Recognition'
}
]
const navbar = $(navbarId).get(0)
const pageContainer = $('.page-container').get(0)
const header = document.createElement('h3')
header.innerHTML = examples.find(ex => ex.uri === exampleUri).name
pageContainer.insertBefore(header, pageContainer.children[0])
const menuContent = document.createElement('ul')
menuContent.id = 'slide-out'
menuContent.classList.add('side-nav', 'fixed')
navbar.appendChild(menuContent)
const menuButton = document.createElement('a')
menuButton.href='#'
menuButton.classList.add('button-collapse', 'show-on-large')
menuButton.setAttribute('data-activates', 'slide-out')
const menuButtonIcon = document.createElement('img')
menuButtonIcon.src = 'menu_icon.png'
menuButton.appendChild(menuButtonIcon)
navbar.appendChild(menuButton)
const li = document.createElement('li')
const githubLink = document.createElement('a')
githubLink.classList.add('waves-effect', 'waves-light', 'side-by-side')
githubLink.id = 'github-link'
githubLink.href = 'https://github.com/justadudewhohacks/face-api.js'
const h5 = document.createElement('h5')
h5.innerHTML = 'face-api.js'
githubLink.appendChild(h5)
const githubLinkIcon = document.createElement('img')
githubLinkIcon.src = 'github_link_icon.png'
githubLink.appendChild(githubLinkIcon)
li.appendChild(githubLink)
menuContent.appendChild(li)
examples
.forEach(ex => {
const li = document.createElement('li')
if (ex.uri === exampleUri) {
li.style.background='#b0b0b0'
}
const a = document.createElement('a')
a.classList.add('waves-effect', 'waves-light')
a.href = ex.uri
const span = document.createElement('span')
span.innerHTML = ex.name
span.style.whiteSpace = 'nowrap'
a.appendChild(span)
li.appendChild(a)
menuContent.appendChild(li)
})
$('.button-collapse').sideNav({
menuWidth: 280
})
}
function renderSelectList(selectListId, onChange, initialValue, renderChildren) {
const select = document.createElement('select')
$(selectListId).get(0).appendChild(select)
renderChildren(select)
$(select).val(initialValue)
$(select).on('change', (e) => onChange(e.target.value))
$(select).material_select()
}
function renderOption(parent, text, value) {
const option = document.createElement('option')
option.innerHTML = text
option.value = value
parent.appendChild(option)
}
function renderFaceImageSelectList(selectListId, onChange, initialValue) {
const indices = [1, 2, 3, 4, 5]
function renderChildren(select) {
classes.forEach(className => {
const optgroup = document.createElement('optgroup')
optgroup.label = className
select.appendChild(optgroup)
indices.forEach(imageIdx =>
renderOption(
optgroup,
`${className} ${imageIdx}`,
getFaceImageUri(className, imageIdx)
)
)
})
}
renderSelectList(
selectListId,
onChange,
getFaceImageUri(initialValue.className, initialValue.imageIdx),
renderChildren
)
}
function renderImageSelectList(selectListId, onChange, initialValue) {
const images = [1, 2, 3, 4, 5].map(idx => `bbt${idx}.jpg`)
function renderChildren(select) {
images.forEach(imageName =>
renderOption(
select,
imageName,
getImageUri(imageName)
)
)
}
renderSelectList(
selectListId,
onChange,
getImageUri(initialValue),
renderChildren
)
}
\ No newline at end of file
const classes = ['amy', 'bernadette', 'howard', 'leonard', 'penny', 'raj', 'sheldon', 'stuart']
function getFaceImageUri(className, idx) {
return `images/${className}/${className}${idx}.png`
}
function renderFaceImageSelectList(selectListId, onChange, initialValue) {
const indices = [1, 2, 3, 4, 5]
function renderChildren(select) {
classes.forEach(className => {
const optgroup = document.createElement('optgroup')
optgroup.label = className
select.appendChild(optgroup)
indices.forEach(imageIdx =>
renderOption(
optgroup,
`${className} ${imageIdx}`,
getFaceImageUri(className, imageIdx)
)
)
})
}
renderSelectList(
selectListId,
onChange,
getFaceImageUri(initialValue.className, initialValue.imageIdx),
renderChildren
)
}
// fetch first image of each class and compute their descriptors
async function createBbtFaceMatcher(numImagesForTraining = 1) {
const maxAvailableImagesPerClass = 5
numImagesForTraining = Math.min(numImagesForTraining, maxAvailableImagesPerClass)
const labeledFaceDescriptors = await Promise.all(classes.map(
async className => {
const descriptors = []
for (let i = 1; i < (numImagesForTraining + 1); i++) {
const img = await faceapi.fetchImage(getFaceImageUri(className, i))
descriptors.push(await faceapi.computeFaceDescriptor(img))
}
return new faceapi.LabeledFaceDescriptors(
className,
descriptors
)
}
))
return new faceapi.FaceMatcher(labeledFaceDescriptors)
}
\ No newline at end of file
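A minimal sketch of how the matcher built above is consumed (assuming faceapi.loadFaceRecognitionModel has completed and `queryImg` is a hypothetical image element; 0.6 is the FaceMatcher's default descriptor distance threshold):

async function matchQueryFace(queryImg) {
  // reference descriptors from image 1 of each class
  const faceMatcher = await createBbtFaceMatcher(1)
  const queryDescriptor = await faceapi.computeFaceDescriptor(queryImg)
  // FaceMatch.toString() renders e.g. "sheldon (0.42)", or "unknown"
  // when the best distance exceeds the matcher's threshold
  return faceMatcher.findBestMatch(queryDescriptor).toString()
}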
function getImageUri(imageName) {
return `images/${imageName}`
}
async function requestExternalImage(imageUrl) {
const res = await fetch('fetch_external_image', {
method: 'post',
headers: {
'content-type': 'application/json'
},
body: JSON.stringify({ imageUrl })
})
if (res.status >= 400) {
console.error(res.status + ' : ' + await res.text())
throw new Error('failed to fetch image from url: ' + imageUrl)
}
let blob
try {
blob = await res.blob()
return await faceapi.bufferToImage(blob)
} catch (e) {
console.error('received blob:', blob)
console.error('error:', e)
throw new Error('failed to load image from url: ' + imageUrl)
}
}
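// A minimal usage sketch: the examples' server exposes the
// 'fetch_external_image' route posted to above, which downloads the image
// server-side (presumably to sidestep CORS restrictions on arbitrary URLs).
// The URL below is a hypothetical placeholder:
//
//   const img = await requestExternalImage('https://example.com/some-face.jpg')
//   document.body.appendChild(img)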
function renderNavBar(navbarId, exampleUri) {
const examples = [
{
uri: 'face_and_landmark_detection',
name: 'Face And Landmark Detection'
},
{
uri: 'face_recognition',
name: 'Face Recognition'
},
{
uri: 'face_extraction',
name: 'Face Extraction'
},
{
uri: 'video_face_tracking',
name: 'Video Face Tracking'
},
{
uri: 'webcam_face_tracking',
name: 'Webcam Face Tracking'
},
{
uri: 'bbt_face_landmark_detection',
name: 'BBT Face Landmark Detection'
},
{
uri: 'bbt_face_similarity',
name: 'BBT Face Similarity'
},
{
uri: 'bbt_face_matching',
name: 'BBT Face Matching'
},
{
uri: 'bbt_face_recognition',
name: 'BBT Face Recognition'
},
{
uri: 'batch_face_landmarks',
name: 'Batch Face Landmark Detection'
},
{
uri: 'batch_face_recognition',
name: 'Batch Face Recognition'
}
]
const navbar = $(navbarId).get(0)
const pageContainer = $('.page-container').get(0)
const header = document.createElement('h3')
header.innerHTML = examples.find(ex => ex.uri === exampleUri).name
pageContainer.insertBefore(header, pageContainer.children[0])
const menuContent = document.createElement('ul')
menuContent.id = 'slide-out'
menuContent.classList.add('side-nav', 'fixed')
navbar.appendChild(menuContent)
const menuButton = document.createElement('a')
menuButton.href='#'
menuButton.classList.add('button-collapse', 'show-on-large')
menuButton.setAttribute('data-activates', 'slide-out')
const menuButtonIcon = document.createElement('img')
menuButtonIcon.src = 'menu_icon.png'
menuButton.appendChild(menuButtonIcon)
navbar.appendChild(menuButton)
const li = document.createElement('li')
const githubLink = document.createElement('a')
githubLink.classList.add('waves-effect', 'waves-light', 'side-by-side')
githubLink.id = 'github-link'
githubLink.href = 'https://github.com/justadudewhohacks/face-api.js'
const h5 = document.createElement('h5')
h5.innerHTML = 'face-api.js'
githubLink.appendChild(h5)
const githubLinkIcon = document.createElement('img')
githubLinkIcon.src = 'github_link_icon.png'
githubLink.appendChild(githubLinkIcon)
li.appendChild(githubLink)
menuContent.appendChild(li)
examples
.forEach(ex => {
const li = document.createElement('li')
if (ex.uri === exampleUri) {
li.style.background='#b0b0b0'
}
const a = document.createElement('a')
a.classList.add('waves-effect', 'waves-light')
a.href = ex.uri
const span = document.createElement('span')
span.innerHTML = ex.name
span.style.whiteSpace = 'nowrap'
a.appendChild(span)
li.appendChild(a)
menuContent.appendChild(li)
})
$('.button-collapse').sideNav({
menuWidth: 240
})
}
function renderSelectList(selectListId, onChange, initialValue, renderChildren) {
const select = document.createElement('select')
$(selectListId).get(0).appendChild(select)
renderChildren(select)
$(select).val(initialValue)
$(select).on('change', (e) => onChange(e.target.value))
$(select).material_select()
}
function renderOption(parent, text, value) {
const option = document.createElement('option')
option.innerHTML = text
option.value = value
parent.appendChild(option)
}
\ No newline at end of file
function resizeCanvasAndResults(dimensions, canvas, results) {
const { width, height } = dimensions instanceof HTMLVideoElement
? faceapi.getMediaDimensions(dimensions)
: dimensions
canvas.width = width
canvas.height = height
// resize detections (and landmarks) in case displayed image is smaller than
// original size
return results.map(res => res.forSize(width, height))
}
function drawDetections(dimensions, canvas, detections) {
const resizedDetections = resizeCanvasAndResults(dimensions, canvas, detections)
faceapi.drawDetection(canvas, resizedDetections)
}
function drawLandmarks(dimensions, canvas, results, withBoxes = true) {
const resizedResults = resizeCanvasAndResults(dimensions, canvas, results)
if (withBoxes) {
faceapi.drawDetection(canvas, resizedResults.map(det => det.detection))
}
const faceLandmarks = resizedResults.map(det => det.landmarks)
const drawLandmarksOptions = {
lineWidth: 2,
drawLines: true,
color: 'green'
}
faceapi.drawLandmarks(canvas, faceLandmarks, drawLandmarksOptions)
}
\ No newline at end of file
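A minimal sketch of how these drawing helpers pair with the v0.15 detection API (assuming the models are loaded and the page contains the #inputImg image and #overlay canvas used throughout these examples):

async function detectAndDraw() {
  const inputImgEl = document.getElementById('inputImg')
  const canvas = document.getElementById('overlay')
  // bounding boxes only
  const detections = await faceapi.detectAllFaces(inputImgEl)
  drawDetections(inputImgEl, canvas, detections)
  // bounding boxes plus 68-point landmarks
  const results = await faceapi.detectAllFaces(inputImgEl).withFaceLandmarks()
  drawLandmarks(inputImgEl, canvas, results, true)
}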
const SSD_MOBILENETV1 = 'ssd_mobilenetv1'
const TINY_FACE_DETECTOR = 'tiny_face_detector'
const MTCNN = 'mtcnn'
let selectedFaceDetector = SSD_MOBILENETV1
// ssd_mobilenetv1 options
let minConfidence = 0.5
// tiny_face_detector options
let inputSize = 512
let scoreThreshold = 0.5
// mtcnn options
let minFaceSize = 20
function getFaceDetectorOptions() {
return selectedFaceDetector === SSD_MOBILENETV1
? new faceapi.SsdMobilenetv1Options({ minConfidence })
: (
selectedFaceDetector === TINY_FACE_DETECTOR
? new faceapi.TinyFaceDetectorOptions({ inputSize, scoreThreshold })
: new faceapi.MtcnnOptions({ minFaceSize })
)
}
function onIncreaseMinConfidence() {
minConfidence = Math.min(faceapi.round(minConfidence + 0.1), 1.0)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onDecreaseMinConfidence() {
minConfidence = Math.max(faceapi.round(minConfidence - 0.1), 0.1)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onInputSizeChanged(e) {
changeInputSize(e.target.value)
updateResults()
}
function changeInputSize(size) {
inputSize = parseInt(size)
const inputSizeSelect = $('#inputSize')
inputSizeSelect.val(inputSize)
inputSizeSelect.material_select()
}
function onIncreaseScoreThreshold() {
scoreThreshold = Math.min(faceapi.round(scoreThreshold + 0.1), 1.0)
$('#scoreThreshold').val(scoreThreshold)
updateResults()
}
function onDecreaseScoreThreshold() {
scoreThreshold = Math.max(faceapi.round(scoreThreshold - 0.1), 0.1)
$('#scoreThreshold').val(scoreThreshold)
updateResults()
}
function onIncreaseMinFaceSize() {
minFaceSize = Math.min(faceapi.round(minFaceSize + 20), 300)
$('#minFaceSize').val(minFaceSize)
}
function onDecreaseMinFaceSize() {
minFaceSize = Math.max(faceapi.round(minFaceSize - 20), 50)
$('#minFaceSize').val(minFaceSize)
}
function getCurrentFaceDetectionNet() {
if (selectedFaceDetector === SSD_MOBILENETV1) {
return faceapi.nets.ssdMobilenetv1
}
if (selectedFaceDetector === TINY_FACE_DETECTOR) {
return faceapi.nets.tinyFaceDetector
}
if (selectedFaceDetector === MTCNN) {
return faceapi.nets.mtcnn
}
}
function isFaceDetectionModelLoaded() {
return !!getCurrentFaceDetectionNet().params
}
async function changeFaceDetector(detector) {
['#ssd_mobilenetv1_controls', '#tiny_face_detector_controls', '#mtcnn_controls']
.forEach(id => $(id).hide())
selectedFaceDetector = detector
const faceDetectorSelect = $('#selectFaceDetector')
faceDetectorSelect.val(detector)
faceDetectorSelect.material_select()
$('#loader').show()
if (!isFaceDetectionModelLoaded()) {
await getCurrentFaceDetectionNet().load('/')
}
$(`#${detector}_controls`).show()
$('#loader').hide()
}
async function onSelectedFaceDetectorChanged(e) {
selectedFaceDetector = e.target.value
await changeFaceDetector(e.target.value)
updateResults()
}
function initFaceDetectionControls() {
const faceDetectorSelect = $('#selectFaceDetector')
faceDetectorSelect.val(selectedFaceDetector)
faceDetectorSelect.on('change', onSelectedFaceDetectorChanged)
faceDetectorSelect.material_select()
const inputSizeSelect = $('#inputSize')
inputSizeSelect.val(inputSize)
inputSizeSelect.on('change', onInputSizeChanged)
inputSizeSelect.material_select()
}
\ No newline at end of file
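A minimal sketch tying these controls to a detection call (assuming the model files are served from '/' as in the examples' server, and `input` is an image, video, or canvas element):

async function detectWithSelectedDetector(input) {
  // loads the selected detector's weights on first use
  await changeFaceDetector(TINY_FACE_DETECTOR)
  // getFaceDetectorOptions() reflects the currently configured parameters
  return faceapi.detectAllFaces(input, getFaceDetectorOptions())
}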
async function onSelectedImageChanged(uri) {
const img = await faceapi.fetchImage(uri)
$(`#inputImg`).get(0).src = img.src
updateResults()
}
async function loadImageFromUrl(url) {
const img = await requestExternalImage($('#imgUrlInput').val())
$('#inputImg').get(0).src = img.src
updateResults()
}
function renderImageSelectList(selectListId, onChange, initialValue) {
const images = [1, 2, 3, 4, 5].map(idx => `bbt${idx}.jpg`)
function renderChildren(select) {
images.forEach(imageName =>
renderOption(
select,
imageName,
getImageUri(imageName)
)
)
}
renderSelectList(
selectListId,
onChange,
getImageUri(initialValue),
renderChildren
)
}
function initImageSelectionControls() {
renderImageSelectList(
'#selectList',
async (uri) => {
await onSelectedImageChanged(uri)
},
'bbt1.jpg'
)
onSelectedImageChanged($('#selectList select').val())
}
\ No newline at end of file
@@ -3,7 +3,7 @@
right: 0;
margin: auto;
margin-top: 20px;
padding-left: 300px;
padding-left: 260px;
display: inline-flex !important;
}
@@ -61,13 +61,7 @@
margin-bottom: 10px;
}
#overlay {
position: absolute;
top: 0;
left: 0;
}
.overlay {
#overlay, .overlay {
position: absolute;
top: 0;
left: 0;
......
@@ -11,28 +11,20 @@ const viewsDir = path.join(__dirname, 'views')
app.use(express.static(viewsDir))
app.use(express.static(path.join(__dirname, './public')))
app.use(express.static(path.join(__dirname, '../weights')))
app.use(express.static(path.join(__dirname, '../weights_uncompressed')))
app.use(express.static(path.join(__dirname, '../dist')))
app.use(express.static(path.join(__dirname, './node_modules/axios/dist')))
app.get('/', (req, res) => res.redirect('/face_detection'))
app.get('/face_detection', (req, res) => res.sendFile(path.join(viewsDir, 'faceDetection.html')))
app.get('/face_detection_video', (req, res) => res.sendFile(path.join(viewsDir, 'faceDetectionVideo.html')))
app.get('/', (req, res) => res.redirect('/face_and_landmark_detection'))
app.get('/face_and_landmark_detection', (req, res) => res.sendFile(path.join(viewsDir, 'faceAndLandmarkDetection.html')))
app.get('/face_extraction', (req, res) => res.sendFile(path.join(viewsDir, 'faceExtraction.html')))
app.get('/face_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'faceRecognition.html')))
app.get('/face_similarity', (req, res) => res.sendFile(path.join(viewsDir, 'faceSimilarity.html')))
app.get('/face_landmarks', (req, res) => res.sendFile(path.join(viewsDir, 'faceLandmarks.html')))
app.get('/detect_and_draw_faces', (req, res) => res.sendFile(path.join(viewsDir, 'detectAndDrawFaces.html')))
app.get('/detect_and_draw_landmarks', (req, res) => res.sendFile(path.join(viewsDir, 'detectAndDrawLandmarks.html')))
app.get('/face_alignment', (req, res) => res.sendFile(path.join(viewsDir, 'faceAlignment.html')))
app.get('/detect_and_recognize_faces', (req, res) => res.sendFile(path.join(viewsDir, 'detectAndRecognizeFaces.html')))
app.get('/mtcnn_face_detection', (req, res) => res.sendFile(path.join(viewsDir, 'mtcnnFaceDetection.html')))
app.get('/mtcnn_face_detection_video', (req, res) => res.sendFile(path.join(viewsDir, 'mtcnnFaceDetectionVideo.html')))
app.get('/mtcnn_face_detection_webcam', (req, res) => res.sendFile(path.join(viewsDir, 'mtcnnFaceDetectionWebcam.html')))
app.get('/mtcnn_face_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'mtcnnFaceRecognition.html')))
app.get('/mtcnn_face_recognition_webcam', (req, res) => res.sendFile(path.join(viewsDir, 'mtcnnFaceRecognitionWebcam.html')))
app.get('/tiny_yolov2_face_detection', (req, res) => res.sendFile(path.join(viewsDir, 'tinyYolov2FaceDetection.html')))
app.get('/tiny_yolov2_face_detection_video', (req, res) => res.sendFile(path.join(viewsDir, 'tinyYolov2FaceDetectionVideo.html')))
app.get('/tiny_yolov2_face_detection_webcam', (req, res) => res.sendFile(path.join(viewsDir, 'tinyYolov2FaceDetectionWebcam.html')))
app.get('/tiny_yolov2_face_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'tinyYolov2FaceRecognition.html')))
app.get('/video_face_tracking', (req, res) => res.sendFile(path.join(viewsDir, 'videoFaceTracking.html')))
app.get('/webcam_face_tracking', (req, res) => res.sendFile(path.join(viewsDir, 'webcamFaceTracking.html')))
app.get('/bbt_face_landmark_detection', (req, res) => res.sendFile(path.join(viewsDir, 'bbtFaceLandmarkDetection.html')))
app.get('/bbt_face_similarity', (req, res) => res.sendFile(path.join(viewsDir, 'bbtFaceSimilarity.html')))
app.get('/bbt_face_matching', (req, res) => res.sendFile(path.join(viewsDir, 'bbtFaceMatching.html')))
app.get('/bbt_face_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'bbtFaceRecognition.html')))
app.get('/batch_face_landmarks', (req, res) => res.sendFile(path.join(viewsDir, 'batchFaceLandmarks.html')))
app.get('/batch_face_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'batchFaceRecognition.html')))
......
@@ -2,7 +2,8 @@
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<script src="js/commons.js"></script>
<script src="js/bbt.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
@@ -97,7 +98,7 @@
.reduce((flat, arr) => flat.concat(arr))
images = await Promise.all(allImgUris.map(
async uri => faceapi.bufferToImage(await fetchImage(uri))
async uri => faceapi.fetchImage(uri)
))
// warmup
await measureTimings()
......
@@ -2,7 +2,8 @@
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<script src="js/commons.js"></script>
<script src="js/bbt.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
@@ -28,7 +29,7 @@
<div class="row side-by-side">
<div>
<label for="numImages">Num Images:</label>
<input id="numImages" type="text" class="bold" value="32"/>
<input id="numImages" type="text" class="bold" value="16"/>
</div>
<button
class="waves-effect waves-light btn"
@@ -47,13 +48,12 @@
<script>
let images = []
let trainDescriptorsByClass = []
let descriptorsByFace = []
let numImages = 32
let faceMatcher = null
let numImages = 16
let maxDistance = 0.6
function onNumImagesChanged(e) {
const val = parseInt(e.target.value) || 32
const val = parseInt(e.target.value) || 16
numImages = Math.min(Math.max(val, 0), 32)
e.target.value = numImages
}
@@ -67,15 +67,12 @@
const canvas = faceapi.createCanvasFromMedia(img)
$('#faceContainer').append(canvas)
const bestMatch = getBestMatch(trainDescriptorsByClass, descriptor)
const text = `${bestMatch.distance < maxDistance ? bestMatch.className : 'unknown'} (${bestMatch.distance})`
const x = 20, y = canvas.height - 20
faceapi.drawText(
canvas.getContext('2d'),
x,
y,
text,
faceMatcher.findBestMatch(descriptor).toString(),
Object.assign(faceapi.getDefaultDrawOptions(), { color: 'red', fontSize: 16 })
)
}
@@ -105,7 +102,7 @@
async function run() {
await faceapi.loadFaceRecognitionModel('/')
trainDescriptorsByClass = await initTrainDescriptorsByClass(faceapi.recognitionNet, 1)
faceMatcher = await createBbtFaceMatcher(1)
$('#loader').hide()
const imgUris = classes
@@ -114,7 +111,7 @@
.reduce((flat, arr) => flat.concat(arr))
images = await Promise.all(imgUris.map(
async uri => faceapi.bufferToImage(await fetchImage(uri))
async uri => faceapi.fetchImage(uri)
))
// warmup
......
@@ -2,7 +2,8 @@
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<script src="js/commons.js"></script>
<script src="js/bbt.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
@@ -46,8 +47,7 @@
}
async function onSelectionChanged(uri) {
const imgBuf = await fetchImage(uri)
currentImg = await faceapi.bufferToImage(imgBuf)
currentImg = await faceapi.fetchImage(uri)
landmarks = await faceapi.detectLandmarks(currentImg)
redraw()
}
@@ -59,7 +59,7 @@
}
$(document).ready(function() {
renderNavBar('#navbar', 'face_landmarks')
renderNavBar('#navbar', 'bbt_face_landmark_detection')
renderFaceImageSelectList(
'#selectList',
onSelectionChanged,
......
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/bbt.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div>
<div class="row center-content" id="loader">
<input disabled value="" id="status" type="text" class="bold">
<div class="progress">
<div class="indeterminate"></div>
</div>
</div>
<div class="row center-content">
<img id="face" src=""/>
</div>
<div class="row">
<label for="prediction">Prediction:</label>
<input disabled value="-" id="prediction" type="text" class="bold">
</div>
<div class="row">
<label for="time">Time:</label>
<input disabled value="-" id="time" type="text" class="bold">
</div>
<div class="row">
<label for="fps">Estimated Fps:</label>
<input disabled value="-" id="fps" type="text" class="bold">
</div>
<div class="row">
<button
class="waves-effect waves-light btn"
id="stop"
onclick="onToggleStop()"
>
Stop
</button>
<button
class="waves-effect waves-light btn"
onclick="onSlower()"
>
<i class="material-icons left">-</i> Slower
</button>
<button
class="waves-effect waves-light btn"
onclick="onFaster()"
>
<i class="material-icons left">+</i> Faster
</button>
</div>
<div class="row">
<label for="interval">Interval:</label>
<input disabled value="2000" id="interval" type="text" class="bold">
</div>
</div>
</div>
<script>
let interval = 2000
let isStop = false
let faceMatcher = null
let currImageIdx = 2, currClassIdx = 0
let to = null
function onSlower() {
interval = Math.min(interval + 100, 2000)
$('#interval').val(interval)
}
function onFaster() {
interval = Math.max(interval - 100, 0)
$('#interval').val(interval)
}
function onToggleStop() {
clearTimeout(to)
isStop = !isStop
document.getElementById('stop').innerHTML = isStop ? 'Continue' : 'Stop'
setStatusText(isStop ? 'stopped' : 'running face recognition:')
if (!isStop) {
runFaceRecognition()
}
}
function setStatusText(text) {
$('#status').val(text)
}
function displayTimeStats(timeInMs) {
$('#time').val(`${timeInMs} ms`)
$('#fps').val(`${faceapi.round(1000 / timeInMs)}`)
}
function displayImage(src) {
getImg().src = src
}
async function runFaceRecognition() {
async function next() {
const input = await faceapi.fetchImage(getFaceImageUri(classes[currClassIdx], currImageIdx))
const imgEl = $('#face').get(0)
imgEl.src = input.src
const ts = Date.now()
const descriptor = await faceapi.computeFaceDescriptor(input)
displayTimeStats(Date.now() - ts)
const bestMatch = faceMatcher.findBestMatch(descriptor)
$('#prediction').val(bestMatch.toString())
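// advance the class on every tick; after a full pass over all classes,
// advance the image index, cycling through images 2..5 (image 1 of each
// class is the reference image used to build the face matcher)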
currImageIdx = currClassIdx === (classes.length - 1)
? currImageIdx + 1
: currImageIdx
currClassIdx = (currClassIdx + 1) % classes.length
currImageIdx = (currImageIdx % 6) || 2
to = setTimeout(next, interval)
}
await next()
}
async function run() {
try {
setStatusText('loading model file...')
await faceapi.loadFaceRecognitionModel('/')
setStatusText('computing initial descriptors...')
faceMatcher = await createBbtFaceMatcher(1)
$('#loader').hide()
runFaceRecognition()
} catch (err) {
console.error(err)
}
}
$(document).ready(function() {
renderNavBar('#navbar', 'bbt_face_matching')
run()
})
</script>
</body>
</html>
\ No newline at end of file
@@ -2,7 +2,8 @@
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<script src="js/commons.js"></script>
<script src="js/bbt.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
@@ -52,8 +53,7 @@
}
async function onSelectionChanged(which, uri) {
const imgBuf = await fetchImage(uri)
const input = await faceapi.bufferToImage(imgBuf)
const input = await faceapi.fetchImage(uri)
const imgEl = $(`#face${which}`).get(0)
imgEl.src = input.src
descriptors[`desc${which}`] = await faceapi.computeFaceDescriptor(input)
@@ -68,7 +68,7 @@
}
$(document).ready(function() {
renderNavBar('#navbar', 'face_similarity')
renderNavBar('#navbar', 'bbt_face_similarity')
renderFaceImageSelectList(
'#selectList1',
async (uri) => {
......
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div id="facesContainer"></div>
<div class="row side-by-side">
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.7" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</div>
<script>
let minConfidence = 0.7
function onIncreaseThreshold() {
minConfidence = Math.min(faceapi.round(minConfidence + 0.1), 1.0)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onDecreaseThreshold() {
minConfidence = Math.max(faceapi.round(minConfidence - 0.1), 0.1)
$('#minConfidence').val(minConfidence)
updateResults()
}
async function loadImageFromUrl(url) {
const img = await requestExternalImage($('#imgUrlInput').val())
$('#inputImg').get(0).src = img.src
updateResults()
}
async function updateResults() {
const inputImgEl = $('#inputImg').get(0)
const { width, height } = inputImgEl
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const input = await faceapi.toNetInput(inputImgEl)
const detections = await faceapi.locateFaces(input, minConfidence)
faceapi.drawDetection('overlay', detections.map(det => det.forSize(width, height)))
const faceImages = await faceapi.extractFaces(inputImgEl, detections)
$('#facesContainer').empty()
faceImages.forEach(canvas => $('#facesContainer').append(canvas))
}
async function onSelectionChanged(uri) {
const imgBuf = await fetchImage(uri)
$(`#inputImg`).get(0).src = (await faceapi.bufferToImage(imgBuf)).src
updateResults()
}
async function run() {
await faceapi.loadFaceDetectionModel('/')
$('#loader').hide()
onSelectionChanged($('#selectList select').val())
}
$(document).ready(function() {
renderNavBar('#navbar', 'detect_and_draw_faces')
renderImageSelectList(
'#selectList',
async (uri) => {
await onSelectionChanged(uri)
},
'bbt1.jpg'
)
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.7" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinConfidence()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinConfidence()"
>
<i class="material-icons left">+</i>
</button>
</div>
</div>
<script>
let minConfidence = 0.7
let drawLines = true
function onIncreaseMinConfidence() {
minConfidence = Math.min(faceapi.round(minConfidence + 0.1), 1.0)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onDecreaseMinConfidence() {
minConfidence = Math.max(faceapi.round(minConfidence - 0.1), 0.1)
$('#minConfidence').val(minConfidence)
updateResults()
}
async function onSelectionChanged(uri) {
const imgBuf = await fetchImage(uri)
$(`#inputImg`).get(0).src = (await faceapi.bufferToImage(imgBuf)).src
updateResults()
}
async function loadImageFromUrl(url) {
const img = await requestExternalImage($('#imgUrlInput').val())
$('#inputImg').get(0).src = img.src
updateResults()
}
async function updateResults() {
const inputImgEl = $('#inputImg').get(0)
const { width, height } = inputImgEl
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const input = await faceapi.toNetInput(inputImgEl)
const locations = await faceapi.locateFaces(input, minConfidence)
const faces = await faceapi.extractFaces(input, locations)
let landmarksByFace = await Promise.all(faces.map(face => faceapi.detectLandmarks(face)))
// shift and scale the face landmarks to the face image position in the canvas
landmarksByFace = landmarksByFace.map((landmarks, i) => {
const box = locations[i].forSize(width, height).getBox()
return landmarks.forSize(box.width, box.height).shift(box.x, box.y)
})
faceapi.drawLandmarks(canvas, landmarksByFace, { lineWidth: drawLines ? 2 : 4, drawLines, color: 'red' })
faceapi.drawDetection('overlay', locations.map(det => det.forSize(width, height)))
}
async function run() {
await faceapi.loadFaceDetectionModel('/')
await faceapi.loadFaceLandmarkModel('/')
$('#loader').hide()
onSelectionChanged($('#selectList select').val())
}
$(document).ready(function() {
renderNavBar('#navbar', 'detect_and_draw_landmarks')
renderImageSelectList(
'#selectList',
async (uri) => {
await onSelectionChanged(uri)
},
'bbt1.jpg'
)
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
<p>
<input type="checkbox" id="useBatchProcessing" onchange="onChangeUseBatchProcessing(event)" />
<label for="useBatchProcessing">Use Batch Processing</label>
</p>
</div>
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.7" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn button-sm"
onclick="onDecreaseMinConfidence()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn button-sm"
onclick="onIncreaseMinConfidence()"
>
<i class="material-icons left">+</i>
</button>
<div class="row">
<label for="maxDistance">Max Descriptor Distance:</label>
<input disabled value="0.6" id="maxDistance" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn button-sm"
onclick="onDecreaseMaxDistance()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn button-sm"
onclick="onIncreaseMaxDistance()"
>
<i class="material-icons left">+</i>
</button>
</div>
</div>
<script>
let maxDistance = 0.6
let minConfidence = 0.7
let useBatchProcessing = false
let detectionNet, recognitionNet, landmarkNet
let trainDescriptorsByClass = []
function onChangeUseBatchProcessing(e) {
useBatchProcessing = $(e.target).prop('checked')
}
function onIncreaseMinConfidence() {
minConfidence = Math.min(faceapi.round(minConfidence + 0.1), 1.0)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onDecreaseMinConfidence() {
minConfidence = Math.max(faceapi.round(minConfidence - 0.1), 0.1)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onIncreaseMaxDistance() {
maxDistance = Math.min(faceapi.round(maxDistance + 0.1), 1.0)
$('#maxDistance').val(maxDistance)
updateResults()
}
function onDecreaseMaxDistance() {
maxDistance = Math.max(faceapi.round(maxDistance - 0.1), 0.1)
$('#maxDistance').val(maxDistance)
updateResults()
}
async function loadImageFromUrl(url) {
const img = await requestExternalImage($('#imgUrlInput').val())
$('#inputImg').get(0).src = img.src
updateResults()
}
async function updateResults() {
const inputImgEl = $('#inputImg').get(0)
const { width, height } = inputImgEl
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const fullFaceDescriptions = (await faceapi.allFaces(inputImgEl, minConfidence, useBatchProcessing))
.map(fd => fd.forSize(width, height))
fullFaceDescriptions.forEach(({ detection, descriptor }) => {
faceapi.drawDetection('overlay', [detection], { withScore: false })
const bestMatch = getBestMatch(trainDescriptorsByClass, descriptor)
const text = `${bestMatch.distance < maxDistance ? bestMatch.className : 'unknown'} (${bestMatch.distance})`
const { x, y, height: boxHeight } = detection.getBox()
faceapi.drawText(
canvas.getContext('2d'),
x,
y + boxHeight,
text,
Object.assign(faceapi.getDefaultDrawOptions(), { color: 'red', fontSize: 16 })
)
})
}
async function onSelectionChanged(uri) {
const imgBuf = await fetchImage(uri)
$(`#inputImg`).get(0).src = (await faceapi.bufferToImage(imgBuf)).src
updateResults()
}
async function run() {
await faceapi.loadModels('/')
trainDescriptorsByClass = await initTrainDescriptorsByClass(faceapi.recognitionNet, 1)
$('#loader').hide()
onSelectionChanged($('#selectList select').val())
}
$(document).ready(function() {
renderNavBar('#navbar', 'detect_and_recognize_faces')
renderImageSelectList(
'#selectList',
async (uri) => {
await onSelectionChanged(uri)
},
'bbt1.jpg'
)
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div id="facesContainer"></div>
<div class="row side-by-side">
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.7" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinConfidence()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinConfidence()"
>
<i class="material-icons left">+</i>
</button>
</div>
<div class="row">
<p>
<input type="checkbox" id="drawLinesCheckbox" onchange="onChangeUseMtcnn(event)" />
<label for="drawLinesCheckbox">Use Mtcnn</label>
</p>
</div>
</div>
<script>
let minConfidence = 0.7
let useMtcnn = false
function onIncreaseMinConfidence() {
minConfidence = Math.min(faceapi.round(minConfidence + 0.1), 1.0)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onDecreaseMinConfidence() {
minConfidence = Math.max(faceapi.round(minConfidence - 0.1), 0.1)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onChangeUseMtcnn(e) {
useMtcnn = $(e.target).prop('checked')
updateResults()
}
async function loadImageFromUrl(url) {
const img = await requestExternalImage($('#imgUrlInput').val())
$('#inputImg').get(0).src = img.src
updateResults()
}
async function locateAndAlignFacesWithMtcnn(inputImgEl) {
const input = await faceapi.toNetInput(inputImgEl)
const results = await faceapi.mtcnn(input, { minFaceSize: 100 })
const unalignedFaceImages = await faceapi.extractFaces(input.getInput(0), results.map(res => res.faceDetection))
const alignedFaceBoxes = results
.filter(res => res.faceDetection.score > minConfidence)
.map(res => res.faceLandmarks.align())
const alignedFaceImages = await faceapi.extractFaces(input.getInput(0), alignedFaceBoxes)
return {
unalignedFaceImages,
alignedFaceImages
}
}
async function locateAndAlignFacesWithSSD(inputImgEl) {
const input = await faceapi.toNetInput(inputImgEl)
const locations = await faceapi.locateFaces(input, minConfidence)
const unalignedFaceImages = await faceapi.extractFaces(input.getInput(0), locations)
// detect landmarks and get the aligned face image bounding boxes
const alignedFaceBoxes = await Promise.all(unalignedFaceImages.map(
async (faceCanvas, i) => {
const faceLandmarks = await faceapi.detectLandmarks(faceCanvas)
return faceLandmarks.align(locations[i])
}
))
const alignedFaceImages = await faceapi.extractFaces(input.getInput(0), alignedFaceBoxes)
return {
unalignedFaceImages,
alignedFaceImages
}
}
async function updateResults() {
const inputImgEl = $('#inputImg').get(0)
const { width, height } = inputImgEl
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const {
unalignedFaceImages,
alignedFaceImages
} = useMtcnn
? await locateAndAlignFacesWithMtcnn(inputImgEl)
: await locateAndAlignFacesWithSSD(inputImgEl)
$('#facesContainer').empty()
unalignedFaceImages.forEach(async (faceCanvas, i) => {
$('#facesContainer').append(faceCanvas)
$('#facesContainer').append(alignedFaceImages[i])
})
}
async function onSelectionChanged(uri) {
const imgBuf = await fetchImage(uri)
$(`#inputImg`).get(0).src = (await faceapi.bufferToImage(imgBuf)).src
updateResults()
}
async function run() {
await faceapi.loadFaceDetectionModel('/')
await faceapi.loadFaceLandmarkModel('/')
await faceapi.loadMtcnnModel('/')
$('#loader').hide()
onSelectionChanged($('#selectList select').val())
}
$(document).ready(function() {
renderNavBar('#navbar', 'face_alignment')
renderImageSelectList(
'#selectList',
async (uri) => {
await onSelectionChanged(uri)
},
'bbt1.jpg'
)
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<script src="js/imageSelectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<!-- image_selection_control -->
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
<!-- image_selection_control -->
</div>
<div class="row side-by-side">
<!-- face_detector_selection_control -->
<div id="face_detector_selection_control" class="row input-field" style="margin-right: 20px;">
<select id="selectFaceDetector">
<option value="ssd_mobilenetv1">SSD Mobilenet V1</option>
<option value="tiny_face_detector">Tiny Face Detector</option>
<option value="mtcnn">MTCNN</option>
</select>
<label>Select Face Detector</label>
</div>
<!-- face_detector_selection_control -->
<!-- check boxes -->
<div class="row" style="width: 220px;">
<input type="checkbox" id="withFaceLandmarksCheckbox" onchange="onChangeWithFaceLandmarks(event)" />
<label for="withFaceLandmarksCheckbox">Detect Face Landmarks</label>
<input type="checkbox" id="hideBoundingBoxesCheckbox" onchange="onChangeHideBoundingBoxes(event)" />
<label for="hideBoundingBoxesCheckbox">Hide Bounding Boxes</label>
</div>
<!-- check boxes -->
</div>
<!-- ssd_mobilenetv1_controls -->
<span id="ssd_mobilenetv1_controls">
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.5" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinConfidence()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinConfidence()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- ssd_mobilenetv1_controls -->
<!-- tiny_face_detector_controls -->
<span id="tiny_face_detector_controls">
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="inputSize">
<option value="" disabled selected>Input Size:</option>
<option value="160">160 x 160</option>
<option value="224">224 x 224</option>
<option value="320">320 x 320</option>
<option value="416">416 x 416</option>
<option value="512">512 x 512</option>
<option value="608">608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseScoreThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseScoreThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- tiny_face_detector_controls -->
<!-- mtcnn_controls -->
<span id="mtcnn_controls">
<div class="row side-by-side">
<div class="row">
<label for="minFaceSize">Minimum Face Size:</label>
<input disabled value="20" id="minFaceSize" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinFaceSize()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinFaceSize()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- mtcnn_controls -->
</div>
<script>
let withFaceLandmarks = false
let withBoxes = true
function onChangeWithFaceLandmarks(e) {
withFaceLandmarks = $(e.target).prop('checked')
updateResults()
}
function onChangeHideBoundingBoxes(e) {
withBoxes = !$(e.target).prop('checked')
updateResults()
}
async function updateResults() {
if (!isFaceDetectionModelLoaded()) {
return
}
const inputImgEl = $('#inputImg').get(0)
const options = getFaceDetectorOptions()
const faceDetectionTask = faceapi.detectAllFaces(inputImgEl, options)
const results = withFaceLandmarks
? await faceDetectionTask.withFaceLandmarks()
: await faceDetectionTask
const drawFunction = withFaceLandmarks
? drawLandmarks
: drawDetections
drawFunction(inputImgEl, $('#overlay').get(0), results, withBoxes)
}
async function run() {
// load face detection and face landmark models
await changeFaceDetector(SSD_MOBILENETV1)
await faceapi.loadFaceLandmarkModel('/')
// start processing image
updateResults()
}
$(document).ready(function() {
renderNavBar('#navbar', 'face_and_landmark_detection')
initImageSelectionControls()
initFaceDetectionControls()
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.7" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</div>
<script>
let minConfidence = 0.7
let result
function onIncreaseThreshold() {
minConfidence = Math.min(faceapi.round(minConfidence + 0.1), 1.0)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onDecreaseThreshold() {
minConfidence = Math.max(faceapi.round(minConfidence - 0.1), 0.1)
$('#minConfidence').val(minConfidence)
updateResults()
}
async function loadImageFromUrl(url) {
const img = await requestExternalImage($('#imgUrlInput').val())
$('#inputImg').get(0).src = img.src
updateResults()
}
async function updateResults() {
const inputImgEl = $('#inputImg').get(0)
const { width, height } = inputImgEl
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
result = await faceapi.locateFaces(inputImgEl, minConfidence)
faceapi.drawDetection('overlay', result.map(det => det.forSize(width, height)))
}
async function onSelectionChanged(uri) {
const imgBuf = await fetchImage(uri)
$(`#inputImg`).get(0).src = (await faceapi.bufferToImage(imgBuf)).src
updateResults()
}
async function run() {
await faceapi.loadFaceDetectionModel('/')
$('#loader').hide()
onSelectionChanged($('#selectList select').val())
}
$(document).ready(function() {
renderNavBar('#navbar', 'face_detection')
renderImageSelectList(
'#selectList',
async (uri) => {
await onSelectionChanged(uri)
},
'bbt1.jpg'
)
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<video src="media/bbt.mp4" onplay="onPlay(this)" id="inputVideo" autoplay muted></video>
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.7" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="time">Time:</label>
<input disabled value="-" id="time" type="text" class="bold">
</div>
<div class="row">
<label for="fps">Estimated Fps:</label>
<input disabled value="-" id="fps" type="text" class="bold">
</div>
</div>
</div>
<script>
let minConfidence = 0.7
let modelLoaded = false
let result
let forwardTimes = []
function updateTimeStats(timeInMs) {
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
}
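// e.g. forwardTimes = [40, 60, 50] yields avgTimeInMs = 50, shown as "50 ms"
// and an estimated 20 fps; only the 30 most recent timings are kept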
function onIncreaseThreshold() {
minConfidence = Math.min(faceapi.round(minConfidence + 0.1), 1.0)
$('#minConfidence').val(minConfidence)
}
function onDecreaseThreshold() {
minConfidence = Math.max(faceapi.round(minConfidence - 0.1), 0.1)
$('#minConfidence').val(minConfidence)
}
async function onPlay(videoEl) {
if(videoEl.paused || videoEl.ended || !modelLoaded)
return false
const { width, height } = faceapi.getMediaDimensions(videoEl)
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const ts = Date.now()
result = await faceapi.locateFaces(videoEl, minConfidence)
updateTimeStats(Date.now() - ts)
faceapi.drawDetection('overlay', result.map(det => det.forSize(width, height)))
setTimeout(() => onPlay(videoEl))
}
async function run() {
await faceapi.loadFaceDetectionModel('/')
modelLoaded = true
onPlay($('#inputVideo').get(0))
$('#loader').hide()
}
$(document).ready(function() {
renderNavBar('#navbar', 'face_detection_video')
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/faceDetectionControls.js"></script>
<script src="js/imageSelectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div id="facesContainer"></div>
<div class="row side-by-side">
<!-- face_detector_selection_control -->
<div id="face_detector_selection_control" class="row input-field" style="margin-right: 20px;">
<select id="selectFaceDetector">
<option value="ssd_mobilenetv1">SSD Mobilenet V1</option>
<option value="tiny_face_detector">Tiny Face Detector</option>
<option value="mtcnn">MTCNN</option>
</select>
<label>Select Face Detector</label>
</div>
<!-- face_detector_selection_control -->
<!-- image_selection_control -->
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
<!-- image_selection_control -->
</div>
<!-- ssd_mobilenetv1_controls -->
<span id="ssd_mobilenetv1_controls">
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.5" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinConfidence()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinConfidence()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- ssd_mobilenetv1_controls -->
<!-- tiny_face_detector_controls -->
<span id="tiny_face_detector_controls">
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="inputSize">
<option value="" disabled selected>Input Size:</option>
<option value="160">160 x 160</option>
<option value="224">224 x 224</option>
<option value="320">320 x 320</option>
<option value="416">416 x 416</option>
<option value="512">512 x 512</option>
<option value="608">608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseScoreThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseScoreThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- tiny_face_detector_controls -->
<!-- mtcnn_controls -->
<span id="mtcnn_controls">
<div class="row side-by-side">
<div class="row">
<label for="minFaceSize">Minimum Face Size:</label>
<input disabled value="20" id="minFaceSize" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinFaceSize()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinFaceSize()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- mtcnn_controls -->
</div>
<script>
async function updateResults() {
if (!isFaceDetectionModelLoaded()) {
return
}
const inputImgEl = $('#inputImg').get(0)
const options = getFaceDetectorOptions()
const detections = await faceapi.detectAllFaces(inputImgEl, options)
const faceImages = await faceapi.extractFaces(inputImgEl, detections)
displayExtractedFaces(faceImages)
}
function displayExtractedFaces(faceImages) {
const canvas = $('#overlay').get(0)
const { width, height } = $('#inputImg').get(0)
canvas.width = width
canvas.height = height
$('#facesContainer').empty()
faceImages.forEach(canvas => $('#facesContainer').append(canvas))
}
async function run() {
// load face detection model
await changeFaceDetector(selectedFaceDetector)
// start processing image
updateResults()
}
$(document).ready(function() {
renderNavBar('#navbar', 'face_extraction')
initImageSelectionControls()
initFaceDetectionControls()
run()
})
</script>
</body>
</html>
\ No newline at end of file
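The extraction example reduces to two calls: detect bounding boxes, then crop each box to its own canvas. A hedged sketch of that pipeline in isolation (the options argument stands in for whatever getFaceDetectorOptions() returns):

// Detect faces on an image element and crop each one to its own canvas
async function extractFaceCanvases(imgEl, options) {
  const detections = await faceapi.detectAllFaces(imgEl, options)
  // extractFaces resolves to one HTMLCanvasElement per detection
  return faceapi.extractFaces(imgEl, detections)
}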
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.7" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</div>
<script>
let minConfidence = 0.7
let minFaceSize = 50
let scaleFactor = 0.709
let maxNumScales = 10
let stage1Threshold = 0.7
let stage2Threshold = 0.7
let stage3Threshold = 0.7
function onIncreaseThreshold() {
minConfidence = Math.min(faceapi.round(minConfidence + 0.1), 1.0)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onDecreaseThreshold() {
minConfidence = Math.max(faceapi.round(minConfidence - 0.1), 0.1)
$('#minConfidence').val(minConfidence)
updateResults()
}
async function loadImageFromUrl() {
const img = await requestExternalImage($('#imgUrlInput').val())
$('#inputImg').get(0).src = img.src
updateResults()
}
async function updateResults() {
const inputImgEl = $('#inputImg').get(0)
const { width, height } = inputImgEl
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const mtcnnParams = {
minFaceSize,
scaleFactor,
maxNumScales,
scoreThresholds: [stage1Threshold, stage2Threshold, stage3Threshold]
}
const results = await faceapi.mtcnn(inputImgEl, mtcnnParams)
if (results) {
results.forEach(({ faceDetection, faceLandmarks }) => {
// honor the min confidence control, as the video example below does
if (faceDetection.score < minConfidence) {
return
}
faceapi.drawDetection('overlay', faceDetection.forSize(width, height))
faceapi.drawLandmarks('overlay', faceLandmarks.forSize(width, height), { lineWidth: 4, color: 'red' })
})
}
}
async function onSelectionChanged(uri) {
const imgBuf = await fetchImage(uri)
$(`#inputImg`).get(0).src = (await faceapi.bufferToImage(imgBuf)).src
updateResults()
}
async function run() {
await faceapi.loadMtcnnModel('/')
$('#loader').hide()
onSelectionChanged($('#selectList select').val())
}
$(document).ready(function() {
renderNavBar('#navbar', 'mtcnn_face_detection')
renderImageSelectList(
'#selectList',
async (uri) => {
await onSelectionChanged(uri)
},
'bbt1.jpg'
)
run()
})
</script>
</body>
</html>
\ No newline at end of file
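For reference, the MTCNN forward parameters assembled above, annotated with what each controls (the values mirror this example's defaults; P-Net/R-Net/O-Net are the standard names of the three cascade stages):

// inside an async context:
const results = await faceapi.mtcnn(inputImgEl, {
  minFaceSize: 50,                  // smallest face, in px, the image pyramid covers
  scaleFactor: 0.709,               // step between consecutive pyramid scales
  maxNumScales: 10,                 // cap on the number of pyramid levels
  scoreThresholds: [0.7, 0.7, 0.7]  // per-stage cutoffs: P-Net, R-Net, O-Net
})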
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<video src="media/bbt.mp4" onplay="onPlay(this)" id="inputVideo" autoplay muted></video>
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div class="row">
<label for="minFaceSize">Minimum Face Size:</label>
<input disabled value="80" id="minFaceSize" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinFaceSize()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinFaceSize()"
>
<i class="material-icons left">+</i>
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="time">Time:</label>
<input disabled value="-" id="time" type="text" class="bold">
</div>
<div class="row">
<label for="fps">Estimated Fps:</label>
<input disabled value="-" id="fps" type="text" class="bold">
</div>
</div>
</div>
<script>
let modelLoaded = false
let minFaceSize = 80
let minConfidence = 0.9
let forwardTimes = []
function onIncreaseMinFaceSize() {
minFaceSize = Math.min(faceapi.round(minFaceSize + 20), 200)
$('#minFaceSize').val(minFaceSize)
}
function onDecreaseMinFaceSize() {
minFaceSize = Math.max(faceapi.round(minFaceSize - 20), 20)
$('#minFaceSize').val(minFaceSize)
}
function updateTimeStats(timeInMs) {
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
}
async function onPlay(videoEl) {
if(videoEl.paused || videoEl.ended || !modelLoaded)
return false
const { width, height } = faceapi.getMediaDimensions(videoEl)
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const mtcnnParams = {
minFaceSize
}
const ts = Date.now()
const results = await faceapi.mtcnn(videoEl, mtcnnParams)
updateTimeStats(Date.now() - ts)
if (results) {
results.forEach(({ faceDetection, faceLandmarks }) => {
if (faceDetection.score < minConfidence) {
return
}
faceapi.drawDetection('overlay', faceDetection.forSize(width, height))
faceapi.drawLandmarks('overlay', faceLandmarks.forSize(width, height), { lineWidth: 4, color: 'red' })
})
}
setTimeout(() => onPlay(videoEl))
}
async function run() {
await faceapi.loadMtcnnModel('/')
modelLoaded = true
onPlay($('#inputVideo').get(0))
$('#loader').hide()
}
$(document).ready(function() {
renderNavBar('#navbar', 'mtcnn_face_detection_video')
run()
})
</script>
</body>
</html>
\ No newline at end of file
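Note how the minFaceSize defaults climb with the input source: 50 in the still-image MTCNN demo above, 80 in this video example, 200 in the webcam pages that follow. A larger minimum face size means a smaller image pyramid for MTCNN's first stage to scan, which is what keeps the per-frame cost tolerable on live input.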
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<video onplay="onPlay(this)" id="inputVideo" autoplay muted></video>
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div class="row">
<label for="minFaceSize">Minimum Face Size:</label>
<input disabled value="200" id="minFaceSize" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinFaceSize()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinFaceSize()"
>
<i class="material-icons left">+</i>
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="time">Time:</label>
<input disabled value="-" id="time" type="text" class="bold">
</div>
<div class="row">
<label for="fps">Estimated Fps:</label>
<input disabled value="-" id="fps" type="text" class="bold">
</div>
</div>
</div>
<script>
let modelLoaded = false
let minFaceSize = 200
let minConfidence = 0.9
let forwardTimes = []
function onIncreaseMinFaceSize() {
minFaceSize = Math.min(faceapi.round(minFaceSize + 50), 300)
$('#minFaceSize').val(minFaceSize)
}
function onDecreaseMinFaceSize() {
minFaceSize = Math.max(faceapi.round(minFaceSize - 50), 50)
$('#minFaceSize').val(minFaceSize)
}
function updateTimeStats(timeInMs) {
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
}
async function onPlay(videoEl) {
if(videoEl.paused || videoEl.ended || !modelLoaded)
return false
const { width, height } = faceapi.getMediaDimensions(videoEl)
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const mtcnnParams = {
minFaceSize
}
const { results, stats } = await faceapi.nets.mtcnn.forwardWithStats(videoEl, mtcnnParams)
updateTimeStats(stats.total)
if (results) {
results.forEach(({ faceDetection, faceLandmarks }) => {
if (faceDetection.score < minConfidence) {
return
}
faceapi.drawDetection('overlay', faceDetection.forSize(width, height))
faceapi.drawLandmarks('overlay', faceLandmarks.forSize(width, height), { lineWidth: 4, color: 'red' })
})
}
setTimeout(() => onPlay(videoEl))
}
async function run() {
await faceapi.loadMtcnnModel('/')
modelLoaded = true
const videoEl = $('#inputVideo').get(0)
navigator.getUserMedia(
{ video: {} },
stream => videoEl.srcObject = stream,
err => console.error(err)
)
$('#loader').hide()
}
$(document).ready(function() {
renderNavBar('#navbar', 'mtcnn_face_detection_webcam')
run()
})
</script>
</body>
</html>
\ No newline at end of file
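The MTCNN webcam pages still use the callback-style navigator.getUserMedia, while the webcam face tracking page near the end of this changeset already uses the promise-based API. The equivalent promise form, for comparison:

// Promise-based webcam access, equivalent to the callback form above
async function startWebcam(videoEl) {
  try {
    videoEl.srcObject = await navigator.mediaDevices.getUserMedia({ video: {} })
  } catch (err) {
    console.error(err)
  }
}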
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<video onplay="onPlay(this)" id="inputVideo" autoplay muted></video>
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div class="row">
<label for="minFaceSize">Minimum Face Size:</label>
<input disabled value="200" id="minFaceSize" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinFaceSize()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinFaceSize()"
>
<i class="material-icons left">+</i>
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="time">Time:</label>
<input disabled value="-" id="time" type="text" class="bold">
</div>
<div class="row">
<label for="fps">Estimated Fps:</label>
<input disabled value="-" id="fps" type="text" class="bold">
</div>
</div>
</div>
<script>
let modelLoaded = false
let minFaceSize = 200
let maxDistance = 0.6
let minConfidence = 0.9
let forwardTimes = []
let trainDescriptorsByClass = []
function onIncreaseMinFaceSize() {
minFaceSize = Math.min(faceapi.round(minFaceSize + 50), 300)
$('#minFaceSize').val(minFaceSize)
}
function onDecreaseMinFaceSize() {
minFaceSize = Math.max(faceapi.round(minFaceSize - 50), 50)
$('#minFaceSize').val(minFaceSize)
}
function updateTimeStats(timeInMs) {
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
}
async function onPlay(videoEl) {
if(videoEl.paused || videoEl.ended || !modelLoaded)
return false
const { width, height } = faceapi.getMediaDimensions(videoEl)
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const mtcnnParams = {
minFaceSize
}
const ts = Date.now()
const fullFaceDescriptions = (await faceapi.allFacesMtcnn(videoEl, mtcnnParams))
.map(fd => fd.forSize(width, height))
updateTimeStats(Date.now() - ts)
fullFaceDescriptions.forEach(({ detection, landmarks, descriptor }) => {
faceapi.drawDetection('overlay', [detection], { withScore: false })
faceapi.drawLandmarks('overlay', landmarks.forSize(width, height), { lineWidth: 4, color: 'red' })
const bestMatch = getBestMatch(trainDescriptorsByClass, descriptor)
const text = `${bestMatch.distance < maxDistance ? bestMatch.className : 'unknown'} (${bestMatch.distance})`
const { x, y, height: boxHeight } = detection.getBox()
faceapi.drawText(
canvas.getContext('2d'),
x,
y + boxHeight,
text,
Object.assign(faceapi.getDefaultDrawOptions(), { color: 'red', fontSize: 16 })
)
})
setTimeout(() => onPlay(videoEl))
}
async function run() {
await faceapi.loadMtcnnModel('/')
await faceapi.loadFaceRecognitionModel('/')
// init reference data, e.g. compute a face descriptor for each class
trainDescriptorsByClass = await initTrainDescriptorsByClass(faceapi.recognitionNet)
modelLoaded = true
// try to access the user's webcam and stream the images
// to the video element
const videoEl = $('#inputVideo').get(0)
navigator.getUserMedia(
{ video: {} },
stream => videoEl.srcObject = stream,
err => console.error(err)
)
$('#loader').hide()
}
$(document).ready(function() {
renderNavBar('#navbar', 'mtcnn_face_recognition_webcam')
run()
})
</script>
</body>
</html>
\ No newline at end of file
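Recognition here is a nearest-class search over face descriptors, with 0.6 as the euclidean-distance cutoff commonly used for these 128-dimensional embeddings, hence the maxDistance default. Condensed, the labeling step is:

// Label a query descriptor, falling back to 'unknown' beyond maxDistance
function labelFor(descriptor) {
  const bestMatch = getBestMatch(trainDescriptorsByClass, descriptor)
  return bestMatch.distance < maxDistance ? bestMatch.className : 'unknown'
}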
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
</div>
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="sizeType">
<option value="" disabled selected>Input Size:</option>
<option value="xs">XS: 224 x 224</option>
<option value="sm">SM: 320 x 320</option>
<option value="md">MD: 416 x 416</option>
<option value="lg">LG: 608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</div>
<script>
let scoreThreshold = 0.5
let sizeType = 'lg'
function onIncreaseThreshold() {
scoreThreshold = Math.min(faceapi.round(scoreThreshold + 0.1), 1.0)
$('#scoreThreshold').val(scoreThreshold)
updateResults()
}
function onDecreaseThreshold() {
scoreThreshold = Math.max(faceapi.round(scoreThreshold - 0.1), 0.1)
$('#scoreThreshold').val(scoreThreshold)
updateResults()
}
function onSizeTypeChanged(e, c) {
sizeType = e.target.value
$('#sizeType').val(sizeType)
updateResults()
}
async function loadImageFromUrl() {
const img = await requestExternalImage($('#imgUrlInput').val())
$('#inputImg').get(0).src = img.src
updateResults()
}
async function updateResults() {
const inputImgEl = $('#inputImg').get(0)
const { width, height } = inputImgEl
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const forwardParams = {
inputSize: sizeType,
scoreThreshold
}
const detections = await faceapi.tinyYolov2(inputImgEl, forwardParams)
faceapi.drawDetection('overlay', detections.map(det => det.forSize(width, height)))
}
async function onSelectionChanged(uri) {
const imgBuf = await fetchImage(uri)
$(`#inputImg`).get(0).src = (await faceapi.bufferToImage(imgBuf)).src
updateResults()
}
async function run() {
await faceapi.loadTinyYolov2Model('/')
$('#loader').hide()
onSelectionChanged($('#selectList select').val())
}
$(document).ready(function() {
renderNavBar('#navbar', 'tiny_yolov2_face_detection')
renderImageSelectList(
'#selectList',
async (uri) => {
await onSelectionChanged(uri)
},
'bbt1.jpg'
)
const sizeTypeSelect = $('#sizeType')
sizeTypeSelect.val(sizeType)
sizeTypeSelect.on('change', onSizeTypeChanged)
sizeTypeSelect.material_select()
run()
})
</script>
</body>
</html>
\ No newline at end of file
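The Tiny YOLOv2 inputSize accepts either the symbolic sizes wired to the select above (xs/sm/md/lg for 224/320/416/608) or a plain number, as the webcam variant below demonstrates with parseInt:

// inside an async context; numeric input sizes work as well as symbolic ones
const detections = await faceapi.tinyYolov2(inputImgEl, {
  inputSize: 416,
  scoreThreshold: 0.5
})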
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<video src="media/bbt.mp4" onplay="onPlay(this)" id="inputVideo" autoplay muted></video>
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="sizeType">
<option value="" disabled selected>Input Size:</option>
<option value="xs">XS: 224 x 224</option>
<option value="sm">SM: 320 x 320</option>
<option value="md">MD: 416 x 416</option>
<option value="lg">LG: 608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="time">Time:</label>
<input disabled value="-" id="time" type="text" class="bold">
</div>
<div class="row">
<label for="fps">Estimated Fps:</label>
<input disabled value="-" id="fps" type="text" class="bold">
</div>
</div>
</div>
<script>
let scoreThreshold = 0.5
let sizeType = 'md'
let modelLoaded = false
let forwardTimes = []
function updateTimeStats(timeInMs) {
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
}
function onIncreaseThreshold() {
scoreThreshold = Math.min(faceapi.round(scoreThreshold + 0.1), 1.0)
$('#scoreThreshold').val(scoreThreshold)
}
function onDecreaseThreshold() {
scoreThreshold = Math.max(faceapi.round(scoreThreshold - 0.1), 0.1)
$('#scoreThreshold').val(scoreThreshold)
}
function onSizeTypeChanged(e, c) {
sizeType = e.target.value
$('#sizeType').val(sizeType)
}
async function onPlay(videoEl) {
if(videoEl.paused || videoEl.ended || !modelLoaded)
return false
const { width, height } = faceapi.getMediaDimensions(videoEl)
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const forwardParams = {
inputSize: sizeType,
scoreThreshold
}
const ts = Date.now()
const result = await faceapi.tinyYolov2(videoEl, forwardParams)
updateTimeStats(Date.now() - ts)
faceapi.drawDetection('overlay', result.map(det => det.forSize(width, height)))
setTimeout(() => onPlay(videoEl))
}
async function loadNetWeights(uri) {
return new Float32Array(await (await fetch(uri)).arrayBuffer())
}
async function run() {
await faceapi.loadTinyYolov2Model('/')
modelLoaded = true
onPlay($('#inputVideo').get(0))
$('#loader').hide()
}
$(document).ready(function() {
renderNavBar('#navbar', 'tiny_yolov2_face_detection_video')
const sizeTypeSelect = $('#sizeType')
sizeTypeSelect.val(sizeType)
sizeTypeSelect.on('change', onSizeTypeChanged)
sizeTypeSelect.material_select()
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<video onplay="onPlay(this)" id="inputVideo" autoplay muted></video>
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="sizeType">
<option value="" disabled selected>Input Size:</option>
<option value="160">160 x 160</option>
<option value="224">224 x 224</option>
<option value="320">320 x 320</option>
<option value="416">416 x 416</option>
<option value="608">608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="time">Time:</label>
<input disabled value="-" id="time" type="text" class="bold">
</div>
<div class="row">
<label for="fps">Estimated Fps:</label>
<input disabled value="-" id="fps" type="text" class="bold">
</div>
</div>
</div>
<script>
let scoreThreshold = 0.5
let sizeType = '160'
let modelLoaded = false
let forwardTimes = []
function updateTimeStats(timeInMs) {
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
}
function onIncreaseThreshold() {
scoreThreshold = Math.min(faceapi.round(scoreThreshold + 0.1), 1.0)
$('#scoreThreshold').val(scoreThreshold)
}
function onDecreaseThreshold() {
scoreThreshold = Math.max(faceapi.round(scoreThreshold - 0.1), 0.1)
$('#scoreThreshold').val(scoreThreshold)
}
function onSizeTypeChanged(e, c) {
sizeType = e.target.value
$('#sizeType').val(sizeType)
}
async function onPlay(videoEl) {
if(videoEl.paused || videoEl.ended || !modelLoaded)
return false
const { width, height } = faceapi.getMediaDimensions(videoEl)
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const forwardParams = {
inputSize: parseInt(sizeType),
scoreThreshold
}
const ts = Date.now()
const result = await faceapi.tinyYolov2(videoEl, forwardParams)
updateTimeStats(Date.now() - ts)
faceapi.drawDetection('overlay', result.map(det => det.forSize(width, height)))
setTimeout(() => onPlay(videoEl))
}
async function loadNetWeights(uri) {
return new Float32Array(await (await fetch(uri)).arrayBuffer())
}
async function run() {
await faceapi.loadTinyYolov2Model('/')
modelLoaded = true
const videoEl = $('#inputVideo').get(0)
navigator.getUserMedia(
{ video: {} },
stream => videoEl.srcObject = stream,
err => console.error(err)
)
onPlay($('#inputVideo').get(0))
$('#loader').hide()
}
$(document).ready(function() {
renderNavBar('#navbar', 'tiny_yolov2_face_detection_webcam')
const sizeTypeSelect = $('#sizeType')
sizeTypeSelect.val(sizeType)
sizeTypeSelect.on('change', onSizeTypeChanged)
sizeTypeSelect.material_select()
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
<p>
<input type="checkbox" id="useBatchProcessing" onchange="onChangeUseBatchProcessing(event)" />
<label for="useBatchProcessing">Use Batch Processing</label>
</p>
</div>
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="sizeType">
<option value="" disabled selected>Input Size:</option>
<option value="xs">XS: 224 x 224</option>
<option value="sm">SM: 320 x 320</option>
<option value="md">MD: 416 x 416</option>
<option value="lg">LG: 608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="maxDistance">Max Descriptor Distance:</label>
<input disabled value="0.6" id="maxDistance" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn button-sm"
onclick="onDecreaseMaxDistance()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn button-sm"
onclick="onIncreaseMaxDistance()"
>
<i class="material-icons left">+</i>
</button>
</div>
</div>
<script>
let maxDistance = 0.6
let useBatchProcessing = false
let trainDescriptorsByClass = []
let scoreThreshold = 0.5
let sizeType = 'lg'
function onIncreaseThreshold() {
scoreThreshold = Math.min(faceapi.round(scoreThreshold + 0.1), 1.0)
$('#scoreThreshold').val(scoreThreshold)
updateResults()
}
function onDecreaseThreshold() {
scoreThreshold = Math.max(faceapi.round(scoreThreshold - 0.1), 0.1)
$('#scoreThreshold').val(scoreThreshold)
updateResults()
}
function onSizeTypeChanged(e, c) {
sizeType = e.target.value
$('#sizeType').val(sizeType)
updateResults()
}
function onChangeUseBatchProcessing(e) {
useBatchProcessing = $(e.target).prop('checked')
}
function onIncreaseMaxDistance() {
maxDistance = Math.min(faceapi.round(maxDistance + 0.1), 1.0)
$('#maxDistance').val(maxDistance)
updateResults()
}
function onDecreaseMaxDistance() {
maxDistance = Math.max(faceapi.round(maxDistance - 0.1), 0.1)
$('#maxDistance').val(maxDistance)
updateResults()
}
async function loadImageFromUrl() {
const img = await requestExternalImage($('#imgUrlInput').val())
$('#inputImg').get(0).src = img.src
updateResults()
}
async function updateResults() {
const inputImgEl = $('#inputImg').get(0)
const { width, height } = inputImgEl
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const forwardParams = {
inputSize: sizeType,
scoreThreshold
}
const fullFaceDescriptions = (await faceapi.allFacesTinyYolov2(inputImgEl, forwardParams, useBatchProcessing))
.map(fd => fd.forSize(width, height))
fullFaceDescriptions.forEach(({ detection, descriptor }) => {
faceapi.drawDetection('overlay', [detection], { withScore: false })
const bestMatch = getBestMatch(trainDescriptorsByClass, descriptor)
const text = `${bestMatch.distance < maxDistance ? bestMatch.className : 'unknown'} (${bestMatch.distance})`
const { x, y, height: boxHeight } = detection.getBox()
faceapi.drawText(
canvas.getContext('2d'),
x,
y + boxHeight,
text,
Object.assign(faceapi.getDefaultDrawOptions(), { color: 'red', fontSize: 16 })
)
})
}
async function onSelectionChanged(uri) {
const imgBuf = await fetchImage(uri)
$(`#inputImg`).get(0).src = (await faceapi.bufferToImage(imgBuf)).src
updateResults()
}
async function run() {
await faceapi.loadTinyYolov2Model('/')
await faceapi.loadFaceLandmarkModel('/')
await faceapi.loadFaceRecognitionModel('/')
trainDescriptorsByClass = await initTrainDescriptorsByClass(faceapi.recognitionNet, 1)
$('#loader').hide()
onSelectionChanged($('#selectList select').val())
}
$(document).ready(function() {
renderNavBar('#navbar', 'tiny_yolov2_face_recognition')
renderImageSelectList(
'#selectList',
async (uri) => {
await onSelectionChanged(uri)
},
'bbt1.jpg'
)
const sizeTypeSelect = $('#sizeType')
sizeTypeSelect.val(sizeType)
sizeTypeSelect.on('change', onSizeTypeChanged)
sizeTypeSelect.material_select()
run()
})
</script>
</body>
</html>
\ No newline at end of file
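The third argument to allFacesTinyYolov2 toggles batch processing: when true, landmarks and descriptors for all face crops are computed in single batched forward passes rather than one pass per face (the factory implementation later in this changeset shows both paths):

// inside an async context:
const batched = await faceapi.allFacesTinyYolov2(inputImgEl, forwardParams, true)
const perFace = await faceapi.allFacesTinyYolov2(inputImgEl, forwardParams, false)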
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<video src="media/bbt.mp4" id="inputVideo" autoplay muted loop></video>
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<!-- face_detector_selection_control -->
<div id="face_detector_selection_control" class="row input-field" style="margin-right: 20px;">
<select id="selectFaceDetector">
<option value="ssd_mobilenetv1">SSD Mobilenet V1</option>
<option value="tiny_face_detector">Tiny Face Detector</option>
<option value="mtcnn">MTCNN</option>
</select>
<label>Select Face Detector</label>
</div>
<!-- face_detector_selection_control -->
<!-- check boxes -->
<div class="row" style="width: 220px;">
<input type="checkbox" id="withFaceLandmarksCheckbox" onchange="onChangeWithFaceLandmarks(event)" />
<label for="withFaceLandmarksCheckbox">Detect Face Landmarks</label>
<input type="checkbox" id="hideBoundingBoxesCheckbox" onchange="onChangeHideBoundingBoxes(event)" />
<label for="hideBoundingBoxesCheckbox">Hide Bounding Boxes</label>
</div>
<!-- check boxes -->
<!-- fps_meter -->
<div id="fps_meter" class="row side-by-side">
<div>
<label for="time">Time:</label>
<input disabled value="-" id="time" type="text" class="bold">
<label for="fps">Estimated Fps:</label>
<input disabled value="-" id="fps" type="text" class="bold">
</div>
</div>
<!-- fps_meter -->
</div>
<!-- ssd_mobilenetv1_controls -->
<span id="ssd_mobilenetv1_controls">
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.5" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinConfidence()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinConfidence()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- ssd_mobilenetv1_controls -->
<!-- tiny_face_detector_controls -->
<span id="tiny_face_detector_controls">
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="inputSize">
<option value="" disabled selected>Input Size:</option>
<option value="160">160 x 160</option>
<option value="224">224 x 224</option>
<option value="320">320 x 320</option>
<option value="416">416 x 416</option>
<option value="512">512 x 512</option>
<option value="608">608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseScoreThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseScoreThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- tiny_face_detector_controls -->
<!-- mtcnn_controls -->
<span id="mtcnn_controls">
<div class="row side-by-side">
<div class="row">
<label for="minFaceSize">Minimum Face Size:</label>
<input disabled value="20" id="minFaceSize" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinFaceSize()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinFaceSize()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- mtcnn_controls -->
</div>
<script>
let forwardTimes = []
let withFaceLandmarks = false
let withBoxes = true
function onChangeWithFaceLandmarks(e) {
withFaceLandmarks = $(e.target).prop('checked')
}
function onChangeHideBoundingBoxes(e) {
withBoxes = !$(e.target).prop('checked')
}
function updateTimeStats(timeInMs) {
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
}
async function onPlay(videoEl) {
if(!videoEl.currentTime || videoEl.paused || videoEl.ended || !isFaceDetectionModelLoaded())
return setTimeout(() => onPlay(videoEl))
const options = getFaceDetectorOptions()
const ts = Date.now()
const faceDetectionTask = faceapi.detectAllFaces(videoEl, options)
const results = withFaceLandmarks
? await faceDetectionTask.withFaceLandmarks()
: await faceDetectionTask
updateTimeStats(Date.now() - ts)
const drawFunction = withFaceLandmarks
? drawLandmarks
: drawDetections
drawFunction(videoEl, $('#overlay').get(0), results, withBoxes)
setTimeout(() => onPlay(videoEl))
}
async function run() {
// load face detection and face landmark models
await changeFaceDetector(TINY_FACE_DETECTOR)
await faceapi.loadFaceLandmarkModel('/')
changeInputSize(416)
// start processing frames
onPlay($('#inputVideo').get(0))
}
function updateResults() {}
$(document).ready(function() {
renderNavBar('#navbar', 'video_face_tracking')
initFaceDetectionControls()
run()
})
</script>
</body>
</html>
\ No newline at end of file
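This page uses the composable detection API: faceapi.detectAllFaces returns a task that can be awaited directly for plain detections or extended with .withFaceLandmarks(). Both shapes, assuming the getFaceDetectorOptions helper from js/faceDetectionControls.js (not shown here):

// plain detections
const detections = await faceapi.detectAllFaces(videoEl, getFaceDetectorOptions())
// detections plus 68-point landmarks per face
const withLandmarks = await faceapi
  .detectAllFaces(videoEl, getFaceDetectorOptions())
  .withFaceLandmarks()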
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<video onplay="onPlay(this)" id="inputVideo" autoplay muted></video>
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<!-- face_detector_selection_control -->
<div id="face_detector_selection_control" class="row input-field" style="margin-right: 20px;">
<select id="selectFaceDetector">
<option value="ssd_mobilenetv1">SSD Mobilenet V1</option>
<option value="tiny_face_detector">Tiny Face Detector</option>
<option value="mtcnn">MTCNN</option>
</select>
<label>Select Face Detector</label>
</div>
<!-- face_detector_selection_control -->
<!-- check boxes -->
<div class="row" style="width: 220px;">
<input type="checkbox" id="withFaceLandmarksCheckbox" onchange="onChangeWithFaceLandmarks(event)" />
<label for="withFaceLandmarksCheckbox">Detect Face Landmarks</label>
<input type="checkbox" id="hideBoundingBoxesCheckbox" onchange="onChangeHideBoundingBoxes(event)" />
<label for="hideBoundingBoxesCheckbox">Hide Bounding Boxes</label>
</div>
<!-- check boxes -->
<!-- fps_meter -->
<div id="fps_meter" class="row side-by-side">
<div>
<label for="time">Time:</label>
<input disabled value="-" id="time" type="text" class="bold">
<label for="fps">Estimated Fps:</label>
<input disabled value="-" id="fps" type="text" class="bold">
</div>
</div>
<!-- fps_meter -->
</div>
<!-- ssd_mobilenetv1_controls -->
<span id="ssd_mobilenetv1_controls">
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.5" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinConfidence()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinConfidence()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- ssd_mobilenetv1_controls -->
<!-- tiny_face_detector_controls -->
<span id="tiny_face_detector_controls">
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="inputSize">
<option value="" disabled selected>Input Size:</option>
<option value="128">128 x 128</option>
<option value="160">160 x 160</option>
<option value="224">224 x 224</option>
<option value="320">320 x 320</option>
<option value="416">416 x 416</option>
<option value="512">512 x 512</option>
<option value="608">608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseScoreThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseScoreThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- tiny_face_detector_controls -->
<!-- mtcnn_controls -->
<span id="mtcnn_controls">
<div class="row side-by-side">
<div class="row">
<label for="minFaceSize">Minimum Face Size:</label>
<input disabled value="20" id="minFaceSize" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinFaceSize()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinFaceSize()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- mtcnn_controls -->
</div>
<script>
let forwardTimes = []
let withFaceLandmarks = false
let withBoxes = true
function onChangeWithFaceLandmarks(e) {
withFaceLandmarks = $(e.target).prop('checked')
}
function onChangeHideBoundingBoxes(e) {
withBoxes = !$(e.target).prop('checked')
}
function updateTimeStats(timeInMs) {
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
}
async function onPlay() {
const videoEl = $('#inputVideo').get(0)
if(videoEl.paused || videoEl.ended || !isFaceDetectionModelLoaded())
return setTimeout(() => onPlay())
const options = getFaceDetectorOptions()
const ts = Date.now()
const faceDetectionTask = faceapi.detectSingleFace(videoEl, options)
const result = withFaceLandmarks
? await faceDetectionTask.withFaceLandmarks()
: await faceDetectionTask
updateTimeStats(Date.now() - ts)
const drawFunction = withFaceLandmarks
? drawLandmarks
: drawDetections
if (result) {
drawFunction(videoEl, $('#overlay').get(0), [result], withBoxes)
}
setTimeout(() => onPlay())
}
async function run() {
// load face detection and face landmark models
await changeFaceDetector(TINY_FACE_DETECTOR)
await faceapi.loadFaceLandmarkModel('/')
changeInputSize(128)
// try to access the user's webcam and stream the images
// to the video element
const stream = await navigator.mediaDevices.getUserMedia({ video: {} })
const videoEl = $('#inputVideo').get(0)
videoEl.srcObject = stream
}
function updateResults() {}
$(document).ready(function() {
renderNavBar('#navbar', 'webcam_face_tracking')
initFaceDetectionControls()
run()
})
</script>
</body>
</html>
\ No newline at end of file
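Unlike detectAllFaces, detectSingleFace resolves to the single highest-scoring detection or undefined, which is why the loop above guards with if (result):

const result = await faceapi.detectSingleFace(videoEl, getFaceDetectorOptions())
if (result) {
  // one detection (or detection plus landmarks when chained with .withFaceLandmarks())
}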
......@@ -14,6 +14,20 @@ const dataFiles = [
nocache: false
}))
const exclude = process.env.UUT
? [
'dom',
'faceLandmarkNet',
'faceRecognitionNet',
'ssdMobilenetv1',
'tinyFaceDetector',
'mtcnn',
'tinyYolov2'
]
.filter(ex => ex !== process.env.UUT)
.map(ex => `test/tests/${ex}/*.ts`)
: []
module.exports = function(config) {
config.set({
frameworks: ['jasmine', 'karma-typescript'],
......@@ -21,6 +35,7 @@ module.exports = function(config) {
'src/**/*.ts',
'test/**/*.ts'
].concat(dataFiles),
exclude,
preprocessors: {
'**/*.ts': ['karma-typescript']
},
......
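The exclude list inverts the UUT ('unit under test') variable: every suite except the named one is excluded from the karma run. With UUT=mtcnn, for instance, the expression evaluates to:

// exclude, given process.env.UUT === 'mtcnn'
[
  'test/tests/dom/*.ts',
  'test/tests/faceLandmarkNet/*.ts',
  'test/tests/faceRecognitionNet/*.ts',
  'test/tests/ssdMobilenetv1/*.ts',
  'test/tests/tinyFaceDetector/*.ts',
  'test/tests/tinyYolov2/*.ts'
]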
......@@ -4903,21 +4903,21 @@
}
},
"tfjs-image-recognition-base": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/tfjs-image-recognition-base/-/tfjs-image-recognition-base-0.1.2.tgz",
"integrity": "sha512-+mnRdQ6IxA2q2nTIRmwImmUidgE9qhhWOXVCPvgnA/g52CXeydgT4NpOHHCcteWgqwg5q6ju1p0epWKgP1k/dg==",
"version": "0.1.3",
"resolved": "https://registry.npmjs.org/tfjs-image-recognition-base/-/tfjs-image-recognition-base-0.1.3.tgz",
"integrity": "sha512-Vo1arsSkOxtlBedWxw7w2V/mbpp70izAJPu0Cl6WE62ZJ0kLL6TmFphGAr3zKaqrZ0VOyADVedDqFic3aH84RQ==",
"requires": {
"@tensorflow/tfjs-core": "0.13.2",
"tslib": "1.9.3"
}
},
"tfjs-tiny-yolov2": {
"version": "0.1.3",
"resolved": "https://registry.npmjs.org/tfjs-tiny-yolov2/-/tfjs-tiny-yolov2-0.1.3.tgz",
"integrity": "sha512-oY77SnFFzOizXRYPfRcHO9LuqB+dJkXvOBZz6b04E5HrJvVkCJu/MDJBIHuMdbe2TuVmPIhcX+Yr1bqLp3gXKw==",
"version": "0.2.1",
"resolved": "https://registry.npmjs.org/tfjs-tiny-yolov2/-/tfjs-tiny-yolov2-0.2.1.tgz",
"integrity": "sha512-HSdBu6dMyQdtueY32wSO+5IajXDrvu7MufvUBaLD0CubKbfJWM1JogsONUkvp3N948UAI2/K35p9+eEP2woXpw==",
"requires": {
"@tensorflow/tfjs-core": "0.13.2",
"tfjs-image-recognition-base": "0.1.2",
"tfjs-image-recognition-base": "0.1.3",
"tslib": "1.9.3"
}
},
......
......@@ -11,7 +11,14 @@
"tsc": "tsc",
"tsc-es6": "tsc --p tsconfig.es6.json",
"build": "npm run rollup && npm run rollup-min && npm run tsc && npm run tsc-es6",
"test": "karma start"
"test": "karma start",
"test-facelandmarknets": "set UUT=faceLandmarkNet&& karma start",
"test-facerecognitionnet": "set UUT=faceRecognitionNet&& karma start",
"test-ssdmobilenetv1": "set UUT=ssdMobilenetv1&& karma start",
"test-tinyfacedetector": "set UUT=tinyFaceDetector&& karma start",
"test-mtcnn": "set UUT=mtcnn&& karma start",
"test-tinyyolov2": "set UUT=tinyYolov2&& karma start",
"docs": "typedoc --options ./typedoc.config.js ./src"
},
"keywords": [
"face",
......@@ -24,8 +31,8 @@
"license": "MIT",
"dependencies": {
"@tensorflow/tfjs-core": "^0.13.2",
"tfjs-image-recognition-base": "^0.1.2",
"tfjs-tiny-yolov2": "^0.1.3",
"tfjs-image-recognition-base": "^0.1.3",
"tfjs-tiny-yolov2": "^0.2.1",
"tslib": "^1.9.3"
},
"devDependencies": {
......
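Note that the set UUT=...&& prefix in these scripts is Windows cmd syntax; on a POSIX shell the equivalent would be UUT=faceLandmarkNet karma start, or a helper like cross-env for a portable script.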
import { Point, Rect, TNetInput } from 'tfjs-image-recognition-base';
import { TinyYolov2Types } from 'tfjs-tiny-yolov2';
import { TinyYolov2 } from '.';
import { FaceDetection } from './classes/FaceDetection';
import { FaceLandmarks68 } from './classes/FaceLandmarks68';
import { FullFaceDescription } from './classes/FullFaceDescription';
import { extractFaces } from './dom';
import { FaceDetectionNet } from './faceDetectionNet/FaceDetectionNet';
import { FaceLandmark68Net } from './faceLandmarkNet/FaceLandmark68Net';
import { FaceRecognitionNet } from './faceRecognitionNet/FaceRecognitionNet';
import { Mtcnn } from './mtcnn/Mtcnn';
import { MtcnnForwardParams } from './mtcnn/types';
function computeDescriptorsFactory(
recognitionNet: FaceRecognitionNet
) {
return async function(input: TNetInput, alignedFaceBoxes: Rect[], useBatchProcessing: boolean) {
const alignedFaceCanvases = await extractFaces(input, alignedFaceBoxes)
const descriptors = useBatchProcessing
? await recognitionNet.computeFaceDescriptor(alignedFaceCanvases) as Float32Array[]
: await Promise.all(alignedFaceCanvases.map(
canvas => recognitionNet.computeFaceDescriptor(canvas)
)) as Float32Array[]
return descriptors
}
}
function allFacesFactory(
detectFaces: (input: TNetInput) => Promise<FaceDetection[]>,
landmarkNet: FaceLandmark68Net,
recognitionNet: FaceRecognitionNet
) {
const computeDescriptors = computeDescriptorsFactory(recognitionNet)
return async function(
input: TNetInput,
useBatchProcessing: boolean = false
): Promise<FullFaceDescription[]> {
const detections = await detectFaces(input)
const faceCanvases = await extractFaces(input, detections)
const faceLandmarksByFace = useBatchProcessing
? await landmarkNet.detectLandmarks(faceCanvases) as FaceLandmarks68[]
: await Promise.all(faceCanvases.map(
canvas => landmarkNet.detectLandmarks(canvas)
)) as FaceLandmarks68[]
const alignedFaceBoxes = faceLandmarksByFace.map(
(landmarks, i) => landmarks.align(detections[i].getBox())
)
const descriptors = await computeDescriptors(input, alignedFaceBoxes, useBatchProcessing)
return detections.map((detection, i) =>
new FullFaceDescription(
detection,
faceLandmarksByFace[i].shiftByPoint<FaceLandmarks68>(
new Point(detection.box.x, detection.box.y)
),
descriptors[i]
)
)
}
}
export function allFacesSsdMobilenetv1Factory(
ssdMobilenetv1: FaceDetectionNet,
landmarkNet: FaceLandmark68Net,
recognitionNet: FaceRecognitionNet
) {
return async function(
input: TNetInput,
minConfidence: number = 0.8,
useBatchProcessing: boolean = false
): Promise<FullFaceDescription[]> {
const detectFaces = (input: TNetInput) => ssdMobilenetv1.locateFaces(input, minConfidence)
const allFaces = allFacesFactory(detectFaces, landmarkNet, recognitionNet)
return allFaces(input, useBatchProcessing)
}
}
export function allFacesTinyYolov2Factory(
tinyYolov2: TinyYolov2,
landmarkNet: FaceLandmark68Net,
recognitionNet: FaceRecognitionNet
) {
return async function(
input: TNetInput,
forwardParams: TinyYolov2Types.TinyYolov2ForwardParams = {},
useBatchProcessing: boolean = false
): Promise<FullFaceDescription[]> {
const detectFaces = (input: TNetInput) => tinyYolov2.locateFaces(input, forwardParams)
const allFaces = allFacesFactory(detectFaces, landmarkNet, recognitionNet)
return allFaces(input, useBatchProcessing)
}
}
export function allFacesMtcnnFactory(
mtcnn: Mtcnn,
recognitionNet: FaceRecognitionNet
) {
const computeDescriptors = computeDescriptorsFactory(recognitionNet)
return async function(
input: TNetInput,
mtcnnForwardParams: MtcnnForwardParams = {},
useBatchProcessing: boolean = false
): Promise<FullFaceDescription[]> {
const results = await mtcnn.forward(input, mtcnnForwardParams)
const alignedFaceBoxes = results.map(
({ faceLandmarks }) => faceLandmarks.align()
)
const descriptors = await computeDescriptors(input, alignedFaceBoxes, useBatchProcessing)
return results.map(({ faceDetection, faceLandmarks }, i) =>
new FullFaceDescription(
faceDetection,
faceLandmarks,
descriptors[i]
)
)
}
}
\ No newline at end of file
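Each factory closes over already-loaded nets and returns the allFaces-style function the example pages call. A usage sketch (the net instances are assumed to be constructed and loaded elsewhere):

// inside an async context: compose a detect -> landmarks -> descriptor pipeline
const allFaces = allFacesSsdMobilenetv1Factory(ssdMobilenetv1, landmarkNet, recognitionNet)
const fullFaceDescriptions = await allFaces(input, 0.8, false)
// each entry carries { detection, landmarks, descriptor }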
import { Dimensions, ObjectDetection, Rect } from 'tfjs-image-recognition-base';
import { Box, IDimensions, ObjectDetection, Rect } from 'tfjs-image-recognition-base';
export class FaceDetection extends ObjectDetection {
export interface IFaceDetection {
  score: number
  box: Box
}
export class FaceDetection extends ObjectDetection implements IFaceDetection {
constructor(
score: number,
relativeBox: Rect,
imageDims: Dimensions
imageDims: IDimensions
) {
super(score, score, '', relativeBox, imageDims)
}
......
import { FaceDetection } from './FaceDetection';
import { FaceLandmarks } from './FaceLandmarks';
import { FaceLandmarks68 } from './FaceLandmarks68';
export interface IFaceDetectionWithLandmarks<TFaceLandmarks extends FaceLandmarks = FaceLandmarks68> {
detection: FaceDetection,
landmarks: TFaceLandmarks
}
export class FaceDetectionWithLandmarks<TFaceLandmarks extends FaceLandmarks = FaceLandmarks68>
implements IFaceDetectionWithLandmarks<TFaceLandmarks> {
private _detection: FaceDetection
private _unshiftedLandmarks: TFaceLandmarks
constructor(
detection: FaceDetection,
unshiftedLandmarks: TFaceLandmarks
) {
this._detection = detection
this._unshiftedLandmarks = unshiftedLandmarks
}
public get detection(): FaceDetection { return this._detection }
public get unshiftedLandmarks(): TFaceLandmarks { return this._unshiftedLandmarks }
public get alignedRect(): FaceDetection {
const rect = this.landmarks.align()
const { imageDims } = this.detection
return new FaceDetection(this._detection.score, rect.rescale(imageDims.reverse()), imageDims)
}
public get landmarks(): TFaceLandmarks {
const { x, y } = this.detection.box
return this._unshiftedLandmarks.shiftBy(x, y)
}
// aliases for backward compatibility
get faceDetection(): FaceDetection { return this.detection }
get faceLandmarks(): TFaceLandmarks { return this.landmarks }
public forSize(width: number, height: number): FaceDetectionWithLandmarks<TFaceLandmarks> {
const resizedDetection = this._detection.forSize(width, height)
const resizedLandmarks = this._unshiftedLandmarks.forSize<TFaceLandmarks>(resizedDetection.box.width, resizedDetection.box.height)
return new FaceDetectionWithLandmarks<TFaceLandmarks>(resizedDetection, resizedLandmarks)
}
}
\ No newline at end of file
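FaceDetectionWithLandmarks keeps landmarks in crop-relative coordinates internally and shifts them by the detection box origin on access; the faceDetection/faceLandmarks getters keep the older property names working. In use:

// result: a FaceDetectionWithLandmarks instance
const inImageSpace = result.landmarks          // shifted by the box's x/y
const inCropSpace = result.unshiftedLandmarks  // relative to the face box
console.log(result.faceDetection === result.detection)  // true, legacy alias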
import { Dimensions, getCenterPoint, Point, Rect } from 'tfjs-image-recognition-base';
import { Dimensions, getCenterPoint, IDimensions, Point, Rect } from 'tfjs-image-recognition-base';
import { FaceDetection } from './FaceDetection';
......@@ -7,65 +7,56 @@ const relX = 0.5
const relY = 0.43
const relScale = 0.45
export class FaceLandmarks {
protected _imageWidth: number
protected _imageHeight: number
export interface IFaceLandmarks {
positions: Point[]
shift: Point
}
export class FaceLandmarks implements IFaceLandmarks {
protected _shift: Point
protected _faceLandmarks: Point[]
protected _positions: Point[]
protected _imgDims: Dimensions
constructor(
relativeFaceLandmarkPositions: Point[],
imageDims: Dimensions,
imgDims: IDimensions,
shift: Point = new Point(0, 0)
) {
const { width, height } = imageDims
this._imageWidth = width
this._imageHeight = height
const { width, height } = imgDims
this._imgDims = new Dimensions(width, height)
this._shift = shift
this._faceLandmarks = relativeFaceLandmarkPositions.map(
this._positions = relativeFaceLandmarkPositions.map(
pt => pt.mul(new Point(width, height)).add(shift)
)
}
public getShift(): Point {
return new Point(this._shift.x, this._shift.y)
}
public getImageWidth(): number {
return this._imageWidth
}
public getImageHeight(): number {
return this._imageHeight
}
public getPositions(): Point[] {
return this._faceLandmarks
}
public getRelativePositions(): Point[] {
return this._faceLandmarks.map(
pt => pt.sub(this._shift).div(new Point(this._imageWidth, this._imageHeight))
public get shift(): Point { return new Point(this._shift.x, this._shift.y) }
public get imageWidth(): number { return this._imgDims.width }
public get imageHeight(): number { return this._imgDims.height }
public get positions(): Point[] { return this._positions }
public get relativePositions(): Point[] {
return this._positions.map(
pt => pt.sub(this._shift).div(new Point(this.imageWidth, this.imageHeight))
)
}
public forSize<T extends FaceLandmarks>(width: number, height: number): T {
return new (this.constructor as any)(
this.getRelativePositions(),
this.relativePositions,
{ width, height }
)
}
public shift<T extends FaceLandmarks>(x: number, y: number): T {
public shiftBy<T extends FaceLandmarks>(x: number, y: number): T {
return new (this.constructor as any)(
this.getRelativePositions(),
{ width: this._imageWidth, height: this._imageHeight },
this.relativePositions,
this._imgDims,
new Point(x, y)
)
}
public shiftByPoint<T extends FaceLandmarks>(pt: Point): T {
return this.shift(pt.x, pt.y)
return this.shiftBy(pt.x, pt.y)
}
/**
......@@ -84,10 +75,10 @@ export class FaceLandmarks {
): Rect {
if (detection) {
const box = detection instanceof FaceDetection
? detection.getBox().floor()
? detection.box.floor()
: detection
return this.shift(box.x, box.y).align()
return this.shiftBy(box.x, box.y).align()
}
const centers = this.getRefPointsForAlignment()
......@@ -103,7 +94,7 @@ export class FaceLandmarks {
const x = Math.floor(Math.max(0, refPoint.x - (relX * size)))
const y = Math.floor(Math.max(0, refPoint.y - (relY * size)))
return new Rect(x, y, Math.min(size, this._imageWidth + x), Math.min(size, this._imageHeight + y))
return new Rect(x, y, Math.min(size, this.imageWidth + x), Math.min(size, this.imageHeight + y))
}
protected getRefPointsForAlignment(): Point[] {
......
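Editor's note: the constructor above maps relative positions into absolute coordinates (absolute = relative * imageDims + shift) and `relativePositions` inverts that, which is what lets `forSize` re-instantiate the landmarks at any resolution. A sketch with illustrative numbers:
declare const relativePoints: Point[] // assumed: 68 points with coordinates in [0, 1]
const landmarks = new FaceLandmarks68(relativePoints, { width: 640, height: 480 })
// same relative positions, absolute coordinates doubled:
const upscaled = landmarks.forSize<FaceLandmarks68>(1280, 960)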
......@@ -5,7 +5,7 @@ import { FaceLandmarks } from './FaceLandmarks';
export class FaceLandmarks5 extends FaceLandmarks {
protected getRefPointsForAlignment(): Point[] {
const pts = this.getPositions()
const pts = this.positions
return [
pts[0],
pts[1],
......
......@@ -2,34 +2,33 @@ import { getCenterPoint, Point } from 'tfjs-image-recognition-base';
import { FaceLandmarks } from '../classes/FaceLandmarks';
export class FaceLandmarks68 extends FaceLandmarks {
public getJawOutline(): Point[] {
return this._faceLandmarks.slice(0, 17)
return this.positions.slice(0, 17)
}
public getLeftEyeBrow(): Point[] {
return this._faceLandmarks.slice(17, 22)
return this.positions.slice(17, 22)
}
public getRightEyeBrow(): Point[] {
return this._faceLandmarks.slice(22, 27)
return this.positions.slice(22, 27)
}
public getNose(): Point[] {
return this._faceLandmarks.slice(27, 36)
return this.positions.slice(27, 36)
}
public getLeftEye(): Point[] {
return this._faceLandmarks.slice(36, 42)
return this.positions.slice(36, 42)
}
public getRightEye(): Point[] {
return this._faceLandmarks.slice(42, 48)
return this.positions.slice(42, 48)
}
public getMouth(): Point[] {
return this._faceLandmarks.slice(48, 68)
return this.positions.slice(48, 68)
}
protected getRefPointsForAlignment(): Point[] {
......
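Editor's note: the slice indices above follow the standard 68-point annotation order (0-16 jaw, 17-21 left brow, 22-26 right brow, 27-35 nose, 36-41 left eye, 42-47 right eye, 48-67 mouth). Typical use:
declare const landmarks68: FaceLandmarks68 // assumed: output of a landmark net
const jaw = landmarks68.getJawOutline()    // points 0-16
const leftEye = landmarks68.getLeftEye()   // points 36-41
const mouth = landmarks68.getMouth()       // points 48-67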
import { round } from 'tfjs-image-recognition-base';
export interface IFaceMatch {
label: string
distance: number
}
export class FaceMatch implements IFaceMatch {
private _label: string
private _distance: number
constructor(label: string, distance: number) {
this._label = label
this._distance = distance
}
public get label(): string { return this._label }
public get distance(): number { return this._distance }
public toString(withDistance: boolean = true): string {
return `${this.label}${withDistance ? ` (${round(this.distance)})` : ''}`
}
}
\ No newline at end of file
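Editor's note: a quick illustration of the new FaceMatch class (label and distance values are made up):
const match = new FaceMatch('sheldon', 0.4217)
match.toString()      // e.g. 'sheldon (0.42)' -- distance rounded via round()
match.toString(false) // 'sheldon'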
import { FaceDetection } from './FaceDetection';
import { FaceDetectionWithLandmarks, IFaceDetectionWithLandmarks } from './FaceDetectionWithLandmarks';
import { FaceLandmarks } from './FaceLandmarks';
import { FaceLandmarks68 } from './FaceLandmarks68';
export class FullFaceDescription {
  constructor(
    private _detection: FaceDetection,
    private _landmarks: FaceLandmarks,
    private _descriptor: Float32Array
  ) {}
  public get detection(): FaceDetection {
    return this._detection
  }
  public get landmarks(): FaceLandmarks {
    return this._landmarks
  }
  public get descriptor(): Float32Array {
    return this._descriptor
  }
  public forSize(width: number, height: number): FullFaceDescription {
    return new FullFaceDescription(
      this._detection.forSize(width, height),
      this._landmarks.forSize(width, height),
      this._descriptor
    )
  }
}
export interface IFullFaceDescription<TFaceLandmarks extends FaceLandmarks = FaceLandmarks68>
  extends IFaceDetectionWithLandmarks<TFaceLandmarks> {
  detection: FaceDetection,
  landmarks: TFaceLandmarks,
  descriptor: Float32Array
}
export class FullFaceDescription<TFaceLandmarks extends FaceLandmarks = FaceLandmarks68>
  extends FaceDetectionWithLandmarks<TFaceLandmarks>
  implements IFullFaceDescription<TFaceLandmarks> {
  private _descriptor: Float32Array
  constructor(
    detection: FaceDetection,
    unshiftedLandmarks: TFaceLandmarks,
    descriptor: Float32Array
  ) {
    super(detection, unshiftedLandmarks)
    this._descriptor = descriptor
  }
  public get descriptor(): Float32Array {
    return this._descriptor
  }
  public forSize(width: number, height: number): FullFaceDescription<TFaceLandmarks> {
    const { detection, landmarks } = super.forSize(width, height)
    return new FullFaceDescription<TFaceLandmarks>(detection, landmarks, this.descriptor)
  }
}
\ No newline at end of file
export class LabeledFaceDescriptors {
private _label: string
private _descriptors: Float32Array[]
constructor(label: string, descriptors: Float32Array[]) {
if (!(typeof label === 'string')) {
throw new Error('LabeledFaceDescriptors - constructor expected label to be a string')
}
if (!Array.isArray(descriptors) || descriptors.some(desc => !(desc instanceof Float32Array))) {
throw new Error('LabeledFaceDescriptors - constructor expected descriptors to be an array of Float32Array')
}
this._label = label
this._descriptors = descriptors
}
public get label(): string { return this._label }
public get descriptors(): Float32Array[] { return this._descriptors }
}
\ No newline at end of file
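Editor's note: a sketch of building labeled reference data for the FaceMatcher further down; `descriptor1`/`descriptor2` are assumed 128-float outputs of the recognition net:
declare const descriptor1: Float32Array
declare const descriptor2: Float32Array
const labeled = new LabeledFaceDescriptors('sheldon', [descriptor1, descriptor2])
// invalid inputs fail fast in the constructor, e.g.:
// new LabeledFaceDescriptors('sheldon', [[0.1, 0.2]] as any) // throws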
export * from './FaceDetection';
export * from './FaceDetectionWithLandmarks';
export * from './FaceLandmarks';
export * from './FaceLandmarks5';
export * from './FaceLandmarks68';
export * from './FullFaceDescription';
\ No newline at end of file
export * from './FaceMatch';
export * from './FullFaceDescription';
export * from './LabeledFaceDescriptors';
\ No newline at end of file
......@@ -44,6 +44,6 @@ export function drawLandmarks(
// else draw points
const ptOffset = lineWidth / 2
ctx.fillStyle = color
landmarks.getPositions().forEach(pt => ctx.fillRect(pt.x - ptOffset, pt.y - ptOffset, lineWidth, lineWidth))
landmarks.positions.forEach(pt => ctx.fillRect(pt.x - ptOffset, pt.y - ptOffset, lineWidth, lineWidth))
})
}
\ No newline at end of file
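Editor's note: only the point-drawing branch of drawLandmarks appears in this hunk; a typical call might look like the sketch below, where the option names are assumed from this version's draw API and not shown in the diff:
declare const canvas: HTMLCanvasElement   // assumed target canvas
declare const landmarks: FaceLandmarks68  // assumed landmark result
drawLandmarks(canvas, landmarks, { drawLines: true, lineWidth: 2 })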
......@@ -27,7 +27,7 @@ export async function extractFaceTensors(
const boxes = detections.map(
det => det instanceof FaceDetection
? det.forSize(imgWidth, imgHeight).getBox()
? det.forSize(imgWidth, imgHeight).box
: det
)
.map(box => box.clipAtImageBorders(imgWidth, imgHeight))
......
......@@ -39,7 +39,7 @@ export async function extractFaces(
const ctx = getContext2dOrThrow(canvas)
const boxes = detections.map(
det => det instanceof FaceDetection
? det.forSize(canvas.width, canvas.height).getBox().floor()
? det.forSize(canvas.width, canvas.height).box.floor()
: det
)
.map(box => box.clipAtImageBorders(canvas.width, canvas.height))
......
import { FaceDetectionNet } from './FaceDetectionNet';
export * from './FaceDetectionNet';
export function createFaceDetectionNet(weights: Float32Array) {
const net = new FaceDetectionNet()
net.extractWeights(weights)
return net
}
export function faceDetectionNet(weights: Float32Array) {
console.warn('faceDetectionNet(weights: Float32Array) will be deprecated in future, use createFaceDetectionNet instead')
return createFaceDetectionNet(weights)
}
\ No newline at end of file
......@@ -38,7 +38,7 @@ function denseBlock(
export class FaceLandmark68Net extends FaceLandmark68NetBase<NetParams> {
constructor() {
super('FaceLandmark68LargeNet')
super('FaceLandmark68Net')
}
public runNet(input: NetInput): tf.Tensor2D {
......@@ -46,7 +46,7 @@ export class FaceLandmark68Net extends FaceLandmark68NetBase<NetParams> {
const { params } = this
if (!params) {
throw new Error('FaceLandmark68LargeNet - load model before inference')
throw new Error('FaceLandmark68Net - load model before inference')
}
return tf.tidy(() => {
......
import * as tf from '@tensorflow/tfjs-core';
import { isEven, NetInput, NeuralNetwork, Point, TNetInput, toNetInput, Dimensions } from 'tfjs-image-recognition-base';
import { IDimensions, isEven, NetInput, NeuralNetwork, Point, TNetInput, toNetInput } from 'tfjs-image-recognition-base';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
......@@ -17,7 +17,7 @@ export class FaceLandmark68NetBase<NetParams> extends NeuralNetwork<NetParams> {
throw new Error(`${this.__name} - runNet not implemented`)
}
public postProcess(output: tf.Tensor2D, inputSize: number, originalDimensions: Dimensions[]): tf.Tensor2D {
public postProcess(output: tf.Tensor2D, inputSize: number, originalDimensions: IDimensions[]): tf.Tensor2D {
const inputDimensions = originalDimensions.map(({ width, height }) => {
const scale = inputSize / Math.max(height, width)
......
......@@ -9,9 +9,4 @@ export function createFaceLandmarkNet(weights: Float32Array) {
const net = new FaceLandmarkNet()
net.extractWeights(weights)
return net
}
export function faceLandmarkNet(weights: Float32Array) {
console.warn('faceLandmarkNet(weights: Float32Array) will be deprecated in future, use createFaceLandmarkNet instead')
return createFaceLandmarkNet(weights)
}
\ No newline at end of file
......@@ -6,9 +6,4 @@ export function createFaceRecognitionNet(weights: Float32Array) {
const net = new FaceRecognitionNet()
net.extractWeights(weights)
return net
}
export function faceRecognitionNet(weights: Float32Array) {
console.warn('faceRecognitionNet(weights: Float32Array) will be deprecated in future, use createFaceRecognitionNet instead')
return createFaceRecognitionNet(weights)
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { NetInput, TNetInput } from 'tfjs-image-recognition-base';
import { TinyYolov2Types } from 'tfjs-tiny-yolov2';
import { allFacesMtcnnFactory, allFacesSsdMobilenetv1Factory, allFacesTinyYolov2Factory } from './allFacesFactory';
import { FaceDetection } from './classes/FaceDetection';
import { FaceLandmarks68 } from './classes/FaceLandmarks68';
import { FullFaceDescription } from './classes/FullFaceDescription';
import { FaceDetectionNet } from './faceDetectionNet/FaceDetectionNet';
import { FaceLandmark68Net } from './faceLandmarkNet/FaceLandmark68Net';
import { FaceLandmark68TinyNet } from './faceLandmarkNet/FaceLandmark68TinyNet';
import { FaceRecognitionNet } from './faceRecognitionNet/FaceRecognitionNet';
import { Mtcnn } from './mtcnn/Mtcnn';
import { MtcnnForwardParams, MtcnnResult } from './mtcnn/types';
import { TinyYolov2 } from './tinyYolov2/TinyYolov2';
export const detectionNet = new FaceDetectionNet()
export const landmarkNet = new FaceLandmark68Net()
export const recognitionNet = new FaceRecognitionNet()
// nets need more specific names to avoid ambiguity in the future,
// when alternative net implementations are provided
export const nets = {
ssdMobilenetv1: detectionNet,
faceLandmark68Net: landmarkNet,
faceLandmark68TinyNet: new FaceLandmark68TinyNet(),
faceRecognitionNet: recognitionNet,
mtcnn: new Mtcnn(),
tinyYolov2: new TinyYolov2()
}
export function loadSsdMobilenetv1Model(url: string) {
return nets.ssdMobilenetv1.load(url)
}
export function loadFaceLandmarkModel(url: string) {
return nets.faceLandmark68Net.load(url)
}
export function loadFaceLandmarkTinyModel(url: string) {
return nets.faceLandmark68TinyNet.load(url)
}
export function loadFaceRecognitionModel(url: string) {
return nets.faceRecognitionNet.load(url)
}
export function loadMtcnnModel(url: string) {
return nets.mtcnn.load(url)
}
export function loadTinyYolov2Model(url: string) {
return nets.tinyYolov2.load(url)
}
export function loadFaceDetectionModel(url: string) {
return loadSsdMobilenetv1Model(url)
}
export function loadModels(url: string) {
console.warn('loadModels will be deprecated in future')
return Promise.all([
loadSsdMobilenetv1Model(url),
loadFaceLandmarkModel(url),
loadFaceRecognitionModel(url),
loadMtcnnModel(url),
loadTinyYolov2Model(url)
])
}
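Editor's note: with loadModels on the way out, the intended pattern is to load only the nets a page actually uses; '/models' is an illustrative URL:
// e.g. inside an async init function:
await loadSsdMobilenetv1Model('/models')
await loadFaceLandmarkModel('/models')
await loadFaceRecognitionModel('/models')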
export function locateFaces(
input: TNetInput,
minConfidence?: number,
maxResults?: number
): Promise<FaceDetection[]> {
return nets.ssdMobilenetv1.locateFaces(input, minConfidence, maxResults)
}
export const ssdMobilenetv1 = locateFaces
export function detectLandmarks(
input: TNetInput
): Promise<FaceLandmarks68 | FaceLandmarks68[]> {
return nets.faceLandmark68Net.detectLandmarks(input)
}
export function detectLandmarksTiny(
input: TNetInput
): Promise<FaceLandmarks68 | FaceLandmarks68[]> {
return nets.faceLandmark68TinyNet.detectLandmarks(input)
}
export function computeFaceDescriptor(
input: TNetInput
): Promise<Float32Array | Float32Array[]> {
return nets.faceRecognitionNet.computeFaceDescriptor(input)
}
export function mtcnn(
input: TNetInput,
forwardParams: MtcnnForwardParams
): Promise<MtcnnResult[]> {
return nets.mtcnn.forward(input, forwardParams)
}
export function tinyYolov2(
input: TNetInput,
forwardParams: TinyYolov2Types.TinyYolov2ForwardParams
): Promise<FaceDetection[]> {
return nets.tinyYolov2.locateFaces(input, forwardParams)
}
export type allFacesSsdMobilenetv1Function = (
input: tf.Tensor | NetInput | TNetInput,
minConfidence?: number,
useBatchProcessing?: boolean
) => Promise<FullFaceDescription[]>
export const allFacesSsdMobilenetv1: allFacesSsdMobilenetv1Function = allFacesSsdMobilenetv1Factory(
nets.ssdMobilenetv1,
nets.faceLandmark68Net,
nets.faceRecognitionNet
)
export type allFacesTinyYolov2Function = (
input: tf.Tensor | NetInput | TNetInput,
forwardParams?: TinyYolov2Types.TinyYolov2ForwardParams,
useBatchProcessing?: boolean
) => Promise<FullFaceDescription[]>
export const allFacesTinyYolov2: allFacesTinyYolov2Function = allFacesTinyYolov2Factory(
nets.tinyYolov2,
nets.faceLandmark68Net,
nets.faceRecognitionNet
)
export type allFacesMtcnnFunction = (
input: tf.Tensor | NetInput | TNetInput,
mtcnnForwardParams?: MtcnnForwardParams,
useBatchProcessing?: boolean
) => Promise<FullFaceDescription[]>
export const allFacesMtcnn: allFacesMtcnnFunction = allFacesMtcnnFactory(
nets.mtcnn,
nets.faceRecognitionNet
)
export const allFaces = allFacesSsdMobilenetv1
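Editor's note: allFaces remains an alias for the SSD Mobilenet pipeline; a minimal call, where the input and the 0.8 minConfidence are illustrative:
declare const inputImage: HTMLImageElement
// inside an async function:
const fullFaceDescriptions = await allFacesSsdMobilenetv1(inputImage, 0.8)
fullFaceDescriptions.forEach(({ detection, landmarks, descriptor }) => {
  // detection box, 68 landmark points and 128-float descriptor per face
})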
export class ComposableTask<T> {
public async then(
onfulfilled: (value: T) => T | PromiseLike<T>
): Promise<T> {
return onfulfilled(await this.run())
}
public async run(): Promise<T> {
throw new Error('ComposableTask - run is not implemented')
}
}
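Editor's note: because then() delegates to run(), any subclass becomes directly awaitable; a minimal sketch (AnswerTask is hypothetical):
class AnswerTask extends ComposableTask<number> {
  public async run(): Promise<number> { return 42 }
}
// inside an async function:
const answer = await new AnswerTask() // awaiting invokes then(), which invokes run() -> 42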
import { TNetInput } from 'tfjs-image-recognition-base';
import { FaceDetectionWithLandmarks } from '../classes/FaceDetectionWithLandmarks';
import { FullFaceDescription } from '../classes/FullFaceDescription';
import { extractFaces } from '../dom';
import { ComposableTask } from './ComposableTask';
import { nets } from './nets';
export class ComputeFaceDescriptorsTaskBase<TReturn, DetectFaceLandmarksReturnType> extends ComposableTask<TReturn> {
constructor(
protected detectFaceLandmarksTask: ComposableTask<DetectFaceLandmarksReturnType> | Promise<DetectFaceLandmarksReturnType>,
protected input: TNetInput
) {
super()
}
}
export class ComputeAllFaceDescriptorsTask extends ComputeFaceDescriptorsTaskBase<FullFaceDescription[], FaceDetectionWithLandmarks[]> {
public async run(): Promise<FullFaceDescription[]> {
const facesWithLandmarks = await this.detectFaceLandmarksTask
const alignedFaceCanvases = await extractFaces(
this.input,
facesWithLandmarks.map(({ landmarks }) => landmarks.align())
)
return await Promise.all(facesWithLandmarks.map(async ({ detection, landmarks }, i) => {
const descriptor = await nets.faceRecognitionNet.computeFaceDescriptor(alignedFaceCanvases[i]) as Float32Array
return new FullFaceDescription(detection, landmarks, descriptor)
}))
}
}
export class ComputeSingleFaceDescriptorTask extends ComputeFaceDescriptorsTaskBase<FullFaceDescription | undefined, FaceDetectionWithLandmarks | undefined> {
public async run(): Promise<FullFaceDescription | undefined> {
const detectionWithLandmarks = await this.detectFaceLandmarksTask
if (!detectionWithLandmarks) {
return
}
const { detection, landmarks, alignedRect } = detectionWithLandmarks
const alignedFaceCanvas = (await extractFaces(this.input, [alignedRect]))[0]
const descriptor = await nets.faceRecognitionNet.computeFaceDescriptor(alignedFaceCanvas) as Float32Array
return new FullFaceDescription(detection, landmarks, descriptor)
}
}
\ No newline at end of file
import { TNetInput } from 'tfjs-image-recognition-base';
import { FaceDetection } from '../classes/FaceDetection';
import { FaceDetectionWithLandmarks } from '../classes/FaceDetectionWithLandmarks';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { extractFaces } from '../dom';
import { FaceLandmark68Net } from '../faceLandmarkNet/FaceLandmark68Net';
import { FaceLandmark68TinyNet } from '../faceLandmarkNet/FaceLandmark68TinyNet';
import { ComposableTask } from './ComposableTask';
import { ComputeAllFaceDescriptorsTask, ComputeSingleFaceDescriptorTask } from './ComputeFaceDescriptorsTasks';
import { nets } from './nets';
export class DetectFaceLandmarksTaskBase<ReturnType, DetectFacesReturnType> extends ComposableTask<ReturnType> {
constructor(
protected detectFacesTask: ComposableTask<DetectFacesReturnType> | Promise<DetectFacesReturnType>,
protected input: TNetInput,
protected useTinyLandmarkNet: boolean
) {
super()
}
protected get landmarkNet(): FaceLandmark68Net | FaceLandmark68TinyNet {
return this.useTinyLandmarkNet
? nets.faceLandmark68TinyNet
: nets.faceLandmark68Net
}
}
export class DetectAllFaceLandmarksTask extends DetectFaceLandmarksTaskBase<FaceDetectionWithLandmarks[], FaceDetection[]> {
public async run(): Promise<FaceDetectionWithLandmarks[]> {
const detections = await this.detectFacesTask
const faceCanvases = await extractFaces(this.input, detections)
const faceLandmarksByFace = await Promise.all(faceCanvases.map(
canvas => this.landmarkNet.detectLandmarks(canvas)
)) as FaceLandmarks68[]
return detections.map((detection, i) =>
new FaceDetectionWithLandmarks(detection, faceLandmarksByFace[i])
)
}
withFaceDescriptors(): ComputeAllFaceDescriptorsTask {
return new ComputeAllFaceDescriptorsTask(this, this.input)
}
}
export class DetectSingleFaceLandmarksTask extends DetectFaceLandmarksTaskBase<FaceDetectionWithLandmarks | undefined, FaceDetection | undefined> {
public async run(): Promise<FaceDetectionWithLandmarks | undefined> {
const detection = await this.detectFacesTask
if (!detection) {
return
}
const faceCanvas = (await extractFaces(this.input, [detection]))[0]
return new FaceDetectionWithLandmarks(
detection,
await this.landmarkNet.detectLandmarks(faceCanvas) as FaceLandmarks68
)
}
withFaceDescriptor(): ComputeSingleFaceDescriptorTask {
return new ComputeSingleFaceDescriptorTask(this, this.input)
}
}
\ No newline at end of file
import { TNetInput } from 'tfjs-image-recognition-base';
import { TinyYolov2Options } from 'tfjs-tiny-yolov2';
import { FaceDetection } from '../classes/FaceDetection';
import { MtcnnOptions } from '../mtcnn/MtcnnOptions';
import { SsdMobilenetv1Options } from '../ssdMobilenetv1/SsdMobilenetv1Options';
import { TinyFaceDetectorOptions } from '../tinyFaceDetector/TinyFaceDetectorOptions';
import { ComposableTask } from './ComposableTask';
import { DetectAllFaceLandmarksTask, DetectSingleFaceLandmarksTask } from './DetectFaceLandmarksTasks';
import { nets } from './nets';
import { FaceDetectionOptions } from './types';
export function detectSingleFace(
input: TNetInput,
options: FaceDetectionOptions = new SsdMobilenetv1Options()
): DetectSingleFaceTask {
return new DetectSingleFaceTask(input, options)
}
export function detectAllFaces(
input: TNetInput,
options: FaceDetectionOptions = new SsdMobilenetv1Options()
): DetectAllFacesTask {
return new DetectAllFacesTask(input, options)
}
export class DetectFacesTaskBase<TReturn> extends ComposableTask<TReturn> {
constructor(
protected input: TNetInput,
protected options: FaceDetectionOptions = new SsdMobilenetv1Options()
) {
super()
}
}
export class DetectAllFacesTask extends DetectFacesTaskBase<FaceDetection[]> {
public async run(): Promise<FaceDetection[]> {
const { input, options } = this
if (options instanceof MtcnnOptions) {
return (await nets.mtcnn.forward(input, options))
.map(result => result.faceDetection)
}
const faceDetectionFunction = options instanceof TinyFaceDetectorOptions
? (input: TNetInput) => nets.tinyFaceDetector.locateFaces(input, options)
: (
options instanceof SsdMobilenetv1Options
? (input: TNetInput) => nets.ssdMobilenetv1.locateFaces(input, options)
: (
options instanceof TinyYolov2Options
? (input: TNetInput) => nets.tinyYolov2.locateFaces(input, options)
: null
)
)
if (!faceDetectionFunction) {
throw new Error('detectFaces - expected options to be instance of TinyFaceDetectorOptions | SsdMobilenetv1Options | MtcnnOptions | TinyYolov2Options')
}
return faceDetectionFunction(input)
}
withFaceLandmarks(useTinyLandmarkNet: boolean = false): DetectAllFaceLandmarksTask {
return new DetectAllFaceLandmarksTask(this, this.input, useTinyLandmarkNet)
}
}
export class DetectSingleFaceTask extends DetectFacesTaskBase<FaceDetection | undefined> {
public async run(): Promise<FaceDetection | undefined> {
// take the detection with the highest score (sort descending)
return (await new DetectAllFacesTask(this.input, this.options))
  .sort((f1, f2) => f2.score - f1.score)[0]
}
withFaceLandmarks(useTinyLandmarkNet: boolean = false): DetectSingleFaceLandmarksTask {
return new DetectSingleFaceLandmarksTask(this, this.input, useTinyLandmarkNet)
}
}
\ No newline at end of file
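Editor's note: the task classes combine into the chainable API this release introduces; a typical chain, with input and option values illustrative only:
declare const input: HTMLImageElement
// inside an async function:
const results = await detectAllFaces(input, new SsdMobilenetv1Options({ minConfidence: 0.8 }))
  .withFaceLandmarks()
  .withFaceDescriptors()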
import { FaceMatch } from '../classes/FaceMatch';
import { FullFaceDescription } from '../classes/FullFaceDescription';
import { LabeledFaceDescriptors } from '../classes/LabeledFaceDescriptors';
import { euclideanDistance } from '../euclideanDistance';
export class FaceMatcher {
private _labeledDescriptors: LabeledFaceDescriptors[]
private _distanceThreshold: number
constructor(
inputs: LabeledFaceDescriptors | FullFaceDescription | Float32Array | Array<LabeledFaceDescriptors | FullFaceDescription | Float32Array>,
distanceThreshold: number = 0.6
) {
this._distanceThreshold = distanceThreshold
const inputArray = Array.isArray(inputs) ? inputs : [inputs]
if (!inputArray.length) {
throw new Error(`FaceRecognizer.constructor - expected at least one input`)
}
let count = 1
const createUniqueLabel = () => `person ${count++}`
this._labeledDescriptors = inputArray.map((desc) => {
if (desc instanceof LabeledFaceDescriptors) {
return desc
}
if (desc instanceof FullFaceDescription) {
return new LabeledFaceDescriptors(createUniqueLabel(), [desc.descriptor])
}
if (desc instanceof Float32Array) {
return new LabeledFaceDescriptors(createUniqueLabel(), [desc])
}
throw new Error(`FaceRecognizer.constructor - expected inputs to be of type LabeledFaceDescriptors | FullFaceDescription | Float32Array | Array<LabeledFaceDescriptors | FullFaceDescription | Float32Array>`)
})
}
public get labeledDescriptors(): LabeledFaceDescriptors[] { return this._labeledDescriptors }
public get distanceThreshold(): number { return this._distanceThreshold }
public computeMeanDistance(queryDescriptor: Float32Array, descriptors: Float32Array[]): number {
return descriptors
.map(d => euclideanDistance(d, queryDescriptor))
.reduce((d1, d2) => d1 + d2, 0)
/ (descriptors.length || 1)
}
public matchDescriptor(queryDescriptor: Float32Array): FaceMatch {
return this.labeledDescriptors
.map(({ descriptors, label }) => new FaceMatch(
label,
this.computeMeanDistance(queryDescriptor, descriptors)
))
.reduce((best, curr) => best.distance < curr.distance ? best : curr)
}
public findBestMatch(queryDescriptor: Float32Array): FaceMatch {
const bestMatch = this.matchDescriptor(queryDescriptor)
return bestMatch.distance < this.distanceThreshold
? bestMatch
: new FaceMatch('unknown', bestMatch.distance)
}
}
\ No newline at end of file
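Editor's note: an end-to-end sketch tying FaceMatcher to the pipeline above; the reference descriptions and query descriptor are assumed to come from prior detection runs:
declare const referenceDescriptions: FullFaceDescription[]
declare const queryDescriptor: Float32Array
const matcher = new FaceMatcher(referenceDescriptions, 0.6) // 0.6 is the default threshold
const bestMatch = matcher.findBestMatch(queryDescriptor)
bestMatch.toString() // e.g. 'person 1 (0.41)', or 'unknown (0.74)' above the threshold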