Commit abf82d46 by vincent

cleaned up examples

parent f36fb464
const classes = ['amy', 'bernadette', 'howard', 'leonard', 'penny', 'raj', 'sheldon', 'stuart']
function getImageUri(imageName) {
return `images/${imageName}`
}
function getFaceImageUri(className, idx) {
return `images/${className}/${className}${idx}.png`
}
async function fetchImage(uri) {
return (await fetch(uri)).blob()
}
async function requestExternalImage(imageUrl) {
const res = await fetch('fetch_external_image', {
method: 'post',
headers: {
'content-type': 'application/json'
},
body: JSON.stringify({ imageUrl })
})
if (res.status >= 400) {
console.error(res.status + ' : ' + await res.text())
throw new Error('failed to fetch image from url: ' + imageUrl)
}
let blob
try {
blob = await res.blob()
return await faceapi.bufferToImage(blob)
} catch (e) {
console.error('received blob:', blob)
console.error('error:', e)
throw new Error('failed to load image from url: ' + imageUrl)
}
}
// fetch first image of each class and compute their descriptors
async function initTrainDescriptorsByClass(net, numImagesForTraining = 1) {
const maxAvailableImagesPerClass = 5
numImagesForTraining = Math.min(numImagesForTraining, maxAvailableImagesPerClass)
return Promise.all(classes.map(
async className => {
const descriptors = []
for (let i = 1; i < (numImagesForTraining + 1); i++) {
const img = await faceapi.bufferToImage(
await fetchImage(getFaceImageUri(className, i))
)
descriptors.push(await net.computeFaceDescriptor(img))
}
return {
descriptors,
className
}
}
))
}
function getBestMatch(descriptorsByClass, queryDescriptor) {
function computeMeanDistance(descriptorsOfClass) {
return faceapi.round(
descriptorsOfClass
.map(d => faceapi.euclideanDistance(d, queryDescriptor))
.reduce((d1, d2) => d1 + d2, 0)
/ (descriptorsOfClass.length || 1)
)
}
return descriptorsByClass
.map(
({ descriptors, className }) => ({
distance: computeMeanDistance(descriptors),
className
})
)
.reduce((best, curr) => best.distance < curr.distance ? best : curr)
}
function renderNavBar(navbarId, exampleUri) {
const examples = [
{
uri: 'face_detection',
name: 'Face Detection'
},
{
uri: 'face_detection_video',
name: 'Face Detection Video'
},
{
uri: 'face_recognition',
name: 'Face Recognition'
},
{
uri: 'face_similarity',
name: 'Face Similarity'
},
{
uri: 'face_landmarks',
name: 'Face Landmarks'
},
{
uri: 'detect_and_draw_landmarks',
name: 'Detect and Draw Landmarks'
},
{
uri: 'detect_and_draw_faces',
name: 'Detect and Draw Faces'
},
{
uri: 'face_alignment',
name: 'Face Alignment'
},
{
uri: 'detect_and_recognize_faces',
name: 'Detect and Recognize Faces'
},
{
uri: 'mtcnn_face_detection',
name: 'MTCNN Face Detection'
},
{
uri: 'mtcnn_face_detection_video',
name: 'MTCNN Face Detection Video'
},
{
uri: 'mtcnn_face_detection_webcam',
name: 'MTCNN Face Detection Webcam'
},
{
uri: 'mtcnn_face_recognition',
name: 'MTCNN Face Recognition'
},
{
uri: 'mtcnn_face_recognition_webcam',
name: 'MTCNN Face Recognition Webcam'
},
{
uri: 'tiny_yolov2_face_detection',
name: 'Tiny Yolov2 Face Detection'
},
{
uri: 'tiny_yolov2_face_detection_video',
name: 'Tiny Yolov2 Face Detection Video'
},
{
uri: 'tiny_yolov2_face_detection_webcam',
name: 'Tiny Yolov2 Face Detection Webcam'
},
{
uri: 'tiny_yolov2_face_recognition',
name: 'Tiny Yolov2 Face Recognition'
},
{
uri: 'batch_face_landmarks',
name: 'Batch Face Landmarks'
},
{
uri: 'batch_face_recognition',
name: 'Batch Face Recognition'
}
]
const navbar = $(navbarId).get(0)
const pageContainer = $('.page-container').get(0)
const header = document.createElement('h3')
header.innerHTML = examples.find(ex => ex.uri === exampleUri).name
pageContainer.insertBefore(header, pageContainer.children[0])
const menuContent = document.createElement('ul')
menuContent.id = 'slide-out'
menuContent.classList.add('side-nav', 'fixed')
navbar.appendChild(menuContent)
const menuButton = document.createElement('a')
menuButton.href='#'
menuButton.classList.add('button-collapse', 'show-on-large')
menuButton.setAttribute('data-activates', 'slide-out')
const menuButtonIcon = document.createElement('img')
menuButtonIcon.src = 'menu_icon.png'
menuButton.appendChild(menuButtonIcon)
navbar.appendChild(menuButton)
const li = document.createElement('li')
const githubLink = document.createElement('a')
githubLink.classList.add('waves-effect', 'waves-light', 'side-by-side')
githubLink.id = 'github-link'
githubLink.href = 'https://github.com/justadudewhohacks/face-api.js'
const h5 = document.createElement('h5')
h5.innerHTML = 'face-api.js'
githubLink.appendChild(h5)
const githubLinkIcon = document.createElement('img')
githubLinkIcon.src = 'github_link_icon.png'
githubLink.appendChild(githubLinkIcon)
li.appendChild(githubLink)
menuContent.appendChild(li)
examples
.forEach(ex => {
const li = document.createElement('li')
if (ex.uri === exampleUri) {
li.style.background='#b0b0b0'
}
const a = document.createElement('a')
a.classList.add('waves-effect', 'waves-light')
a.href = ex.uri
const span = document.createElement('span')
span.innerHTML = ex.name
span.style.whiteSpace = 'nowrap'
a.appendChild(span)
li.appendChild(a)
menuContent.appendChild(li)
})
$('.button-collapse').sideNav({
menuWidth: 280
})
}
function renderSelectList(selectListId, onChange, initialValue, renderChildren) {
const select = document.createElement('select')
$(selectListId).get(0).appendChild(select)
renderChildren(select)
$(select).val(initialValue)
$(select).on('change', (e) => onChange(e.target.value))
$(select).material_select()
}
function renderOption(parent, text, value) {
const option = document.createElement('option')
option.innerHTML = text
option.value = value
parent.appendChild(option)
}
function renderFaceImageSelectList(selectListId, onChange, initialValue) {
const indices = [1, 2, 3, 4, 5]
function renderChildren(select) {
classes.forEach(className => {
const optgroup = document.createElement('optgroup')
optgroup.label = className
select.appendChild(optgroup)
indices.forEach(imageIdx =>
renderOption(
optgroup,
`${className} ${imageIdx}`,
getFaceImageUri(className, imageIdx)
)
)
})
}
renderSelectList(
selectListId,
onChange,
getFaceImageUri(initialValue.className, initialValue.imageIdx),
renderChildren
)
}
function renderImageSelectList(selectListId, onChange, initialValue) {
const images = [1, 2, 3, 4, 5].map(idx => `bbt${idx}.jpg`)
function renderChildren(select) {
images.forEach(imageName =>
renderOption(
select,
imageName,
getImageUri(imageName)
)
)
}
renderSelectList(
selectListId,
onChange,
getImageUri(initialValue),
renderChildren
)
}
\ No newline at end of file
const classes = ['amy', 'bernadette', 'howard', 'leonard', 'penny', 'raj', 'sheldon', 'stuart']
function getFaceImageUri(className, idx) {
return `images/${className}/${className}${idx}.png`
}
function renderFaceImageSelectList(selectListId, onChange, initialValue) {
const indices = [1, 2, 3, 4, 5]
function renderChildren(select) {
classes.forEach(className => {
const optgroup = document.createElement('optgroup')
optgroup.label = className
select.appendChild(optgroup)
indices.forEach(imageIdx =>
renderOption(
optgroup,
`${className} ${imageIdx}`,
getFaceImageUri(className, imageIdx)
)
)
})
}
renderSelectList(
selectListId,
onChange,
getFaceImageUri(initialValue.className, initialValue.imageIdx),
renderChildren
)
}
// fetch first image of each class and compute their descriptors
async function initBbtFaceDescriptors(net, numImagesForTraining = 1) {
const maxAvailableImagesPerClass = 5
numImagesForTraining = Math.min(numImagesForTraining, maxAvailableImagesPerClass)
return Promise.all(classes.map(
async className => {
const descriptors = []
for (let i = 1; i < (numImagesForTraining + 1); i++) {
const img = await faceapi.fetchImage(getFaceImageUri(className, i))
descriptors.push(await faceapi.computeFaceDescriptor(img))
}
return {
descriptors,
className
}
}
))
}
function getBestMatch(descriptorsByClass, queryDescriptor) {
function computeMeanDistance(descriptorsOfClass) {
return faceapi.round(
descriptorsOfClass
.map(d => faceapi.euclideanDistance(d, queryDescriptor))
.reduce((d1, d2) => d1 + d2, 0)
/ (descriptorsOfClass.length || 1)
)
}
return descriptorsByClass
.map(
({ descriptors, className }) => ({
distance: computeMeanDistance(descriptors),
className
})
)
.reduce((best, curr) => best.distance < curr.distance ? best : curr)
}
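// --- Illustration only, not part of this commit ---
// A minimal sketch of how the two helpers above are meant to be used together:
// build the reference descriptors once, then classify a query face by its mean
// euclidean distance to each class. `queryImageUri`, the sketch's function name
// and the 0.6 cut-off are assumptions for this example; it also assumes the
// face recognition model has already been loaded. The views in this commit use
// the same pattern.
async function classifyBbtFaceSketch(queryImageUri) {
  // reference descriptors for all classes (1 image per class)
  const referenceDescriptorsByClass = await initBbtFaceDescriptors(faceapi.recognitionNet, 1)
  // descriptor of the face we want to classify
  const queryImg = await faceapi.fetchImage(queryImageUri)
  const queryDescriptor = await faceapi.computeFaceDescriptor(queryImg)
  // pick the class with the lowest mean distance to the query descriptor
  const bestMatch = getBestMatch(referenceDescriptorsByClass, queryDescriptor)
  return bestMatch.distance < 0.6 ? bestMatch.className : 'unknown'
}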
function getImageUri(imageName) {
return `images/${imageName}`
}
async function requestExternalImage(imageUrl) {
const res = await fetch('fetch_external_image', {
method: 'post',
headers: {
'content-type': 'application/json'
},
body: JSON.stringify({ imageUrl })
})
if (res.status >= 400) {
console.error(res.status + ' : ' + await res.text())
throw new Error('failed to fetch image from url: ' + imageUrl)
}
let blob
try {
blob = await res.blob()
return await faceapi.bufferToImage(blob)
} catch (e) {
console.error('received blob:', blob)
console.error('error:', e)
throw new Error('failed to load image from url: ' + imageUrl)
}
}
function renderNavBar(navbarId, exampleUri) {
const examples = [
{
uri: 'face_detection',
name: 'Face Detection'
},
{
uri: 'face_extraction',
name: 'Face Extraction'
},
{
uri: 'face_landmark_detection',
name: 'Face Landmark Detection'
},
{
uri: 'face_recognition',
name: 'Face Recognition'
},
{
uri: 'video_face_tracking',
name: 'Video Face Tracking'
},
{
uri: 'webcam_face_tracking',
name: 'Webcam Face Tracking'
},
{
uri: 'bbt_face_landmark_detection',
name: 'BBT Face Landmark Detection'
},
{
uri: 'bbt_face_recognition',
name: 'BBT Face Recognition'
},
{
uri: 'bbt_face_similarity',
name: 'BBT Face Similarity'
},
{
uri: 'batch_face_landmarks',
name: 'Batch Face Landmark Detection'
},
{
uri: 'batch_face_recognition',
name: 'Batch Face Recognition'
}
]
const navbar = $(navbarId).get(0)
const pageContainer = $('.page-container').get(0)
const header = document.createElement('h3')
header.innerHTML = examples.find(ex => ex.uri === exampleUri).name
pageContainer.insertBefore(header, pageContainer.children[0])
const menuContent = document.createElement('ul')
menuContent.id = 'slide-out'
menuContent.classList.add('side-nav', 'fixed')
navbar.appendChild(menuContent)
const menuButton = document.createElement('a')
menuButton.href='#'
menuButton.classList.add('button-collapse', 'show-on-large')
menuButton.setAttribute('data-activates', 'slide-out')
const menuButtonIcon = document.createElement('img')
menuButtonIcon.src = 'menu_icon.png'
menuButton.appendChild(menuButtonIcon)
navbar.appendChild(menuButton)
const li = document.createElement('li')
const githubLink = document.createElement('a')
githubLink.classList.add('waves-effect', 'waves-light', 'side-by-side')
githubLink.id = 'github-link'
githubLink.href = 'https://github.com/justadudewhohacks/face-api.js'
const h5 = document.createElement('h5')
h5.innerHTML = 'face-api.js'
githubLink.appendChild(h5)
const githubLinkIcon = document.createElement('img')
githubLinkIcon.src = 'github_link_icon.png'
githubLink.appendChild(githubLinkIcon)
li.appendChild(githubLink)
menuContent.appendChild(li)
examples
.forEach(ex => {
const li = document.createElement('li')
if (ex.uri === exampleUri) {
li.style.background='#b0b0b0'
}
const a = document.createElement('a')
a.classList.add('waves-effect', 'waves-light')
a.href = ex.uri
const span = document.createElement('span')
span.innerHTML = ex.name
span.style.whiteSpace = 'nowrap'
a.appendChild(span)
li.appendChild(a)
menuContent.appendChild(li)
})
$('.button-collapse').sideNav({
menuWidth: 280
})
}
function renderSelectList(selectListId, onChange, initialValue, renderChildren) {
const select = document.createElement('select')
$(selectListId).get(0).appendChild(select)
renderChildren(select)
$(select).val(initialValue)
$(select).on('change', (e) => onChange(e.target.value))
$(select).material_select()
}
function renderOption(parent, text, value) {
const option = document.createElement('option')
option.innerHTML = text
option.value = value
parent.appendChild(option)
}
\ No newline at end of file
const SSD_MOBILENETV1 = 'ssd_mobilenetv1'
const TINY_FACE_DETECTOR = 'tiny_face_detector'
const MTCNN = 'mtcnn'
let selectedFaceDetector = SSD_MOBILENETV1
// ssd_mobilenetv1 options
let minConfidence = 0.7
// tiny_face_detector options
let inputSize = 512
let scoreThreshold = 0.5
//mtcnn options
let minFaceSize = 20
function getFaceDetectorOptions() {
return selectedFaceDetector === SSD_MOBILENETV1
? new faceapi.SsdMobilenetv1Options({ minConfidence })
: (
selectedFaceDetector === TINY_FACE_DETECTOR
? new faceapi.TinyFaceDetectorOptions({ inputSize, scoreThreshold })
: new faceapi.MtcnnOptions({ minFaceSize })
)
}
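// --- Illustration only, not part of this commit ---
// A minimal sketch of how getFaceDetectorOptions() is consumed: the returned
// options object is passed straight to faceapi.detectAllFaces, exactly as the
// faceDetection / faceExtraction views of this commit do. `inputEl` (an <img>
// or <video> element) and the function name are assumptions for this sketch,
// which also assumes the selected detector's weights have been loaded
// (e.g. via changeFaceDetector below).
async function detectFacesWithSelectedDetectorSketch(inputEl) {
  // options reflect the currently selected detector and its parameters
  // (minConfidence, inputSize + scoreThreshold, or minFaceSize)
  const options = getFaceDetectorOptions()
  return faceapi.detectAllFaces(inputEl, options)
}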
function onIncreaseMinConfidence() {
minConfidence = Math.min(faceapi.round(minConfidence + 0.1), 1.0)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onDecreaseMinConfidence() {
minConfidence = Math.max(faceapi.round(minConfidence - 0.1), 0.1)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onInputSizeChanged(e) {
changeInputSize(e.target.value)
updateResults()
}
function changeInputSize(size) {
inputSize = parseInt(size)
const inputSizeSelect = $('#inputSize')
inputSizeSelect.val(inputSize)
inputSizeSelect.material_select()
}
function onIncreaseScoreThreshold() {
scoreThreshold = Math.min(faceapi.round(scoreThreshold + 0.1), 1.0)
$('#scoreThreshold').val(scoreThreshold)
updateResults()
}
function onDecreaseScoreThreshold() {
scoreThreshold = Math.max(faceapi.round(scoreThreshold - 0.1), 0.1)
$('#scoreThreshold').val(scoreThreshold)
updateResults()
}
function onIncreaseMinFaceSize() {
minFaceSize = Math.min(faceapi.round(minFaceSize + 20), 300)
$('#minFaceSize').val(minFaceSize)
}
function onDecreaseMinFaceSize() {
minFaceSize = Math.max(faceapi.round(minFaceSize - 20), 50)
$('#minFaceSize').val(minFaceSize)
}
function getCurrentFaceDetectionNet() {
if (selectedFaceDetector === SSD_MOBILENETV1) {
return faceapi.nets.ssdMobilenetv1
}
if (selectedFaceDetector === TINY_FACE_DETECTOR) {
return faceapi.nets.tinyFaceDetector
}
if (selectedFaceDetector === MTCNN) {
return faceapi.nets.mtcnn
}
}
function isFaceDetectionModelLoaded() {
return !!getCurrentFaceDetectionNet().params
}
async function changeFaceDetector(detector) {
['#ssd_mobilenetv1_controls', '#tiny_face_detector_controls', '#mtcnn_controls']
.forEach(id => $(id).hide())
selectedFaceDetector = detector
$('#selectFaceDetector').val(detector)
$('#loader').show()
if (!isFaceDetectionModelLoaded()) {
await getCurrentFaceDetectionNet().load('/')
}
$(`#${detector}_controls`).show()
$('#loader').hide()
}
async function onSelectedFaceDetectorChanged(e) {
selectedFaceDetector = e.target.value
await changeFaceDetector(e.target.value)
updateResults()
}
function initFaceDetectionControls() {
const faceDetectorSelect = $('#selectFaceDetector')
faceDetectorSelect.val(selectedFaceDetector)
faceDetectorSelect.on('change', onSelectedFaceDetectorChanged)
faceDetectorSelect.material_select()
const inputSizeSelect = $('#inputSize')
inputSizeSelect.val(inputSize)
inputSizeSelect.on('change', onInputSizeChanged)
inputSizeSelect.material_select()
}
\ No newline at end of file
async function onSelectedImageChanged(uri) {
const img = await faceapi.fetchImage(uri)
$(`#inputImg`).get(0).src = img.src
updateResults()
}
async function loadImageFromUrl(url) {
const img = await requestExternalImage($('#imgUrlInput').val())
$('#inputImg').get(0).src = img.src
updateResults()
}
function renderImageSelectList(selectListId, onChange, initialValue) {
const images = [1, 2, 3, 4, 5].map(idx => `bbt${idx}.jpg`)
function renderChildren(select) {
images.forEach(imageName =>
renderOption(
select,
imageName,
getImageUri(imageName)
)
)
}
renderSelectList(
selectListId,
onChange,
getImageUri(initialValue),
renderChildren
)
}
function initImageSelectionControls() {
renderImageSelectList(
'#selectList',
async (uri) => {
await onSelectedImageChanged(uri)
},
'bbt1.jpg'
)
onSelectedImageChanged($('#selectList select').val())
}
\ No newline at end of file
@@ -11,30 +11,22 @@ const viewsDir = path.join(__dirname, 'views')
app.use(express.static(viewsDir))
app.use(express.static(path.join(__dirname, './public')))
app.use(express.static(path.join(__dirname, '../weights')))
app.use(express.static(path.join(__dirname, '../weights_uncompressed')))
app.use(express.static(path.join(__dirname, '../dist')))
app.use(express.static(path.join(__dirname, './node_modules/axios/dist')))
app.get('/', (req, res) => res.redirect('/face_detection'))
app.get('/face_detection', (req, res) => res.sendFile(path.join(viewsDir, 'faceDetection.html')))
app.get('/face_detection_video', (req, res) => res.sendFile(path.join(viewsDir, 'faceDetectionVideo.html')))
app.get('/face_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'faceRecognition.html')))
app.get('/face_similarity', (req, res) => res.sendFile(path.join(viewsDir, 'faceSimilarity.html')))
app.get('/face_landmarks', (req, res) => res.sendFile(path.join(viewsDir, 'faceLandmarks.html')))
app.get('/detect_and_draw_faces', (req, res) => res.sendFile(path.join(viewsDir, 'detectAndDrawFaces.html')))
app.get('/detect_and_draw_landmarks', (req, res) => res.sendFile(path.join(viewsDir, 'detectAndDrawLandmarks.html')))
app.get('/face_alignment', (req, res) => res.sendFile(path.join(viewsDir, 'faceAlignment.html')))
app.get('/detect_and_recognize_faces', (req, res) => res.sendFile(path.join(viewsDir, 'detectAndRecognizeFaces.html')))
app.get('/mtcnn_face_detection', (req, res) => res.sendFile(path.join(viewsDir, 'mtcnnFaceDetection.html')))
app.get('/mtcnn_face_detection_video', (req, res) => res.sendFile(path.join(viewsDir, 'mtcnnFaceDetectionVideo.html')))
app.get('/mtcnn_face_detection_webcam', (req, res) => res.sendFile(path.join(viewsDir, 'mtcnnFaceDetectionWebcam.html')))
app.get('/mtcnn_face_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'mtcnnFaceRecognition.html')))
app.get('/mtcnn_face_recognition_webcam', (req, res) => res.sendFile(path.join(viewsDir, 'mtcnnFaceRecognitionWebcam.html')))
app.get('/tiny_yolov2_face_detection', (req, res) => res.sendFile(path.join(viewsDir, 'tinyYolov2FaceDetection.html')))
app.get('/tiny_yolov2_face_detection_video', (req, res) => res.sendFile(path.join(viewsDir, 'tinyYolov2FaceDetectionVideo.html')))
app.get('/tiny_yolov2_face_detection_webcam', (req, res) => res.sendFile(path.join(viewsDir, 'tinyYolov2FaceDetectionWebcam.html')))
app.get('/tiny_yolov2_face_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'tinyYolov2FaceRecognition.html')))
app.get('/batch_face_landmarks', (req, res) => res.sendFile(path.join(viewsDir, 'batchFaceLandmarks.html')))
app.get('/batch_face_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'batchFaceRecognition.html')))
app.get('/bbt_face_landmark_detection', (req, res) => res.sendFile(path.join(viewsDir, 'bbtFaceLandmarkDetection.html')))
app.get('/bbt_face_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'bbtFaceRecognition.html')))
app.get('/bbt_face_similarity', (req, res) => res.sendFile(path.join(viewsDir, 'bbtFaceSimilarity.html')))
app.get('/face_detection', (req, res) => res.sendFile(path.join(viewsDir, 'faceDetection.html')))
app.get('/face_extraction', (req, res) => res.sendFile(path.join(viewsDir, 'faceExtraction.html')))
app.get('/face_landmark_detection', (req, res) => res.sendFile(path.join(viewsDir, 'faceLandmarkDetection.html')))
app.get('/face_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'faceRecognition.html')))
app.get('/video_face_tracking', (req, res) => res.sendFile(path.join(viewsDir, 'videoFaceTracking.html')))
app.get('/webcam_face_tracking', (req, res) => res.sendFile(path.join(viewsDir, 'webcamFaceTracking.html')))
app.post('/fetch_external_image', async (req, res) => {
const { imageUrl } = req.body
......
@@ -2,7 +2,8 @@
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<script src="js/commons.js"></script>
<script src="js/bbt.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
@@ -97,7 +98,7 @@
.reduce((flat, arr) => flat.concat(arr))
images = await Promise.all(allImgUris.map(
async uri => faceapi.bufferToImage(await fetchImage(uri))
async uri => faceapi.fetchImage(uri)
))
// warmup
await measureTimings()
......
@@ -2,7 +2,8 @@
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<script src="js/commons.js"></script>
<script src="js/bbt.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
@@ -28,7 +29,7 @@
<div class="row side-by-side">
<div>
<label for="numImages">Num Images:</label>
<input id="numImages" type="text" class="bold" value="32"/>
<input id="numImages" type="text" class="bold" value="16"/>
</div>
<button
class="waves-effect waves-light btn"
@@ -47,13 +48,13 @@
<script>
let images = []
let trainDescriptorsByClass = []
let referenceDescriptorsByClass = []
let descriptorsByFace = []
let numImages = 32
let numImages = 16
let maxDistance = 0.6
function onNumImagesChanged(e) {
const val = parseInt(e.target.value) || 32
const val = parseInt(e.target.value) || 16
numImages = Math.min(Math.max(val, 0), 32)
e.target.value = numImages
}
@@ -67,7 +68,7 @@
const canvas = faceapi.createCanvasFromMedia(img)
$('#faceContainer').append(canvas)
const bestMatch = getBestMatch(trainDescriptorsByClass, descriptor)
const bestMatch = getBestMatch(referenceDescriptorsByClass, descriptor)
const text = `${bestMatch.distance < maxDistance ? bestMatch.className : 'unknown'} (${bestMatch.distance})`
const x = 20, y = canvas.height - 20
@@ -105,7 +106,7 @@
async function run() {
await faceapi.loadFaceRecognitionModel('/')
trainDescriptorsByClass = await initTrainDescriptorsByClass(faceapi.recognitionNet, 1)
referenceDescriptorsByClass = await initBbtFaceDescriptors(faceapi.recognitionNet, 1)
$('#loader').hide()
const imgUris = classes
@@ -114,7 +115,7 @@
.reduce((flat, arr) => flat.concat(arr))
images = await Promise.all(imgUris.map(
async uri => faceapi.bufferToImage(await fetchImage(uri))
async uri => faceapi.fetchImage(uri)
))
// warmup
......
@@ -2,7 +2,8 @@
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<script src="js/commons.js"></script>
<script src="js/bbt.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
@@ -46,8 +47,7 @@
}
async function onSelectionChanged(uri) {
const imgBuf = await fetchImage(uri)
currentImg = await faceapi.bufferToImage(imgBuf)
currentImg = await faceapi.fetchImage(uri)
landmarks = await faceapi.detectLandmarks(currentImg)
redraw()
}
@@ -59,7 +59,7 @@
}
$(document).ready(function() {
renderNavBar('#navbar', 'face_landmarks')
renderNavBar('#navbar', 'bbt_face_landmark_detection')
renderFaceImageSelectList(
'#selectList',
onSelectionChanged,
......
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/bbt.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div>
<div class="row center-content" id="loader">
<input disabled value="" id="status" type="text" class="bold">
<div class="progress">
<div class="indeterminate"></div>
</div>
</div>
<div class="row center-content">
<img id="face" src=""/>
</div>
<div class="row">
<label for="prediction">Prediction:</label>
<input disabled value="-" id="prediction" type="text" class="bold">
</div>
<div class="row">
<label for="time">Time:</label>
<input disabled value="-" id="time" type="text" class="bold">
</div>
<div class="row">
<label for="fps">Estimated Fps:</label>
<input disabled value="-" id="fps" type="text" class="bold">
</div>
<div class="row">
<button
class="waves-effect waves-light btn"
id="stop"
onclick="onToggleStop()"
>
Stop
</button>
<button
class="waves-effect waves-light btn"
onclick="onSlower()"
>
<i class="material-icons left">-</i> Slower
</button>
<button
class="waves-effect waves-light btn"
onclick="onFaster()"
>
<i class="material-icons left">+</i> Faster
</button>
</div>
<div class="row">
<label for="interval">Interval:</label>
<input disabled value="2000" id="interval" type="text" class="bold">
</div>
</div>
</div>
<script>
// for 150 x 150 sized face images 0.6 is a good threshold to
// judge whether two face descriptors are similar or not
const threshold = 0.6
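// Illustration only, not part of this commit: a minimal sketch of how the
// threshold above is applied to a pair of descriptors. faceapi.euclideanDistance
// is the same helper getBestMatch (js/bbt.js) uses internally; desc1, desc2 and
// the function name are assumptions, standing in for two previously computed
// face descriptors.
function isSameFaceSketch(desc1, desc2) {
  // e.g. a distance of 0.45 would count as the same person, 0.75 would not
  return faceapi.euclideanDistance(desc1, desc2) < threshold
}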
let interval = 2000
let isStop = false
let referenceDescriptorsByClass = []
let currImageIdx = 2, currClassIdx = 0
let to = null
function onSlower() {
interval = Math.min(interval + 100, 2000)
$('#interval').val(interval)
}
function onFaster() {
interval = Math.max(interval - 100, 0)
$('#interval').val(interval)
}
function onToggleStop() {
clearTimeout(to)
isStop = !isStop
document.getElementById('stop').innerHTML = isStop ? 'Continue' : 'Stop'
setStatusText(isStop ? 'stopped' : 'running face recognition:')
if (!isStop) {
runFaceRecognition()
}
}
function setStatusText(text) {
$('#status').val(text)
}
function displayTimeStats(timeInMs) {
$('#time').val(`${timeInMs} ms`)
$('#fps').val(`${faceapi.round(1000 / timeInMs)}`)
}
function displayImage(src) {
// show the given image uri in the #face element
$('#face').get(0).src = src
}
async function runFaceRecognition() {
async function next() {
const input = await faceapi.fetchImage(getFaceImageUri(classes[currClassIdx], currImageIdx))
const imgEl = $('#face').get(0)
imgEl.src = input.src
const ts = Date.now()
const descriptor = await faceapi.computeFaceDescriptor(input)
displayTimeStats(Date.now() - ts)
const bestMatch = getBestMatch(referenceDescriptorsByClass, descriptor)
$('#prediction').val(`${bestMatch.distance < threshold ? bestMatch.className : 'unknown'} (${bestMatch.distance})`)
// cycle through the classes first; after a full round of classes move on to
// the next image index (2..5, then wrap back around to 2)
currImageIdx = currClassIdx === (classes.length - 1)
? currImageIdx + 1
: currImageIdx
currClassIdx = (currClassIdx + 1) % classes.length
currImageIdx = (currImageIdx % 6) || 2
to = setTimeout(next, interval)
}
await next()
}
async function run() {
try {
setStatusText('loading model file...')
await faceapi.loadFaceRecognitionModel('/')
setStatusText('computing initial descriptors...')
referenceDescriptorsByClass = await initBbtFaceDescriptors(faceapi.recognitionNet)
$('#loader').hide()
runFaceRecognition()
} catch (err) {
console.error(err)
}
}
$(document).ready(function() {
renderNavBar('#navbar', 'bbt_face_recognition')
run()
})
</script>
</body>
</html>
\ No newline at end of file
@@ -2,7 +2,8 @@
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<script src="js/commons.js"></script>
<script src="js/bbt.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
@@ -52,8 +53,7 @@
}
async function onSelectionChanged(which, uri) {
const imgBuf = await fetchImage(uri)
const input = await faceapi.bufferToImage(imgBuf)
const input = await faceapi.fetchImage(uri)
const imgEl = $(`#face${which}`).get(0)
imgEl.src = input.src
descriptors[`desc${which}`] = await faceapi.computeFaceDescriptor(input)
@@ -68,7 +68,7 @@
}
$(document).ready(function() {
renderNavBar('#navbar', 'face_similarity')
renderNavBar('#navbar', 'bbt_face_similarity')
renderFaceImageSelectList(
'#selectList1',
async (uri) => {
......
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div id="facesContainer"></div>
<div class="row side-by-side">
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.7" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</div>
<script>
let minConfidence = 0.7
function onIncreaseThreshold() {
minConfidence = Math.min(faceapi.round(minConfidence + 0.1), 1.0)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onDecreaseThreshold() {
minConfidence = Math.max(faceapi.round(minConfidence - 0.1), 0.1)
$('#minConfidence').val(minConfidence)
updateResults()
}
async function loadImageFromUrl(url) {
const img = await requestExternalImage($('#imgUrlInput').val())
$('#inputImg').get(0).src = img.src
updateResults()
}
async function updateResults() {
const inputImgEl = $('#inputImg').get(0)
const { width, height } = inputImgEl
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const input = await faceapi.toNetInput(inputImgEl)
const detections = await faceapi.locateFaces(input, minConfidence)
faceapi.drawDetection('overlay', detections.map(det => det.forSize(width, height)))
const faceImages = await faceapi.extractFaces(inputImgEl, detections)
$('#facesContainer').empty()
faceImages.forEach(canvas => $('#facesContainer').append(canvas))
}
async function onSelectionChanged(uri) {
const imgBuf = await fetchImage(uri)
$(`#inputImg`).get(0).src = (await faceapi.bufferToImage(imgBuf)).src
updateResults()
}
async function run() {
await faceapi.loadFaceDetectionModel('/')
$('#loader').hide()
onSelectionChanged($('#selectList select').val())
}
$(document).ready(function() {
renderNavBar('#navbar', 'detect_and_draw_faces')
renderImageSelectList(
'#selectList',
async (uri) => {
await onSelectionChanged(uri)
},
'bbt1.jpg'
)
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.7" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinConfidence()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinConfidence()"
>
<i class="material-icons left">+</i>
</button>
</div>
</div>
<script>
let minConfidence = 0.7
let drawLines = true
function onIncreaseMinConfidence() {
minConfidence = Math.min(faceapi.round(minConfidence + 0.1), 1.0)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onDecreaseMinConfidence() {
minConfidence = Math.max(faceapi.round(minConfidence - 0.1), 0.1)
$('#minConfidence').val(minConfidence)
updateResults()
}
async function onSelectionChanged(uri) {
const imgBuf = await fetchImage(uri)
$(`#inputImg`).get(0).src = (await faceapi.bufferToImage(imgBuf)).src
updateResults()
}
async function loadImageFromUrl(url) {
const img = await requestExternalImage($('#imgUrlInput').val())
$('#inputImg').get(0).src = img.src
updateResults()
}
async function updateResults() {
const inputImgEl = $('#inputImg').get(0)
const { width, height } = inputImgEl
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const input = await faceapi.toNetInput(inputImgEl)
const locations = await faceapi.locateFaces(input, minConfidence)
const faces = await faceapi.extractFaces(input, locations)
let landmarksByFace = await Promise.all(faces.map(face => faceapi.detectLandmarks(face)))
// shift and scale the face landmarks to the face image position in the canvas
landmarksByFace = landmarksByFace.map((landmarks, i) => {
const box = locations[i].forSize(width, height).getBox()
return landmarks.forSize(box.width, box.height).shift(box.x, box.y)
})
faceapi.drawLandmarks(canvas, landmarksByFace, { lineWidth: drawLines ? 2 : 4, drawLines, color: 'red' })
faceapi.drawDetection('overlay', locations.map(det => det.forSize(width, height)))
}
async function run() {
await faceapi.loadFaceDetectionModel('/')
await faceapi.loadFaceLandmarkModel('/')
$('#loader').hide()
onSelectionChanged($('#selectList select').val())
}
$(document).ready(function() {
renderNavBar('#navbar', 'detect_and_draw_landmarks')
renderImageSelectList(
'#selectList',
async (uri) => {
await onSelectionChanged(uri)
},
'bbt1.jpg'
)
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
<p>
<input type="checkbox" id="useBatchProcessing" onchange="onChangeUseBatchProcessing(event)" />
<label for="useBatchProcessing">Use Batch Processing</label>
</p>
</div>
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.7" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn button-sm"
onclick="onDecreaseMinConfidence()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn button-sm"
onclick="onIncreaseMinConfidence()"
>
<i class="material-icons left">+</i>
</button>
<div class="row">
<label for="maxDistance">Max Descriptor Distance:</label>
<input disabled value="0.6" id="maxDistance" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn button-sm"
onclick="onDecreaseMaxDistance()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn button-sm"
onclick="onIncreaseMaxDistance()"
>
<i class="material-icons left">+</i>
</button>
</div>
</div>
<script>
let maxDistance = 0.6
let minConfidence = 0.7
let useBatchProcessing = false
let detectionNet, recognitionNet, landmarkNet
let trainDescriptorsByClass = []
function onChangeUseBatchProcessing(e) {
useBatchProcessing = $(e.target).prop('checked')
}
function onIncreaseMinConfidence() {
minConfidence = Math.min(faceapi.round(minConfidence + 0.1), 1.0)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onDecreaseMinConfidence() {
minConfidence = Math.max(faceapi.round(minConfidence - 0.1), 0.1)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onIncreaseMaxDistance() {
maxDistance = Math.min(faceapi.round(maxDistance + 0.1), 1.0)
$('#maxDistance').val(maxDistance)
updateResults()
}
function onDecreaseMaxDistance() {
maxDistance = Math.max(faceapi.round(maxDistance - 0.1), 0.1)
$('#maxDistance').val(maxDistance)
updateResults()
}
async function loadImageFromUrl(url) {
const img = await requestExternalImage($('#imgUrlInput').val())
$('#inputImg').get(0).src = img.src
updateResults()
}
async function updateResults() {
const inputImgEl = $('#inputImg').get(0)
const { width, height } = inputImgEl
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const fullFaceDescriptions = (await faceapi.allFaces(inputImgEl, minConfidence, useBatchProcessing))
.map(fd => fd.forSize(width, height))
fullFaceDescriptions.forEach(({ detection, descriptor }) => {
faceapi.drawDetection('overlay', [detection], { withScore: false })
const bestMatch = getBestMatch(trainDescriptorsByClass, descriptor)
const text = `${bestMatch.distance < maxDistance ? bestMatch.className : 'unknown'} (${bestMatch.distance})`
const { x, y, height: boxHeight } = detection.getBox()
faceapi.drawText(
canvas.getContext('2d'),
x,
y + boxHeight,
text,
Object.assign(faceapi.getDefaultDrawOptions(), { color: 'red', fontSize: 16 })
)
})
}
async function onSelectionChanged(uri) {
const imgBuf = await fetchImage(uri)
$(`#inputImg`).get(0).src = (await faceapi.bufferToImage(imgBuf)).src
updateResults()
}
async function run() {
await faceapi.loadModels('/')
trainDescriptorsByClass = await initTrainDescriptorsByClass(faceapi.recognitionNet, 1)
$('#loader').hide()
onSelectionChanged($('#selectList select').val())
}
$(document).ready(function() {
renderNavBar('#navbar', 'detect_and_recognize_faces')
renderImageSelectList(
'#selectList',
async (uri) => {
await onSelectionChanged(uri)
},
'bbt1.jpg'
)
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div id="facesContainer"></div>
<div class="row side-by-side">
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.7" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinConfidence()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinConfidence()"
>
<i class="material-icons left">+</i>
</button>
</div>
<div class="row">
<p>
<input type="checkbox" id="drawLinesCheckbox" onchange="onChangeUseMtcnn(event)" />
<label for="drawLinesCheckbox">Use Mtcnn</label>
</p>
</div>
</div>
<script>
let minConfidence = 0.7
let useMtcnn = false
function onIncreaseMinConfidence() {
minConfidence = Math.min(faceapi.round(minConfidence + 0.1), 1.0)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onDecreaseMinConfidence() {
minConfidence = Math.max(faceapi.round(minConfidence - 0.1), 0.1)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onChangeUseMtcnn(e) {
useMtcnn = $(e.target).prop('checked')
updateResults()
}
async function loadImageFromUrl(url) {
const img = await requestExternalImage($('#imgUrlInput').val())
$('#inputImg').get(0).src = img.src
updateResults()
}
async function locateAndAlignFacesWithMtcnn(inputImgEl) {
const input = await faceapi.toNetInput(inputImgEl)
const results = await faceapi.mtcnn(input, { minFaceSize: 100 })
const unalignedFaceImages = await faceapi.extractFaces(input.getInput(0), results.map(res => res.faceDetection))
const alignedFaceBoxes = results
.filter(res => res.faceDetection.score > minConfidence)
.map(res => res.faceLandmarks.align())
const alignedFaceImages = await faceapi.extractFaces(input.getInput(0), alignedFaceBoxes)
return {
unalignedFaceImages,
alignedFaceImages
}
}
async function locateAndAlignFacesWithSSD(inputImgEl) {
const input = await faceapi.toNetInput(inputImgEl)
const locations = await faceapi.locateFaces(input, minConfidence)
const unalignedFaceImages = await faceapi.extractFaces(input.getInput(0), locations)
// detect landmarks and get the aligned face image bounding boxes
const alignedFaceBoxes = await Promise.all(unalignedFaceImages.map(
async (faceCanvas, i) => {
const faceLandmarks = await faceapi.detectLandmarks(faceCanvas)
return faceLandmarks.align(locations[i])
}
))
const alignedFaceImages = await faceapi.extractFaces(input.getInput(0), alignedFaceBoxes)
return {
unalignedFaceImages,
alignedFaceImages
}
}
async function updateResults() {
const inputImgEl = $('#inputImg').get(0)
const { width, height } = inputImgEl
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const {
unalignedFaceImages,
alignedFaceImages
} = useMtcnn
? await locateAndAlignFacesWithMtcnn(inputImgEl)
: await locateAndAlignFacesWithSSD(inputImgEl)
$('#facesContainer').empty()
unalignedFaceImages.forEach(async (faceCanvas, i) => {
$('#facesContainer').append(faceCanvas)
$('#facesContainer').append(alignedFaceImages[i])
})
}
async function onSelectionChanged(uri) {
const imgBuf = await fetchImage(uri)
$(`#inputImg`).get(0).src = (await faceapi.bufferToImage(imgBuf)).src
updateResults()
}
async function run() {
await faceapi.loadFaceDetectionModel('/')
await faceapi.loadFaceLandmarkModel('/')
await faceapi.loadMtcnnModel('/')
$('#loader').hide()
onSelectionChanged($('#selectList select').val())
}
$(document).ready(function() {
renderNavBar('#navbar', 'face_alignment')
renderImageSelectList(
'#selectList',
async (uri) => {
await onSelectionChanged(uri)
},
'bbt1.jpg'
)
run()
})
</script>
</body>
</html>
\ No newline at end of file
@@ -2,7 +2,9 @@
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<script src="js/commons.js"></script>
<script src="js/faceDetectionControls.js"></script>
<script src="js/imageSelectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
@@ -11,6 +13,7 @@
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
@@ -18,7 +21,22 @@
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<!-- face_detector_selection_control -->
<div id="face_detector_selection_control" class="row input-field" style="margin-right: 20px;">
<select id="selectFaceDetector">
<option value="ssd_mobilenetv1">SSD Mobilenet V1</option>
<option value="tiny_face_detector">Tiny Face Detector</option>
<option value="mtcnn">MTCNN</option>
</select>
<label>Select Face Detector</label>
</div>
<!-- face_detector_selection_control -->
<!-- image_selection_control -->
<div id="image_selection_control"></div>
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
@@ -30,7 +48,13 @@
>
Ok
</button>
<div id="image_selection_control"></div>
<!-- image_selection_control -->
</div>
<!-- ssd_mobilenetv1_controls -->
<span id="ssd_mobilenetv1_controls">
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
@@ -38,73 +62,114 @@
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseThreshold()"
onclick="onDecreaseMinConfidence()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseThreshold()"
onclick="onIncreaseMinConfidence()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- ssd_mobilenetv1_controls -->
<!-- tiny_face_detector_controls -->
<span id="tiny_face_detector_controls">
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="inputSize">
<option value="" disabled selected>Input Size:</option>
<option value="160">160 x 160</option>
<option value="224">224 x 224</option>
<option value="320">320 x 320</option>
<option value="416">416 x 416</option>
<option value="512">512 x 512</option>
<option value="608">608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseScoreThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseScoreThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- tiny_face_detector_controls -->
<script>
let minConfidence = 0.7
let result
<!-- mtcnn_controls -->
<span id="mtcnn_controls">
<div class="row side-by-side">
<div class="row">
<label for="minFaceSize">Minimum Face Size:</label>
<input disabled value="20" id="minFaceSize" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinFaceSize()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinFaceSize()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- mtcnn_controls -->
function onIncreaseThreshold() {
minConfidence = Math.min(faceapi.round(minConfidence + 0.1), 1.0)
$('#minConfidence').val(minConfidence)
updateResults()
}
</body>
function onDecreaseThreshold() {
minConfidence = Math.max(faceapi.round(minConfidence - 0.1), 0.1)
$('#minConfidence').val(minConfidence)
updateResults()
<script>
async function updateResults() {
if (!isFaceDetectionModelLoaded()) {
return
}
async function loadImageFromUrl(url) {
const img = await requestExternalImage($('#imgUrlInput').val())
$('#inputImg').get(0).src = img.src
updateResults()
const inputImgEl = $('#inputImg').get(0)
const options = getFaceDetectorOptions()
const detections = await faceapi.detectAllFaces(inputImgEl, options)
drawDetections(detections)
}
async function updateResults() {
const inputImgEl = $('#inputImg').get(0)
const { width, height } = inputImgEl
function drawDetections(detections) {
const { width, height } = $('#inputImg').get(0)
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
result = await faceapi.locateFaces(inputImgEl, minConfidence)
faceapi.drawDetection('overlay', result.map(det => det.forSize(width, height)))
}
async function onSelectionChanged(uri) {
const imgBuf = await fetchImage(uri)
$(`#inputImg`).get(0).src = (await faceapi.bufferToImage(imgBuf)).src
updateResults()
faceapi.drawDetection(canvas, detections.map(det => det.forSize(width, height)))
}
async function run() {
await faceapi.loadFaceDetectionModel('/')
$('#loader').hide()
onSelectionChanged($('#selectList select').val())
// load face detection model
await changeFaceDetector(selectedFaceDetector)
// start processing image
updateResults()
}
$(document).ready(function() {
renderNavBar('#navbar', 'face_detection')
renderImageSelectList(
'#selectList',
async (uri) => {
await onSelectionChanged(uri)
},
'bbt1.jpg'
)
initImageSelectionControls()
initFaceDetectionControls()
run()
})
</script>
......
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<video src="media/bbt.mp4" onplay="onPlay(this)" id="inputVideo" autoplay muted></video>
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.7" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="time">Time:</label>
<input disabled value="-" id="time" type="text" class="bold">
</div>
<div class="row">
<label for="fps">Estimated Fps:</label>
<input disabled value="-" id="fps" type="text" class="bold">
</div>
</div>
</div>
<script>
let minConfidence = 0.7
let modelLoaded = false
let result
let forwardTimes = []
function updateTimeStats(timeInMs) {
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
}
function onIncreaseThreshold() {
minConfidence = Math.min(faceapi.round(minConfidence + 0.1), 1.0)
$('#minConfidence').val(minConfidence)
}
function onDecreaseThreshold() {
minConfidence = Math.max(faceapi.round(minConfidence - 0.1), 0.1)
$('#minConfidence').val(minConfidence)
}
async function onPlay(videoEl) {
if(videoEl.paused || videoEl.ended || !modelLoaded)
return false
const { width, height } = faceapi.getMediaDimensions(videoEl)
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const ts = Date.now()
result = await faceapi.locateFaces(videoEl, minConfidence)
updateTimeStats(Date.now() - ts)
faceapi.drawDetection('overlay', result.map(det => det.forSize(width, height)))
setTimeout(() => onPlay(videoEl))
}
async function run() {
await faceapi.loadFaceDetectionModel('/')
modelLoaded = true
onPlay($('#inputVideo').get(0))
$('#loader').hide()
}
$(document).ready(function() {
renderNavBar('#navbar', 'face_detection_video')
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/faceDetectionControls.js"></script>
<script src="js/imageSelectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div id="facesContainer"></div>
<div class="row side-by-side">
<!-- face_detector_selection_control -->
<div id="face_detector_selection_control" class="row input-field" style="margin-right: 20px;">
<select id="selectFaceDetector">
<option value="ssd_mobilenetv1">SSD Mobilenet V1</option>
<option value="tiny_face_detector">Tiny Face Detector</option>
<option value="mtcnn">MTCNN</option>
</select>
<label>Select Face Detector</label>
</div>
<!-- face_detector_selection_control -->
<!-- image_selection_control -->
<div id="image_selection_control"></div>
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
<div id="image_selection_control"></div>
<!-- image_selection_control -->
</div>
<!-- ssd_mobilenetv1_controls -->
<span id="ssd_mobilenetv1_controls">
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.7" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinConfidence()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinConfidence()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- ssd_mobilenetv1_controls -->
<!-- tiny_face_detector_controls -->
<span id="tiny_face_detector_controls">
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="inputSize">
<option value="" disabled selected>Input Size:</option>
<option value="160">160 x 160</option>
<option value="224">224 x 224</option>
<option value="320">320 x 320</option>
<option value="416">416 x 416</option>
<option value="512">512 x 512</option>
<option value="608">608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseScoreThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseScoreThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- tiny_face_detector_controls -->
<!-- mtcnn_controls -->
<span id="mtcnn_controls">
<div class="row side-by-side">
<div class="row">
<label for="minFaceSize">Minimum Face Size:</label>
<input disabled value="20" id="minFaceSize" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinFaceSize()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinFaceSize()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- mtcnn_controls -->
</div>
<script>
async function updateResults() {
if (!isFaceDetectionModelLoaded()) {
return
}
const inputImgEl = $('#inputImg').get(0)
const options = getFaceDetectorOptions()
const detections = await faceapi.detectAllFaces(inputImgEl, options)
const faceImages = await faceapi.extractFaces(inputImgEl, detections)
displayExtractedFaces(faceImages)
}
function displayExtractedFaces(faceImages) {
const canvas = $('#overlay').get(0)
const { width, height } = $('#inputImg').get(0)
canvas.width = width
canvas.height = height
$('#facesContainer').empty()
faceImages.forEach(faceCanvas => $('#facesContainer').append(faceCanvas))
}
async function run() {
// load face detection model
await changeFaceDetector(selectedFaceDetector)
// start processing image
updateResults()
}
$(document).ready(function() {
renderNavBar('#navbar', 'face_extraction')
initImageSelectionControls()
initFaceDetectionControls()
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.7" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</div>
<script>
let minConfidence = 0.7
let minFaceSize = 50
let scaleFactor = 0.709
let maxNumScales = 10
let stage1Threshold = 0.7
let stage2Threshold = 0.7
let stage3Threshold = 0.7
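// The MTCNN forward parameters roughly mean: minFaceSize is the smallest face
// (in px) the image pyramid will look for, scaleFactor is the ratio between
// consecutive pyramid levels, maxNumScales caps the number of levels, and the
// three stage thresholds filter proposals after the P-Net, R-Net and O-Net
// stages respectively. A lower minFaceSize finds smaller faces but is slower.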
function onIncreaseThreshold() {
minConfidence = Math.min(faceapi.round(minConfidence + 0.1), 1.0)
$('#minConfidence').val(minConfidence)
updateResults()
}
function onDecreaseThreshold() {
minConfidence = Math.max(faceapi.round(minConfidence - 0.1), 0.1)
$('#minConfidence').val(minConfidence)
updateResults()
}
async function loadImageFromUrl(url) {
const img = await requestExternalImage($('#imgUrlInput').val())
$('#inputImg').get(0).src = img.src
updateResults()
}
async function updateResults() {
const inputImgEl = $('#inputImg').get(0)
const { width, height } = inputImgEl
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const mtcnnParams = {
minFaceSize,
scaleFactor,
maxNumScales,
scoreThresholds: [stage1Threshold, stage2Threshold, stage3Threshold]
}
const results = await faceapi.mtcnn(inputImgEl, mtcnnParams)
if (results) {
results.forEach(({ faceDetection, faceLandmarks }) => {
if (faceDetection.score < minConfidence) {
return
}
faceapi.drawDetection('overlay', faceDetection.forSize(width, height))
faceapi.drawLandmarks('overlay', faceLandmarks.forSize(width, height), { lineWidth: 4, color: 'red' })
})
}
}
async function onSelectionChanged(uri) {
const imgBuf = await fetchImage(uri)
$(`#inputImg`).get(0).src = (await faceapi.bufferToImage(imgBuf)).src
updateResults()
}
async function run() {
await faceapi.loadMtcnnModel('/')
$('#loader').hide()
onSelectionChanged($('#selectList select').val())
}
$(document).ready(function() {
renderNavBar('#navbar', 'mtcnn_face_detection')
renderImageSelectList(
'#selectList',
async (uri) => {
await onSelectionChanged(uri)
},
'bbt1.jpg'
)
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<video src="media/bbt.mp4" onplay="onPlay(this)" id="inputVideo" autoplay muted></video>
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div class="row">
<label for="minFaceSize">Minimum Face Size:</label>
<input disabled value="80" id="minFaceSize" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinFaceSize()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinFaceSize()"
>
<i class="material-icons left">+</i>
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="time">Time:</label>
<input disabled value="-" id="time" type="text" class="bold">
</div>
<div class="row">
<label for="fps">Estimated Fps:</label>
<input disabled value="-" id="fps" type="text" class="bold">
</div>
</div>
</div>
<script>
let modelLoaded = false
let minFaceSize = 80
let minConfidence = 0.9
let forwardTimes = []
function onIncreaseMinFaceSize() {
minFaceSize = Math.min(faceapi.round(minFaceSize + 20), 200)
$('#minFaceSize').val(minFaceSize)
}
function onDecreaseMinFaceSize() {
minFaceSize = Math.max(faceapi.round(minFaceSize - 20), 20)
$('#minFaceSize').val(minFaceSize)
}
function updateTimeStats(timeInMs) {
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
}
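// updateTimeStats keeps a rolling window of the last 30 forward-pass times and
// displays their average; the FPS estimate is simply 1000 / averageTimeInMs.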
async function onPlay(videoEl) {
if(videoEl.paused || videoEl.ended || !modelLoaded)
return false
const { width, height } = faceapi.getMediaDimensions(videoEl)
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const mtcnnParams = {
minFaceSize
}
const ts = Date.now()
const results = await faceapi.mtcnn(videoEl, mtcnnParams)
updateTimeStats(Date.now() - ts)
if (results) {
results.forEach(({ faceDetection, faceLandmarks }) => {
if (faceDetection.score < minConfidence) {
return
}
faceapi.drawDetection('overlay', faceDetection.forSize(width, height))
faceapi.drawLandmarks('overlay', faceLandmarks.forSize(width, height), { lineWidth: 4, color: 'red' })
})
}
setTimeout(() => onPlay(videoEl))
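// setTimeout without a delay queues the next onPlay pass as soon as the current
// one has finished, so the loop adapts to however long mtcnn takes per frame;
// requestAnimationFrame would be an alternative if drawing should be tied to the
// display refresh instead.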
}
async function run() {
await faceapi.loadMtcnnModel('/')
modelLoaded = true
onPlay($('#inputVideo').get(0))
$('#loader').hide()
}
$(document).ready(function() {
renderNavBar('#navbar', 'mtcnn_face_detection_video')
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<video onplay="onPlay(this)" id="inputVideo" autoplay muted></video>
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div class="row">
<label for="minFaceSize">Minimum Face Size:</label>
<input disabled value="200" id="minFaceSize" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinFaceSize()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinFaceSize()"
>
<i class="material-icons left">+</i>
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="time">Time:</label>
<input disabled value="-" id="time" type="text" class="bold">
</div>
<div class="row">
<label for="fps">Estimated Fps:</label>
<input disabled value="-" id="fps" type="text" class="bold">
</div>
</div>
</div>
<script>
let modelLoaded = false
let minFaceSize = 200
let minConfidence = 0.9
let forwardTimes = []
function onIncreaseMinFaceSize() {
minFaceSize = Math.min(faceapi.round(minFaceSize + 50), 300)
$('#minFaceSize').val(minFaceSize)
}
function onDecreaseMinFaceSize() {
minFaceSize = Math.max(faceapi.round(minFaceSize - 50), 50)
$('#minFaceSize').val(minFaceSize)
}
function updateTimeStats(timeInMs) {
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
}
async function onPlay(videoEl) {
if(videoEl.paused || videoEl.ended || !modelLoaded)
return false
const { width, height } = faceapi.getMediaDimensions(videoEl)
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const mtcnnParams = {
minFaceSize
}
const { results, stats } = await faceapi.nets.mtcnn.forwardWithStats(videoEl, mtcnnParams)
updateTimeStats(stats.total)
if (results) {
results.forEach(({ faceDetection, faceLandmarks }) => {
if (faceDetection.score < minConfidence) {
return
}
faceapi.drawDetection('overlay', faceDetection.forSize(width, height))
faceapi.drawLandmarks('overlay', faceLandmarks.forSize(width, height), { lineWidth: 4, color: 'red' })
})
}
setTimeout(() => onPlay(videoEl))
}
async function run() {
await faceapi.loadMtcnnModel('/')
modelLoaded = true
const videoEl = $('#inputVideo').get(0)
navigator.getUserMedia(
{ video: {} },
stream => videoEl.srcObject = stream,
err => console.error(err)
)
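// navigator.getUserMedia is the legacy callback API; in browsers that expose
// navigator.mediaDevices, the promise-based equivalent would be something like:
//
// navigator.mediaDevices.getUserMedia({ video: {} })
//   .then(stream => videoEl.srcObject = stream)
//   .catch(err => console.error(err))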
$('#loader').hide()
}
$(document).ready(function() {
renderNavBar('#navbar', 'mtcnn_face_detection_webcam')
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<video onplay="onPlay(this)" id="inputVideo" autoplay muted></video>
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div class="row">
<label for="minFaceSize">Minimum Face Size:</label>
<input disabled value="200" id="minFaceSize" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinFaceSize()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinFaceSize()"
>
<i class="material-icons left">+</i>
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="time">Time:</label>
<input disabled value="-" id="time" type="text" class="bold">
</div>
<div class="row">
<label for="fps">Estimated Fps:</label>
<input disabled value="-" id="fps" type="text" class="bold">
</div>
</div>
</div>
<script>
let modelLoaded = false
let minFaceSize = 200
let maxDistance = 0.6
let minConfidence = 0.9
let trainDescriptorsByClass = []
let forwardTimes = []
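// maxDistance is the decision boundary for the recognition step below: the
// euclidean distance between the query face descriptor and the best matching
// reference descriptor has to stay below it, otherwise the face is labeled
// 'unknown'. 0.6 is the threshold commonly suggested for face-api.js 128
// dimensional descriptors, but it can be tuned per use case.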
function onIncreaseMinFaceSize() {
minFaceSize = Math.min(faceapi.round(minFaceSize + 50), 300)
$('#minFaceSize').val(minFaceSize)
}
function onDecreaseMinFaceSize() {
minFaceSize = Math.max(faceapi.round(minFaceSize - 50), 50)
$('#minFaceSize').val(minFaceSize)
}
function updateTimeStats(timeInMs) {
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
}
async function onPlay(videoEl) {
if(videoEl.paused || videoEl.ended || !modelLoaded)
return false
const { width, height } = faceapi.getMediaDimensions(videoEl)
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const mtcnnParams = {
minFaceSize
}
const ts = Date.now()
const fullFaceDescriptions = (await faceapi.allFacesMtcnn(videoEl, mtcnnParams))
.map(fd => fd.forSize(width, height))
updateTimeStats(Date.now() - ts)
fullFaceDescriptions.forEach(({ detection, landmarks, descriptor }) => {
faceapi.drawDetection('overlay', [detection], { withScore: false })
faceapi.drawLandmarks('overlay', landmarks.forSize(width, height), { lineWidth: 4, color: 'red' })
const bestMatch = getBestMatch(trainDescriptorsByClass, descriptor)
const text = `${bestMatch.distance < maxDistance ? bestMatch.className : 'unknown'} (${bestMatch.distance})`
const { x, y, height: boxHeight } = detection.getBox()
faceapi.drawText(
canvas.getContext('2d'),
x,
y + boxHeight,
text,
Object.assign(faceapi.getDefaultDrawOptions(), { color: 'red', fontSize: 16 })
)
})
setTimeout(() => onPlay(videoEl))
}
async function run() {
await faceapi.loadMtcnnModel('/')
await faceapi.loadFaceRecognitionModel('/')
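// The load calls above fetch the model manifests and weight files relative to
// the given path, so this page needs the mtcnn and face recognition model files
// to be served from the web root ('/').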
// init reference data, e.g. compute a face descriptor for each class
trainDescriptorsByClass = await initTrainDescriptorsByClass(faceapi.recognitionNet)
modelLoaded = true
// try to access users webcam and stream the images
// to the video element
const videoEl = $('#inputVideo').get(0)
navigator.getUserMedia(
{ video: {} },
stream => videoEl.srcObject = stream,
err => console.error(err)
)
$('#loader').hide()
}
$(document).ready(function() {
renderNavBar('#navbar', 'mtcnn_face_recognition_webcam')
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
</div>
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="sizeType">
<option value="" disabled selected>Input Size:</option>
<option value="xs">XS: 224 x 224</option>
<option value="sm">SM: 320 x 320</option>
<option value="md">MD: 416 x 416</option>
<option value="lg">LG: 608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</div>
<script>
let scoreThreshold = 0.5
let sizeType = 'lg'
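// sizeType selects the Tiny Yolov2 input resolution (per the select above:
// xs = 224, sm = 320, md = 416, lg = 608). Larger inputs tend to detect smaller
// faces more reliably at the cost of a slower forward pass.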
function onIncreaseThreshold() {
scoreThreshold = Math.min(faceapi.round(scoreThreshold + 0.1), 1.0)
$('#scoreThreshold').val(scoreThreshold)
updateResults()
}
function onDecreaseThreshold() {
scoreThreshold = Math.max(faceapi.round(scoreThreshold - 0.1), 0.1)
$('#scoreThreshold').val(scoreThreshold)
updateResults()
}
function onSizeTypeChanged(e, c) {
sizeType = e.target.value
$('#sizeType').val(sizeType)
updateResults()
}
async function loadImageFromUrl(url) {
const img = await requestExternalImage($('#imgUrlInput').val())
$('#inputImg').get(0).src = img.src
updateResults()
}
async function updateResults() {
const inputImgEl = $('#inputImg').get(0)
const { width, height } = inputImgEl
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const forwardParams = {
inputSize: sizeType,
scoreThreshold
}
const detections = await faceapi.tinyYolov2(inputImgEl, forwardParams)
faceapi.drawDetection('overlay', detections.map(det => det.forSize(width, height)))
}
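// The width/height read from the <img> element above are its displayed
// dimensions (the image is capped at max-width: 800px); forSize rescales each
// detection to those dimensions so the boxes line up with the equally sized
// overlay canvas.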
async function onSelectionChanged(uri) {
const imgBuf = await fetchImage(uri)
$(`#inputImg`).get(0).src = (await faceapi.bufferToImage(imgBuf)).src
updateResults()
}
async function run() {
await faceapi.loadTinyYolov2Model('/')
$('#loader').hide()
onSelectionChanged($('#selectList select').val())
}
$(document).ready(function() {
renderNavBar('#navbar', 'tiny_yolov2_face_detection')
renderImageSelectList(
'#selectList',
async (uri) => {
await onSelectionChanged(uri)
},
'bbt1.jpg'
)
const sizeTypeSelect = $('#sizeType')
sizeTypeSelect.val(sizeType)
sizeTypeSelect.on('change', onSizeTypeChanged)
sizeTypeSelect.material_select()
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<video src="media/bbt.mp4" onplay="onPlay(this)" id="inputVideo" autoplay muted></video>
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="sizeType">
<option value="" disabled selected>Input Size:</option>
<option value="xs">XS: 224 x 224</option>
<option value="sm">SM: 320 x 320</option>
<option value="md">MD: 416 x 416</option>
<option value="lg">LG: 608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="time">Time:</label>
<input disabled value="-" id="time" type="text" class="bold">
</div>
<div class="row">
<label for="fps">Estimated Fps:</label>
<input disabled value="-" id="fps" type="text" class="bold">
</div>
</div>
</div>
<script>
let scoreThreshold = 0.5
let sizeType = 'md'
let modelLoaded = false
let forwardTimes = []
function updateTimeStats(timeInMs) {
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
}
function onIncreaseThreshold() {
scoreThreshold = Math.min(faceapi.round(scoreThreshold + 0.1), 1.0)
$('#scoreThreshold').val(scoreThreshold)
}
function onDecreaseThreshold() {
scoreThreshold = Math.max(faceapi.round(scoreThreshold - 0.1), 0.1)
$('#scoreThreshold').val(scoreThreshold)
}
function onSizeTypeChanged(e, c) {
sizeType = e.target.value
$('#sizeType').val(sizeType)
}
async function onPlay(videoEl) {
if(videoEl.paused || videoEl.ended || !modelLoaded)
return false
const { width, height } = faceapi.getMediaDimensions(videoEl)
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const forwardParams = {
inputSize: sizeType,
scoreThreshold
}
const ts = Date.now()
const result = await faceapi.tinyYolov2(videoEl, forwardParams)
updateTimeStats(Date.now() - ts)
faceapi.drawDetection('overlay', result.map(det => det.forSize(width, height)))
setTimeout(() => onPlay(videoEl))
}
async function loadNetWeights(uri) {
return new Float32Array(await (await fetch(uri)).arrayBuffer())
}
async function run() {
await faceapi.loadTinyYolov2Model('/')
modelLoaded = true
onPlay($('#inputVideo').get(0))
$('#loader').hide()
}
$(document).ready(function() {
renderNavBar('#navbar', 'tiny_yolov2_face_detection_video')
const sizeTypeSelect = $('#sizeType')
sizeTypeSelect.val(sizeType)
sizeTypeSelect.on('change', onSizeTypeChanged)
sizeTypeSelect.material_select()
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<video onplay="onPlay(this)" id="inputVideo" autoplay muted></video>
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="sizeType">
<option value="" disabled selected>Input Size:</option>
<option value="160">160 x 160</option>
<option value="224">224 x 224</option>
<option value="320">320 x 320</option>
<option value="416">416 x 416</option>
<option value="608">608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="time">Time:</label>
<input disabled value="-" id="time" type="text" class="bold">
</div>
<div class="row">
<label for="fps">Estimated Fps:</label>
<input disabled value="-" id="fps" type="text" class="bold">
</div>
</div>
</div>
<script>
let scoreThreshold = 0.5
let sizeType = '160'
let modelLoaded = false
let forwardTimes = []
function updateTimeStats(timeInMs) {
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
}
function onIncreaseThreshold() {
scoreThreshold = Math.min(faceapi.round(scoreThreshold + 0.1), 1.0)
$('#scoreThreshold').val(scoreThreshold)
}
function onDecreaseThreshold() {
scoreThreshold = Math.max(faceapi.round(scoreThreshold - 0.1), 0.1)
$('#scoreThreshold').val(scoreThreshold)
}
function onSizeTypeChanged(e, c) {
sizeType = e.target.value
$('#sizeType').val(sizeType)
}
async function onPlay(videoEl) {
if(videoEl.paused || videoEl.ended || !modelLoaded)
return false
const { width, height } = faceapi.getMediaDimensions(videoEl)
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const forwardParams = {
inputSize: parseInt(sizeType),
scoreThreshold
}
const ts = Date.now()
const result = await faceapi.tinyYolov2(videoEl, forwardParams)
updateTimeStats(Date.now() - ts)
faceapi.drawDetection('overlay', result.map(det => det.forSize(width, height)))
setTimeout(() => onPlay(videoEl))
}
async function loadNetWeights(uri) {
return new Float32Array(await (await fetch(uri)).arrayBuffer())
}
async function run() {
await faceapi.loadTinyYolov2Model('/')
modelLoaded = true
const videoEl = $('#inputVideo').get(0)
navigator.getUserMedia(
{ video: {} },
stream => videoEl.srcObject = stream,
err => console.error(err)
)
onPlay($('#inputVideo').get(0))
$('#loader').hide()
}
$(document).ready(function() {
renderNavBar('#navbar', 'tiny_yolov2_face_detection_webcam')
const sizeTypeSelect = $('#sizeType')
sizeTypeSelect.val(sizeType)
sizeTypeSelect.on('change', onSizeTypeChanged)
sizeTypeSelect.material_select()
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
<p>
<input type="checkbox" id="useBatchProcessing" onchange="onChangeUseBatchProcessing(event)" />
<label for="useBatchProcessing">Use Batch Processing</label>
</p>
</div>
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="sizeType">
<option value="" disabled selected>Input Size:</option>
<option value="xs">XS: 224 x 224</option>
<option value="sm">SM: 320 x 320</option>
<option value="md">MD: 416 x 416</option>
<option value="lg">LG: 608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
<div class="row side-by-side">
<div class="row">
<label for="maxDistance">Max Descriptor Distance:</label>
<input disabled value="0.6" id="maxDistance" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn button-sm"
onclick="onDecreaseMaxDistance()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn button-sm"
onclick="onIncreaseMaxDistance()"
>
<i class="material-icons left">+</i>
</button>
</div>
</div>
<script>
let maxDistance = 0.6
let useBatchProcessing = false
let trainDescriptorsByClass = []
let scoreThreshold = 0.5
let sizeType = 'lg'
function onIncreaseThreshold() {
scoreThreshold = Math.min(faceapi.round(scoreThreshold + 0.1), 1.0)
$('#scoreThreshold').val(scoreThreshold)
updateResults()
}
function onDecreaseThreshold() {
scoreThreshold = Math.max(faceapi.round(scoreThreshold - 0.1), 0.1)
$('#scoreThreshold').val(scoreThreshold)
updateResults()
}
function onSizeTypeChanged(e, c) {
sizeType = e.target.value
$('#sizeType').val(sizeType)
updateResults()
}
function onChangeUseBatchProcessing(e) {
useBatchProcessing = $(e.target).prop('checked')
}
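// useBatchProcessing is passed straight through to faceapi.allFacesTinyYolov2
// below; presumably it makes the landmark and descriptor nets process all
// detected faces in one batched forward pass instead of one face at a time.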
function onIncreaseMaxDistance() {
maxDistance = Math.min(faceapi.round(maxDistance + 0.1), 1.0)
$('#maxDistance').val(maxDistance)
updateResults()
}
function onDecreaseMaxDistance() {
maxDistance = Math.max(faceapi.round(maxDistance - 0.1), 0.1)
$('#maxDistance').val(maxDistance)
updateResults()
}
async function loadImageFromUrl(url) {
const img = await requestExternalImage($('#imgUrlInput').val())
$('#inputImg').get(0).src = img.src
updateResults()
}
async function updateResults() {
const inputImgEl = $('#inputImg').get(0)
const { width, height } = inputImgEl
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const forwardParams = {
inputSize: sizeType,
scoreThreshold
}
const fullFaceDescriptions = (await faceapi.allFacesTinyYolov2(inputImgEl, forwardParams, useBatchProcessing))
.map(fd => fd.forSize(width, height))
fullFaceDescriptions.forEach(({ detection, descriptor }) => {
faceapi.drawDetection('overlay', [detection], { withScore: false })
const bestMatch = getBestMatch(trainDescriptorsByClass, descriptor)
const text = `${bestMatch.distance < maxDistance ? bestMatch.className : 'unknown'} (${bestMatch.distance})`
const { x, y, height: boxHeight } = detection.getBox()
faceapi.drawText(
canvas.getContext('2d'),
x,
y + boxHeight,
text,
Object.assign(faceapi.getDefaultDrawOptions(), { color: 'red', fontSize: 16 })
)
})
}
async function onSelectionChanged(uri) {
const imgBuf = await fetchImage(uri)
$(`#inputImg`).get(0).src = (await faceapi.bufferToImage(imgBuf)).src
updateResults()
}
async function run() {
await faceapi.loadTinyYolov2Model('/')
await faceapi.loadFaceLandmarkModel('/')
await faceapi.loadFaceRecognitionModel('/')
trainDescriptorsByClass = await initTrainDescriptorsByClass(faceapi.recognitionNet, 1)
$('#loader').hide()
onSelectionChanged($('#selectList select').val())
}
$(document).ready(function() {
renderNavBar('#navbar', 'tiny_yolov2_face_recognition')
renderImageSelectList(
'#selectList',
async (uri) => {
await onSelectionChanged(uri)
},
'bbt1.jpg'
)
const sizeTypeSelect = $('#sizeType')
sizeTypeSelect.val(sizeType)
sizeTypeSelect.on('change', onSizeTypeChanged)
sizeTypeSelect.material_select()
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/faceDetectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<video src="media/bbt.mp4" id="inputVideo" autoplay muted loop></video>
<canvas id="overlay" />
</div>
<!-- fps_meter -->
<div id="fps_meter" class="row side-by-side">
<div class="row">
<label for="time">Time:</label>
<input disabled value="-" id="time" type="text" class="bold">
</div>
<div class="row">
<label for="fps">Estimated Fps:</label>
<input disabled value="-" id="fps" type="text" class="bold">
</div>
</div>
<!-- fps_meter -->
<!-- face_detector_selection_control -->
<div id="face_detector_selection_control" class="row input-field" style="margin-right: 20px;">
<select id="selectFaceDetector">
<option value="ssd_mobilenetv1">SSD Mobilenet V1</option>
<option value="tiny_face_detector">Tiny Face Detector</option>
<option value="mtcnn">MTCNN</option>
</select>
<label>Select Face Detector</label>
</div>
<!-- face_detector_selection_control -->
<!-- ssd_mobilenetv1_controls -->
<span id="ssd_mobilenetv1_controls">
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.7" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinConfidence()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinConfidence()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- ssd_mobilenetv1_controls -->
<!-- tiny_face_detector_controls -->
<span id="tiny_face_detector_controls">
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="inputSize">
<option value="" disabled selected>Input Size:</option>
<option value="160">160 x 160</option>
<option value="224">224 x 224</option>
<option value="320">320 x 320</option>
<option value="416">416 x 416</option>
<option value="512">512 x 512</option>
<option value="608">608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseScoreThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseScoreThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- tiny_face_detector_controls -->
<!-- mtcnn_controls -->
<span id="mtcnn_controls">
<div class="row side-by-side">
<div class="row">
<label for="minFaceSize">Minimum Face Size:</label>
<input disabled value="20" id="minFaceSize" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinFaceSize()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinFaceSize()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- mtcnn_controls -->
</div>
<script>
let forwardTimes = []
function updateTimeStats(timeInMs) {
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
}
async function onPlay(videoEl) {
if(videoEl.paused || videoEl.ended || !isFaceDetectionModelLoaded())
return setTimeout(() => onPlay(videoEl))
const options = getFaceDetectorOptions()
const ts = Date.now()
const detections = await faceapi.detectAllFaces(videoEl, options)
updateTimeStats(Date.now() - ts)
// draw results
const canvas = $('#overlay').get(0)
const { width, height } = faceapi.getMediaDimensions(videoEl)
canvas.width = width
canvas.height = height
faceapi.drawDetection(canvas, detections.map(det => det.forSize(width, height)))
setTimeout(() => onPlay(videoEl))
}
async function run() {
// load face detection model
await changeFaceDetector(TINY_FACE_DETECTOR)
changeInputSize(416)
// start processing frames
onPlay($('#inputVideo').get(0))
}
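// TINY_FACE_DETECTOR, changeFaceDetector and changeInputSize are presumably
// provided by js/faceDetectionControls.js included in the head. updateResults
// below is a deliberate no-op: the shared controls presumably call it when a
// setting changes, but this demo re-runs detection on every frame in onPlay
// anyway.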
function updateResults() {}
$(document).ready(function() {
renderNavBar('#navbar', 'video_face_tracking')
initFaceDetectionControls()
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/faceDetectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<video onplay="onPlay(this)" id="inputVideo" autoplay muted></video>
<canvas id="overlay" />
</div>
<!-- fps_meter -->
<div id="fps_meter" class="row side-by-side">
<div class="row">
<label for="time">Time:</label>
<input disabled value="-" id="time" type="text" class="bold">
</div>
<div class="row">
<label for="fps">Estimated Fps:</label>
<input disabled value="-" id="fps" type="text" class="bold">
</div>
</div>
<!-- fps_meter -->
<!-- face_detector_selection_control -->
<div id="face_detector_selection_control" class="row input-field" style="margin-right: 20px;">
<select id="selectFaceDetector">
<option value="ssd_mobilenetv1">SSD Mobilenet V1</option>
<option value="tiny_face_detector">Tiny Face Detector</option>
<option value="mtcnn">MTCNN</option>
</select>
<label>Select Face Detector</label>
</div>
<!-- face_detector_selection_control -->
<!-- ssd_mobilenetv1_controls -->
<span id="ssd_mobilenetv1_controls">
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.7" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinConfidence()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinConfidence()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- ssd_mobilenetv1_controls -->
<!-- tiny_face_detector_controls -->
<span id="tiny_face_detector_controls">
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="inputSize">
<option value="" disabled selected>Input Size:</option>
<option value="128">128 x 128</option>
<option value="160">160 x 160</option>
<option value="224">224 x 224</option>
<option value="320">320 x 320</option>
<option value="416">416 x 416</option>
<option value="512">512 x 512</option>
<option value="608">608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseScoreThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseScoreThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- tiny_face_detector_controls -->
<!-- mtcnn_controls -->
<span id="mtcnn_controls">
<div class="row side-by-side">
<div class="row">
<label for="minFaceSize">Minimum Face Size:</label>
<input disabled value="20" id="minFaceSize" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinFaceSize()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinFaceSize()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- mtcnn_controls -->
</div>
<script>
let forwardTimes = []
function updateTimeStats(timeInMs) {
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
}
async function onPlay(videoEl) {
if(videoEl.paused || videoEl.ended || !isFaceDetectionModelLoaded())
return setTimeout(() => onPlay(videoEl))
const options = getFaceDetectorOptions()
const ts = Date.now()
const detections = await faceapi.detectAllFaces(videoEl, options)
updateTimeStats(Date.now() - ts)
// draw results
const canvas = $('#overlay').get(0)
const { width, height } = faceapi.getMediaDimensions(videoEl)
canvas.width = width
canvas.height = height
faceapi.drawDetection(canvas, detections.map(det => det.forSize(width, height)))
setTimeout(() => onPlay(videoEl))
}
async function run() {
// load face detection model
await changeFaceDetector(TINY_FACE_DETECTOR)
changeInputSize(128)
// try to access users webcam and stream the images
// to the video element
const videoEl = $('#inputVideo').get(0)
navigator.getUserMedia(
{ video: {} },
stream => videoEl.srcObject = stream,
err => console.error(err)
)
}
function updateResults() {}
$(document).ready(function() {
renderNavBar('#navbar', 'webcam_face_tracking')
initFaceDetectionControls()
run()
})
</script>
</body>
</html>
\ No newline at end of file