Unverified commit 68ff8530 by Vincent Mühler, committed by GitHub

Merge pull request #179 from justadudewhohacks/expressions

face expression recognition
parents 6094f36c a72ec31c
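In short, this PR adds a face expression classifier (FaceExpressionNet) on top of a shared face feature extractor, a withFaceExpressions() step for the detection task chain, drawing helpers, and browser plus node examples. A minimal sketch of the browser-side usage, assembled from the example pages in this diff (the element id and weight locations follow those examples):

async function recognizeExpressions() {
  // load a face detector and the new face expression model (weights served at '/')
  await faceapi.loadSsdMobilenetv1Model('/')
  await faceapi.loadFaceExpressionModel('/')
  // detect faces and attach expression predictions to each result
  const input = document.getElementById('inputImg')
  const results = await faceapi.detectAllFaces(input).withFaceExpressions()
  // each result carries { detection, expressions }, where expressions is a list
  // of the 7 { expression, probability } predictions
  return results
}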
......@@ -25,8 +25,16 @@ async function requestExternalImage(imageUrl) {
function renderNavBar(navbarId, exampleUri) {
const examples = [
{
uri: 'face_and_landmark_detection',
name: 'Face And Landmark Detection'
uri: 'face_detection',
name: 'Face Detection'
},
{
uri: 'face_landmark_detection',
name: 'Face Landmark Detection'
},
{
uri: 'face_expression_recognition',
name: 'Face Expression Recognition'
},
{
uri: 'face_recognition',
......@@ -41,8 +49,16 @@ function renderNavBar(navbarId, exampleUri) {
name: 'Video Face Tracking'
},
{
uri: 'webcam_face_tracking',
name: 'Webcam Face Tracking'
uri: 'webcam_face_detection',
name: 'Webcam Face Detection'
},
{
uri: 'webcam_face_landmark_detection',
name: 'Webcam Face Landmark Detection'
},
{
uri: 'webcam_face_expression_recognition',
name: 'Webcam Face Expression Recognition'
},
{
uri: 'bbt_face_landmark_detection',
......@@ -112,7 +128,7 @@ function renderNavBar(navbarId, exampleUri) {
li.style.background='#b0b0b0'
}
const a = document.createElement('a')
a.classList.add('waves-effect', 'waves-light')
a.classList.add('waves-effect', 'waves-light', 'pad-sides-sm')
a.href = ex.uri
const span = document.createElement('span')
span.innerHTML = ex.name
......@@ -123,7 +139,7 @@ function renderNavBar(navbarId, exampleUri) {
})
$('.button-collapse').sideNav({
menuWidth: 240
menuWidth: 260
})
}
......
......@@ -7,7 +7,7 @@ function resizeCanvasAndResults(dimensions, canvas, results) {
// resize detections (and landmarks) in case displayed image is smaller than
// original size
return results.map(res => res.forSize(width, height))
return faceapi.resizeResults(results, { width, height })
}
function drawDetections(dimensions, canvas, detections) {
......@@ -30,3 +30,13 @@ function drawLandmarks(dimensions, canvas, results, withBoxes = true) {
}
faceapi.drawLandmarks(canvas, faceLandmarks, drawLandmarksOptions)
}
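// resize results to the displayed dimensions, draw the detection boxes unless
// disabled, then render the expression predictions via faceapi.drawFaceExpressions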
function drawExpressions(dimensions, canvas, results, withBoxes = true) {
const resizedResults = resizeCanvasAndResults(dimensions, canvas, results)
if (withBoxes) {
faceapi.drawDetection(canvas, resizedResults.map(det => det.detection), { withScore: false })
}
faceapi.drawFaceExpressions(canvas, resizedResults.map(({ detection, expressions }) => ({ position: detection.box, expressions })))
}
\ No newline at end of file
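A side note on the resizeCanvasAndResults change above: faceapi.resizeResults replaces the per-class forSize calls, so the same helper now rescales plain detections, detections with landmarks, and detections with expressions alike. A small illustrative sketch (the dimensions are arbitrary):

// rescale whatever result shape the task chain produced to the display size
const displaySize = { width: 800, height: 600 }
const resized = faceapi.resizeResults(results, displaySize)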
......@@ -10,8 +10,21 @@ async function loadImageFromUrl(url) {
updateResults()
}
function renderImageSelectList(selectListId, onChange, initialValue) {
const images = [1, 2, 3, 4, 5].map(idx => `bbt${idx}.jpg`)
function renderImageSelectList(selectListId, onChange, initialValue, withFaceExpressionImages) {
let images = [1, 2, 3, 4, 5].map(idx => `bbt${idx}.jpg`)
if (withFaceExpressionImages) {
images = [
'happy.jpg',
'sad.jpg',
'angry.jpg',
'disgusted.jpg',
'surprised.jpg',
'fearful.jpg',
'neutral.jpg'
].concat(images)
}
function renderChildren(select) {
images.forEach(imageName =>
renderOption(
......@@ -30,13 +43,14 @@ function renderImageSelectList(selectListId, onChange, initialValue) {
)
}
function initImageSelectionControls() {
function initImageSelectionControls(initialValue = 'bbt1.jpg', withFaceExpressionImages = false) {
renderImageSelectList(
'#selectList',
async (uri) => {
await onSelectedImageChanged(uri)
},
'bbt1.jpg'
initialValue,
withFaceExpressionImages
)
onSelectedImageChanged($('#selectList select').val())
}
\ No newline at end of file
......@@ -3,7 +3,7 @@
right: 0;
margin: auto;
margin-top: 20px;
padding-left: 260px;
padding-left: 280px;
display: inline-flex !important;
}
......@@ -53,6 +53,10 @@
padding: 0 10px !important;
}
.pad-sides-sm {
padding: 0 8px !important;
}
#github-link {
display: flex !important;
justify-content: center;
......
......@@ -15,12 +15,16 @@ app.use(express.static(path.join(__dirname, '../media')))
app.use(express.static(path.join(__dirname, '../../weights')))
app.use(express.static(path.join(__dirname, '../../dist')))
app.get('/', (req, res) => res.redirect('/face_and_landmark_detection'))
app.get('/face_and_landmark_detection', (req, res) => res.sendFile(path.join(viewsDir, 'faceAndLandmarkDetection.html')))
app.get('/', (req, res) => res.redirect('/face_detection'))
app.get('/face_detection', (req, res) => res.sendFile(path.join(viewsDir, 'faceDetection.html')))
app.get('/face_landmark_detection', (req, res) => res.sendFile(path.join(viewsDir, 'faceLandmarkDetection.html')))
app.get('/face_expression_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'faceExpressionRecognition.html')))
app.get('/face_extraction', (req, res) => res.sendFile(path.join(viewsDir, 'faceExtraction.html')))
app.get('/face_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'faceRecognition.html')))
app.get('/video_face_tracking', (req, res) => res.sendFile(path.join(viewsDir, 'videoFaceTracking.html')))
app.get('/webcam_face_tracking', (req, res) => res.sendFile(path.join(viewsDir, 'webcamFaceTracking.html')))
app.get('/webcam_face_detection', (req, res) => res.sendFile(path.join(viewsDir, 'webcamFaceDetection.html')))
app.get('/webcam_face_landmark_detection', (req, res) => res.sendFile(path.join(viewsDir, 'webcamFaceLandmarkDetection.html')))
app.get('/webcam_face_expression_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'webcamFaceExpressionRecognition.html')))
app.get('/bbt_face_landmark_detection', (req, res) => res.sendFile(path.join(viewsDir, 'bbtFaceLandmarkDetection.html')))
app.get('/bbt_face_similarity', (req, res) => res.sendFile(path.join(viewsDir, 'bbtFaceSimilarity.html')))
app.get('/bbt_face_matching', (req, res) => res.sendFile(path.join(viewsDir, 'bbtFaceMatching.html')))
......
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<script src="js/imageSelectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<!-- image_selection_control -->
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
<!-- image_selection_control -->
</div>
<div class="row side-by-side">
<!-- face_detector_selection_control -->
<div id="face_detector_selection_control" class="row input-field" style="margin-right: 20px;">
<select id="selectFaceDetector">
<option value="ssd_mobilenetv1">SSD Mobilenet V1</option>
<option value="tiny_face_detector">Tiny Face Detector</option>
<option value="mtcnn">MTCNN</option>
</select>
<label>Select Face Detector</label>
</div>
<!-- face_detector_selection_control -->
</div>
<!-- ssd_mobilenetv1_controls -->
<span id="ssd_mobilenetv1_controls">
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.5" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinConfidence()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinConfidence()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- ssd_mobilenetv1_controls -->
<!-- tiny_face_detector_controls -->
<span id="tiny_face_detector_controls">
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="inputSize">
<option value="" disabled selected>Input Size:</option>
<option value="160">160 x 160</option>
<option value="224">224 x 224</option>
<option value="320">320 x 320</option>
<option value="416">416 x 416</option>
<option value="512">512 x 512</option>
<option value="608">608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseScoreThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseScoreThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- tiny_face_detector_controls -->
<!-- mtcnn_controls -->
<span id="mtcnn_controls">
<div class="row side-by-side">
<div class="row">
<label for="minFaceSize">Minimum Face Size:</label>
<input disabled value="20" id="minFaceSize" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinFaceSize()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinFaceSize()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- mtcnn_controls -->
<script>
async function updateResults() {
if (!isFaceDetectionModelLoaded()) {
return
}
const inputImgEl = $('#inputImg').get(0)
const options = getFaceDetectorOptions()
const results = await faceapi.detectAllFaces(inputImgEl, options)
drawDetections(inputImgEl, $('#overlay').get(0), results)
}
async function run() {
// load face detection
await changeFaceDetector(SSD_MOBILENETV1)
// start processing image
updateResults()
}
$(document).ready(function() {
renderNavBar('#navbar', 'face_detection')
initImageSelectionControls()
initFaceDetectionControls()
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<script src="js/imageSelectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<!-- image_selection_control -->
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
<!-- image_selection_control -->
</div>
<div class="row side-by-side">
<!-- face_detector_selection_control -->
<div id="face_detector_selection_control" class="row input-field" style="margin-right: 20px;">
<select id="selectFaceDetector">
<option value="ssd_mobilenetv1">SSD Mobilenet V1</option>
<option value="tiny_face_detector">Tiny Face Detector</option>
<option value="mtcnn">MTCNN</option>
</select>
<label>Select Face Detector</label>
</div>
<!-- face_detector_selection_control -->
</div>
<!-- ssd_mobilenetv1_controls -->
<span id="ssd_mobilenetv1_controls">
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.5" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinConfidence()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinConfidence()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- ssd_mobilenetv1_controls -->
<!-- tiny_face_detector_controls -->
<span id="tiny_face_detector_controls">
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="inputSize">
<option value="" disabled selected>Input Size:</option>
<option value="160">160 x 160</option>
<option value="224">224 x 224</option>
<option value="320">320 x 320</option>
<option value="416">416 x 416</option>
<option value="512">512 x 512</option>
<option value="608">608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseScoreThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseScoreThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- tiny_face_detector_controls -->
<!-- mtcnn_controls -->
<span id="mtcnn_controls">
<div class="row side-by-side">
<div class="row">
<label for="minFaceSize">Minimum Face Size:</label>
<input disabled value="20" id="minFaceSize" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinFaceSize()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinFaceSize()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- mtcnn_controls -->
<script>
async function updateResults() {
if (!isFaceDetectionModelLoaded()) {
return
}
const inputImgEl = $('#inputImg').get(0)
const options = getFaceDetectorOptions()
const results = await faceapi.detectAllFaces(inputImgEl, options).withFaceExpressions()
drawExpressions(inputImgEl, $('#overlay').get(0), results, true)
}
async function run() {
// load face detection and face expression recognition models
await changeFaceDetector(SSD_MOBILENETV1)
await faceapi.loadFaceExpressionModel('/')
// start processing image
updateResults()
}
$(document).ready(function() {
renderNavBar('#navbar', 'face_expression_recognition')
initImageSelectionControls('happy.jpg', true)
initFaceDetectionControls()
run()
})
</script>
</body>
</html>
\ No newline at end of file
......@@ -54,8 +54,6 @@
<!-- check boxes -->
<div class="row" style="width: 220px;">
<input type="checkbox" id="withFaceLandmarksCheckbox" onchange="onChangeWithFaceLandmarks(event)" />
<label for="withFaceLandmarksCheckbox">Detect Face Landmarks</label>
<input type="checkbox" id="hideBoundingBoxesCheckbox" onchange="onChangeHideBoundingBoxes(event)" />
<label for="hideBoundingBoxesCheckbox">Hide Bounding Boxes</label>
</div>
......@@ -147,14 +145,8 @@
</body>
<script>
let withFaceLandmarks = false
let withBoxes = true
function onChangeWithFaceLandmarks(e) {
withFaceLandmarks = $(e.target).prop('checked')
updateResults()
}
function onChangeHideBoundingBoxes(e) {
withBoxes = !$(e.target).prop('checked')
updateResults()
......@@ -168,17 +160,9 @@
const inputImgEl = $('#inputImg').get(0)
const options = getFaceDetectorOptions()
const faceDetectionTask = faceapi.detectAllFaces(inputImgEl, options)
const results = withFaceLandmarks
? await faceDetectionTask.withFaceLandmarks()
: await faceDetectionTask
const drawFunction = withFaceLandmarks
? drawLandmarks
: drawDetections
const results = await faceapi.detectAllFaces(inputImgEl, options).withFaceLandmarks()
drawFunction(inputImgEl, $('#overlay').get(0), results, withBoxes)
drawLandmarks(inputImgEl, $('#overlay').get(0), results, withBoxes)
}
async function run() {
......@@ -191,7 +175,7 @@
}
$(document).ready(function() {
renderNavBar('#navbar', 'face_and_landmark_detection')
renderNavBar('#navbar', 'face_landmark_detection')
initImageSelectionControls()
initFaceDetectionControls()
run()
......
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<video onplay="onPlay(this)" id="inputVideo" autoplay muted></video>
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<!-- face_detector_selection_control -->
<div id="face_detector_selection_control" class="row input-field" style="margin-right: 20px;">
<select id="selectFaceDetector">
<option value="ssd_mobilenetv1">SSD Mobilenet V1</option>
<option value="tiny_face_detector">Tiny Face Detector</option>
<option value="mtcnn">MTCNN</option>
</select>
<label>Select Face Detector</label>
</div>
<!-- face_detector_selection_control -->
<!-- fps_meter -->
<div id="fps_meter" class="row side-by-side">
<div>
<label for="time">Time:</label>
<input disabled value="-" id="time" type="text" class="bold">
<label for="fps">Estimated Fps:</label>
<input disabled value="-" id="fps" type="text" class="bold">
</div>
</div>
<!-- fps_meter -->
</div>
<!-- ssd_mobilenetv1_controls -->
<span id="ssd_mobilenetv1_controls">
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.5" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinConfidence()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinConfidence()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- ssd_mobilenetv1_controls -->
<!-- tiny_face_detector_controls -->
<span id="tiny_face_detector_controls">
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="inputSize">
<option value="" disabled selected>Input Size:</option>
<option value="128">128 x 128</option>
<option value="160">160 x 160</option>
<option value="224">224 x 224</option>
<option value="320">320 x 320</option>
<option value="416">416 x 416</option>
<option value="512">512 x 512</option>
<option value="608">608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseScoreThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseScoreThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- tiny_face_detector_controls -->
<!-- mtcnn_controls -->
<span id="mtcnn_controls">
<div class="row side-by-side">
<div class="row">
<label for="minFaceSize">Minimum Face Size:</label>
<input disabled value="20" id="minFaceSize" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinFaceSize()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinFaceSize()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- mtcnn_controls -->
<script>
let forwardTimes = []
function updateTimeStats(timeInMs) {
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
}
async function onPlay() {
const videoEl = $('#inputVideo').get(0)
if(videoEl.paused || videoEl.ended || !isFaceDetectionModelLoaded())
return setTimeout(() => onPlay())
const options = getFaceDetectorOptions()
const ts = Date.now()
const result = await faceapi.detectSingleFace(videoEl, options)
updateTimeStats(Date.now() - ts)
if (result) {
drawDetections(videoEl, $('#overlay').get(0), [result])
}
setTimeout(() => onPlay())
}
async function run() {
// load face detection model
await changeFaceDetector(TINY_FACE_DETECTOR)
changeInputSize(128)
// try to access users webcam and stream the images
// to the video element
const stream = await navigator.mediaDevices.getUserMedia({ video: {} })
const videoEl = $('#inputVideo').get(0)
videoEl.srcObject = stream
}
function updateResults() {}
$(document).ready(function() {
renderNavBar('#navbar', 'webcam_face_detection')
initFaceDetectionControls()
run()
})
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="js/commons.js"></script>
<script src="js/drawing.js"></script>
<script src="js/faceDetectionControls.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<video onplay="onPlay(this)" id="inputVideo" autoplay muted></video>
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<!-- face_detector_selection_control -->
<div id="face_detector_selection_control" class="row input-field" style="margin-right: 20px;">
<select id="selectFaceDetector">
<option value="ssd_mobilenetv1">SSD Mobilenet V1</option>
<option value="tiny_face_detector">Tiny Face Detector</option>
<option value="mtcnn">MTCNN</option>
</select>
<label>Select Face Detector</label>
</div>
<!-- face_detector_selection_control -->
<!-- check boxes -->
<div class="row" style="width: 220px;">
<input type="checkbox" id="hideBoundingBoxesCheckbox" onchange="onChangeHideBoundingBoxes(event)" />
<label for="hideBoundingBoxesCheckbox">Hide Bounding Boxes</label>
</div>
<!-- check boxes -->
<!-- fps_meter -->
<div id="fps_meter" class="row side-by-side">
<div>
<label for="time">Time:</label>
<input disabled value="-" id="time" type="text" class="bold">
<label for="fps">Estimated Fps:</label>
<input disabled value="-" id="fps" type="text" class="bold">
</div>
</div>
<!-- fps_meter -->
</div>
<!-- ssd_mobilenetv1_controls -->
<span id="ssd_mobilenetv1_controls">
<div class="row side-by-side">
<div class="row">
<label for="minConfidence">Min Confidence:</label>
<input disabled value="0.5" id="minConfidence" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinConfidence()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinConfidence()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- ssd_mobilenetv1_controls -->
<!-- tiny_face_detector_controls -->
<span id="tiny_face_detector_controls">
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="inputSize">
<option value="" disabled selected>Input Size:</option>
<option value="128">128 x 128</option>
<option value="160">160 x 160</option>
<option value="224">224 x 224</option>
<option value="320">320 x 320</option>
<option value="416">416 x 416</option>
<option value="512">512 x 512</option>
<option value="608">608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseScoreThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseScoreThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- tiny_face_detector_controls -->
<!-- mtcnn_controls -->
<span id="mtcnn_controls">
<div class="row side-by-side">
<div class="row">
<label for="minFaceSize">Minimum Face Size:</label>
<input disabled value="20" id="minFaceSize" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseMinFaceSize()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseMinFaceSize()"
>
<i class="material-icons left">+</i>
</button>
</div>
</span>
<!-- mtcnn_controls -->
<script>
let forwardTimes = []
let withBoxes = true
function onChangeHideBoundingBoxes(e) {
withBoxes = !$(e.target).prop('checked')
}
function updateTimeStats(timeInMs) {
forwardTimes = [timeInMs].concat(forwardTimes).slice(0, 30)
const avgTimeInMs = forwardTimes.reduce((total, t) => total + t) / forwardTimes.length
$('#time').val(`${Math.round(avgTimeInMs)} ms`)
$('#fps').val(`${faceapi.round(1000 / avgTimeInMs)}`)
}
async function onPlay() {
const videoEl = $('#inputVideo').get(0)
if(videoEl.paused || videoEl.ended || !isFaceDetectionModelLoaded())
return setTimeout(() => onPlay())
const options = getFaceDetectorOptions()
const ts = Date.now()
const result = await faceapi.detectSingleFace(videoEl, options).withFaceExpressions()
updateTimeStats(Date.now() - ts)
if (result) {
drawExpressions(videoEl, $('#overlay').get(0), [result], withBoxes)
}
setTimeout(() => onPlay())
}
async function run() {
// load face detection and face expression recognition models
await changeFaceDetector(TINY_FACE_DETECTOR)
await faceapi.loadFaceExpressionModel('/')
changeInputSize(224)
// try to access users webcam and stream the images
// to the video element
const stream = await navigator.mediaDevices.getUserMedia({ video: {} })
const videoEl = $('#inputVideo').get(0)
videoEl.srcObject = stream
}
function updateResults() {}
$(document).ready(function() {
renderNavBar('#navbar', 'webcam_face_expression_recognition')
initFaceDetectionControls()
run()
})
</script>
</body>
</html>
\ No newline at end of file
......@@ -37,8 +37,6 @@
<!-- check boxes -->
<div class="row" style="width: 220px;">
<input type="checkbox" id="withFaceLandmarksCheckbox" onchange="onChangeWithFaceLandmarks(event)" />
<label for="withFaceLandmarksCheckbox">Detect Face Landmarks</label>
<input type="checkbox" id="hideBoundingBoxesCheckbox" onchange="onChangeHideBoundingBoxes(event)" />
<label for="hideBoundingBoxesCheckbox">Hide Bounding Boxes</label>
</div>
......@@ -144,13 +142,8 @@
<script>
let forwardTimes = []
let withFaceLandmarks = false
let withBoxes = true
function onChangeWithFaceLandmarks(e) {
withFaceLandmarks = $(e.target).prop('checked')
}
function onChangeHideBoundingBoxes(e) {
withBoxes = !$(e.target).prop('checked')
}
......@@ -173,19 +166,12 @@
const ts = Date.now()
const faceDetectionTask = faceapi.detectSingleFace(videoEl, options)
const result = withFaceLandmarks
? await faceDetectionTask.withFaceLandmarks()
: await faceDetectionTask
const result = await faceapi.detectSingleFace(videoEl, options).withFaceLandmarks()
updateTimeStats(Date.now() - ts)
const drawFunction = withFaceLandmarks
? drawLandmarks
: drawDetections
if (result) {
drawFunction(videoEl, $('#overlay').get(0), [result], withBoxes)
drawLandmarks(videoEl, $('#overlay').get(0), [result], withBoxes)
}
setTimeout(() => onPlay())
......@@ -195,7 +181,7 @@
// load face detection and face landmark models
await changeFaceDetector(TINY_FACE_DETECTOR)
await faceapi.loadFaceLandmarkModel('/')
changeInputSize(128)
changeInputSize(224)
// try to access users webcam and stream the images
// to the video element
......@@ -207,7 +193,7 @@
function updateResults() {}
$(document).ready(function() {
renderNavBar('#navbar', 'webcam_face_tracking')
renderNavBar('#navbar', 'webcam_face_landmark_detection')
initFaceDetectionControls()
run()
})
......
......@@ -11,6 +11,7 @@ async function run() {
faceapi.drawDetection(out, detections)
saveFile('faceDetection.jpg', out.toBuffer('image/jpeg'))
console.log('done, saved results to out/faceDetection.jpg')
}
run()
\ No newline at end of file
import { canvas, faceapi, faceDetectionNet, faceDetectionOptions, saveFile } from './commons';
async function run() {
await faceDetectionNet.loadFromDisk('../../weights')
await faceapi.nets.faceExpressionNet.loadFromDisk('../../weights')
const img = await canvas.loadImage('../images/surprised.jpg')
const results = await faceapi.detectAllFaces(img, faceDetectionOptions)
.withFaceExpressions()
const out = faceapi.createCanvasFromMedia(img) as any
faceapi.drawDetection(out, results.map(res => res.detection), { withScore: false })
faceapi.drawFaceExpressions(out, results.map(({ detection, expressions }) => ({ position: detection.box, expressions })))
saveFile('faceExpressionRecognition.jpg', out.toBuffer('image/jpeg'))
console.log('done, saved results to out/faceExpressionRecognition.jpg')
}
run()
\ No newline at end of file
......@@ -11,9 +11,10 @@ async function run() {
const out = faceapi.createCanvasFromMedia(img) as any
faceapi.drawDetection(out, results.map(res => res.detection))
faceapi.drawLandmarks(out, results.map(res => res.faceLandmarks), { drawLines: true, color: 'red' })
faceapi.drawLandmarks(out, results.map(res => res.landmarks), { drawLines: true, color: 'red' })
saveFile('faceLandmarkDetection.jpg', out.toBuffer('image/jpeg'))
console.log('done, saved results to out/faceLandmarkDetection.jpg')
}
run()
\ No newline at end of file
......@@ -38,6 +38,7 @@ async function run() {
const outQuery = faceapi.createCanvasFromMedia(queryImage) as any
faceapi.drawDetection(outQuery, queryBoxesWithText)
saveFile('queryImage.jpg', outQuery.toBuffer('image/jpeg'))
console.log('done, saved results to out/queryImage.jpg')
}
run()
\ No newline at end of file
......@@ -13,4 +13,8 @@ export class FaceDetection extends ObjectDetection implements IFaceDetecion {
) {
super(score, score, '', relativeBox, imageDims)
}
public forSize(width: number, height: number): FaceDetection {
return super.forSize(width, height)
}
}
\ No newline at end of file
import { FaceDetection } from './FaceDetection';
import { FaceLandmarks } from './FaceLandmarks';
import { FaceLandmarks68 } from './FaceLandmarks68';
export interface IFaceDetectionWithLandmarks<TFaceLandmarks extends FaceLandmarks = FaceLandmarks68> {
detection: FaceDetection,
landmarks: TFaceLandmarks
}
export class FaceDetectionWithLandmarks<TFaceLandmarks extends FaceLandmarks = FaceLandmarks68>
implements IFaceDetectionWithLandmarks<TFaceLandmarks> {
private _detection: FaceDetection
private _unshiftedLandmarks: TFaceLandmarks
constructor(
detection: FaceDetection,
unshiftedLandmarks: TFaceLandmarks
) {
this._detection = detection
this._unshiftedLandmarks = unshiftedLandmarks
}
public get detection(): FaceDetection { return this._detection }
public get unshiftedLandmarks(): TFaceLandmarks { return this._unshiftedLandmarks }
public get alignedRect(): FaceDetection {
const rect = this.landmarks.align()
const { imageDims } = this.detection
return new FaceDetection(this._detection.score, rect.rescale(imageDims.reverse()), imageDims)
}
public get landmarks(): TFaceLandmarks {
const { x, y } = this.detection.box
return this._unshiftedLandmarks.shiftBy(x, y)
}
// aliases for backward compatibility
get faceDetection(): FaceDetection { return this.detection }
get faceLandmarks(): TFaceLandmarks { return this.landmarks }
public forSize(width: number, height: number): FaceDetectionWithLandmarks<TFaceLandmarks> {
const resizedDetection = this._detection.forSize(width, height)
const resizedLandmarks = this._unshiftedLandmarks.forSize<TFaceLandmarks>(resizedDetection.box.width, resizedDetection.box.height)
return new FaceDetectionWithLandmarks<TFaceLandmarks>(resizedDetection, resizedLandmarks)
}
}
\ No newline at end of file
import { FaceDetection } from './FaceDetection';
import { FaceDetectionWithLandmarks, IFaceDetectionWithLandmarks } from './FaceDetectionWithLandmarks';
import { FaceLandmarks } from './FaceLandmarks';
import { FaceLandmarks68 } from './FaceLandmarks68';
export interface IFullFaceDescription<TFaceLandmarks extends FaceLandmarks = FaceLandmarks68>
extends IFaceDetectionWithLandmarks<TFaceLandmarks> {
detection: FaceDetection,
landmarks: TFaceLandmarks,
descriptor: Float32Array
}
export class FullFaceDescription<TFaceLandmarks extends FaceLandmarks = FaceLandmarks68>
extends FaceDetectionWithLandmarks<TFaceLandmarks>
implements IFullFaceDescription<TFaceLandmarks> {
private _descriptor: Float32Array
constructor(
detection: FaceDetection,
unshiftedLandmarks: TFaceLandmarks,
descriptor: Float32Array
) {
super(detection, unshiftedLandmarks)
this._descriptor = descriptor
}
public get descriptor(): Float32Array {
return this._descriptor
}
public forSize(width: number, height: number): FullFaceDescription<TFaceLandmarks> {
const { detection, landmarks } = super.forSize(width, height)
return new FullFaceDescription<TFaceLandmarks>(detection, landmarks, this.descriptor)
}
}
\ No newline at end of file
export * from './FaceDetection';
export * from './FaceDetectionWithLandmarks';
export * from './FaceLandmarks';
export * from './FaceLandmarks5';
export * from './FaceLandmarks68';
export * from './FaceMatch';
export * from './FullFaceDescription';
export * from './LabeledFaceDescriptors';
\ No newline at end of file
import { drawText, env, getContext2dOrThrow, getDefaultDrawOptions, resolveInput, round } from 'tfjs-image-recognition-base';
import { IRect } from 'tfjs-tiny-yolov2';
import { DrawFaceExpressionsInput, DrawFaceExpressionsOptions } from './types';
export function drawFaceExpressions(
canvasArg: string | HTMLCanvasElement,
faceExpressions: DrawFaceExpressionsInput | DrawFaceExpressionsInput[],
options?: DrawFaceExpressionsOptions
) {
const canvas = resolveInput(canvasArg)
if (!(canvas instanceof env.getEnv().Canvas)) {
throw new Error('drawFaceExpressions - expected canvas to be of type: HTMLCanvasElement')
}
const drawOptions = Object.assign(
getDefaultDrawOptions(options),
(options || {})
)
const ctx = getContext2dOrThrow(canvas)
const {
primaryColor = 'red',
secondaryColor = 'blue',
primaryFontSize = 22,
secondaryFontSize = 16,
minConfidence = 0.2
} = drawOptions
const faceExpressionsArray = Array.isArray(faceExpressions)
? faceExpressions
: [faceExpressions]
faceExpressionsArray.forEach(({ position, expressions }) => {
const { x, y } = position
const height = (position as IRect).height || 0
const sorted = expressions.sort((a, b) => b.probability - a.probability)
const resultsToDisplay = sorted.filter(expr => expr.probability > minConfidence)
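// if the label block would run past the bottom of the canvas, shift it up
// by the height of the text block so it stays visible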
let offset = (y + height + resultsToDisplay.length * primaryFontSize) > canvas.height
? -(resultsToDisplay.length * primaryFontSize)
: 0
resultsToDisplay.forEach((expr, i) => {
const text = `${expr.expression} (${round(expr.probability)})`
drawText(
ctx,
x,
y + height + (i * primaryFontSize) + offset,
text,
{
textColor: i === 0 ? primaryColor : secondaryColor,
fontSize: i === 0 ? primaryFontSize : secondaryFontSize
}
)
})
})
}
\ No newline at end of file
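drawFaceExpressions accepts a single input or an array; each entry pairs a position (point or rect) with that face's expression predictions, and predictions below minConfidence (default 0.2) are skipped. A hedged usage sketch, with the canvas id and threshold chosen for illustration:

faceapi.drawFaceExpressions(
  'overlay',
  results.map(({ detection, expressions }) => ({ position: detection.box, expressions })),
  { minConfidence: 0.3 }
)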
export * from './drawContour'
export * from './drawLandmarks'
export * from './drawFaceExpressions'
export * from './extractFaces'
export * from './extractFaceTensors'
export * from './types'
\ No newline at end of file
import { IPoint, IRect } from 'tfjs-tiny-yolov2';
import { WithFaceExpressions } from '../factories/WithFaceExpressions';
export type DrawLandmarksOptions = {
lineWidth?: number
color?: string,
color?: string
drawLines?: boolean
}
export type DrawFaceExpressionsOptions = {
primaryColor?: string
secondaryColor?: string
primaryFontSize?: number
secondaryFontSize?: number
minConfidence?: number
}
export type DrawFaceExpressionsInput = WithFaceExpressions<{
position: IPoint | IRect
}>
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { NetInput, TNetInput, toNetInput } from 'tfjs-image-recognition-base';
import { FaceFeatureExtractor } from '../faceFeatureExtractor/FaceFeatureExtractor';
import { FaceFeatureExtractorParams } from '../faceFeatureExtractor/types';
import { FaceProcessor } from '../faceProcessor/FaceProcessor';
import { FaceExpression, faceExpressionLabels, FaceExpressionPrediction } from './types';
export class FaceExpressionNet extends FaceProcessor<FaceFeatureExtractorParams> {
public static getFaceExpressionLabel(faceExpression: string) {
const label = faceExpressionLabels[faceExpression]
if (typeof label !== 'number') {
throw new Error(`getFaceExpressionLabel - no label for faceExpression: ${faceExpression}`)
}
return label
}
public static decodeProbabilites(probabilities: number[] | Float32Array): FaceExpressionPrediction[] {
if (probabilities.length !== 7) {
throw new Error(`decodeProbabilites - expected probabilities.length to be 7, have: ${probabilities.length}`)
}
return (Object.keys(faceExpressionLabels) as FaceExpression[])
.map(expression => ({ expression, probability: probabilities[faceExpressionLabels[expression]] }))
}
constructor(faceFeatureExtractor: FaceFeatureExtractor = new FaceFeatureExtractor()) {
super('FaceExpressionNet', faceFeatureExtractor)
}
public forwardInput(input: NetInput | tf.Tensor4D): tf.Tensor2D {
return tf.tidy(() => tf.softmax(this.runNet(input)))
}
public async forward(input: TNetInput): Promise<tf.Tensor2D> {
return this.forwardInput(await toNetInput(input))
}
public async predictExpressions(input: TNetInput) {
const netInput = await toNetInput(input)
const out = await this.forwardInput(netInput)
const probabilitiesByBatch = await Promise.all(tf.unstack(out).map(async t => {
const data = await t.data()
t.dispose()
return data
}))
out.dispose()
const predictionsByBatch = probabilitiesByBatch
.map(probabilities => FaceExpressionNet.decodeProbabilites(probabilities as Float32Array))
return netInput.isBatchInput
? predictionsByBatch
: predictionsByBatch[0]
}
protected getDefaultModelName(): string {
return 'face_expression_model'
}
protected getClassifierChannelsIn(): number {
return 256
}
protected getClassifierChannelsOut(): number {
return 7
}
}
\ No newline at end of file
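FaceExpressionNet softmaxes the classifier output and decodes the seven class probabilities into labeled predictions, so it can also be driven directly, outside the withFaceExpressions() task chain. A sketch based on the node example above (img is any TNetInput):

// node: load from disk; in the browser: await faceapi.loadFaceExpressionModel('/')
await faceapi.nets.faceExpressionNet.loadFromDisk('../../weights')
const predictions = await faceapi.nets.faceExpressionNet.predictExpressions(img)
// for a single input: [{ expression: 'happy', probability: 0.98 }, ...]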
export * from './FaceExpressionNet';
export * from './types';
\ No newline at end of file
export const faceExpressionLabels = {
neutral: 0,
happy: 1,
sad: 2,
angry: 3,
fearful: 4,
disgusted: 5,
surprised: 6
}
export type FaceExpression = 'neutral' | 'happy' | 'sad' | 'angry' | 'fearful' | 'disgusted' | 'surprised'
export type FaceExpressionPrediction = {
expression: FaceExpression,
probability: number
}
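Since decodeProbabilites indexes the softmax output with these labels, the model's output order follows the mapping above. A hypothetical output vector decodes like this:

// hypothetical 7-class softmax output, indexed per faceExpressionLabels
const probs = new Float32Array([0.02, 0.90, 0.01, 0.01, 0.02, 0.01, 0.03])
const decoded = FaceExpressionNet.decodeProbabilites(probs)
// -> [{ expression: 'neutral', probability: 0.02 }, { expression: 'happy', probability: 0.90 }, ...]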
import * as tf from '@tensorflow/tfjs-core';
import { NetInput, NeuralNetwork, normalize, TNetInput, toNetInput } from 'tfjs-image-recognition-base';
import { denseBlock4 } from './denseBlock';
import { extractParams } from './extractParams';
import { extractParamsFromWeigthMap } from './extractParamsFromWeigthMap';
import { FaceFeatureExtractorParams, IFaceFeatureExtractor } from './types';
export class FaceFeatureExtractor extends NeuralNetwork<FaceFeatureExtractorParams> implements IFaceFeatureExtractor<FaceFeatureExtractorParams> {
constructor() {
super('FaceFeatureExtractor')
}
public forwardInput(input: NetInput): tf.Tensor4D {
const { params } = this
if (!params) {
throw new Error('FaceFeatureExtractor - load model before inference')
}
return tf.tidy(() => {
const batchTensor = input.toBatchTensor(112, true)
const meanRgb = [122.782, 117.001, 104.298]
const normalized = normalize(batchTensor, meanRgb).div(tf.scalar(255)) as tf.Tensor4D
let out = denseBlock4(normalized, params.dense0, true)
out = denseBlock4(out, params.dense1)
out = denseBlock4(out, params.dense2)
out = denseBlock4(out, params.dense3)
out = tf.avgPool(out, [7, 7], [2, 2], 'valid')
return out
})
}
public async forward(input: TNetInput): Promise<tf.Tensor4D> {
return this.forwardInput(await toNetInput(input))
}
protected getDefaultModelName(): string {
return 'face_feature_extractor_model'
}
protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap) {
return extractParamsFromWeigthMap(weightMap)
}
protected extractParams(weights: Float32Array) {
return extractParams(weights)
}
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { NetInput, NeuralNetwork, normalize, TNetInput, toNetInput } from 'tfjs-image-recognition-base';
import { denseBlock3 } from './denseBlock';
import { extractParamsFromWeigthMapTiny } from './extractParamsFromWeigthMapTiny';
import { extractParamsTiny } from './extractParamsTiny';
import { IFaceFeatureExtractor, TinyFaceFeatureExtractorParams } from './types';
export class TinyFaceFeatureExtractor extends NeuralNetwork<TinyFaceFeatureExtractorParams> implements IFaceFeatureExtractor<TinyFaceFeatureExtractorParams> {
constructor() {
super('TinyFaceFeatureExtractor')
}
public forwardInput(input: NetInput): tf.Tensor4D {
const { params } = this
if (!params) {
throw new Error('TinyFaceFeatureExtractor - load model before inference')
}
return tf.tidy(() => {
const batchTensor = input.toBatchTensor(112, true)
const meanRgb = [122.782, 117.001, 104.298]
const normalized = normalize(batchTensor, meanRgb).div(tf.scalar(255)) as tf.Tensor4D
let out = denseBlock3(normalized, params.dense0, true)
out = denseBlock3(out, params.dense1)
out = denseBlock3(out, params.dense2)
out = tf.avgPool(out, [14, 14], [2, 2], 'valid')
return out
})
}
public async forward(input: TNetInput): Promise<tf.Tensor4D> {
return this.forwardInput(await toNetInput(input))
}
protected getDefaultModelName(): string {
return 'face_feature_extractor_tiny_model'
}
protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap) {
return extractParamsFromWeigthMapTiny(weightMap)
}
protected extractParams(weights: Float32Array) {
return extractParamsTiny(weights)
}
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { ConvParams, SeparableConvParams } from 'tfjs-tiny-yolov2';
import { depthwiseSeparableConv } from './depthwiseSeparableConv';
import { DenseBlock3Params, DenseBlock4Params } from './types';
export function denseBlock3(
x: tf.Tensor4D,
denseBlockParams: DenseBlock3Params,
isFirstLayer: boolean = false
): tf.Tensor4D {
return tf.tidy(() => {
const out1 = tf.relu(
isFirstLayer
? tf.add(
tf.conv2d(x, (denseBlockParams.conv0 as ConvParams).filters, [2, 2], 'same'),
denseBlockParams.conv0.bias
)
: depthwiseSeparableConv(x, denseBlockParams.conv0 as SeparableConvParams, [2, 2])
) as tf.Tensor4D
const out2 = depthwiseSeparableConv(out1, denseBlockParams.conv1, [1, 1])
const in3 = tf.relu(tf.add(out1, out2)) as tf.Tensor4D
const out3 = depthwiseSeparableConv(in3, denseBlockParams.conv2, [1, 1])
return tf.relu(tf.add(out1, tf.add(out2, out3))) as tf.Tensor4D
})
}
export function denseBlock4(
x: tf.Tensor4D,
denseBlockParams: DenseBlock4Params,
isFirstLayer: boolean = false,
isScaleDown: boolean = true
): tf.Tensor4D {
return tf.tidy(() => {
const out1 = tf.relu(
isFirstLayer
? tf.add(
tf.conv2d(x, (denseBlockParams.conv0 as ConvParams).filters, isScaleDown ? [2, 2] : [1, 1], 'same'),
denseBlockParams.conv0.bias
)
: depthwiseSeparableConv(x, denseBlockParams.conv0 as SeparableConvParams, isScaleDown ? [2, 2] : [1, 1])
) as tf.Tensor4D
const out2 = depthwiseSeparableConv(out1, denseBlockParams.conv1, [1, 1])
const in3 = tf.relu(tf.add(out1, out2)) as tf.Tensor4D
const out3 = depthwiseSeparableConv(in3, denseBlockParams.conv2, [1, 1])
const in4 = tf.relu(tf.add(out1, tf.add(out2, out3))) as tf.Tensor4D
const out4 = depthwiseSeparableConv(in4, denseBlockParams.conv3, [1, 1])
return tf.relu(tf.add(out1, tf.add(out2, tf.add(out3, out4)))) as tf.Tensor4D
})
}
import { extractWeightsFactory, ParamMapping } from 'tfjs-image-recognition-base';
import { extractorsFactory } from './extractorsFactory';
import { NetParams } from './types';
import { FaceFeatureExtractorParams } from './types';
export function extractParams(weights: Float32Array): { params: NetParams, paramMappings: ParamMapping[] } {
export function extractParams(weights: Float32Array): { params: FaceFeatureExtractorParams, paramMappings: ParamMapping[] } {
const paramMappings: ParamMapping[] = []
......@@ -13,15 +13,13 @@ export function extractParams(weights: Float32Array): { params: NetParams, param
} = extractWeightsFactory(weights)
const {
extractDenseBlock4Params,
extractFCParams
extractDenseBlock4Params
} = extractorsFactory(extractWeights, paramMappings)
const dense0 = extractDenseBlock4Params(3, 32, 'dense0', true)
const dense1 = extractDenseBlock4Params(32, 64, 'dense1')
const dense2 = extractDenseBlock4Params(64, 128, 'dense2')
const dense3 = extractDenseBlock4Params(128, 256, 'dense3')
const fc = extractFCParams(256, 136, 'fc')
if (getRemainingWeights().length !== 0) {
throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`)
......@@ -29,6 +27,6 @@ export function extractParams(weights: Float32Array): { params: NetParams, param
return {
paramMappings,
params: { dense0, dense1, dense2, dense3, fc }
params: { dense0, dense1, dense2, dense3 }
}
}
\ No newline at end of file
......@@ -2,25 +2,23 @@ import * as tf from '@tensorflow/tfjs-core';
import { disposeUnusedWeightTensors, ParamMapping } from 'tfjs-image-recognition-base';
import { loadParamsFactory } from './loadParamsFactory';
import { NetParams } from './types';
import { FaceFeatureExtractorParams } from './types';
export function extractParamsFromWeigthMap(
weightMap: tf.NamedTensorMap
): { params: NetParams, paramMappings: ParamMapping[] } {
): { params: FaceFeatureExtractorParams, paramMappings: ParamMapping[] } {
const paramMappings: ParamMapping[] = []
const {
extractDenseBlock4Params,
extractFcParams
extractDenseBlock4Params
} = loadParamsFactory(weightMap, paramMappings)
const params = {
dense0: extractDenseBlock4Params('dense0', true),
dense1: extractDenseBlock4Params('dense1'),
dense2: extractDenseBlock4Params('dense2'),
dense3: extractDenseBlock4Params('dense3'),
fc: extractFcParams('fc')
dense3: extractDenseBlock4Params('dense3')
}
disposeUnusedWeightTensors(weightMap, paramMappings)
......
......@@ -2,24 +2,22 @@ import * as tf from '@tensorflow/tfjs-core';
import { disposeUnusedWeightTensors, ParamMapping } from 'tfjs-image-recognition-base';
import { loadParamsFactory } from './loadParamsFactory';
import { TinyNetParams } from './types';
import { TinyFaceFeatureExtractorParams } from './types';
export function extractParamsFromWeigthMapTiny(
weightMap: tf.NamedTensorMap
): { params: TinyNetParams, paramMappings: ParamMapping[] } {
): { params: TinyFaceFeatureExtractorParams, paramMappings: ParamMapping[] } {
const paramMappings: ParamMapping[] = []
const {
extractDenseBlock3Params,
extractFcParams
extractDenseBlock3Params
} = loadParamsFactory(weightMap, paramMappings)
const params = {
dense0: extractDenseBlock3Params('dense0', true),
dense1: extractDenseBlock3Params('dense1'),
dense2: extractDenseBlock3Params('dense2'),
fc: extractFcParams('fc')
dense2: extractDenseBlock3Params('dense2')
}
disposeUnusedWeightTensors(weightMap, paramMappings)
......
import { extractWeightsFactory, ParamMapping } from 'tfjs-image-recognition-base';
import { extractorsFactory } from './extractorsFactory';
import { TinyNetParams } from './types';
import { TinyFaceFeatureExtractorParams } from './types';
export function extractParamsTiny(weights: Float32Array): { params: TinyNetParams, paramMappings: ParamMapping[] } {
export function extractParamsTiny(weights: Float32Array): { params: TinyFaceFeatureExtractorParams, paramMappings: ParamMapping[] } {
const paramMappings: ParamMapping[] = []
......@@ -13,14 +13,12 @@ export function extractParamsTiny(weights: Float32Array): { params: TinyNetParam
} = extractWeightsFactory(weights)
const {
extractDenseBlock3Params,
extractFCParams
extractDenseBlock3Params
} = extractorsFactory(extractWeights, paramMappings)
const dense0 = extractDenseBlock3Params(3, 32, 'dense0', true)
const dense1 = extractDenseBlock3Params(32, 64, 'dense1')
const dense2 = extractDenseBlock3Params(64, 128, 'dense2')
const fc = extractFCParams(128, 136, 'fc')
if (getRemainingWeights().length !== 0) {
throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`)
......@@ -28,6 +26,6 @@ export function extractParamsTiny(weights: Float32Array): { params: TinyNetParam
return {
paramMappings,
params: { dense0, dense1, dense2, fc }
params: { dense0, dense1, dense2 }
}
}
\ No newline at end of file
......@@ -24,21 +24,6 @@ export function extractorsFactory(extractWeights: ExtractWeightsFunction, paramM
)
}
function extractFCParams(channelsIn: number, channelsOut: number, mappedPrefix: string): FCParams {
const weights = tf.tensor2d(extractWeights(channelsIn * channelsOut), [channelsIn, channelsOut])
const bias = tf.tensor1d(extractWeights(channelsOut))
paramMappings.push(
{ paramPath: `${mappedPrefix}/weights` },
{ paramPath: `${mappedPrefix}/bias` }
)
return {
weights,
bias
}
}
const extractConvParams = extractConvParamsFactory(extractWeights, paramMappings)
function extractDenseBlock3Params(channelsIn: number, channelsOut: number, mappedPrefix: string, isFirstLayer: boolean = false): DenseBlock3Params {
......@@ -62,8 +47,7 @@ export function extractorsFactory(extractWeights: ExtractWeightsFunction, paramM
return {
extractDenseBlock3Params,
extractDenseBlock4Params,
extractFCParams
extractDenseBlock4Params
}
}
\ No newline at end of file
export * from './FaceFeatureExtractor';
export * from './TinyFaceFeatureExtractor';
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { extractWeightEntryFactory, ParamMapping } from 'tfjs-image-recognition-base';
import { ConvParams, FCParams, SeparableConvParams } from 'tfjs-tiny-yolov2';
import { ConvParams, SeparableConvParams } from 'tfjs-tiny-yolov2';
import { DenseBlock3Params, DenseBlock4Params } from './types';
......@@ -48,16 +48,8 @@ export function loadParamsFactory(weightMap: any, paramMappings: ParamMapping[])
return { conv0, conv1, conv2, conv3 }
}
function extractFcParams(prefix: string): FCParams {
const weights = extractWeightEntry<tf.Tensor2D>(`${prefix}/weights`, 2)
const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/bias`, 1)
return { weights, bias }
}
return {
extractDenseBlock3Params,
extractDenseBlock4Params,
extractFcParams
extractDenseBlock4Params
}
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { ConvParams, FCParams, SeparableConvParams } from 'tfjs-tiny-yolov2';
import { NetInput, NeuralNetwork, TNetInput } from 'tfjs-image-recognition-base';
import { ConvParams, SeparableConvParams } from 'tfjs-tiny-yolov2';
export type ConvWithBatchNormParams = BatchNormParams & {
filter: tf.Tensor4D
......@@ -17,10 +18,6 @@ export type SeparableConvWithBatchNormParams = {
pointwise: ConvWithBatchNormParams
}
export declare type FCWithBatchNormParams = BatchNormParams & {
weights: tf.Tensor2D
}
export type DenseBlock3Params = {
conv0: SeparableConvParams | ConvParams
conv1: SeparableConvParams
......@@ -31,18 +28,20 @@ export type DenseBlock4Params = DenseBlock3Params & {
conv3: SeparableConvParams
}
export type TinyNetParams = {
export type TinyFaceFeatureExtractorParams = {
dense0: DenseBlock3Params
dense1: DenseBlock3Params
dense2: DenseBlock3Params
fc: FCParams
}
export type NetParams = {
export type FaceFeatureExtractorParams = {
dense0: DenseBlock4Params
dense1: DenseBlock4Params
dense2: DenseBlock4Params
dense3: DenseBlock4Params
fc: FCParams
}
export interface IFaceFeatureExtractor<TNetParams extends TinyFaceFeatureExtractorParams | FaceFeatureExtractorParams> extends NeuralNetwork<TNetParams> {
forwardInput(input: NetInput): tf.Tensor4D
forward(input: TNetInput): Promise<tf.Tensor4D>
}
\ No newline at end of file
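These types capture the point of the refactor: the landmark nets and the new expression net share a feature extractor backbone and differ only in their classifier head. A hedged sketch of the pattern (MyFaceHead, its model name, and the output channel count are illustrative, not part of this PR):

class MyFaceHead extends FaceProcessor<FaceFeatureExtractorParams> {
  constructor() {
    super('MyFaceHead', new FaceFeatureExtractor())
  }
  protected getDefaultModelName(): string {
    return 'my_face_head_model'
  }
  protected getClassifierChannelsIn(): number {
    return 256 // output channels of FaceFeatureExtractor
  }
  protected getClassifierChannelsOut(): number {
    return 2
  }
}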
import * as tf from '@tensorflow/tfjs-core';
import { NetInput, normalize } from 'tfjs-image-recognition-base';
import { ConvParams, SeparableConvParams } from 'tfjs-tiny-yolov2';
import { depthwiseSeparableConv } from './depthwiseSeparableConv';
import { extractParams } from './extractParams';
import { extractParamsFromWeigthMap } from './extractParamsFromWeigthMap';
import { FaceFeatureExtractor } from '../faceFeatureExtractor/FaceFeatureExtractor';
import { FaceFeatureExtractorParams } from '../faceFeatureExtractor/types';
import { FaceLandmark68NetBase } from './FaceLandmark68NetBase';
import { fullyConnectedLayer } from './fullyConnectedLayer';
import { DenseBlock4Params, NetParams } from './types';
function denseBlock(
x: tf.Tensor4D,
denseBlockParams: DenseBlock4Params,
isFirstLayer: boolean = false
): tf.Tensor4D {
return tf.tidy(() => {
const out1 = tf.relu(
isFirstLayer
? tf.add(
tf.conv2d(x, (denseBlockParams.conv0 as ConvParams).filters, [2, 2], 'same'),
denseBlockParams.conv0.bias
)
: depthwiseSeparableConv(x, denseBlockParams.conv0 as SeparableConvParams, [2, 2])
) as tf.Tensor4D
const out2 = depthwiseSeparableConv(out1, denseBlockParams.conv1, [1, 1])
const in3 = tf.relu(tf.add(out1, out2)) as tf.Tensor4D
const out3 = depthwiseSeparableConv(in3, denseBlockParams.conv2, [1, 1])
const in4 = tf.relu(tf.add(out1, tf.add(out2, out3))) as tf.Tensor4D
const out4 = depthwiseSeparableConv(in4, denseBlockParams.conv3, [1, 1])
return tf.relu(tf.add(out1, tf.add(out2, tf.add(out3, out4)))) as tf.Tensor4D
})
}
export class FaceLandmark68Net extends FaceLandmark68NetBase<NetParams> {
export class FaceLandmark68Net extends FaceLandmark68NetBase<FaceFeatureExtractorParams> {
constructor() {
super('FaceLandmark68Net')
constructor(faceFeatureExtractor: FaceFeatureExtractor = new FaceFeatureExtractor()) {
super('FaceLandmark68Net', faceFeatureExtractor)
}
public runNet(input: NetInput): tf.Tensor2D {
const { params } = this
if (!params) {
throw new Error('FaceLandmark68Net - load model before inference')
}
return tf.tidy(() => {
const batchTensor = input.toBatchTensor(112, true)
const meanRgb = [122.782, 117.001, 104.298]
const normalized = normalize(batchTensor, meanRgb).div(tf.scalar(255)) as tf.Tensor4D
let out = denseBlock(normalized, params.dense0, true)
out = denseBlock(out, params.dense1)
out = denseBlock(out, params.dense2)
out = denseBlock(out, params.dense3)
out = tf.avgPool(out, [7, 7], [2, 2], 'valid')
return fullyConnectedLayer(out.as2D(out.shape[0], -1), params.fc)
})
}
protected getDefaultModelName(): string {
return 'face_landmark_68_model'
}
protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap) {
return extractParamsFromWeigthMap(weightMap)
}
protected extractParams(weights: Float32Array) {
return extractParams(weights)
protected getClassifierChannelsIn(): number {
return 256
}
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { IDimensions, isEven, NetInput, NeuralNetwork, Point, TNetInput, toNetInput } from 'tfjs-image-recognition-base';
import { IDimensions, isEven, NetInput, Point, TNetInput, toNetInput } from 'tfjs-image-recognition-base';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { FaceFeatureExtractorParams, TinyFaceFeatureExtractorParams } from '../faceFeatureExtractor/types';
import { FaceProcessor } from '../faceProcessor/FaceProcessor';
export abstract class FaceLandmark68NetBase<NetParams> extends NeuralNetwork<NetParams> {
// TODO: make super.name protected
private __name: string
constructor(_name: string) {
super(_name)
this.__name = _name
}
public abstract runNet(netInput: NetInput): tf.Tensor2D
export abstract class FaceLandmark68NetBase<
TExtractorParams extends FaceFeatureExtractorParams | TinyFaceFeatureExtractorParams
>
extends FaceProcessor<TExtractorParams> {
public postProcess(output: tf.Tensor2D, inputSize: number, originalDimensions: IDimensions[]): tf.Tensor2D {
......@@ -103,4 +98,8 @@ export abstract class FaceLandmark68NetBase<NetParams> extends NeuralNetwork<Net
? landmarksForBatch
: landmarksForBatch[0]
}
protected getClassifierChannelsOut(): number {
return 136
}
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { NetInput, normalize } from 'tfjs-image-recognition-base';
import { ConvParams, SeparableConvParams } from 'tfjs-tiny-yolov2';
import { TinyFaceFeatureExtractorParams } from '../faceFeatureExtractor/types';
import { depthwiseSeparableConv } from './depthwiseSeparableConv';
import { extractParamsTiny } from './extractParamsTiny';
import { TinyFaceFeatureExtractor } from '../faceFeatureExtractor/TinyFaceFeatureExtractor';
import { FaceLandmark68NetBase } from './FaceLandmark68NetBase';
import { fullyConnectedLayer } from './fullyConnectedLayer';
import { extractParamsFromWeigthMapTiny } from './extractParamsFromWeigthMapTiny';
import { DenseBlock3Params, TinyNetParams } from './types';
function denseBlock(
x: tf.Tensor4D,
denseBlockParams: DenseBlock3Params,
isFirstLayer: boolean = false
): tf.Tensor4D {
return tf.tidy(() => {
const out1 = tf.relu(
isFirstLayer
? tf.add(
tf.conv2d(x, (denseBlockParams.conv0 as ConvParams).filters, [2, 2], 'same'),
denseBlockParams.conv0.bias
)
: depthwiseSeparableConv(x, denseBlockParams.conv0 as SeparableConvParams, [2, 2])
) as tf.Tensor4D
const out2 = depthwiseSeparableConv(out1, denseBlockParams.conv1, [1, 1])
const in3 = tf.relu(tf.add(out1, out2)) as tf.Tensor4D
const out3 = depthwiseSeparableConv(in3, denseBlockParams.conv2, [1, 1])
return tf.relu(tf.add(out1, tf.add(out2, out3))) as tf.Tensor4D
})
}
export class FaceLandmark68TinyNet extends FaceLandmark68NetBase<TinyNetParams> {
constructor() {
super('FaceLandmark68TinyNet')
}
public runNet(input: NetInput): tf.Tensor2D {
const { params } = this
if (!params) {
throw new Error('FaceLandmark68TinyNet - load model before inference')
}
return tf.tidy(() => {
const batchTensor = input.toBatchTensor(112, true)
const meanRgb = [122.782, 117.001, 104.298]
const normalized = normalize(batchTensor, meanRgb).div(tf.scalar(255)) as tf.Tensor4D
let out = denseBlock(normalized, params.dense0, true)
out = denseBlock(out, params.dense1)
out = denseBlock(out, params.dense2)
out = tf.avgPool(out, [14, 14], [2, 2], 'valid')
return fullyConnectedLayer(out.as2D(out.shape[0], -1), params.fc)
})
}
export class FaceLandmark68TinyNet extends FaceLandmark68NetBase<TinyFaceFeatureExtractorParams> {
constructor(faceFeatureExtractor: TinyFaceFeatureExtractor = new TinyFaceFeatureExtractor()) {
super('FaceLandmark68TinyNet', faceFeatureExtractor)
}
protected getDefaultModelName(): string {
return 'face_landmark_68_tiny_model'
}
protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap) {
return extractParamsFromWeigthMapTiny(weightMap)
}
protected extractParams(weights: Float32Array) {
return extractParamsTiny(weights)
}
protected getClassifierChannelsIn(): number {
return 128
}
}
\ No newline at end of file
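The tiny variant mirrors the full net with a smaller 128-channel bottleneck and the 'face_landmark_68_tiny_model' weights. A sketch of selecting it through the composable API, where the boolean flag on withFaceLandmarks picks the tiny net:

import * as faceapi from 'face-api.js';

// assumes detector and tiny landmark weights are served from '/models'
async function detectWithTinyLandmarks(img: HTMLImageElement) {
  await faceapi.loadTinyFaceDetectorModel('/models')
  await faceapi.loadFaceLandmarkTinyModel('/models')
  return faceapi
    .detectAllFaces(img, new faceapi.TinyFaceDetectorOptions())
    .withFaceLandmarks(true) // true -> faceLandmark68TinyNet
}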
......@@ -4,9 +4,3 @@ export * from './FaceLandmark68Net';
export * from './FaceLandmark68TinyNet';
export class FaceLandmarkNet extends FaceLandmark68Net {}
\ No newline at end of file
export function createFaceLandmarkNet(weights: Float32Array) {
const net = new FaceLandmarkNet()
net.extractWeights(weights)
return net
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { NetInput, NeuralNetwork } from 'tfjs-image-recognition-base';
import { fullyConnectedLayer } from '../common/fullyConnectedLayer';
import {
FaceFeatureExtractorParams,
IFaceFeatureExtractor,
TinyFaceFeatureExtractorParams,
} from '../faceFeatureExtractor/types';
import { extractParams } from './extractParams';
import { extractParamsFromWeigthMap } from './extractParamsFromWeigthMap';
import { NetParams } from './types';
import { seperateWeightMaps } from './util';
export abstract class FaceProcessor<
TExtractorParams extends FaceFeatureExtractorParams | TinyFaceFeatureExtractorParams
>
extends NeuralNetwork<NetParams> {
protected _faceFeatureExtractor: IFaceFeatureExtractor<TExtractorParams>
constructor(_name: string, faceFeatureExtractor: IFaceFeatureExtractor<TExtractorParams>) {
super(_name)
this._faceFeatureExtractor = faceFeatureExtractor
}
public get faceFeatureExtractor(): IFaceFeatureExtractor<TExtractorParams> {
return this._faceFeatureExtractor
}
protected abstract getDefaultModelName(): string
protected abstract getClassifierChannelsIn(): number
protected abstract getClassifierChannelsOut(): number
public runNet(input: NetInput | tf.Tensor4D): tf.Tensor2D {
const { params } = this
if (!params) {
throw new Error(`${this._name} - load model before inference`)
}
return tf.tidy(() => {
const bottleneckFeatures = input instanceof NetInput
? this.faceFeatureExtractor.forwardInput(input)
: input
return fullyConnectedLayer(bottleneckFeatures.as2D(bottleneckFeatures.shape[0], -1), params.fc)
})
}
public dispose(throwOnRedispose: boolean = true) {
this.faceFeatureExtractor.dispose(throwOnRedispose)
super.dispose(throwOnRedispose)
}
public loadClassifierParams(weights: Float32Array) {
const { params, paramMappings } = this.extractClassifierParams(weights)
this._params = params
this._paramMappings = paramMappings
}
public extractClassifierParams(weights: Float32Array) {
return extractParams(weights, this.getClassifierChannelsIn(), this.getClassifierChannelsOut())
}
protected extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap) {
const { featureExtractorMap, classifierMap } = seperateWeightMaps(weightMap)
this.faceFeatureExtractor.loadFromWeightMap(featureExtractorMap)
return extractParamsFromWeigthMap(classifierMap)
}
protected extractParams(weights: Float32Array) {
const cIn = this.getClassifierChannelsIn()
const cOut = this.getClassifierChannelsOut()
const classifierWeightSize = (cOut * cIn ) + cOut
const featureExtractorWeights = weights.slice(0, weights.length - classifierWeightSize)
const classifierWeights = weights.slice(weights.length - classifierWeightSize)
this.faceFeatureExtractor.extractWeights(featureExtractorWeights)
return this.extractClassifierParams(classifierWeights)
}
}
\ No newline at end of file
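The weight-slicing in extractParams hinges on one formula: the classifier consumes cOut * cIn + cOut floats (FC weight matrix plus bias), and everything before that slice belongs to the feature extractor. Worked through for the landmark head:

// worked example of FaceProcessor.extractParams' arithmetic for FaceLandmark68Net
const cIn = 256  // getClassifierChannelsIn()
const cOut = 136 // getClassifierChannelsOut()
const classifierWeightSize = cOut * cIn + cOut // 136 * 256 + 136 = 34952
// for a flat Float32Array `weights` holding the whole model:
//   featureExtractorWeights = weights.slice(0, weights.length - 34952)
//   classifierWeights = weights.slice(weights.length - 34952)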
import { extractWeightsFactory, ParamMapping } from 'tfjs-image-recognition-base';
import { extractFCParamsFactory } from 'tfjs-tiny-yolov2';
import { NetParams } from './types';
export function extractParams(weights: Float32Array, channelsIn: number, channelsOut: number): { params: NetParams, paramMappings: ParamMapping[] } {
const paramMappings: ParamMapping[] = []
const {
extractWeights,
getRemainingWeights
} = extractWeightsFactory(weights)
const extractFCParams = extractFCParamsFactory(extractWeights, paramMappings)
const fc = extractFCParams(channelsIn, channelsOut, 'fc')
if (getRemainingWeights().length !== 0) {
throw new Error(`weights remaining after extract: ${getRemainingWeights().length}`)
}
return {
paramMappings,
params: { fc }
}
}
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { disposeUnusedWeightTensors, extractWeightEntryFactory, ParamMapping } from 'tfjs-image-recognition-base';
import { FCParams } from 'tfjs-tiny-yolov2';
import { NetParams } from './types';
export function extractParamsFromWeigthMap(
weightMap: tf.NamedTensorMap
): { params: NetParams, paramMappings: ParamMapping[] } {
const paramMappings: ParamMapping[] = []
const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings)
function extractFcParams(prefix: string): FCParams {
const weights = extractWeightEntry<tf.Tensor2D>(`${prefix}/weights`, 2)
const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/bias`, 1)
return { weights, bias }
}
const params = {
fc: extractFcParams('fc')
}
disposeUnusedWeightTensors(weightMap, paramMappings)
return { params, paramMappings }
}
\ No newline at end of file
export * from './FaceProcessor';
\ No newline at end of file
import { FCParams } from 'tfjs-tiny-yolov2';
export type NetParams = {
fc: FCParams
}
import * as tf from '@tensorflow/tfjs-core';
export function seperateWeightMaps(weightMap: tf.NamedTensorMap) {
const featureExtractorMap: tf.NamedTensorMap = {}
const classifierMap: tf.NamedTensorMap = {}
Object.keys(weightMap).forEach(key => {
const map = key.startsWith('fc') ? classifierMap : featureExtractorMap
map[key] = weightMap[key]
})
return { featureExtractorMap, classifierMap }
}
\ No newline at end of file
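The split is purely name-based: any tensor whose key starts with 'fc' lands in the classifier map, the rest in the feature extractor map. A small sketch with hypothetical keys:

import * as tf from '@tensorflow/tfjs-core';

// hypothetical weight map, for illustration only
const weightMap: tf.NamedTensorMap = {
  'dense0/conv0/filters': tf.zeros([3, 3, 3, 32]),
  'fc/weights': tf.zeros([256, 136]),
  'fc/bias': tf.zeros([136])
}
const { featureExtractorMap, classifierMap } = seperateWeightMaps(weightMap)
// featureExtractorMap -> { 'dense0/conv0/filters' }
// classifierMap       -> { 'fc/weights', 'fc/bias' }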
export type WithFaceDescriptor<TSource> = TSource & {
descriptor: Float32Array
}
export function extendWithFaceDescriptor<
TSource
> (
sourceObj: TSource,
descriptor: Float32Array
): WithFaceDescriptor<TSource> {
const extension = { descriptor }
return Object.assign({}, sourceObj, extension)
}
import { FaceDetection } from '../classes/FaceDetection';
export type WithFaceDetection<TSource> = TSource & {
detection: FaceDetection
}
export function extendWithFaceDetection<
TSource
> (
sourceObj: TSource,
detection: FaceDetection
): WithFaceDetection<TSource> {
const extension = { detection }
return Object.assign({}, sourceObj, extension)
}
import { FaceExpressionPrediction } from '../faceExpressionNet/types';
export type WithFaceExpressions<TSource> = TSource & {
expressions: FaceExpressionPrediction[]
}
export function extendWithFaceExpressions<
TSource
> (
sourceObj: TSource,
expressions: FaceExpressionPrediction[]
): WithFaceExpressions<TSource> {
const extension = { expressions }
return Object.assign({}, sourceObj, extension)
}
\ No newline at end of file
import { FaceDetection } from '../classes/FaceDetection';
import { FaceLandmarks } from '../classes/FaceLandmarks';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { WithFaceDetection } from './WithFaceDetection';
export type WithFaceLandmarks<
TSource extends WithFaceDetection<{}>,
TFaceLandmarks extends FaceLandmarks = FaceLandmarks68
> = TSource & {
landmarks: TFaceLandmarks
unshiftedLandmarks: TFaceLandmarks
alignedRect: FaceDetection
}
export function extendWithFaceLandmarks<
TSource extends WithFaceDetection<{}>,
TFaceLandmarks extends FaceLandmarks = FaceLandmarks68
> (
sourceObj: TSource,
unshiftedLandmarks: TFaceLandmarks
): WithFaceLandmarks<TSource, TFaceLandmarks> {
const { box: shift } = sourceObj.detection
const landmarks = unshiftedLandmarks.shiftBy<TFaceLandmarks>(shift.x, shift.y)
const rect = landmarks.align()
const { imageDims } = sourceObj.detection
const alignedRect = new FaceDetection(sourceObj.detection.score, rect.rescale(imageDims.reverse()), imageDims)
const extension = {
landmarks,
unshiftedLandmarks,
alignedRect
}
return Object.assign({}, sourceObj, extension)
}
\ No newline at end of file
export * from './WithFaceDescriptor'
export * from './WithFaceDetection'
export * from './WithFaceExpressions'
export * from './WithFaceLandmarks'
\ No newline at end of file
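Taken together, these factories are what make the task results composable: each step merges exactly one typed property onto its source object, so result types are plain intersections. A sketch of stacking them by hand (normally the ComposableTask chain does this):

// assumes `detection`, `expressions` and `unshiftedLandmarks` were produced elsewhere
const withDet = extendWithFaceDetection({}, detection)
const withExpr = extendWithFaceExpressions(withDet, expressions)
const full = extendWithFaceLandmarks(withExpr, unshiftedLandmarks)
// full: WithFaceLandmarks<WithFaceExpressions<WithFaceDetection<{}>>>
// i.e. { detection, expressions, landmarks, unshiftedLandmarks, alignedRect }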
import * as tf from '@tensorflow/tfjs-core';
import { TNetInput } from 'tfjs-image-recognition-base';
import { FaceDetectionWithLandmarks } from '../classes/FaceDetectionWithLandmarks';
import { FullFaceDescription } from '../classes/FullFaceDescription';
import { extractFaces, extractFaceTensors } from '../dom';
import { extendWithFaceDescriptor, WithFaceDescriptor } from '../factories/WithFaceDescriptor';
import { WithFaceDetection } from '../factories/WithFaceDetection';
import { WithFaceLandmarks } from '../factories/WithFaceLandmarks';
import { ComposableTask } from './ComposableTask';
import { nets } from './nets';
export class ComputeFaceDescriptorsTaskBase<TReturn, DetectFaceLandmarksReturnType> extends ComposableTask<TReturn> {
export class ComputeFaceDescriptorsTaskBase<TReturn, TParentReturn> extends ComposableTask<TReturn> {
constructor(
protected detectFaceLandmarksTask: ComposableTask<DetectFaceLandmarksReturnType> | Promise<DetectFaceLandmarksReturnType>,
protected parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>,
protected input: TNetInput
) {
super()
}
}
export class ComputeAllFaceDescriptorsTask extends ComputeFaceDescriptorsTaskBase<FullFaceDescription[], FaceDetectionWithLandmarks[]> {
export class ComputeAllFaceDescriptorsTask<
TSource extends WithFaceLandmarks<WithFaceDetection<{}>>
> extends ComputeFaceDescriptorsTaskBase<WithFaceDescriptor<TSource>[], TSource[]> {
public async run(): Promise<FullFaceDescription[]> {
public async run(): Promise<WithFaceDescriptor<TSource>[]> {
const facesWithLandmarks = await this.detectFaceLandmarksTask
const parentResults = await this.parentTask
const alignedRects = facesWithLandmarks.map(({ alignedRect }) => alignedRect)
const alignedRects = parentResults.map(({ alignedRect }) => alignedRect)
const alignedFaces: Array<HTMLCanvasElement | tf.Tensor3D> = this.input instanceof tf.Tensor
? await extractFaceTensors(this.input, alignedRects)
: await extractFaces(this.input, alignedRects)
const fullFaceDescriptions = await Promise.all(facesWithLandmarks.map(async ({ detection, landmarks }, i) => {
const results = await Promise.all(parentResults.map(async (parentResult, i) => {
const descriptor = await nets.faceRecognitionNet.computeFaceDescriptor(alignedFaces[i]) as Float32Array
return new FullFaceDescription(detection, landmarks, descriptor)
return extendWithFaceDescriptor<TSource>(parentResult, descriptor)
}))
alignedFaces.forEach(f => f instanceof tf.Tensor && f.dispose())
return fullFaceDescriptions
return results
}
}
export class ComputeSingleFaceDescriptorTask extends ComputeFaceDescriptorsTaskBase<FullFaceDescription | undefined, FaceDetectionWithLandmarks | undefined> {
export class ComputeSingleFaceDescriptorTask<
TSource extends WithFaceLandmarks<WithFaceDetection<{}>>
> extends ComputeFaceDescriptorsTaskBase<WithFaceDescriptor<TSource> | undefined, TSource | undefined> {
public async run(): Promise<FullFaceDescription | undefined> {
public async run(): Promise<WithFaceDescriptor<TSource> | undefined> {
const detectionWithLandmarks = await this.detectFaceLandmarksTask
if (!detectionWithLandmarks) {
const parentResult = await this.parentTask
if (!parentResult) {
return
}
const { detection, landmarks, alignedRect } = detectionWithLandmarks
const { alignedRect } = parentResult
const alignedFaces: Array<HTMLCanvasElement | tf.Tensor3D> = this.input instanceof tf.Tensor
? await extractFaceTensors(this.input, [alignedRect])
: await extractFaces(this.input, [alignedRect])
......@@ -55,6 +60,6 @@ export class ComputeSingleFaceDescriptorTask extends ComputeFaceDescriptorsTaskB
alignedFaces.forEach(f => f instanceof tf.Tensor && f.dispose())
return new FullFaceDescription(detection, landmarks, descriptor)
return extendWithFaceDescriptor(parentResult, descriptor)
}
}
\ No newline at end of file
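Because the generics thread TSource through, descriptor computation now preserves whatever the parent task attached instead of collapsing everything to a fixed FullFaceDescription. Typical all-faces usage, assuming detector, landmark and recognition models are already loaded:

const results = await faceapi
  .detectAllFaces(img)
  .withFaceLandmarks()
  .withFaceDescriptors()
// each entry: WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>
results.forEach(({ detection, descriptor }) => {
  console.log(detection.score, descriptor.length) // descriptor: 128-value Float32Array
})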
import * as tf from '@tensorflow/tfjs-core';
import { TNetInput } from 'tfjs-image-recognition-base';
import { FaceDetection } from '../classes/FaceDetection';
import { FaceDetectionWithLandmarks } from '../classes/FaceDetectionWithLandmarks';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { extractFaces, extractFaceTensors } from '../dom';
import { FaceLandmark68Net } from '../faceLandmarkNet/FaceLandmark68Net';
import { FaceLandmark68TinyNet } from '../faceLandmarkNet/FaceLandmark68TinyNet';
import { WithFaceDetection } from '../factories/WithFaceDetection';
import { extendWithFaceLandmarks, WithFaceLandmarks } from '../factories/WithFaceLandmarks';
import { ComposableTask } from './ComposableTask';
import { ComputeAllFaceDescriptorsTask, ComputeSingleFaceDescriptorTask } from './ComputeFaceDescriptorsTasks';
import { nets } from './nets';
import { PredictAllFaceExpressionsTask, PredictSingleFaceExpressionTask } from './PredictFaceExpressionsTask';
export class DetectFaceLandmarksTaskBase<ReturnType, DetectFacesReturnType> extends ComposableTask<ReturnType> {
export class DetectFaceLandmarksTaskBase<TReturn, TParentReturn> extends ComposableTask<TReturn> {
constructor(
protected detectFacesTask: ComposableTask<DetectFacesReturnType> | Promise<DetectFacesReturnType>,
protected parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>,
protected input: TNetInput,
protected useTinyLandmarkNet: boolean
) {
......@@ -27,11 +28,14 @@ export class DetectFaceLandmarksTaskBase<ReturnType, DetectFacesReturnType> exte
}
}
export class DetectAllFaceLandmarksTask extends DetectFaceLandmarksTaskBase<FaceDetectionWithLandmarks[], FaceDetection[]> {
export class DetectAllFaceLandmarksTask<
TSource extends WithFaceDetection<{}>
> extends DetectFaceLandmarksTaskBase<WithFaceLandmarks<TSource>[], TSource[]> {
public async run(): Promise<FaceDetectionWithLandmarks[]> {
public async run(): Promise<WithFaceLandmarks<TSource>[]> {
const detections = await this.detectFacesTask
const parentResults = await this.parentTask
const detections = parentResults.map(res => res.detection)
const faces: Array<HTMLCanvasElement | tf.Tensor3D> = this.input instanceof tf.Tensor
? await extractFaceTensors(this.input, detections)
......@@ -43,41 +47,40 @@ export class DetectAllFaceLandmarksTask extends DetectFaceLandmarksTaskBase<Face
faces.forEach(f => f instanceof tf.Tensor && f.dispose())
return detections.map((detection, i) =>
new FaceDetectionWithLandmarks(detection, faceLandmarksByFace[i])
return parentResults.map((parentResult, i) =>
extendWithFaceLandmarks<TSource>(parentResult, faceLandmarksByFace[i])
)
}
withFaceDescriptors(): ComputeAllFaceDescriptorsTask {
return new ComputeAllFaceDescriptorsTask(this, this.input)
withFaceDescriptors(): ComputeAllFaceDescriptorsTask<WithFaceLandmarks<TSource>> {
return new ComputeAllFaceDescriptorsTask<WithFaceLandmarks<TSource>>(this, this.input)
}
}
export class DetectSingleFaceLandmarksTask extends DetectFaceLandmarksTaskBase<FaceDetectionWithLandmarks | undefined, FaceDetection | undefined> {
export class DetectSingleFaceLandmarksTask<
TSource extends WithFaceDetection<{}>
> extends DetectFaceLandmarksTaskBase<WithFaceLandmarks<TSource> | undefined, TSource | undefined> {
public async run(): Promise<FaceDetectionWithLandmarks | undefined> {
public async run(): Promise<WithFaceLandmarks<TSource> | undefined> {
const detection = await this.detectFacesTask
if (!detection) {
const parentResult = await this.parentTask
if (!parentResult) {
return
}
const { detection } = parentResult
const faces: Array<HTMLCanvasElement | tf.Tensor3D> = this.input instanceof tf.Tensor
? await extractFaceTensors(this.input, [detection])
: await extractFaces(this.input, [detection])
const landmarks = await this.landmarkNet.detectLandmarks(faces[0]) as FaceLandmarks68
faces.forEach(f => f instanceof tf.Tensor && f.dispose())
return new FaceDetectionWithLandmarks(
detection,
landmarks
)
return extendWithFaceLandmarks<TSource>(parentResult, landmarks)
}
withFaceDescriptor(): ComputeSingleFaceDescriptorTask {
return new ComputeSingleFaceDescriptorTask(this, this.input)
withFaceDescriptor(): ComputeSingleFaceDescriptorTask<WithFaceLandmarks<TSource>> {
return new ComputeSingleFaceDescriptorTask<WithFaceLandmarks<TSource>>(this, this.input)
}
}
\ No newline at end of file
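The single-face variants resolve to undefined when the parent task found nothing, so downstream code has to guard before destructuring:

const result = await faceapi
  .detectSingleFace(img)
  .withFaceLandmarks()
  .withFaceDescriptor()
if (result) {
  const { detection, landmarks, descriptor } = result
  console.log(detection.box, landmarks.positions.length) // 68 points
}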
......@@ -2,6 +2,7 @@ import { TNetInput } from 'tfjs-image-recognition-base';
import { TinyYolov2Options } from 'tfjs-tiny-yolov2';
import { FaceDetection } from '../classes/FaceDetection';
import { extendWithFaceDetection, WithFaceDetection } from '../factories/WithFaceDetection';
import { MtcnnOptions } from '../mtcnn/MtcnnOptions';
import { SsdMobilenetv1Options } from '../ssdMobilenetv1/SsdMobilenetv1Options';
import { TinyFaceDetectorOptions } from '../tinyFaceDetector/TinyFaceDetectorOptions';
......@@ -9,6 +10,7 @@ import { ComposableTask } from './ComposableTask';
import { DetectAllFaceLandmarksTask, DetectSingleFaceLandmarksTask } from './DetectFaceLandmarksTasks';
import { nets } from './nets';
import { FaceDetectionOptions } from './types';
import { PredictAllFaceExpressionsTask, PredictSingleFaceExpressionTask } from './PredictFaceExpressionsTask';
export class DetectFacesTaskBase<TReturn> extends ComposableTask<TReturn> {
constructor(
......@@ -27,7 +29,7 @@ export class DetectAllFacesTask extends DetectFacesTaskBase<FaceDetection[]> {
if (options instanceof MtcnnOptions) {
return (await nets.mtcnn.forward(input, options))
.map(result => result.faceDetection)
.map(result => result.detection)
}
const faceDetectionFunction = options instanceof TinyFaceDetectorOptions
......@@ -49,10 +51,27 @@ export class DetectAllFacesTask extends DetectFacesTaskBase<FaceDetection[]> {
return faceDetectionFunction(input)
}
withFaceLandmarks(useTinyLandmarkNet: boolean = false): DetectAllFaceLandmarksTask {
return new DetectAllFaceLandmarksTask(this, this.input, useTinyLandmarkNet)
private runAndExtendWithFaceDetections(): Promise<WithFaceDetection<{}>[]> {
return new Promise<WithFaceDetection<{}>[]>(async res => {
const detections = await this.run()
return res(detections.map(detection => extendWithFaceDetection({}, detection)))
})
}
withFaceLandmarks(useTinyLandmarkNet: boolean = false): DetectAllFaceLandmarksTask<WithFaceDetection<{}>> {
return new DetectAllFaceLandmarksTask<WithFaceDetection<{}>>(
this.runAndExtendWithFaceDetections(),
this.input,
useTinyLandmarkNet
)
}
withFaceExpressions(): PredictAllFaceExpressionsTask<WithFaceDetection<{}>> {
return new PredictAllFaceExpressionsTask<WithFaceDetection<{}>>(
this.runAndExtendWithFaceDetections(),
this.input
)
}
}
export class DetectSingleFaceTask extends DetectFacesTaskBase<FaceDetection | undefined> {
......@@ -68,8 +87,26 @@ export class DetectSingleFaceTask extends DetectFacesTaskBase<FaceDetection | un
return faceDetectionWithHighestScore;
}
withFaceLandmarks(useTinyLandmarkNet: boolean = false): DetectSingleFaceLandmarksTask {
return new DetectSingleFaceLandmarksTask(this, this.input, useTinyLandmarkNet)
private runAndExtendWithFaceDetection(): Promise<WithFaceDetection<{}>> {
return new Promise<WithFaceDetection<{}>>(async res => {
const detection = await this.run()
return res(detection ? extendWithFaceDetection<{}>({}, detection) : undefined)
})
}
withFaceLandmarks(useTinyLandmarkNet: boolean = false): DetectSingleFaceLandmarksTask<WithFaceDetection<{}>> {
return new DetectSingleFaceLandmarksTask<WithFaceDetection<{}>>(
this.runAndExtendWithFaceDetection(),
this.input,
useTinyLandmarkNet
)
}
withFaceExpressions(): PredictSingleFaceExpressionTask<WithFaceDetection<{}>> {
return new PredictSingleFaceExpressionTask<WithFaceDetection<{}>>(
this.runAndExtendWithFaceDetection(),
this.input
)
}
}
\ No newline at end of file
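runAndExtendWithFaceDetection(s) is the glue here: it lifts raw FaceDetection results into WithFaceDetection<{}> objects, which is what lets withFaceExpressions() branch off a detection task just like withFaceLandmarks(). A sketch of the new expressions-only chain:

// expressions straight after detection, no landmarks needed
const detectionsWithExpressions = await faceapi
  .detectAllFaces(img, new faceapi.SsdMobilenetv1Options({ minConfidence: 0.8 }))
  .withFaceExpressions()
// each entry: WithFaceExpressions<WithFaceDetection<{}>>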
import { FaceMatch } from '../classes/FaceMatch';
import { FullFaceDescription } from '../classes/FullFaceDescription';
import { LabeledFaceDescriptors } from '../classes/LabeledFaceDescriptors';
import { euclideanDistance } from '../euclideanDistance';
import { WithFaceDescriptor } from '../factories';
export class FaceMatcher {
......@@ -9,7 +9,7 @@ export class FaceMatcher {
private _distanceThreshold: number
constructor(
inputs: LabeledFaceDescriptors | FullFaceDescription | Float32Array | Array<LabeledFaceDescriptors | FullFaceDescription | Float32Array>,
inputs: LabeledFaceDescriptors | WithFaceDescriptor<any> | Float32Array | Array<LabeledFaceDescriptors | WithFaceDescriptor<any> | Float32Array>,
distanceThreshold: number = 0.6
) {
......@@ -29,15 +29,15 @@ export class FaceMatcher {
return desc
}
if (desc instanceof FullFaceDescription) {
return new LabeledFaceDescriptors(createUniqueLabel(), [desc.descriptor])
}
if (desc instanceof Float32Array) {
return new LabeledFaceDescriptors(createUniqueLabel(), [desc])
}
throw new Error(`FaceRecognizer.constructor - expected inputs to be of type LabeledFaceDescriptors | FullFaceDescription | Float32Array | Array<LabeledFaceDescriptors | FullFaceDescription | Float32Array>`)
if (desc.descriptor && desc.descriptor instanceof Float32Array) {
return new LabeledFaceDescriptors(createUniqueLabel(), [desc.descriptor])
}
throw new Error(`FaceRecognizer.constructor - expected inputs to be of type LabeledFaceDescriptors | WithFaceDescriptor<any> | Float32Array | Array<LabeledFaceDescriptors | WithFaceDescriptor<any> | Float32Array>`)
})
}
......
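After this change FaceMatcher duck-types its inputs: anything carrying a Float32Array descriptor property is accepted, so the generic task results can be fed in directly. A usage sketch, assuming a reference descriptor computed earlier:

const labeled = new faceapi.LabeledFaceDescriptors('sheldon', [referenceDescriptor])
const matcher = new faceapi.FaceMatcher([labeled], 0.6) // 0.6 = max match distance
const query = await faceapi.detectSingleFace(img).withFaceLandmarks().withFaceDescriptor()
if (query) {
  console.log(matcher.findBestMatch(query.descriptor).toString()) // e.g. 'sheldon (0.41)'
}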
import { TNetInput } from 'tfjs-image-recognition-base';
import { tf } from 'tfjs-tiny-yolov2';
import { extractFaces, extractFaceTensors } from '../dom';
import { FaceExpressionPrediction } from '../faceExpressionNet/types';
import { WithFaceDetection } from '../factories/WithFaceDetection';
import { extendWithFaceExpressions, WithFaceExpressions } from '../factories/WithFaceExpressions';
import { ComposableTask } from './ComposableTask';
import { DetectAllFaceLandmarksTask, DetectSingleFaceLandmarksTask } from './DetectFaceLandmarksTasks';
import { nets } from './nets';
export class PredictFaceExpressionsTaskBase<TReturn, TParentReturn> extends ComposableTask<TReturn> {
constructor(
protected parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>,
protected input: TNetInput
) {
super()
}
}
export class PredictAllFaceExpressionsTask<
TSource extends WithFaceDetection<{}>
> extends PredictFaceExpressionsTaskBase<WithFaceExpressions<TSource>[], TSource[]> {
public async run(): Promise<WithFaceExpressions<TSource>[]> {
const parentResults = await this.parentTask
const detections = parentResults.map(parentResult => parentResult.detection)
const faces: Array<HTMLCanvasElement | tf.Tensor3D> = this.input instanceof tf.Tensor
? await extractFaceTensors(this.input, detections)
: await extractFaces(this.input, detections)
const faceExpressionsByFace = await Promise.all(faces.map(
face => nets.faceExpressionNet.predictExpressions(face)
)) as FaceExpressionPrediction[][]
faces.forEach(f => f instanceof tf.Tensor && f.dispose())
return parentResults.map(
(parentResult, i) => extendWithFaceExpressions<TSource>(parentResult, faceExpressionsByFace[i])
)
}
withFaceLandmarks(): DetectAllFaceLandmarksTask<WithFaceExpressions<TSource>> {
return new DetectAllFaceLandmarksTask(this, this.input, false)
}
}
export class PredictSingleFaceExpressionTask<
TSource extends WithFaceDetection<{}>
> extends PredictFaceExpressionsTaskBase<WithFaceExpressions<TSource> | undefined, TSource | undefined> {
public async run(): Promise<WithFaceExpressions<TSource> | undefined> {
const parentResult = await this.parentTask
if (!parentResult) {
return
}
const { detection } = parentResult
const faces: Array<HTMLCanvasElement | tf.Tensor3D> = this.input instanceof tf.Tensor
? await extractFaceTensors(this.input, [detection])
: await extractFaces(this.input, [detection])
const faceExpressions = await nets.faceExpressionNet.predictExpressions(faces[0]) as FaceExpressionPrediction[]
faces.forEach(f => f instanceof tf.Tensor && f.dispose())
return extendWithFaceExpressions(parentResult, faceExpressions)
}
withFaceLandmarks(): DetectSingleFaceLandmarksTask<WithFaceExpressions<TSource>> {
return new DetectSingleFaceLandmarksTask(this, this.input, false)
}
}
\ No newline at end of file
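predictExpressions returns one FaceExpressionPrediction per class (seven in total), so consumers usually reduce the array to the top-scoring expression above some threshold. A helper sketch (topExpression and minProbability are illustrative names):

import { FaceExpressionPrediction } from '../faceExpressionNet/types';

function topExpression(
  expressions: FaceExpressionPrediction[],
  minProbability: number = 0.5
): FaceExpressionPrediction | undefined {
  const best = expressions.slice().sort((a, b) => b.probability - a.probability)[0]
  return best && best.probability >= minProbability ? best : undefined
}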
import { TNetInput } from 'tfjs-image-recognition-base';
import { ITinyYolov2Options, TinyYolov2Options } from 'tfjs-tiny-yolov2';
import { FullFaceDescription } from '../classes';
import { WithFaceDescriptor, WithFaceDetection, WithFaceLandmarks } from '../factories';
import { IMtcnnOptions, MtcnnOptions } from '../mtcnn/MtcnnOptions';
import { SsdMobilenetv1Options } from '../ssdMobilenetv1';
import { detectAllFaces } from './detectFaces';
......@@ -11,7 +11,7 @@ import { detectAllFaces } from './detectFaces';
export async function allFacesSsdMobilenetv1(
input: TNetInput,
minConfidence?: number
): Promise<FullFaceDescription[]> {
): Promise<WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>[]> {
return await detectAllFaces(input, new SsdMobilenetv1Options(minConfidence ? { minConfidence } : {}))
.withFaceLandmarks()
.withFaceDescriptors()
......@@ -20,7 +20,7 @@ export async function allFacesSsdMobilenetv1(
export async function allFacesTinyYolov2(
input: TNetInput,
forwardParams: ITinyYolov2Options = {}
): Promise<FullFaceDescription[]> {
): Promise<WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>[]> {
return await detectAllFaces(input, new TinyYolov2Options(forwardParams))
.withFaceLandmarks()
.withFaceDescriptors()
......@@ -29,7 +29,7 @@ export async function allFacesTinyYolov2(
export async function allFacesMtcnn(
input: TNetInput,
forwardParams: IMtcnnOptions = {}
): Promise<FullFaceDescription[]> {
): Promise<WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>[]> {
return await detectAllFaces(input, new MtcnnOptions(forwardParams))
.withFaceLandmarks()
.withFaceDescriptors()
......
......@@ -2,12 +2,15 @@ import { TNetInput } from 'tfjs-image-recognition-base';
import { ITinyYolov2Options } from 'tfjs-tiny-yolov2';
import { FaceDetection } from '../classes/FaceDetection';
import { FaceDetectionWithLandmarks } from '../classes/FaceDetectionWithLandmarks';
import { FaceLandmarks5 } from '../classes/FaceLandmarks5';
import { FaceLandmarks68 } from '../classes/FaceLandmarks68';
import { FaceExpressionNet } from '../faceExpressionNet/FaceExpressionNet';
import { FaceExpressionPrediction } from '../faceExpressionNet/types';
import { FaceLandmark68Net } from '../faceLandmarkNet/FaceLandmark68Net';
import { FaceLandmark68TinyNet } from '../faceLandmarkNet/FaceLandmark68TinyNet';
import { FaceRecognitionNet } from '../faceRecognitionNet/FaceRecognitionNet';
import { WithFaceDetection } from '../factories/WithFaceDetection';
import { WithFaceLandmarks } from '../factories/WithFaceLandmarks';
import { Mtcnn } from '../mtcnn/Mtcnn';
import { MtcnnOptions } from '../mtcnn/MtcnnOptions';
import { SsdMobilenetv1 } from '../ssdMobilenetv1/SsdMobilenetv1';
......@@ -23,7 +26,8 @@ export const nets = {
mtcnn: new Mtcnn(),
faceLandmark68Net: new FaceLandmark68Net(),
faceLandmark68TinyNet: new FaceLandmark68TinyNet(),
faceRecognitionNet: new FaceRecognitionNet()
faceRecognitionNet: new FaceRecognitionNet(),
faceExpressionNet: new FaceExpressionNet()
}
/**
......@@ -64,7 +68,7 @@ export const tinyYolov2 = (input: TNetInput, options: ITinyYolov2Options): Promi
* @param options (optional, default: see MtcnnOptions constructor for default parameters).
* @returns Bounding box of each face with score and 5 point face landmarks.
*/
export const mtcnn = (input: TNetInput, options: MtcnnOptions): Promise<FaceDetectionWithLandmarks<FaceLandmarks5>[]> =>
export const mtcnn = (input: TNetInput, options: MtcnnOptions): Promise<WithFaceLandmarks<WithFaceDetection<{}>, FaceLandmarks5>[]> =>
nets.mtcnn.forward(input, options)
/**
......@@ -102,6 +106,18 @@ export const detectFaceLandmarksTiny = (input: TNetInput): Promise<FaceLandmarks
export const computeFaceDescriptor = (input: TNetInput): Promise<Float32Array | Float32Array[]> =>
nets.faceRecognitionNet.computeFaceDescriptor(input)
/**
* Recognizes the facial expressions of a face and returns the likelihood of
* each facial expression.
*
* @param input The face image extracted from the bounding box of a face. Can
* also be an array of input images, which will be batch processed.
* @returns An array of facial expressions with corresponding probabilities or array thereof in case of batch input.
*/
export const recognizeFaceExpressions = (input: TNetInput): Promise<FaceExpressionPrediction[] | FaceExpressionPrediction[][]> =>
nets.faceExpressionNet.predictExpressions(input)
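A usage sketch for the new export, assuming an already-cropped face image and model weights under '/models' (the path is an assumption):

await faceapi.loadFaceExpressionModel('/models')
const predictions = await faceapi.recognizeFaceExpressions(croppedFaceImg) as FaceExpressionPrediction[]
predictions.forEach(({ expression, probability }) =>
  console.log(`${expression}: ${probability.toFixed(2)}`)
)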
export const loadSsdMobilenetv1Model = (url: string) => nets.ssdMobilenetv1.load(url)
export const loadTinyFaceDetectorModel = (url: string) => nets.tinyFaceDetector.load(url)
export const loadMtcnnModel = (url: string) => nets.mtcnn.load(url)
......@@ -109,6 +125,7 @@ export const loadTinyYolov2Model = (url: string) => nets.tinyYolov2.load(url)
export const loadFaceLandmarkModel = (url: string) => nets.faceLandmark68Net.load(url)
export const loadFaceLandmarkTinyModel = (url: string) => nets.faceLandmark68TinyNet.load(url)
export const loadFaceRecognitionModel = (url: string) => nets.faceRecognitionNet.load(url)
export const loadFaceExpressionModel = (url: string) => nets.faceExpressionNet.load(url)
// backward compatibility
export const loadFaceDetectionModel = loadSsdMobilenetv1Model
......
......@@ -8,8 +8,10 @@ export * from 'tfjs-image-recognition-base';
export * from './classes/index';
export * from './dom/index'
export * from './faceExpressionNet/index';
export * from './faceLandmarkNet/index';
export * from './faceRecognitionNet/index';
export * from './factories/index';
export * from './globalApi/index';
export * from './mtcnn/index';
export * from './ssdMobilenetv1/index';
......@@ -17,3 +19,4 @@ export * from './tinyFaceDetector/index';
export * from './tinyYolov2/index';
export * from './euclideanDistance';
export * from './resizeResults';
\ No newline at end of file
......@@ -2,8 +2,8 @@ import * as tf from '@tensorflow/tfjs-core';
import { NetInput, NeuralNetwork, Point, Rect, TNetInput, toNetInput } from 'tfjs-image-recognition-base';
import { FaceDetection } from '../classes/FaceDetection';
import { FaceDetectionWithLandmarks } from '../classes/FaceDetectionWithLandmarks';
import { FaceLandmarks5 } from '../classes/FaceLandmarks5';
import { extendWithFaceDetection, extendWithFaceLandmarks } from '../factories';
import { bgrToRgbTensor } from './bgrToRgbTensor';
import { CELL_SIZE } from './config';
import { extractParams } from './extractParams';
......@@ -14,7 +14,7 @@ import { pyramidDown } from './pyramidDown';
import { stage1 } from './stage1';
import { stage2 } from './stage2';
import { stage3 } from './stage3';
import { NetParams } from './types';
import { MtcnnResult, NetParams } from './types';
export class Mtcnn extends NeuralNetwork<NetParams> {
......@@ -25,7 +25,7 @@ export class Mtcnn extends NeuralNetwork<NetParams> {
public async forwardInput(
input: NetInput,
forwardParams: IMtcnnOptions = {}
): Promise<{ results: FaceDetectionWithLandmarks<FaceLandmarks5>[], stats: any }> {
): Promise<{ results: MtcnnResult[], stats: any }> {
const { params } = this
......@@ -101,7 +101,9 @@ export class Mtcnn extends NeuralNetwork<NetParams> {
const out3 = await stage3(inputCanvas, out2.boxes, scoreThresholds[2], params.onet, stats)
stats.total_stage3 = Date.now() - ts
const results = out3.boxes.map((box, idx) => new FaceDetectionWithLandmarks<FaceLandmarks5>(
const results = out3.boxes.map((box, idx) => extendWithFaceLandmarks(
extendWithFaceDetection<{}>(
{},
new FaceDetection(
out3.scores[idx],
new Rect(
......@@ -114,6 +116,7 @@ export class Mtcnn extends NeuralNetwork<NetParams> {
height,
width
}
)
),
new FaceLandmarks5(
out3.points[idx].map(pt => pt.sub(new Point(box.left, box.top)).div(new Point(box.width, box.height))),
......@@ -127,7 +130,7 @@ export class Mtcnn extends NeuralNetwork<NetParams> {
public async forward(
input: TNetInput,
forwardParams: IMtcnnOptions = {}
): Promise<FaceDetectionWithLandmarks<FaceLandmarks5>[]> {
): Promise<MtcnnResult[]> {
return (
await this.forwardInput(
await toNetInput(input),
......@@ -139,7 +142,7 @@ export class Mtcnn extends NeuralNetwork<NetParams> {
public async forwardWithStats(
input: TNetInput,
forwardParams: IMtcnnOptions = {}
): Promise<{ results: FaceDetectionWithLandmarks<FaceLandmarks5>[], stats: any }> {
): Promise<{ results: MtcnnResult[], stats: any }> {
return this.forwardInput(
await toNetInput(input),
forwardParams
......
import * as tf from '@tensorflow/tfjs-core';
import { convLayer } from 'tfjs-tiny-yolov2';
import { fullyConnectedLayer } from '../faceLandmarkNet/fullyConnectedLayer';
import { fullyConnectedLayer } from '../common/fullyConnectedLayer';
import { prelu } from './prelu';
import { sharedLayer } from './sharedLayers';
import { ONetParams } from './types';
......
import * as tf from '@tensorflow/tfjs-core';
import { fullyConnectedLayer } from '../faceLandmarkNet/fullyConnectedLayer';
import { fullyConnectedLayer } from '../common/fullyConnectedLayer';
import { prelu } from './prelu';
import { sharedLayer } from './sharedLayers';
import { RNetParams } from './types';
......
import * as tf from '@tensorflow/tfjs-core';
import { ConvParams, FCParams } from 'tfjs-tiny-yolov2';
import { FaceDetection } from '../classes/FaceDetection';
import { FaceLandmarks5 } from '../classes/FaceLandmarks5';
import { WithFaceDetection, WithFaceLandmarks } from '../factories';
export type SharedParams = {
conv1: ConvParams
......@@ -40,3 +40,5 @@ export type NetParams = {
rnet: RNetParams
onet: ONetParams
}
export type MtcnnResult = WithFaceLandmarks<WithFaceDetection<{}>, FaceLandmarks5>
import { IDimensions } from 'tfjs-image-recognition-base';
import { FaceDetection } from './classes/FaceDetection';
import { FaceLandmarks } from './classes/FaceLandmarks';
import { extendWithFaceDetection } from './factories/WithFaceDetection';
import { extendWithFaceLandmarks } from './factories/WithFaceLandmarks';
export function resizeResults<T>(results: T, { width, height }: IDimensions): T {
if (Array.isArray(results)) {
return results.map(obj => resizeResults(obj, { width, height })) as any as T
}
const hasLandmarks = results['unshiftedLandmarks'] && results['unshiftedLandmarks'] instanceof FaceLandmarks
const hasDetection = results['detection'] && results['detection'] instanceof FaceDetection
if (hasLandmarks) {
const resizedDetection = results['detection'].forSize(width, height)
const resizedLandmarks = results['unshiftedLandmarks'].forSize(resizedDetection.box.width, resizedDetection.box.height)
return extendWithFaceLandmarks(extendWithFaceDetection(results as any, resizedDetection), resizedLandmarks)
}
if (hasDetection) {
return extendWithFaceDetection(results as any, results['detection'].forSize(width, height))
}
if (results instanceof FaceLandmarks || results instanceof FaceDetection) {
return (results as any).forSize(width, height)
}
return results
}
\ No newline at end of file
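resizeResults replaces the old per-class forSize calls: it recurses into arrays, re-extends WithFaceDetection/WithFaceLandmarks objects, and falls back to forSize for bare detections and landmarks. Typical overlay usage when the displayed canvas is smaller than the source image:

// scale results computed on the original image to the displayed canvas size
const displaySize = { width: canvas.width, height: canvas.height }
const resized = faceapi.resizeResults(results, displaySize)
faceapi.drawDetection(canvas, resized.map(res => res.detection), { withScore: false })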
import { FaceDetectionWithLandmarks } from '../src/classes/FaceDetectionWithLandmarks';
import { FaceLandmarks } from '../src/classes/FaceLandmarks';
import { FaceLandmarks68 } from '../src/classes/FaceLandmarks68';
import { ExpectedFaceDetectionWithLandmarks, expectPointClose, expectRectClose, sortByFaceDetection } from './utils';
import { WithFaceDetection } from '../src/factories/WithFaceDetection';
import { WithFaceLandmarks } from '../src/factories/WithFaceLandmarks';
import { ExpectedFaceDetectionWithLandmarks, expectPointsClose, expectRectClose, sortByFaceDetection } from './utils';
export type BoxAndLandmarksDeltas = {
maxScoreDelta: number
......@@ -10,7 +11,7 @@ export type BoxAndLandmarksDeltas = {
}
export function expectFaceDetectionsWithLandmarks<TFaceLandmarks extends FaceLandmarks = FaceLandmarks68>(
results: FaceDetectionWithLandmarks<TFaceLandmarks>[],
results: WithFaceLandmarks<WithFaceDetection<{}>, TFaceLandmarks>[],
allExpectedFullFaceDescriptions: ExpectedFaceDetectionWithLandmarks[],
expectedScores: number[],
deltas: BoxAndLandmarksDeltas
......@@ -29,6 +30,6 @@ export function expectFaceDetectionsWithLandmarks<TFaceLandmarks extends FaceLan
const { detection, landmarks } = sortedResults[i]
expect(Math.abs(detection.score - expected.score)).toBeLessThan(deltas.maxScoreDelta)
expectRectClose(detection.box, expected.detection, deltas.maxBoxDelta)
landmarks.positions.forEach((pt, j) => expectPointClose(pt, expected.landmarks[j], deltas.maxLandmarksDelta))
expectPointsClose(landmarks.positions, expected.landmarks, deltas.maxLandmarksDelta)
})
}
\ No newline at end of file
import { FullFaceDescription } from '../src/classes/FullFaceDescription';
import { euclideanDistance } from '../src/euclideanDistance';
import { WithFaceDescriptor } from '../src/factories/WithFaceDescriptor';
import { WithFaceDetection } from '../src/factories/WithFaceDetection';
import { WithFaceLandmarks } from '../src/factories/WithFaceLandmarks';
import { BoxAndLandmarksDeltas } from './expectFaceDetectionsWithLandmarks';
import { ExpectedFullFaceDescription, expectPointClose, expectRectClose, sortByFaceDetection } from './utils';
......@@ -8,7 +10,7 @@ export type FullFaceDescriptionDeltas = BoxAndLandmarksDeltas & {
}
export function expectFullFaceDescriptions(
results: FullFaceDescription[],
results: WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>[],
allExpectedFullFaceDescriptions: ExpectedFullFaceDescription[],
expectedScores: number[],
deltas: FullFaceDescriptionDeltas
......
import * as tf from '@tensorflow/tfjs-core';
import { createCanvasFromMedia, NetInput, toNetInput } from '../../../src';
import { FaceExpressionPrediction } from '../../../src/faceExpressionNet/types';
import { loadImage } from '../../env';
import { describeWithNets, expectAllTensorsReleased } from '../../utils';
describe('faceExpressionNet', () => {
let imgElAngry: HTMLImageElement
let imgElSurprised: HTMLImageElement
beforeAll(async () => {
imgElAngry = await loadImage('test/images/angry_cropped.jpg')
imgElSurprised = await loadImage('test/images/surprised_cropped.jpg')
})
describeWithNets('quantized weights', { withFaceExpressionNet: { quantized: true } }, ({ faceExpressionNet }) => {
it('recognizes facial expressions', async () => {
const result = await faceExpressionNet.predictExpressions(imgElAngry) as FaceExpressionPrediction[]
expect(Array.isArray(result)).toBe(true)
expect(result.length).toEqual(7)
const angry = result.find(res => res.expression === 'angry') as FaceExpressionPrediction
expect(angry).not.toBeUndefined()
expect(angry.probability).toBeGreaterThan(0.95)
})
})
describeWithNets('batch inputs', { withFaceExpressionNet: { quantized: true } }, ({ faceExpressionNet }) => {
it('recognizes facial expressions for batch of image elements', async () => {
const inputs = [imgElAngry, imgElSurprised]
const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressionPrediction[][]
expect(Array.isArray(results)).toBe(true)
expect(results.length).toEqual(2)
const [resultAngry, resultSurprised] = results
expect(Array.isArray(resultAngry)).toBe(true)
expect(resultAngry.length).toEqual(7)
expect(Array.isArray(resultSurprised)).toBe(true)
expect(resultSurprised.length).toEqual(7)
const angry = resultAngry.find(res => res.expression === 'angry') as FaceExpressionPrediction
const surprised = resultSurprised.find(res => res.expression === 'surprised') as FaceExpressionPrediction
expect(angry).not.toBeUndefined()
expect(angry.probability).toBeGreaterThan(0.95)
expect(surprised).not.toBeUndefined()
expect(surprised.probability).toBeGreaterThan(0.95)
})
it('computes face landmarks for batch of tf.Tensor3D', async () => {
const inputs = [imgElAngry, imgElSurprised].map(el => tf.fromPixels(createCanvasFromMedia(el)))
const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressionPrediction[][]
expect(Array.isArray(results)).toBe(true)
expect(results.length).toEqual(2)
const [resultAngry, resultSurprised] = results
expect(Array.isArray(resultAngry)).toBe(true)
expect(resultAngry.length).toEqual(7)
expect(Array.isArray(resultSurprised)).toBe(true)
expect(resultSurprised.length).toEqual(7)
const angry = resultAngry.find(res => res.expression === 'angry') as FaceExpressionPrediction
const surprised = resultSurprised.find(res => res.expression === 'surprised') as FaceExpressionPrediction
expect(angry).not.toBeUndefined()
expect(angry.probability).toBeGreaterThan(0.95)
expect(surprised).not.toBeUndefined()
expect(surprised.probability).toBeGreaterThan(0.95)
})
it('computes face landmarks for batch of mixed inputs', async () => {
const inputs = [imgElAngry, tf.fromPixels(createCanvasFromMedia(imgElSurprised))]
const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressionPrediction[][]
expect(Array.isArray(results)).toBe(true)
expect(results.length).toEqual(2)
const [resultAngry, resultSurprised] = results
expect(Array.isArray(resultAngry)).toBe(true)
expect(resultAngry.length).toEqual(7)
expect(Array.isArray(resultSurprised)).toBe(true)
expect(resultSurprised.length).toEqual(7)
const angry = resultAngry.find(res => res.expression === 'angry') as FaceExpressionPrediction
const surprised = resultSurprised.find(res => res.expression === 'surprised') as FaceExpressionPrediction
expect(angry).not.toBeUndefined()
expect(angry.probability).toBeGreaterThan(0.95)
expect(surprised).not.toBeUndefined()
expect(surprised.probability).toBeGreaterThan(0.95)
})
})
describeWithNets('no memory leaks', { withFaceExpressionNet: { quantized: true } }, ({ faceExpressionNet }) => {
describe('forwardInput', () => {
it('single image element', async () => {
await expectAllTensorsReleased(async () => {
const netInput = new NetInput([imgElAngry])
const outTensor = await faceExpressionNet.forwardInput(netInput)
outTensor.dispose()
})
})
it('multiple image elements', async () => {
await expectAllTensorsReleased(async () => {
const netInput = new NetInput([imgElAngry, imgElAngry])
const outTensor = await faceExpressionNet.forwardInput(netInput)
outTensor.dispose()
})
})
it('single tf.Tensor3D', async () => {
const tensor = tf.fromPixels(createCanvasFromMedia(imgElAngry))
await expectAllTensorsReleased(async () => {
const outTensor = await faceExpressionNet.forwardInput(await toNetInput(tensor))
outTensor.dispose()
})
tensor.dispose()
})
it('multiple tf.Tensor3Ds', async () => {
const tensors = [imgElAngry, imgElAngry, imgElAngry].map(el => tf.fromPixels(createCanvasFromMedia(el)))
await expectAllTensorsReleased(async () => {
const outTensor = await faceExpressionNet.forwardInput(await toNetInput(tensors))
outTensor.dispose()
})
tensors.forEach(t => t.dispose())
})
it('single batch size 1 tf.Tensor4Ds', async () => {
const tensor = tf.tidy(() => tf.fromPixels(createCanvasFromMedia(imgElAngry)).expandDims()) as tf.Tensor4D
await expectAllTensorsReleased(async () => {
const outTensor = await faceExpressionNet.forwardInput(await toNetInput(tensor))
outTensor.dispose()
})
tensor.dispose()
})
it('multiple batch size 1 tf.Tensor4Ds', async () => {
const tensors = [imgElAngry, imgElAngry, imgElAngry]
.map(el => tf.tidy(() => tf.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
await expectAllTensorsReleased(async () => {
const outTensor = await faceExpressionNet.forwardInput(await toNetInput(tensors))
outTensor.dispose()
})
tensors.forEach(t => t.dispose())
})
})
describe('predictExpressions', () => {
it('single image element', async () => {
await expectAllTensorsReleased(async () => {
await faceExpressionNet.predictExpressions(imgElAngry)
})
})
it('multiple image elements', async () => {
await expectAllTensorsReleased(async () => {
await faceExpressionNet.predictExpressions([imgElAngry, imgElAngry, imgElAngry])
})
})
it('single tf.Tensor3D', async () => {
const tensor = tf.fromPixels(createCanvasFromMedia(imgElAngry))
await expectAllTensorsReleased(async () => {
await faceExpressionNet.predictExpressions(tensor)
})
tensor.dispose()
})
it('multiple tf.Tensor3Ds', async () => {
const tensors = [imgElAngry, imgElAngry, imgElAngry].map(el => tf.fromPixels(createCanvasFromMedia(el)))
await expectAllTensorsReleased(async () => {
await faceExpressionNet.predictExpressions(tensors)
})
tensors.forEach(t => t.dispose())
})
it('single batch size 1 tf.Tensor4Ds', async () => {
const tensor = tf.tidy(() => tf.fromPixels(createCanvasFromMedia(imgElAngry)).expandDims()) as tf.Tensor4D
await expectAllTensorsReleased(async () => {
await faceExpressionNet.predictExpressions(tensor)
})
tensor.dispose()
})
it('multiple batch size 1 tf.Tensor4Ds', async () => {
const tensors = [imgElAngry, imgElAngry, imgElAngry]
.map(el => tf.tidy(() => tf.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
await expectAllTensorsReleased(async () => {
await faceExpressionNet.predictExpressions(tensors)
})
tensors.forEach(t => t.dispose())
})
})
})
})
import * as tf from '@tensorflow/tfjs-core';
import { FaceFeatureExtractor } from '../../../src/faceFeatureExtractor/FaceFeatureExtractor';
import { FaceLandmark68NetBase } from '../../../src/faceLandmarkNet/FaceLandmark68NetBase';
class FakeFaceLandmark68NetBase extends FaceLandmark68NetBase<any> {
protected getDefaultModelName(): string {
throw new Error('FakeFaceLandmark68NetBase - getDefaultModelName not implemented')
}
protected getClassifierChannelsIn(): number {
throw new Error('FakeFaceLandmark68NetBase - getClassifierChannelsIn not implemented')
}
protected extractParams(_: any): any {
throw new Error('FakeFaceLandmark68NetBase - extractParams not implemented')
}
......@@ -24,7 +30,7 @@ describe('FaceLandmark68NetBase', () => {
describe('postProcess', () => {
const net = new FakeFaceLandmark68NetBase('')
const net = new FakeFaceLandmark68NetBase('', new FaceFeatureExtractor())
describe('single batch', () => {
......
import { Rect } from '../../../src';
import { FaceDetection } from '../../../src/classes/FaceDetection';
import { extendWithFaceDetection } from '../../../src/factories/WithFaceDetection';
const detection = new FaceDetection(1.0, new Rect(0, 0, 0.5, 0.5), { width: 100, height: 100 })
describe('extendWithFaceDetection', () => {
it('returns WithFaceDetection', () => {
const withFaceDetection = extendWithFaceDetection({}, detection)
expect(withFaceDetection.detection).toEqual(detection)
})
it('extends source object', () => {
const srcProp = { foo: true }
const withFaceDetection = extendWithFaceDetection({ srcProp }, detection)
expect(withFaceDetection.detection).toEqual(detection)
expect(withFaceDetection.srcProp).toEqual(srcProp)
})
})
import { Point, Rect } from '../../../src';
import { FaceDetection } from '../../../src/classes/FaceDetection';
import { FaceLandmarks68 } from '../../../src/classes/FaceLandmarks68';
import { extendWithFaceDetection } from '../../../src/factories/WithFaceDetection';
import { extendWithFaceLandmarks } from '../../../src/factories/WithFaceLandmarks';
const detection = new FaceDetection(1.0, new Rect(0.5, 0.5, 0.5, 0.5), { width: 100, height: 100 })
const unshiftedLandmarks = new FaceLandmarks68(Array(68).fill(0).map((_, i) => new Point(i / 100, i / 100)), { width: 100, height: 100 })
const makeSrcObjectWithFaceDetection = <T> (srcObject: T) => extendWithFaceDetection(srcObject, detection)
describe('extendWithFaceLandmarks', () => {
it('returns WithFaceLandmarks', () => {
const srcObj = {}
const srcObjWithFaceDetection = makeSrcObjectWithFaceDetection(srcObj)
const withFaceLandmarks = extendWithFaceLandmarks(srcObjWithFaceDetection, unshiftedLandmarks)
expect(withFaceLandmarks.detection).toEqual(detection)
expect(withFaceLandmarks.unshiftedLandmarks).toEqual(unshiftedLandmarks)
expect(withFaceLandmarks.alignedRect instanceof FaceDetection).toBe(true)
expect(withFaceLandmarks.landmarks instanceof FaceLandmarks68).toBe(true)
})
it('extends source object', () => {
const srcObj = { srcProp: { foo: true } }
const srcObjWithFaceDetection = makeSrcObjectWithFaceDetection(srcObj)
const withFaceLandmarks = extendWithFaceLandmarks(srcObjWithFaceDetection, unshiftedLandmarks)
expect(withFaceLandmarks.srcProp).toEqual(srcObj.srcProp)
expect(withFaceLandmarks.detection).toEqual(detection)
expect(withFaceLandmarks.unshiftedLandmarks).toEqual(unshiftedLandmarks)
expect(withFaceLandmarks.alignedRect instanceof FaceDetection).toBe(true)
expect(withFaceLandmarks.landmarks instanceof FaceLandmarks68).toBe(true)
})
})
import { IPoint, IRect } from '../../../src';
import { FaceDetectionWithLandmarks } from '../../../src/classes/FaceDetectionWithLandmarks';
import { FaceLandmarks5 } from '../../../src/classes/FaceLandmarks5';
import { WithFaceDetection } from '../../../src/factories/WithFaceDetection';
import { WithFaceLandmarks } from '../../../src/factories/WithFaceLandmarks';
import { BoxAndLandmarksDeltas, expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { sortBoxes, sortByDistanceToOrigin } from '../../utils';
......@@ -14,7 +15,7 @@ export const expectedMtcnnBoxes: IRect[] = sortBoxes([
])
export function expectMtcnnResults(
results: FaceDetectionWithLandmarks<FaceLandmarks5>[],
results: WithFaceLandmarks<WithFaceDetection<{}>, FaceLandmarks5>[],
expectedMtcnnFaceLandmarks: IPoint[][],
expectedScores: number[],
deltas: BoxAndLandmarksDeltas
......
import * as faceapi from '../../../src';
import { FullFaceDescription } from '../../../src/classes/FullFaceDescription';
import { MtcnnOptions } from '../../../src/mtcnn/MtcnnOptions';
import { loadImage } from '../../env';
import { expectFaceDetections } from '../../expectFaceDetections';
......@@ -92,9 +91,10 @@ describe('mtcnn', () => {
maxLandmarksDelta: 6,
maxDescriptorDelta: 0.2
}
expect(result instanceof FullFaceDescription).toBe(true)
expect(!!result).toBeTruthy()
expectFullFaceDescriptions(
[result as FullFaceDescription],
result ? [result] : [],
[expectedFullFaceDescriptions[0]],
[expectedScores[0]],
deltas
......
import { Point } from 'tfjs-image-recognition-base';
import { Rect } from '../../src';
import { FaceDetection } from '../../src/classes/FaceDetection';
import { FaceLandmarks68 } from '../../src/classes/FaceLandmarks68';
import { extendWithFaceDetection } from '../../src/factories/WithFaceDetection';
import { extendWithFaceLandmarks } from '../../src/factories/WithFaceLandmarks';
import { resizeResults } from '../../src/resizeResults';
import { expectPointsClose, expectRectClose } from '../utils';
const detection = new FaceDetection(1.0, new Rect(0, 0, 0.5, 0.5), { width: 100, height: 100 })
const unshiftedLandmarks = new FaceLandmarks68(Array(68).fill(0).map((_, i) => new Point(i / 100, i / 100)), { width: 100, height: 100 })
describe('resizeResults', () => {
it('resizes FaceDetection', () => {
const width = 200
const height = 400
const expected = detection.forSize(width, height)
const resized = resizeResults(detection, { width, height })
expect(resized.imageWidth).toEqual(width)
expect(resized.imageHeight).toEqual(height)
expectRectClose(resized.box, expected.box, 0)
})
it('resizes FaceLandmarks', () => {
const width = 200
const height = 400
const expected = unshiftedLandmarks.forSize(width, height)
const resized = resizeResults(unshiftedLandmarks, { width, height })
expect(resized.imageWidth).toEqual(width)
expect(resized.imageHeight).toEqual(height)
expectPointsClose(resized.positions, expected.positions, 0)
})
it('resizes WithFaceDetection', () => {
const width = 200
const height = 400
const expected = detection.forSize(width, height)
const resized = resizeResults(extendWithFaceDetection({}, detection), { width, height })
expect(resized.detection.imageWidth).toEqual(width)
expect(resized.detection.imageHeight).toEqual(height)
expectRectClose(resized.detection.box, expected.box, 0)
})
it('resizes WithFaceLandmarks', () => {
const width = 200
const height = 400
const expectedRect = detection.forSize(width, height)
const expectedLandmarks = unshiftedLandmarks.forSize(expectedRect.box.width, expectedRect.box.height)
const resized = resizeResults(
extendWithFaceLandmarks(
extendWithFaceDetection({}, detection),
unshiftedLandmarks
),
{ width, height }
)
expect(resized.detection.imageWidth).toEqual(width)
expect(resized.detection.imageHeight).toEqual(height)
expectRectClose(resized.detection.box, expectedRect.box, 0)
expect(resized.unshiftedLandmarks.imageWidth).toEqual(expectedRect.box.width)
expect(resized.unshiftedLandmarks.imageHeight).toEqual(expectedRect.box.height)
expectPointsClose(resized.unshiftedLandmarks.positions, expectedLandmarks.positions, 0)
})
})
......@@ -7,7 +7,6 @@ import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWit
import { expectedSsdBoxes } from './expectedBoxes';
import { loadImage } from '../../env';
import * as tf from '@tensorflow/tfjs-core';
import { FullFaceDescription } from '../../../src/classes/FullFaceDescription';
describe('ssdMobilenetv1 - node', () => {
......@@ -90,9 +89,9 @@ describe('ssdMobilenetv1 - node', () => {
maxDescriptorDelta: 0.2
}
expect(result instanceof FullFaceDescription).toBe(true)
expect(!!result).toBeTruthy()
expectFullFaceDescriptions(
[result as FullFaceDescription],
result ? [result] : [],
[expectedFullFaceDescriptions[2]],
[expectedScores[2]],
deltas
......
......@@ -6,7 +6,6 @@ import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectedSsdBoxes } from './expectedBoxes';
import { loadImage } from '../../env';
import { FullFaceDescription } from '../../../src/classes/FullFaceDescription';
describe('ssdMobilenetv1', () => {
......@@ -89,9 +88,9 @@ describe('ssdMobilenetv1', () => {
maxDescriptorDelta: 0.2
}
expect(result instanceof FullFaceDescription).toBe(true)
expect(!!result).toBeTruthy()
expectFullFaceDescriptions(
[result as FullFaceDescription],
result ? [result] : [],
[expectedFullFaceDescriptions[2]],
[expectedScores[2]],
deltas
......
......@@ -7,7 +7,6 @@ import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWit
import { expectedTinyFaceDetectorBoxes } from './expectedBoxes';
import { loadImage } from '../../env';
import * as tf from '@tensorflow/tfjs-core';
import { FullFaceDescription } from '../../../src/classes/FullFaceDescription';
describe('tinyFaceDetector - node', () => {
......@@ -89,9 +88,10 @@ describe('tinyFaceDetector - node', () => {
maxLandmarksDelta: 10,
maxDescriptorDelta: 0.2
}
expect(result instanceof FullFaceDescription).toBe(true)
expect(!!result).toBeTruthy()
expectFullFaceDescriptions(
[result as FullFaceDescription],
result ? [result] : [],
[expectedFullFaceDescriptions[2]],
[expectedScores[2]],
deltas
......
......@@ -6,20 +6,44 @@ import { expectFullFaceDescriptions } from '../../expectFullFaceDescriptions';
import { expectFaceDetectionsWithLandmarks } from '../../expectFaceDetectionsWithLandmarks';
import { expectedTinyFaceDetectorBoxes } from './expectedBoxes';
import { loadImage } from '../../env';
import { FullFaceDescription } from '../../../src/classes/FullFaceDescription';
import { FaceExpressionPrediction } from '../../../src/faceExpressionNet/types';
import { WithFaceExpressions } from '../../../src/factories/WithFaceExpressions';
function expectFaceExpressions(results: WithFaceExpressions<{}>[]) {
results.forEach((result, i) => {
const happy = result.expressions.find(res => res.expression === 'happy') as FaceExpressionPrediction
const neutral = result.expressions.find(res => res.expression === 'neutral') as FaceExpressionPrediction
const happyProb = i === 4 ? 0 : 0.95
const neutralProb = i === 4 ? 0.4 : 0
expect(happy).not.toBeUndefined()
expect(neutral).not.toBeUndefined()
expect(happy.probability).toBeGreaterThanOrEqual(happyProb)
expect(neutral.probability).toBeGreaterThanOrEqual(neutralProb)
})
}
describe('tinyFaceDetector', () => {
let imgEl: HTMLImageElement
let expectedFullFaceDescriptions: ExpectedFullFaceDescription[]
const expectedScores = [0.7, 0.82, 0.93, 0.86, 0.79, 0.84]
const deltas = {
maxScoreDelta: 0.05,
maxBoxDelta: 5,
maxLandmarksDelta: 10,
maxDescriptorDelta: 0.2
}
beforeAll(async () => {
imgEl = await loadImage('test/images/faces.jpg')
expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes)
})
describeWithNets('globalApi', { withAllFacesTinyFaceDetector: true }, () => {
describeWithNets('globalApi', { withAllFacesTinyFaceDetector: true, withFaceExpressionNet: { quantized: true } }, () => {
describe('detectAllFaces', () => {
it('detectAllFaces', async () => {
const options = new TinyFaceDetectorOptions({
......@@ -28,10 +52,8 @@ describe('tinyFaceDetector', () => {
const results = await faceapi.detectAllFaces(imgEl, options)
const maxScoreDelta = 0.05
const maxBoxDelta = 5
expect(results.length).toEqual(6)
expectFaceDetections(results, expectedTinyFaceDetectorBoxes, expectedScores, maxScoreDelta, maxBoxDelta)
expectFaceDetections(results, expectedTinyFaceDetectorBoxes, expectedScores, deltas.maxScoreDelta, deltas.maxBoxDelta)
})
it('detectAllFaces.withFaceLandmarks()', async () => {
......@@ -43,15 +65,38 @@ describe('tinyFaceDetector', () => {
.detectAllFaces(imgEl, options)
.withFaceLandmarks()
const deltas = {
maxScoreDelta: 0.05,
maxBoxDelta: 5,
maxLandmarksDelta: 10
}
expect(results.length).toEqual(6)
expectFaceDetectionsWithLandmarks(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
it('detectAllFaces.withFaceExpressions()', async () => {
const options = new TinyFaceDetectorOptions({
inputSize: 416
})
const results = await faceapi
.detectAllFaces(imgEl, options)
.withFaceExpressions()
expect(results.length).toEqual(6)
expectFaceExpressions(results)
})
it('detectAllFaces.withFaceExpressions().withFaceLandmarks()', async () => {
const options = new TinyFaceDetectorOptions({
inputSize: 416
})
const results = await faceapi
.detectAllFaces(imgEl, options)
.withFaceExpressions()
.withFaceLandmarks()
expect(results.length).toEqual(6)
expectFaceExpressions(results)
expectFaceDetectionsWithLandmarks(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
it('detectAllFaces.withFaceLandmarks().withFaceDescriptors()', async () => {
const options = new TinyFaceDetectorOptions({
inputSize: 416
......@@ -62,16 +107,108 @@ describe('tinyFaceDetector', () => {
.withFaceLandmarks()
.withFaceDescriptors()
const deltas = {
maxScoreDelta: 0.05,
maxBoxDelta: 5,
maxLandmarksDelta: 10,
maxDescriptorDelta: 0.2
}
expect(results.length).toEqual(6)
expectFullFaceDescriptions(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
it('detectAllFaces.withFaceExpressions().withFaceLandmarks().withFaceDescriptors()', async () => {
const options = new TinyFaceDetectorOptions({
inputSize: 416
})
const results = await faceapi
.detectAllFaces(imgEl, options)
.withFaceExpressions()
.withFaceLandmarks()
.withFaceDescriptors()
expect(results.length).toEqual(6)
expectFaceExpressions(results)
expectFullFaceDescriptions(results, expectedFullFaceDescriptions, expectedScores, deltas)
})
})
describe('detectSingleFace', () => {
it('detectSingleFace', async () => {
const options = new TinyFaceDetectorOptions({
inputSize: 416
})
const result = await faceapi
.detectSingleFace(imgEl, options)
expect(!!result).toBeTruthy()
expectFaceDetections(
result ? [result] : [],
[expectedTinyFaceDetectorBoxes[2]],
[expectedScores[2]],
deltas.maxScoreDelta,
deltas.maxBoxDelta
)
})
it('detectSingleFace.withFaceLandmarks()', async () => {
const options = new TinyFaceDetectorOptions({
inputSize: 416
})
const result = await faceapi
.detectSingleFace(imgEl, options)
.withFaceLandmarks()
expect(!!result).toBeTruthy()
expectFaceDetectionsWithLandmarks(
result ? [result] : [],
[expectedFullFaceDescriptions[2]],
[expectedScores[2]],
deltas
)
})
it('detectSingleFace.withFaceExpressions()', async () => {
const options = new TinyFaceDetectorOptions({
inputSize: 416
})
const result = await faceapi
.detectSingleFace(imgEl, options)
.withFaceExpressions()
expect(!!result).toBeTruthy()
expectFaceDetections(
result ? [result.detection] : [],
[expectedTinyFaceDetectorBoxes[2]],
[expectedScores[2]],
deltas.maxScoreDelta,
deltas.maxBoxDelta
)
result && expect((result.expressions.find(res => res.expression === 'happy') as FaceExpressionPrediction).probability)
.toBeGreaterThanOrEqual(0.95)
})
it('detectSingleFace.withFaceExpressions().withFaceLandmarks()', async () => {
const options = new TinyFaceDetectorOptions({
inputSize: 416
})
const result = await faceapi
.detectSingleFace(imgEl, options)
.withFaceExpressions()
.withFaceLandmarks()
expect(!!result).toBeTruthy()
expectFaceDetectionsWithLandmarks(
result ? [result] : [],
[expectedFullFaceDescriptions[2]],
[expectedScores[2]],
deltas
)
result && expect((result.expressions.find(res => res.expression === 'happy') as FaceExpressionPrediction).probability)
.toBeGreaterThanOrEqual(0.95)
})
it('detectSingleFace.withFaceLandmarks().withFaceDescriptor()', async () => {
const options = new TinyFaceDetectorOptions({
inputSize: 416
......@@ -82,22 +219,42 @@ describe('tinyFaceDetector', () => {
.withFaceLandmarks()
.withFaceDescriptor()
const deltas = {
maxScoreDelta: 0.05,
maxBoxDelta: 5,
maxLandmarksDelta: 10,
maxDescriptorDelta: 0.2
}
expect(result instanceof FullFaceDescription).toBe(true)
expect(!!result).toBeTruthy()
expectFullFaceDescriptions(
result ? [result] : [],
[expectedFullFaceDescriptions[2]],
[expectedScores[2]],
deltas
)
})
it('detectSingleFace.withFaceExpressions().withFaceLandmarks().withFaceDescriptor()', async () => {
const options = new TinyFaceDetectorOptions({
inputSize: 416
})
const result = await faceapi
.detectSingleFace(imgEl, options)
.withFaceExpressions()
.withFaceLandmarks()
.withFaceDescriptor()
expect(!!result).toBeTruthy()
expectFullFaceDescriptions(
[result as FullFaceDescription],
result ? [result] : [],
[expectedFullFaceDescriptions[2]],
[expectedScores[2]],
deltas
)
result && expect((result.expressions.find(res => res.expression === 'happy') as FaceExpressionPrediction).probability)
.toBeGreaterThanOrEqual(0.95)
})
it('no memory leaks', async () => {
})
describe('no memory leaks', () => {
it('detectAllFaces', async () => {
await expectAllTensorsReleased(async () => {
await faceapi
.detectAllFaces(imgEl, new TinyFaceDetectorOptions())
......@@ -106,6 +263,17 @@ describe('tinyFaceDetector', () => {
})
})
it('detectSingleFace', async () => {
await expectAllTensorsReleased(async () => {
await faceapi
.detectSingleFace(imgEl, new TinyFaceDetectorOptions())
.withFaceLandmarks()
.withFaceDescriptor()
})
})
})
})
})
\ No newline at end of file
import * as tf from '@tensorflow/tfjs-core';
import { getContext2dOrThrow } from 'tfjs-image-recognition-base';
import * as faceapi from '../src';
import { createCanvasFromMedia, FaceRecognitionNet, IPoint, IRect, Mtcnn, TinyYolov2 } from '../src/';
import { FaceRecognitionNet, IPoint, IRect, Mtcnn, TinyYolov2 } from '../src/';
import { FaceDetection } from '../src/classes/FaceDetection';
import { FaceLandmarks } from '../src/classes/FaceLandmarks';
import { FaceExpressionNet } from '../src/faceExpressionNet/FaceExpressionNet';
import { FaceLandmark68Net } from '../src/faceLandmarkNet/FaceLandmark68Net';
import { FaceLandmark68TinyNet } from '../src/faceLandmarkNet/FaceLandmark68TinyNet';
import { SsdMobilenetv1 } from '../src/ssdMobilenetv1/SsdMobilenetv1';
......@@ -12,7 +12,7 @@ import { TinyFaceDetector } from '../src/tinyFaceDetector/TinyFaceDetector';
import { initNet, loadJson } from './env';
export function expectMaxDelta(val1: number, val2: number, maxDelta: number) {
expect(Math.abs(val1 - val2)).toBeLessThan(maxDelta)
expect(Math.abs(val1 - val2)).toBeLessThanOrEqual(maxDelta)
}
export async function expectAllTensorsReleased(fn: () => any) {
......@@ -30,7 +30,16 @@ export function expectPointClose(
expectedPoint: IPoint,
maxDelta: number
) {
expect(pointDistance(result, expectedPoint)).toBeLessThan(maxDelta)
expect(pointDistance(result, expectedPoint)).toBeLessThanOrEqual(maxDelta)
}
export function expectPointsClose(
results: IPoint[],
expectedPoints: IPoint[],
maxDelta: number
) {
expect(results.length).toEqual(expectedPoints.length)
results.forEach((pt, j) => expectPointClose(pt, expectedPoints[j], maxDelta))
}
export function expectRectClose(
......@@ -104,6 +113,7 @@ export type InjectNetArgs = {
faceLandmark68TinyNet: FaceLandmark68TinyNet
faceRecognitionNet: FaceRecognitionNet
mtcnn: Mtcnn
faceExpressionNet: FaceExpressionNet
tinyYolov2: TinyYolov2
}
......@@ -118,6 +128,7 @@ export type DescribeWithNetsOptions = {
withFaceLandmark68TinyNet?: WithNetOptions
withFaceRecognitionNet?: WithNetOptions
withMtcnn?: WithNetOptions
withFaceExpressionNet?: WithNetOptions
withTinyYolov2?: WithTinyYolov2Options
}
......@@ -135,6 +146,7 @@ export function describeWithNets(
faceLandmark68TinyNet,
faceRecognitionNet,
mtcnn,
faceExpressionNet,
tinyYolov2
} = faceapi.nets
......@@ -150,6 +162,7 @@ export function describeWithNets(
withFaceLandmark68TinyNet,
withFaceRecognitionNet,
withMtcnn,
withFaceExpressionNet,
withTinyYolov2
} = options
......@@ -195,6 +208,13 @@ export function describeWithNets(
)
}
if (withFaceExpressionNet) {
await initNet<FaceExpressionNet>(
faceExpressionNet,
!!withFaceExpressionNet && !withFaceExpressionNet.quantized && 'face_expression_model.weights'
)
}
if (withTinyYolov2 || withAllFacesTinyYolov2) {
await initNet<TinyYolov2>(
tinyYolov2,
......@@ -202,6 +222,8 @@ export function describeWithNets(
true
)
}
})
afterAll(() => {
......@@ -211,6 +233,7 @@ export function describeWithNets(
mtcnn.isLoaded && mtcnn.dispose()
tinyFaceDetector.isLoaded && tinyFaceDetector.dispose()
tinyYolov2.isLoaded && tinyYolov2.dispose()
faceExpressionNet.isLoaded && faceExpressionNet.dispose()
})
specDefinitions({
......@@ -220,6 +243,7 @@ export function describeWithNets(
faceLandmark68TinyNet,
faceRecognitionNet,
mtcnn,
faceExpressionNet,
tinyYolov2
})
})
......
{
"scripts": {
"start": "node server.js"
},
"author": "justadudewhohacks",
"license": "MIT",
"dependencies": {
"express": "^4.16.3",
"file-saver": "^1.3.8"
}
}
// compute the dequantization range for the given min/max and quantized dtype
// (qdtype 0 -> uint8, qdtype 1 -> uint16), nudging min/max so that the real
// value 0.0 maps exactly onto an integer zero point
function getQuantizationRange(min, max, qdtype) {
  if (qdtype !== 0 && qdtype !== 1) {
    throw new Error('qdtype !== 0 && qdtype !== 1: ' + qdtype)
  }
  const quantMax = qdtype === 0 ? 255 : 65535
  const scale = (max - min) / quantMax
  let result = { scale, min, max }
  if (min <= 0 && 0 <= max) {
    // round the zero point to the nearest integer and shift the range accordingly
    const quantizedZeroPoint = (0 - min) / scale
    const nudgedZeroPoint = Math.round(quantizedZeroPoint)
    result.min = (-nudgedZeroPoint) * scale
    result.max = quantMax * scale + result.min
  }
  return result
}
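
// Worked example of the nudge above (illustrative values, not taken from any
// model): for min = -0.9, max = 2.1 and qdtype 0, scale = 3 / 255 and the raw
// zero point is 0.9 / scale = 76.5, which rounds to 77; the range then shifts
// to [-77 * scale, 178 * scale] ~ [-0.9059, 2.0941], so the real value 0.0
// quantizes exactly to the integer 77
console.assert(Math.round((0 - (-0.9)) / (3 / 255)) === 77)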
// quantize a float32 tensor to uint8 (qdtype 0) or uint16 (qdtype 1)
function quantizeWeights(tensor, qdtype = 0) {
  const min = tensor.min().dataSync()[0]
  const max = tensor.max().dataSync()[0]
  // constant tensor: nothing to scale, emit zeros with a neutral scale
  if (min === max) {
    return {
      scale: 1.0,
      min,
      qdata: qdtype === 0 ? new Uint8Array(tensor.size) : new Uint16Array(tensor.size)
    }
  }
  const q = getQuantizationRange(min, max, qdtype)
  // map each value into [0, quantMax]: clip to the nudged range, shift by min,
  // divide by the scale and round to the nearest integer
  const qdata = tf.round(tf.clipByValue(tensor, q.min, q.max).sub(tf.scalar(q.min)).div(tf.scalar(q.scale))).dataSync()
  return {
    scale: q.scale,
    min: q.min,
    qdata: qdtype === 0 ? new Uint8Array(qdata) : new Uint16Array(qdata)
  }
}
// inverse mapping: v_float = v_quantized * scale + min; also accepts the
// result object returned by quantizeWeights directly (checking only for the
// qdata field, since a min of exactly 0 would be falsy)
function dequantizeWeights(qdata, scale, min) {
  if (qdata && qdata.qdata !== undefined) {
    return Float32Array.from(qdata.qdata, v => (v * qdata.scale) + qdata.min)
  }
  return Float32Array.from(qdata, v => (v * scale) + min)
}
const express = require('express')
const path = require('path')
const app = express()
const viewsDir = path.join(__dirname, 'views')
app.use(express.static(viewsDir))
app.use(express.static(path.join(__dirname, './public')))
app.use(express.static(path.join(__dirname, './node_modules/file-saver')))
app.use(express.static(path.join(__dirname, '../../examples/public')))
app.use(express.static(path.join(__dirname, '../../weights')))
app.use(express.static(path.join(__dirname, '../../weights_uncompressed')))
app.use(express.static(path.join(__dirname, '../../dist')))
app.get('/', (req, res) => res.redirect('/quantize_model'))
app.get('/quantize_model', (req, res) => res.sendFile(path.join(viewsDir, 'quantizeModel.html')))
app.listen(3000, () => console.log('Listening on port 3000!'))
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="FileSaver.js"></script>
<script src="quantization.js"></script>
</head>
<body>
<script>
tf = faceapi.tf
const uncompressedWeightsUri = `tiny_face_detector_model.weights`
const net = new faceapi.TinyFaceDetector()
async function load() {
await net.load(new Float32Array(await (await fetch(uncompressedWeightsUri)).arrayBuffer()))
console.log('net loaded')
}
function getNamedTensors() {
return net.getParamList().map(({ path, tensor }) => ({ name: path, tensor }))
}
const modelName = 'tiny_face_detector'
// split a flat byte array into shards of at most 4 MB (4096 * 1024 entries),
// matching the shard files referenced by the weight manifest
function makeShards(weightArray) {
  const maxLength = 4096 * 1024
  const shards = []
  let shardIdx = 0
  for (let i = 0; i < weightArray.length; i++) {
    if (!shards[shardIdx]) {
      shards[shardIdx] = []
    }
    shards[shardIdx].push(weightArray[i])
    // start a new shard once the current one is full
    if (shards[shardIdx].length >= maxLength) {
      shardIdx += 1
    }
  }
  return shards.map(shardArray => new Uint8Array(shardArray))
}
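
// Quick check of the sharding behavior (illustrative input, not model data):
// 5 MB of bytes should split into one full 4 MB shard plus a 1 MB remainder
function sanityCheckShards() {
  const shards = makeShards(new Array(5 * 1024 * 1024).fill(0))
  console.assert(shards.length === 2)
  console.assert(shards[0].length === 4096 * 1024)
  console.assert(shards[1].length === 1024 * 1024)
}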
async function quantizeAndSave() {
const quantizedTensorArrays = []
const weightEntries = []
getNamedTensors().forEach(({ name, tensor, isSkipQuantization }) => {
const weightEntry = {
name,
shape: tensor.shape,
dtype: tensor.dtype
}
if (isSkipQuantization) {
quantizedTensorArrays.push(new Uint8Array(tensor.dataSync().buffer))
weightEntries.push(weightEntry)
return
}
const { scale, min, qdata } = quantizeWeights(tensor)
console.log(name, { scale, min })
const quantizedWeightEntry = {
...weightEntry,
quantization: { dtype: 'uint8', scale, min }
}
quantizedTensorArrays.push(qdata)
weightEntries.push(quantizedWeightEntry)
})
const quantizedWeights = quantizedTensorArrays
.map(typedArray => Array.from(typedArray))
.reduce((flat, arr) => flat.concat(arr))
const shards = makeShards(quantizedWeights)
console.log('num shards: ', shards.length)
const paths = []
shards.forEach((shardData, i) => {
const shardName = `${modelName}_model-shard${i + 1}`
paths.push(shardName)
saveAs(new Blob([shardData]), shardName)
})
const weightManifest = [{
weights: weightEntries,
paths
}]
saveAs(new Blob([JSON.stringify(weightManifest)]), `${modelName}_model-weights_manifest.json`)
}
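
// For reference, the saved manifest follows the tfjs weights manifest layout;
// roughly (field values illustrative):
// [{
//   "weights": [{
//     "name": "conv0/filters", "shape": [3, 3, 3, 16], "dtype": "float32",
//     "quantization": { "dtype": "uint8", "scale": 0.01, "min": -1.2 }
//   }],
//   "paths": ["tiny_face_detector_model-shard1"]
// }]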
load()
</script>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@0.12.0"></script>
<script src="../node_modules/file-saver/FileSaver.js"></script>
</head>
<body>
<div class="row side-by-side">
<button
class="waves-effect waves-light btn"
onclick="save()"
>
Save
</button>
<button
class="waves-effect waves-light btn"
onclick="save(true)"
>
Save Tiny
</button>
</div>
<script>
function toDataArray(tensor) {
return Array.from(tensor.dataSync())
}
function flatten(arrs) {
return arrs.reduce((flat, arr) => flat.concat(arr))
}
function initWeights(initializer, isTiny) {
function initConvWeights(inChannels, outChannels) {
return flatten(
[
// filter
initializer.apply([3, 3, inChannels, outChannels]),
// bias
tf.zeros([outChannels])
]
.map(toDataArray)
)
}
function initSeparableConvWeights(inChannels, outChannels) {
return flatten(
[
// depthwise filter
initializer.apply([3, 3, inChannels, 1]),
// pointwise filter
initializer.apply([1, 1, inChannels, outChannels]),
// bias
tf.zeros([outChannels])
]
.map(toDataArray)
)
}
const separableConvWeights = isTiny
? flatten([
initConvWeights(3, 32),
initSeparableConvWeights(32, 32),
initSeparableConvWeights(32, 32),
initSeparableConvWeights(32, 64),
initSeparableConvWeights(64, 64),
initSeparableConvWeights(64, 64),
initSeparableConvWeights(64, 128),
initSeparableConvWeights(128, 128),
initSeparableConvWeights(128, 128)
])
: flatten([
initConvWeights(3, 32),
initSeparableConvWeights(32, 32),
initSeparableConvWeights(32, 32),
initSeparableConvWeights(32, 32),
initSeparableConvWeights(32, 64),
initSeparableConvWeights(64, 64),
initSeparableConvWeights(64, 64),
initSeparableConvWeights(64, 64),
initSeparableConvWeights(64, 128),
initSeparableConvWeights(128, 128),
initSeparableConvWeights(128, 128),
initSeparableConvWeights(128, 128),
initSeparableConvWeights(128, 256),
initSeparableConvWeights(256, 256),
initSeparableConvWeights(256, 256),
initSeparableConvWeights(256, 256)
])
const fc = flatten(
[
initializer.apply([1, 1, isTiny ? 128 : 256, 136]),
// bias
tf.zeros([136])
]
.map(toDataArray)
)
return new Float32Array(separableConvWeights.concat(fc))
}
function save(isTiny = false) {
const initialWeights = initWeights(
tf.initializers.glorotNormal(),
isTiny
)
saveAs(new Blob([initialWeights]), `initial_glorot.weights`)
}
</script>
</body>
</html>
\ No newline at end of file
const indexedDB = window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB || window.shimIndexedDB
let jsonsDb = null
let jpgsDb = null
const openJsons = indexedDB.open('jsons', 1)
const openJpgs = indexedDB.open('jpgs', 1)
openJsons.onupgradeneeded = function() {
jsonsDb = openJsons.result
jsonsDb.createObjectStore('jsons', { keyPath: 'id' })
}
openJpgs.onupgradeneeded = function() {
jpgsDb = openJpgs.result
jpgsDb.createObjectStore('jpgs', { keyPath: 'id' })
}
openJsons.onsuccess = function() {
console.log('connected to jsons')
jsonsDb = openJsons.result
}
openJpgs.onsuccess = function() {
console.log('connected to jpgs')
jpgsDb = openJpgs.result
}
function putReq(store, obj) {
return new Promise((res, rej) => {
const req = store.put(obj)
req.onsuccess = res
req.onerror = rej
})
}
function getReq(store, id, throwIfNoResult = true) {
return new Promise((res, rej) => {
const req = store.get(id)
req.onsuccess = () => {
if (!req.result && throwIfNoResult) {
return rej(`no result for id: ${id}`)
}
res(req.result)
}
req.onerror = rej
})
}
function existsReq(store, id) {
return getReq(store, id, false)
}
async function getNotFound(store, ids) {
return (await Promise.all(ids.map(async id => ({ id, exists: await existsReq(store, id) }))))
.filter(({ exists }) => !exists)
.map(({ id }) => id)
}
async function getNotFoundPts(ids) {
const store = jsonsDb.transaction('jsons', 'readonly').objectStore('jsons')
return getNotFound(store, ids)
}
async function getNotFoundJpegs(ids) {
const store = jpgsDb.transaction('jpgs', 'readonly').objectStore('jpgs')
return getNotFound(store, ids)
}
async function persistPts(ptsById, overwrite = false) {
const store = jsonsDb.transaction('jsons', 'readwrite').objectStore('jsons')
for (let i = 0; i < ptsById.length; i++) {
const { id, pts } = ptsById[i]
if (!await existsReq(store, id)) {
console.log('persistPts - inserting %s', id)
await putReq(store, { id, pts })
}
}
}
function getPts(ids) {
const store = jsonsDb.transaction('jsons', 'readonly').objectStore('jsons')
return Promise.all(ids.map(id => getReq(store, id)))
}
async function persistJpgs(jpgsById, overwrite = false) {
const store = jpgsDb.transaction('jpgs', 'readwrite').objectStore('jpgs')
for (let i = 0; i < jpgsById.length; i++) {
const { id, blob } = jpgsById[i]
if (!await existsReq(store, id)) {
console.log('persistJpgs - inserting %s', id)
await putReq(store, { id, blob })
}
}
}
function getJpgs(ids) {
const store = jpgsDb.transaction('jpgs', 'readonly').objectStore('jpgs')
return Promise.all(ids.map(id => getReq(store, id)))
}
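
// Usage sketch (illustrative ids and point shapes; assumes the 'jsons'
// connection above has opened): persist landmark points, query which ids are
// still missing, then read a record back
async function exampleJsonsRoundTrip() {
  await persistPts([{ id: 'img_0001', pts: [{ x: 10, y: 20 }] }])
  const missing = await getNotFoundPts(['img_0001', 'img_0002'])
  console.log(missing) // -> ['img_0002']
  const [record] = await getPts(['img_0001'])
  console.log(record.pts) // -> [{ x: 10, y: 20 }]
}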
// https://gist.github.com/tralves/9e5de2bd9f582007a52708d7d4209865
var getTableSize = function(db, dbName){
return new Promise((resolve,reject) => {
if (db == null) {
return reject();
}
var size = 0;
var transaction = db.transaction([dbName])
.objectStore(dbName)
.openCursor();
transaction.onsuccess = function(event){
var cursor = event.target.result;
if(cursor){
var storedObject = cursor.value;
var json = JSON.stringify(storedObject);
size += json.length;
cursor.continue();
}
else{
resolve(size);
}
}.bind(this);
transaction.onerror = function(err){
reject("error in " + dbName + ": " + err);
}
});
};
var getDatabaseSize = function (dbName) {
var request = indexedDB.open(dbName);
var db;
var dbSize = 0;
request.onerror = function(event) {
alert("Why didn't you allow my web app to use IndexedDB?!");
};
request.onsuccess = function(event) {
db = event.target.result;
var tableNames = [ ...db.objectStoreNames ];
(function(tableNames, db) {
var tableSizeGetters = tableNames
.reduce( (acc, tableName) => {
acc.push( getTableSize(db, tableName) );
return acc;
}, []);
Promise.all(tableSizeGetters)
.then(sizes => {
console.log('--------- ' + db.name + ' -------------');
tableNames.forEach( (tableName,i) => {
console.log(" - " + tableName + "\t: " + humanReadableSize(sizes[i]));
});
var total = sizes.reduce(function(acc, val) {
return acc + val;
}, 0);
console.log("TOTAL: " + humanReadableSize(total))
});
})(tableNames, db);
};
};
var humanReadableSize = function (bytes) {
var thresh = 1024;
if(Math.abs(bytes) < thresh) {
return bytes + ' B';
}
var units = ['KB','MB','GB','TB','PB','EB','ZB','YB'];
var u = -1;
do {
bytes /= thresh;
++u;
} while(Math.abs(bytes) >= thresh && u < units.length - 1);
return bytes.toFixed(1)+' '+units[u];
}
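
// e.g. humanReadableSize(1536) -> '1.5 KB', humanReadableSize(1e9) -> '953.7 MB'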
getDatabaseSize('jsons')
getDatabaseSize('jpgs')
\ No newline at end of file