Commit 99cbd7e5 by vincent

fixed masks and coordinate loss

parent 459067ce
......@@ -2,6 +2,7 @@ async function trainStep(batchCreators) {
await promiseSequential(batchCreators.map((batchCreator, dataIdx) => async () => {
const { batchInput, landmarksBatchTensor } = await batchCreator()
let ts = Date.now()
const cost = optimizer.minimize(() => {
const out = window.trainNet.forwardInput(batchInput.managed())
......@@ -9,7 +10,7 @@ async function trainStep(batchCreators) {
landmarksBatchTensor,
out
)
return loss
return tf.sum(out)
}, true)
ts = Date.now() - ts
......@@ -19,6 +20,7 @@ async function trainStep(batchCreators) {
cost.dispose()
await tf.nextFrame()
console.log(tf.memory())
}))
}
......@@ -63,7 +65,7 @@ function landmarkPositionsToArray(landmarks) {
}
function toFaceLandmarks(landmarks, { naturalWidth, naturalHeight }) {
return new faceapi.FaceLandmarks(
return new faceapi.FaceLandmarks68(
landmarks.map(l => new faceapi.Point(l.x / naturalWidth, l.y / naturalHeight)),
{ width: naturalWidth, height: naturalHeight }
)
......@@ -90,8 +92,11 @@ async function getTrainData() {
(_, i) => landmarksJson[i]
)
return await loadImagesInBatch(allLandmarks.slice(0, 100))
/**
const batch1 = await loadImagesInBatch(allLandmarks.slice(0, 4000))
const batch2 = await loadImagesInBatch(allLandmarks.slice(4000), 4000)
return batch1.concat(batch2)
*/
}
\ No newline at end of file
......@@ -60,7 +60,7 @@ async function init() {
//window.nets.push(await loadNet('retrained/landmarks_v0.weights'))
//window.nets.push(await loadNet('retrained/landmarks_v2.weights'))
window.trainNet = await loadNet('retrained/landmarks_v6.weights')
window.trainNet = await loadNet('/tmp/retrained/landmarks_v9.weights')
window.nets.push(trainNet)
$('#loader').hide()
......
......@@ -61,7 +61,7 @@
await train()
}
async function train(batchSize = 10) {
async function train(batchSize = 1) {
for (let i = 0; i < trainSteps; i++) {
console.log('step', i)
const batchCreators = createBatchCreators(shuffle(window.trainData), batchSize)
......@@ -70,7 +70,7 @@
ts = Date.now() - ts
console.log('step %s done (%s ms)', i, ts)
if (((i + 1) % saveEveryNthIteration) === 0) {
saveWeights(window.trainNet, 'landmark_trained_weights_' + idx + '.weights')
//saveWeights(window.trainNet, 'landmark_trained_weights_' + idx + '.weights')
}
}
}
......
// Karma configuration for the tinyYolov2 TypeScript test suite.
// Compiles specs via karma-typescript and runs them in a real Chrome browser.
module.exports = function(config) {
config.set({
frameworks: ['jasmine', 'karma-typescript'],
// Pick up every TypeScript source and spec file under tinyYolov2/.
files: [
'tinyYolov2/**/*.ts'
],
preprocessors: {
'**/*.ts': ['karma-typescript']
},
karmaTypescriptConfig: {
// Dedicated tsconfig for tests (see tsconfig.test.json).
tsconfig: './tsconfig.test.json'
},
browsers: ['Chrome'],
// Allow up to 60s without browser activity before Karma gives up
// (long-running tensor computations can stall the event loop).
browserNoActivityTimeout: 60000,
client: {
jasmine: {
// Per-spec timeout of 30s.
timeoutInterval: 30000
}
}
})
}
{
"scripts": {
"start": "node server.js"
"start": "node server.js",
"test": "karma start"
},
"author": "justadudewhohacks",
"license": "MIT",
......
......@@ -25,6 +25,7 @@ const detectionFilenames = fs.readdirSync(detectionsPath)
app.use(express.static(trainDataPath))
app.get('/detection_filenames', (req, res) => res.status(202).send(detectionFilenames))
app.get('/', (req, res) => res.sendFile(path.join(publicDir, 'train.html')))
app.get('/', (req, res) => res.sendFile(path.join(publicDir, 'overfit.html')))
app.get('/verify', (req, res) => res.sendFile(path.join(publicDir, 'verify.html')))
app.listen(3000, () => console.log('Listening on port 3000!'))
\ No newline at end of file
......@@ -28,3 +28,5 @@ function saveWeights(net, filename = 'train_tmp') {
)
saveAs(new Blob([binaryWeights]), filename)
}
// Timestamped console logger: prefixes the message with [HH:MM:SS].
// Fixes: drop the redundant one-element array wrapper around the timestamp
// expression and replace deprecated String.prototype.substr with slice.
const log = (str, ...args) => console.log(`[${(new Date()).toTimeString().slice(0, 8)}] ${str || ''}`, ...args)
// hyper parameters
// NOTE(review): the train/overfit HTML pages also assign window.objectScale,
// window.noObjectScale and window.coordScale (with different values); these
// file-level consts shadow those globals - confirm which set is intended.
// Weight of the confidence loss for cells that contain an object.
const objectScale = 1
// Weight of the confidence loss for empty cells (down-weighted).
const noObjectScale = 0.5
// Weight of the bounding-box coordinate loss (up-weighted).
const coordScale = 5
// Side length in pixels of one grid cell of the output feature map.
const CELL_SIZE = 32
// Number of grid cells along one axis for a given (square) input size.
const getNumCells = (inputSize) => {
return inputSize / CELL_SIZE
}
// Inverse of the logistic sigmoid: maps a value p in (0, 1) back to its logit.
const inverseSigmoid = (p) => {
return Math.log(p / (1 - p))
}
// The anchor boxes are owned by the globally shared network instance.
function getAnchors() {
const { anchors } = window.net
return anchors
}
// Scalar loss: sum of squared, mask-gated loss terms, i.e. sum((mask * loss)^2).
// Intermediate tensors are released via tf.tidy.
function squaredSumOverMask(lossTensors, mask) {
return tf.tidy(() => {
const masked = tf.mul(mask, lossTensors)
return tf.sum(tf.square(masked))
})
}
function assignBoxesToAnchors(groundTruthBoxes, reshapedImgDims) {
const inputSize = Math.max(reshapedImgDims.width, reshapedImgDims.height)
......@@ -53,6 +54,30 @@ function getGroundTruthMask(groundTruthBoxes, inputSize) {
return mask
}
// Builds two binary masks over the output volume [numCells, numCells, 25]:
// coordMask selects the 4 box-adjustment channels of every anchor,
// scoreMask selects the confidence channel of every anchor.
// Channel layout per cell: 5 anchors x (4 coords + 1 score) = 25.
function getCoordAndScoreMasks(inputSize) {
const numCells = getNumCells(inputSize)
const coordMask = tf.zeros([numCells, numCells, 25])
const scoreMask = tf.zeros([numCells, numCells, 25])
const coordBuf = coordMask.buffer()
const scoreBuf = scoreMask.buffer()
for (let r = 0; r < numCells; r++) {
for (let c = 0; c < numCells; c++) {
for (let a = 0; a < 5; a++) {
const base = 5 * a
// channels [base, base + 3]: box adjustments; channel base + 4: score
coordBuf.set(1, r, c, base + 0)
coordBuf.set(1, r, c, base + 1)
coordBuf.set(1, r, c, base + 2)
coordBuf.set(1, r, c, base + 3)
scoreBuf.set(1, r, c, base + 4)
}
}
}
return { coordMask, scoreMask }
}
function computeBoxAdjustments(groundTruthBoxes, reshapedImgDims) {
const inputSize = Math.max(reshapedImgDims.width, reshapedImgDims.height)
......@@ -66,10 +91,14 @@ function computeBoxAdjustments(groundTruthBoxes, reshapedImgDims) {
const centerX = (left + right) / 2
const centerY = (top + bottom) / 2
const dx = (centerX - (col * CELL_SIZE + (CELL_SIZE / 2))) / inputSize
const dy = (centerY - (row * CELL_SIZE + (CELL_SIZE / 2))) / inputSize
const dw = Math.log(width / getAnchors()[anchor].x)
const dh = Math.log(height / getAnchors()[anchor].y)
const dCenterX = centerX - (col * CELL_SIZE + (CELL_SIZE / 2))
const dCenterY = centerY - (row * CELL_SIZE + (CELL_SIZE / 2))
const dx = inverseSigmoid(dCenterX / inputSize)
const dy = inverseSigmoid(dCenterY / inputSize)
const dw = Math.log((width / CELL_SIZE) / getAnchors()[anchor].x)
const dh = Math.log((height / CELL_SIZE) / getAnchors()[anchor].y)
const anchorOffset = anchor * 5
buf.set(dx, row, col, anchorOffset + 0)
......@@ -83,7 +112,8 @@ function computeBoxAdjustments(groundTruthBoxes, reshapedImgDims) {
function computeIous(predBoxes, groundTruthBoxes, reshapedImgDims) {
const numCells = getNumCells(Math.max(reshapedImgDims.width, reshapedImgDims.height))
const inputSize = Math.max(reshapedImgDims.width, reshapedImgDims.height)
const numCells = getNumCells(inputSize)
const isSameAnchor = p1 => p2 =>
p1.row === p2.row
......@@ -104,9 +134,15 @@ function computeIous(predBoxes, groundTruthBoxes, reshapedImgDims) {
const iou = faceapi.iou(
box.rescale(reshapedImgDims),
predBox.box.rescale(reshapedImgDims)
predBox.box
)
if (window.debug) {
console.log('ground thruth box:', box.rescale(reshapedImgDims))
console.log('predicted box:', predBox.box)
console.log(iou)
}
const anchorOffset = anchor * 5
buf.set(iou, row, col, anchorOffset + 4)
})
......@@ -114,34 +150,114 @@ function computeIous(predBoxes, groundTruthBoxes, reshapedImgDims) {
return ious
}
function computeNoObjectLoss(outTensor) {
return tf.tidy(() => tf.square(tf.sigmoid(outTensor)))
window.computeNoObjectLoss = function(outTensor, mask) {
return tf.tidy(() => {
const lossTensor = tf.sigmoid(outTensor)
return squaredSumOverMask(lossTensor, mask)
})
}
function computeObjectLoss(outTensor, groundTruthBoxes, reshapedImgDims, paddings) {
function computeObjectLoss(outTensor, groundTruthBoxes, reshapedImgDims, paddings, mask) {
return tf.tidy(() => {
const predBoxes = window.net.postProcess(
outTensor,
{ paddings }
)
if (window.debug) {
console.log(predBoxes)
console.log(predBoxes.filter(b => b.score > 0.1))
}
// debug
const numCells = getNumCells(Math.max(reshapedImgDims.width, reshapedImgDims.height))
if (predBoxes.length !== (numCells * numCells * getAnchors().length)) {
console.log(predBoxes.length)
throw new Error('predBoxes.length !== (numCells * numCells * 25)')
}
const isInvalid = num => !num && num !== 0
predBoxes.forEach(({ row, col, anchor }) => {
if ([row, col, anchor].some(isInvalid)) {
console.log(row, col, anchor)
throw new Error('row, col, anchor invalid')
}
})
// debug
const ious = computeIous(
predBoxes,
groundTruthBoxes,
reshapedImgDims
)
return tf.square(tf.sub(ious, tf.sigmoid(outTensor)))
const lossTensor = tf.sub(ious, tf.sigmoid(outTensor))
return squaredSumOverMask(lossTensor, mask)
})
}
function computeCoordLoss(groundTruthBoxes, outTensor, reshapedImgDims) {
function computeCoordLoss(groundTruthBoxes, outTensor, reshapedImgDims, mask, paddings) {
return tf.tidy(() => {
const boxAdjustments = computeBoxAdjustments(
groundTruthBoxes,
reshapedImgDims
)
return tf.square(tf.sub(boxAdjustments, outTensor))
// debug
if (window.debug) {
const indToPos = []
const numCells = outTensor.shape[1]
for (let row = 0; row < numCells; row++) {
for (let col = 0; col < numCells; col++) {
for (let anchor = 0; anchor < 25; anchor++) {
indToPos.push({ row, col, anchor: parseInt(anchor / 5) })
}
}
}
const m = Array.from(mask.dataSync())
const ind = m.map((val, ind) => ({ val, ind })).filter(v => v.val !== 0).map(v => v.ind)
const gt = Array.from(boxAdjustments.dataSync())
const out = Array.from(outTensor.dataSync())
const comp = ind.map(i => (
{
pos: indToPos[i],
gt: gt[i],
out: out[i]
}
))
console.log(comp)
console.log(comp.map(c => `gt: ${c.gt}, out: ${c.out}`))
const printBbox = (which) => {
const { col, row, anchor } = comp[0].pos
console.log(col, row, anchor)
const ctX = ((col + faceapi.sigmoid(comp[0][which])) / numCells) * paddings.x
const ctY = ((row + faceapi.sigmoid(comp[1][which])) / numCells) * paddings.y
const width = ((Math.exp(comp[2][which]) * getAnchors()[anchor].x) / numCells) * paddings.x
const height = ((Math.exp(comp[3][which]) * getAnchors()[anchor].y) / numCells) * paddings.y
const x = (ctX - (width / 2))
const y = (ctY - (height / 2))
console.log(which, x * reshapedImgDims.width, y * reshapedImgDims.height, width * reshapedImgDims.width, height * reshapedImgDims.height)
}
printBbox('out')
printBbox('gt')
}
// debug
const lossTensor = tf.sub(boxAdjustments, outTensor)
return squaredSumOverMask(lossTensor, mask)
})
}
......@@ -160,29 +276,30 @@ function computeLoss(outTensor, groundTruth, reshapedImgDims, paddings) {
reshapedImgDims
)
const mask = getGroundTruthMask(
groundTruthBoxes,
inputSize
)
const inverseMask = tf.tidy(() => tf.sub(tf.scalar(1), mask))
const groundTruthMask = getGroundTruthMask(groundTruthBoxes, inputSize)
const { coordMask, scoreMask } = getCoordAndScoreMasks(inputSize)
const noObjectLossMask = tf.tidy(() => tf.mul(scoreMask, tf.sub(tf.scalar(1), groundTruthMask)))
const objectLossMask = tf.tidy(() => tf.mul(scoreMask, groundTruthMask))
const coordLossMask = tf.tidy(() => tf.mul(coordMask, groundTruthMask))
const noObjectLoss = tf.tidy(() =>
tf.mul(
tf.scalar(noObjectScale),
tf.sum(tf.mul(inverseMask, computeNoObjectLoss(outTensor)))
computeNoObjectLoss(outTensor, noObjectLossMask)
)
)
const objectLoss = tf.tidy(() =>
tf.mul(
tf.scalar(objectScale),
tf.sum(tf.mul(mask, computeObjectLoss(outTensor, groundTruthBoxes, reshapedImgDims, paddings)))
computeObjectLoss(outTensor, groundTruthBoxes, reshapedImgDims, paddings, objectLossMask)
)
)
const coordLoss = tf.tidy(() =>
tf.mul(
tf.scalar(coordScale),
tf.sum(tf.mul(mask, computeCoordLoss(groundTruthBoxes, outTensor, reshapedImgDims)))
computeCoordLoss(groundTruthBoxes, outTensor, reshapedImgDims, coordLossMask, paddings)
)
)
......
import * as _tf from '@tensorflow/tfjs-core';
const faceapi = require('../../../dist/face-api.js')
const tf: typeof _tf = faceapi.tf
require('./loss')
window['faceapi'] = faceapi
window['tf'] = tf
// Anchor box priors used by the loss tests (x/y are width/height factors;
// loss.js divides box extents by CELL_SIZE before comparing against these,
// so they appear to be expressed in grid-cell units - TODO confirm).
const anchors = [
new faceapi.Point(1.603231, 2.094468),
new faceapi.Point(6.041143, 7.080126),
new faceapi.Point(2.882459, 3.518061),
new faceapi.Point(4.266906, 5.178857),
new faceapi.Point(9.041765, 10.66308)
]
// Minimal stand-in for the globally shared net so loss.js can query anchors.
// NOTE(review): loss.js's getAnchors() reads window.net.anchors (a property),
// while this mock exposes getAnchors() as a method - confirm which contract
// is current and align the two.
window['net'] = {
getAnchors() {
return anchors
}
}
// Specs for the no-object confidence loss. Each expectation follows the same
// scheme: sigmoid(0) = 0.5 gives a squared error of 0.25 per anchor score,
// times 5 anchors, times the number of grid cells (and the batch size).
// NOTE(review): the updated computeNoObjectLoss takes (outTensor, mask) and
// already returns a scalar sum; these specs pass only outTensor, so the mask
// would be undefined - confirm the specs are updated alongside the new API.
describe('loss', () => {
describe('computeNoObjectLoss', () => {
const computeNoObjectLoss = window['computeNoObjectLoss']
it('should only compute loss over scores, 1x1 grid', () => tf.tidy(() => {
const outTensor = tf.zeros([1, 1, 1, 25])
const loss = tf.sum(computeNoObjectLoss(outTensor)).dataSync()[0]
// 0.25 squared error x 5 anchor scores, single cell
expect(loss).toEqual(0.5 * 0.5 * 5)
}))
it('should only compute loss over scores, 13x13 grid', () => tf.tidy(() => {
const outTensor = tf.zeros([1, 13, 13, 25])
const loss = tf.sum(computeNoObjectLoss(outTensor)).dataSync()[0]
// same per-cell loss across all 13x13 cells
expect(loss).toEqual(0.5 * 0.5 * 5 * 13 * 13)
}))
it('should only compute loss over scores, 13x13 grid, batchSize: 10', () => tf.tidy(() => {
const outTensor = tf.zeros([10, 13, 13, 25])
const loss = tf.sum(computeNoObjectLoss(outTensor)).dataSync()[0]
// per-image loss scales linearly with the batch size
expect(loss).toEqual(0.5 * 0.5 * 5 * 13 * 13 * 10)
}))
})
})
\ No newline at end of file
<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<script src="FileSaver.js"></script>
<script src="trainUtils.js"></script>
<script src="train.js"></script>
<script src="loss.js"></script>
</head>
<body>
<script>
tf = faceapi.tf
const weightsUrl = '/tmp/test.weights'
const fromEpoch = 800
// hyper parameters
window.objectScale = 5
window.noObjectScale = 0.5
window.coordScale = 1
window.saveEveryNthIteration = 50
window.trainSteps = 4000
//window.optimizer = tf.train.sgd(0.001)
window.optimizer = tf.train.adam(0.001, 0.9, 0.999, 1e-8)
const numTrainSamples = 1
// Downloads a binary weight file from uri and wraps it in a Float32Array.
async function loadNetWeights(uri) {
const res = await fetch(uri)
const data = await res.arrayBuffer()
return new Float32Array(data)
}
// Asks the training server for the list of detection JSON filenames.
async function fetchDetectionFilenames() {
const res = await fetch('/detection_filenames')
return res.json()
}
// Bootstraps training: loads pretrained weights, builds a trainable
// TinyYolov2 and initializes the global training state (sample filenames
// limited to numTrainSamples, plus an empty per-sample loss map).
async function run() {
const weights = await loadNetWeights(weightsUrl)
window.net = new faceapi.TinyYolov2(true)
// NOTE(review): load() is awaited in the verify page - confirm whether it
// returns a promise that should be awaited here as well.
window.net.load(weights)
// presumably converts the loaded weights into trainable variables - TODO confirm
window.net.variable()
window.detectionFilenames = (await fetchDetectionFilenames()).slice(0, numTrainSamples)
// lossMap tracks the last total loss observed per training sample (see trainStep).
window.lossMap = {}
console.log('ready')
}
const trainSizes = [608]
// Training loop: for each step, shuffles the training samples, builds batch
// creators, and runs one trainStep per configured input size. Logs the loss
// progression between steps and periodically saves the current weights.
// batchSize - number of samples per batch (defaults to 1 for overfitting runs).
async function train(batchSize = 1) {
  for (let i = fromEpoch; i < trainSteps; i++) {
    log('step', i)
    let ts = Date.now()
    const batchCreators = createBatchCreators(shuffle(window.detectionFilenames), batchSize)
    for (let s = 0; s < trainSizes.length; s++) {
      let ts2 = Date.now()
      await trainStep(batchCreators, trainSizes[s])
      ts2 = Date.now() - ts2
      //log('train for size %s done (%s ms)', trainSizes[s], ts2)
    }
    ts = Date.now() - ts
    log()
    log('--------------------')
    log()
    log('step %s done (%s ms)', i, ts)
    // Sum the per-sample losses. The explicit initial value 0 prevents the
    // TypeError reduce() throws on an empty lossMap (e.g. before any trainStep ran).
    const currentLoss = Object.keys(lossMap).map(k => lossMap[k]).reduce((sum, l) => sum + l, 0)
    // != null (not truthiness) so a legitimate previous loss of exactly 0 is still reported.
    if (window.prevLoss != null) {
      log('prevLoss:', window.prevLoss)
      log('currentLoss:', currentLoss)
      log('loss change:', currentLoss - window.prevLoss)
    }
    log()
    log('--------------------')
    log()
    window.prevLoss = currentLoss
    if (((i + 1) % saveEveryNthIteration) === 0) {
      saveWeights(window.net, 'adam_511_n1_' + i + '.weights')
    }
  }
}
run()
</script>
</body>
</html>
\ No newline at end of file
......@@ -17,10 +17,17 @@
<script>
tf = faceapi.tf
const weightsUrl = '/tmp/initial_tiny_yolov2_glorot_normal.weights'
const weightsUrl = '/tmp/.weights'
// hyper parameters
window.objectScale = 5
window.noObjectScale = 0.5
window.coordScale = 1
window.saveEveryNthIteration = 1
window.trainSteps = 100
//window.optimizer = tf.train.sgd(learningRate)
window.optimizer = tf.train.adam(0.001, 0.9, 0.999, 1e-8)
function lossFunction(labels, out) {
......
......@@ -3,10 +3,9 @@ async function trainStep(batchCreators, inputSize) {
await promiseSequential(batchCreators.map((batchCreator, dataIdx) => async () => {
// TODO: skip if groundTruthBoxes are too tiny
const { imgs, groundTruthBoxes } = await batchCreator()
const { imgs, groundTruthBoxes, filenames } = await batchCreator()
const batchInput = (await faceapi.toNetInput(imgs)).managed()
let ts = Date.now()
const loss = optimizer.minimize(() => {
// TBD: batch loss
......@@ -27,21 +26,32 @@ async function trainStep(batchCreators, inputSize) {
)
console.log('ground truth boxes:', groundTruthBoxes[batchIdx].length)
console.log(`noObjectLoss[${dataIdx}]: ${noObjectLoss.dataSync()}`)
console.log(`objectLoss[${dataIdx}]: ${objectLoss.dataSync()}`)
console.log(`coordLoss[${dataIdx}]: ${coordLoss.dataSync()}`)
console.log(`totalLoss[${dataIdx}]: ${totalLoss.dataSync()}`)
const total = totalLoss.dataSync()[0]
if (window.logTrainSteps) {
log(`ground truth boxes: ${groundTruthBoxes[batchIdx].length}`)
log(`noObjectLoss[${dataIdx}]: ${noObjectLoss.dataSync()}`)
log(`objectLoss[${dataIdx}]: ${objectLoss.dataSync()}`)
log(`coordLoss[${dataIdx}]: ${coordLoss.dataSync()}`)
log(`totalLoss[${dataIdx}]: ${total}`)
if (window.lossMap[filenames]) {
log(`loss change: ${total - window.lossMap[filenames]}`)
}
}
window.lossMap[filenames] = total
return totalLoss
}, true)
ts = Date.now() - ts
console.log(`trainStep time for dataIdx ${dataIdx} (${inputSize}): ${ts} ms (${ts / batchInput.batchSize} ms / batch element)`)
if (window.logTrainSteps) {
log(`trainStep time for dataIdx ${dataIdx} (${inputSize}): ${ts} ms (${ts / batchInput.batchSize} ms / batch element)`)
}
loss.dispose()
await tf.nextFrame()
//console.log(tf.memory())
}))
}
......@@ -61,18 +71,19 @@ function createBatchCreators(detectionFilenames, batchSize) {
pushToBatch(detectionFilenames)
const batchCreators = batches.map(filenameForBatch => async () => {
const groundTruthBoxes = await Promise.all(filenameForBatch.map(
const batchCreators = batches.map(filenamesForBatch => async () => {
const groundTruthBoxes = await Promise.all(filenamesForBatch.map(
file => fetch(file).then(res => res.json())
))
const imgs = await Promise.all(filenameForBatch.map(
const imgs = await Promise.all(filenamesForBatch.map(
async file => await faceapi.bufferToImage(await fetchImage(file.replace('.json', '')))
))
return {
imgs,
groundTruthBoxes
groundTruthBoxes,
filenames: filenamesForBatch
}
})
......
<!DOCTYPE html>
<html>
<head>
<script src="face-api.js"></script>
<script src="commons.js"></script>
<link rel="stylesheet" href="styles.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"></script>
</head>
<body>
<div id="navbar"></div>
<div class="center-content page-container">
<div class="progress" id="loader">
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<img id="inputImg" src="" style="max-width: 800px;" />
<canvas id="overlay" />
</div>
<div class="row side-by-side">
<div id="selectList"></div>
<div class="row">
<label for="imgUrlInput">Get image from URL:</label>
<input id="imgUrlInput" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="loadImageFromUrl()"
>
Ok
</button>
</div>
<div class="row side-by-side">
<div class="row input-field" style="margin-right: 20px;">
<select id="sizeType">
<option value="" disabled selected>Input Size:</option>
<option value="xs">XS: 224 x 224</option>
<option value="sm">SM: 320 x 320</option>
<option value="md">MD: 416 x 416</option>
<option value="lg">LG: 608 x 608</option>
</select>
<label>Input Size</label>
</div>
<div class="row">
<label for="scoreThreshold">Score Threshold:</label>
<input disabled value="0.5" id="scoreThreshold" type="text" class="bold">
</div>
<button
class="waves-effect waves-light btn"
onclick="onDecreaseThreshold()"
>
<i class="material-icons left">-</i>
</button>
<button
class="waves-effect waves-light btn"
onclick="onIncreaseThreshold()"
>
<i class="material-icons left">+</i>
</button>
</div>
<div class="row">
<label for="imgByNr">Enter image NR: </label>
<input id="imgByNr" type="text" class="bold">
</div>
</div>
<script>
let scoreThreshold = 0.5
let sizeType = 'sm'
// Steps the numeric image index on arrow-up (38) / arrow-down (40) key
// presses and switches the displayed image accordingly.
// NOTE(review): e.keyCode is deprecated; e.key === 'ArrowUp'/'ArrowDown'
// is the modern equivalent.
function onKeyDown(e) {
  // Radix 10 so the field is never parsed as octal/hex; if the field does
  // not hold a number, fall back to its previous value (or 0).
  e.target.value = (
    parseInt(e.target.value, 10) + (e.keyCode === 38 ? 1 : (e.keyCode === 40 ? -1 : 0))
  ) || e.target.value || 0
  const imgUri = window.imgs[e.target.value]
  console.log(imgUri)
  onSelectionChanged(imgUri)
}
// Raises the detection score threshold by 0.1 (capped at 1.0), reflects it
// in the disabled input field and re-runs detection.
function onIncreaseThreshold() {
scoreThreshold = Math.min(faceapi.round(scoreThreshold + 0.1), 1.0)
$('#scoreThreshold').val(scoreThreshold)
updateResults()
}
// Lowers the detection score threshold by 0.1 (floored at 0.1), reflects it
// in the disabled input field and re-runs detection.
function onDecreaseThreshold() {
scoreThreshold = Math.max(faceapi.round(scoreThreshold - 0.1), 0.1)
$('#scoreThreshold').val(scoreThreshold)
updateResults()
}
// Select-change handler: adopts the chosen input size and re-runs detection.
// NOTE(review): the second parameter c is unused - confirm it can be dropped.
function onSizeTypeChanged(e, c) {
sizeType = e.target.value
$('#sizeType').val(sizeType)
updateResults()
}
// Loads the image whose URL is currently typed into #imgUrlInput, displays
// it and re-runs detection.
// NOTE(review): the url parameter is ignored - the value is always read from
// the input field; confirm whether the parameter can be removed.
async function loadImageFromUrl(url) {
const img = await requestExternalImage($('#imgUrlInput').val())
$('#inputImg').get(0).src = img.src
updateResults()
}
// Re-runs face detection on the currently displayed image using the selected
// input size and score threshold, then draws the detections on #overlay.
async function updateResults() {
const inputImgEl = $('#inputImg').get(0)
const { width, height } = inputImgEl
// Keep the overlay canvas dimensions in sync with the displayed image.
const canvas = $('#overlay').get(0)
canvas.width = width
canvas.height = height
const forwardParams = {
inputSize: sizeType,
scoreThreshold
}
const detections = await window.net.locateFaces(inputImgEl, forwardParams)
// Detections are rescaled to the on-screen image size before drawing.
faceapi.drawDetection('overlay', detections.map(det => det.forSize(width, height)))
}
// Fetches the image behind uri, displays it in #inputImg and re-runs detection.
async function onSelectionChanged(uri) {
const imgBuf = await fetchImage(uri)
$(`#inputImg`).get(0).src = (await faceapi.bufferToImage(imgBuf)).src
updateResults()
}
// Downloads a binary weight file and exposes it as a Float32Array.
async function loadNetWeights(uri) {
const res = await fetch(uri)
const data = await res.arrayBuffer()
return new Float32Array(data)
}
// Asks the server for the list of available detection JSON filenames.
async function fetchDetectionFilenames() {
const res = await fetch('/detection_filenames')
return res.json()
}
// Bootstraps the verify page: wires the image-number input, loads the trained
// weights into a TinyYolov2 net and shows the first selectable image.
async function run() {
$('#imgByNr').keydown(onKeyDown)
const weights = await loadNetWeights('/tmp/test.weights')
window.net = new faceapi.TinyYolov2(true)
await window.net.load(weights)
// Only the first 100 samples are offered for manual verification.
window.imgs = (await fetchDetectionFilenames()).slice(0, 100).map(f => f.replace('.json', ''))
$('#loader').hide()
onSelectionChanged($('#selectList select').val())
}
// Page init: render the navbar and the image select list, hook up the input
// size selector, then kick off network loading via run().
$(document).ready(function() {
renderNavBar('#navbar', 'tiny_yolov2_face_detection')
renderImageSelectList(
'#selectList',
async (uri) => {
await onSelectionChanged(uri)
},
'bbt1.jpg'
)
const sizeTypeSelect = $('#sizeType')
sizeTypeSelect.val(sizeType)
sizeTypeSelect.on('change', onSizeTypeChanged)
// material_select() initializes Materialize's styled select widget
// (materialize.js is loaded in this page's head).
sizeTypeSelect.material_select()
run()
})
</script>
</body>
</html>
\ No newline at end of file
{
"extends": "../../tsconfig.json",
"include": [
"tinyYolov2"
]
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment