Commit 958ca95f by vincent

Added more Node.js examples and moved all examples into the examples directory.

parent d31c4a6e
......@@ -6,3 +6,4 @@ proto
weights_uncompressed
weights_unused
docs
out
\ No newline at end of file
......@@ -2,7 +2,6 @@ node_modules
.rpt2_cache
examples
examples-nodejs
proto
weights
weights_uncompressed
......
// Face detection example: detect all faces in a still image and
// write the annotated result back to disk as a JPEG.
import '@tensorflow/tfjs-node';
import * as canvas from 'canvas';
import * as fs from 'fs';
import * as faceapi from '../src';

// Patch the nodejs environment with the canvas-backed DOM classes
// face-api.js expects (HTMLCanvasElement / HTMLImageElement).
faceapi.env.monkeyPatch({ Canvas: canvas.Canvas, Image: canvas.Image })

async function main() {
  // Load the SSD Mobilenet v1 detector weights from disk.
  await faceapi.nets.ssdMobilenetv1.loadFromDisk('../weights')

  const image = await canvas.loadImage('./bbt1.jpg')
  const faces = await faceapi.detectAllFaces(image)

  // Draw the detections onto a copy of the input and save it.
  const output = faceapi.createCanvasFromMedia(image) as any
  faceapi.drawDetection(output, faces)
  fs.writeFileSync('./faceDetection.jpg', output.toBuffer('image/jpeg'))
  console.log(faces)
}

main()
\ No newline at end of file
// Face and landmark detection example: detects all faces in a still
// image, draws detection boxes plus the 68-point landmarks, and saves
// the annotated result as a JPEG.
import '@tensorflow/tfjs-node';
import * as canvas from 'canvas';
import * as fs from 'fs';
import * as faceapi from '../src';

// Patch the nodejs environment with the canvas-backed DOM classes.
faceapi.env.monkeyPatch({ Canvas: canvas.Canvas, Image: canvas.Image })

async function main() {
  // Both the detector and the landmark net are required for this pipeline.
  await faceapi.nets.ssdMobilenetv1.loadFromDisk('../weights')
  await faceapi.nets.faceLandmark68Net.loadFromDisk('../weights')

  const image = await canvas.loadImage('./bbt1.jpg')
  const results = await faceapi.detectAllFaces(image).withFaceLandmarks()

  const output = faceapi.createCanvasFromMedia(image) as any
  faceapi.drawDetection(output, results.map(r => r.detection))
  faceapi.drawLandmarks(output, results.map(r => r.faceLandmarks), { drawLines: true, color: 'red' })
  fs.writeFileSync('./faceLandmarkDetection.jpg', output.toBuffer('image/jpeg'))
}

main()
\ No newline at end of file
// Names of the face image classes available under the images directory.
const classes = ['amy', 'bernadette', 'howard', 'leonard', 'penny', 'raj', 'sheldon', 'stuart']

/**
 * Builds the relative uri of the idx-th face image of a class,
 * e.g. getFaceImageUri('amy', 1) -> 'images/amy/amy1.png'.
 * Note: the original contained a second, unreachable return statement
 * (`${className}/${className}${idx}.png`) — dead code, removed.
 */
function getFaceImageUri(className, idx) {
  return `images/${className}/${className}${idx}.png`
}
function renderFaceImageSelectList(selectListId, onChange, initialValue) {
......
/**
 * Resolves an image file name to its uri under the images directory.
 */
function getImageUri(imageName) {
  return 'images/' + imageName
}
async function requestExternalImage(imageUrl) {
const res = await fetch('fetch_external_image', {
method: 'post',
......
......@@ -17,7 +17,7 @@ function renderImageSelectList(selectListId, onChange, initialValue) {
renderOption(
select,
imageName,
getImageUri(imageName)
imageName
)
)
}
......@@ -25,7 +25,7 @@ function renderImageSelectList(selectListId, onChange, initialValue) {
renderSelectList(
selectListId,
onChange,
getImageUri(initialValue),
initialValue,
renderChildren
)
}
......
......@@ -10,10 +10,10 @@ app.use(express.urlencoded({ extended: true }))
// Directory containing the html views served by this example server.
const viewsDir = path.join(__dirname, 'views')
app.use(express.static(viewsDir))
// Static assets for the browser examples.
app.use(express.static(path.join(__dirname, './public')))
// NOTE(review): both '../weights' and '../../weights' (and '../dist' vs
// '../../dist') are mounted below — one set looks like a leftover from
// moving the examples into their own directory; confirm which paths
// actually exist relative to this file.
app.use(express.static(path.join(__dirname, '../weights')))
app.use(express.static(path.join(__dirname, '../weights_uncompressed')))
app.use(express.static(path.join(__dirname, '../dist')))
app.use(express.static(path.join(__dirname, './node_modules/axios/dist')))
app.use(express.static(path.join(__dirname, '../images')))
app.use(express.static(path.join(__dirname, '../media')))
app.use(express.static(path.join(__dirname, '../../weights')))
app.use(express.static(path.join(__dirname, '../../dist')))
// Default route redirects to the face and landmark detection demo.
app.get('/', (req, res) => res.redirect('/face_and_landmark_detection'))
app.get('/face_and_landmark_detection', (req, res) => res.sendFile(path.join(viewsDir, 'faceAndLandmarkDetection.html')))
......
......@@ -18,7 +18,7 @@
<div class="indeterminate"></div>
</div>
<div style="position: relative" class="margin">
<video src="media/bbt.mp4" id="inputVideo" autoplay muted loop></video>
<video src="bbt.mp4" id="inputVideo" autoplay muted loop></video>
<canvas id="overlay" />
</div>
......
// Loads the nodejs bindings to native tensorflow. Not required, but
// speeds things up drastically (a python installation is required).
import '@tensorflow/tfjs-node';
// The canvas package implements nodejs wrappers for HTMLCanvasElement,
// HTMLImageElement and ImageData.
import * as canvas from 'canvas';
import * as faceapi from '../../../src';

// Patch the nodejs environment: face-api.js needs implementations of
// HTMLCanvasElement and HTMLImageElement, and additionally ImageData in
// case the MTCNN detector is used.
faceapi.env.monkeyPatch({
  Canvas: canvas.Canvas,
  Image: canvas.Image,
  ImageData: canvas.ImageData,
})

export { canvas, faceapi }
\ No newline at end of file
import { NeuralNetwork } from 'tfjs-image-recognition-base';
import { faceapi } from './env';
// Active face detector network; swap in one of the commented
// alternatives to try a different detector.
export const faceDetectionNet = faceapi.nets.ssdMobilenetv1
// export const faceDetectionNet = faceapi.nets.tinyFaceDetector
// export const faceDetectionNet = faceapi.nets.mtcnn

// SsdMobilenetv1Options
const minConfidence = 0.5

// TinyFaceDetectorOptions
// inputSize must be divisible by 32 (e.g. 128, 160, 224, 320, 416, 512,
// 608). The previous value 408 violated that constraint and would fail
// if the tiny face detector were selected above.
const inputSize = 416
const scoreThreshold = 0.5

// MtcnnOptions
const minFaceSize = 50
const scaleFactor = 0.8
/**
 * Maps the selected detector network to its matching options instance,
 * built from the tuning constants defined above.
 */
function getFaceDetectorOptions(net: NeuralNetwork<any>) {
  if (net === faceapi.nets.ssdMobilenetv1) {
    return new faceapi.SsdMobilenetv1Options({ minConfidence })
  }
  if (net === faceapi.nets.tinyFaceDetector) {
    return new faceapi.TinyFaceDetectorOptions({ inputSize, scoreThreshold })
  }
  return new faceapi.MtcnnOptions({ minFaceSize, scaleFactor })
}

export const faceDetectionOptions = getFaceDetectorOptions(faceDetectionNet)
\ No newline at end of file
// Barrel module: re-exports the shared helpers used by the nodejs examples.
export { canvas, faceapi } from './env';
export { faceDetectionNet, faceDetectionOptions } from './faceDetection';
export { saveFile } from './saveFile';
\ No newline at end of file
import * as fs from 'fs';
import * as path from 'path';
// All example outputs are written to the top-level "out" directory.
const baseDir = path.resolve(__dirname, '../out')

/**
 * Writes buf to fileName inside the out directory, creating the
 * directory on first use.
 * @param fileName file name, resolved relative to the out directory
 * @param buf file contents
 */
export function saveFile(fileName: string, buf: Buffer) {
  // { recursive: true } is a no-op when the directory already exists and
  // also creates missing parents — avoids the existsSync/mkdirSync race
  // of the previous version.
  fs.mkdirSync(baseDir, { recursive: true })
  fs.writeFileSync(path.resolve(baseDir, fileName), buf)
}
\ No newline at end of file
// Face detection example built on the shared commons helpers: detects
// all faces in an image and writes the annotated output via saveFile.
import { canvas, faceapi, faceDetectionNet, faceDetectionOptions, saveFile } from './commons';

async function main() {
  // Weights are resolved relative to the example's working directory.
  await faceDetectionNet.loadFromDisk('../../weights')

  const image = await canvas.loadImage('../images/bbt1.jpg')
  const faces = await faceapi.detectAllFaces(image, faceDetectionOptions)

  const output = faceapi.createCanvasFromMedia(image) as any
  faceapi.drawDetection(output, faces)
  saveFile('faceDetection.jpg', output.toBuffer('image/jpeg'))
}

main()
\ No newline at end of file
// Face and landmark detection example built on the shared commons
// helpers: draws detection boxes and 68-point landmarks, then saves the
// annotated output via saveFile.
import { canvas, faceapi, faceDetectionNet, faceDetectionOptions, saveFile } from './commons';

async function main() {
  // The landmark pipeline needs both the detector and the landmark net.
  await faceDetectionNet.loadFromDisk('../../weights')
  await faceapi.nets.faceLandmark68Net.loadFromDisk('../../weights')

  const image = await canvas.loadImage('../images/bbt1.jpg')
  const results = await faceapi.detectAllFaces(image, faceDetectionOptions)
    .withFaceLandmarks()

  const output = faceapi.createCanvasFromMedia(image) as any
  faceapi.drawDetection(output, results.map(r => r.detection))
  faceapi.drawLandmarks(output, results.map(r => r.faceLandmarks), { drawLines: true, color: 'red' })
  saveFile('faceLandmarkDetection.jpg', output.toBuffer('image/jpeg'))
}

main()
\ No newline at end of file
// Face recognition example: computes face descriptors for every face in
// a reference image, then labels the faces found in a query image by
// their best-matching descriptor.
import { canvas, faceapi, faceDetectionNet, faceDetectionOptions, saveFile } from './commons';
// Input images, resolved relative to the example's working directory.
const REFERENCE_IMAGE = '../images/bbt1.jpg'
const QUERY_IMAGE = '../images/bbt4.jpg'
async function run() {
// The recognition pipeline needs the detector plus the landmark and
// descriptor networks.
await faceDetectionNet.loadFromDisk('../../weights')
await faceapi.nets.faceLandmark68Net.loadFromDisk('../../weights')
await faceapi.nets.faceRecognitionNet.loadFromDisk('../../weights')
const referenceImage = await canvas.loadImage(REFERENCE_IMAGE)
const queryImage = await canvas.loadImage(QUERY_IMAGE)
// Full pipeline on both images: detection -> landmarks -> descriptors.
const resultsRef = await faceapi.detectAllFaces(referenceImage, faceDetectionOptions)
.withFaceLandmarks()
.withFaceDescriptors()
const resultsQuery = await faceapi.detectAllFaces(queryImage, faceDetectionOptions)
.withFaceLandmarks()
.withFaceDescriptors()
// Matcher built from the reference results; labels come from its
// labeledDescriptors (presumably generated by FaceMatcher itself, since
// none are passed in — verify against the FaceMatcher constructor).
const faceMatcher = new faceapi.FaceMatcher(resultsRef)
const labels = faceMatcher.labeledDescriptors
.map(ld => ld.label)
// Annotate each reference face box with its label (labels[i] pairs with
// box i because both derive from resultsRef in the same order).
const refBoxesWithText = resultsRef
.map(res => res.detection.box)
.map((box, i) => new faceapi.BoxWithText(box, labels[i]))
const outRef = faceapi.createCanvasFromMedia(referenceImage) as any
faceapi.drawDetection(outRef, refBoxesWithText)
saveFile('referenceImage.jpg', outRef.toBuffer('image/jpeg'))
// For each query face, find the closest reference descriptor and draw
// the match's string representation onto the query image.
const queryBoxesWithText = resultsQuery.map(res => {
const bestMatch = faceMatcher.findBestMatch(res.descriptor)
return new faceapi.BoxWithText(res.detection.box, bestMatch.toString())
})
const outQuery = faceapi.createCanvasFromMedia(queryImage) as any
faceapi.drawDetection(outQuery, queryBoxesWithText)
saveFile('queryImage.jpg', outQuery.toBuffer('image/jpeg'))
}
run()
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment