Commit 9a54b06c
authored Apr 29, 2019 by vincent
fixed examples

parent df5ac5da
Showing 18 changed files with 102 additions and 122 deletions
examples/examples-browser/public/js/drawing.js                          +0   -43
examples/examples-browser/views/batchFaceLandmarks.html                 +1   -1
examples/examples-browser/views/batchFaceRecognition.html               +4   -7
examples/examples-browser/views/bbtFaceLandmarkDetection.html           +2   -2
examples/examples-browser/views/bbtFaceRecognition.html                 +11  -10
examples/examples-browser/views/faceDetection.html                      +3   -2
examples/examples-browser/views/faceExpressionRecognition.html          +8   -2
examples/examples-browser/views/faceExtraction.html                     +1   -3
examples/examples-browser/views/faceLandmarkDetection.html              +8   -2
examples/examples-browser/views/faceRecognition.html                    +21  -21
examples/examples-browser/views/videoFaceTracking.html                  +15  -9
examples/examples-browser/views/webcamFaceDetection.html                +3   -2
examples/examples-browser/views/webcamFaceExpressionRecognition.html    +9   -2
examples/examples-browser/views/webcamFaceLandmarkDetection.html        +0   -1
examples/examples-nodejs/faceDetection.ts                               +1   -1
examples/examples-nodejs/faceExpressionRecognition.ts                   +2   -2
examples/examples-nodejs/faceLandmarkDetection.ts                       +2   -2
examples/examples-nodejs/faceRecognition.ts                             +11  -10
examples/examples-browser/public/js/drawing.js  (deleted, 100644 → 0)
function resizeCanvasAndResults(dimensions, canvas, results) {
  const { width, height } = dimensions instanceof HTMLVideoElement
    ? faceapi.getMediaDimensions(dimensions)
    : dimensions
  canvas.width = width
  canvas.height = height

  // resize detections (and landmarks) in case displayed image is smaller than
  // original size
  return faceapi.resizeResults(results, { width, height })
}

function drawDetections(dimensions, canvas, detections) {
  const resizedDetections = resizeCanvasAndResults(dimensions, canvas, detections)
  faceapi.drawDetection(canvas, resizedDetections)
}

function drawLandmarks(dimensions, canvas, results, withBoxes = true) {
  const resizedResults = resizeCanvasAndResults(dimensions, canvas, results)

  if (withBoxes) {
    faceapi.drawDetection(canvas, resizedResults.map(det => det.detection))
  }

  const faceLandmarks = resizedResults.map(det => det.landmarks)
  const drawLandmarksOptions = {
    lineWidth: 2,
    drawLines: true,
    color: 'green'
  }
  faceapi.drawLandmarks(canvas, faceLandmarks, drawLandmarksOptions)
}

function drawExpressions(dimensions, canvas, results, thresh, withBoxes = true) {
  const resizedResults = resizeCanvasAndResults(dimensions, canvas, results)

  if (withBoxes) {
    faceapi.drawDetection(canvas, resizedResults.map(det => det.detection), { withScore: false })
  }

  faceapi.drawFaceExpressions(canvas, resizedResults.map(({ detection, expressions }) => ({ position: detection.box, expressions })))
}
\ No newline at end of file
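The deleted helpers above are superseded by drawing utilities shipped with face-api.js itself, which is what the remaining diffs in this commit switch to. A minimal sketch of that replacement pattern, assuming a face-api.js build that exposes faceapi.matchDimensions, faceapi.resizeResults and the faceapi.draw namespace (as the updated examples below use); inputImgEl and canvas stand in for whatever media element and overlay canvas a page provides:

// Sketch only: models are assumed to be loaded already, and inputImgEl /
// canvas are placeholders for the page's media element and overlay canvas.
async function detectAndDraw(inputImgEl, canvas) {
  const results = await faceapi
    .detectAllFaces(inputImgEl)
    .withFaceLandmarks()

  // size the overlay canvas to the displayed media
  faceapi.matchDimensions(canvas, inputImgEl)

  // shrink/grow detections and landmarks to the displayed size, then draw
  const resizedResults = faceapi.resizeResults(results, inputImgEl)
  faceapi.draw.drawDetections(canvas, resizedResults)
  faceapi.draw.drawFaceLandmarks(canvas, resizedResults)
}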
examples/examples-browser/views/batchFaceLandmarks.html

@@ -65,7 +65,7 @@
   function drawLandmarkCanvas(img, landmarks) {
     const canvas = faceapi.createCanvasFromMedia(img)
     $('#faceContainer').append(canvas)
-    faceapi.drawLandmarks(canvas, landmarks, { lineWidth: 2, drawLines: true })
+    new faceapi.draw.DrawFaceLandmarks(landmarks).draw(canvas)
   }

   async function runLandmarkDetection(useBatchInput) {
examples/examples-browser/views/batchFaceRecognition.html

@@ -68,13 +68,10 @@
       $('#faceContainer').append(canvas)

       const x = 20, y = canvas.height - 20
-      faceapi.drawText(
-        canvas.getContext('2d'),
-        x,
-        y,
-        faceMatcher.findBestMatch(descriptor).toString(),
-        Object.assign(faceapi.getDefaultDrawOptions(), { color: 'red', fontSize: 16 })
-      )
+      const ctx = faceapi.getContext2dOrThrow(canvas)
+      ctx.font = '16px Georgia'
+      ctx.fillStyle = 'red'
+      ctx.fillText(faceMatcher.findBestMatch(descriptor).toString(), x, y)
     }

   async function runComputeFaceDescriptors(useBatchInput) {
examples/examples-browser/views/bbtFaceLandmarkDetection.html

@@ -43,12 +43,12 @@
     const canvas = faceapi.createCanvasFromMedia(currentImg)
     $('#faceContainer').empty()
     $('#faceContainer').append(canvas)
-    faceapi.drawLandmarks(canvas, landmarks, { lineWidth: drawLines ? 2 : 4, drawLines })
+    new faceapi.draw.DrawFaceLandmarks(landmarks, { drawLines }).draw(canvas)
   }

   async function onSelectionChanged(uri) {
     currentImg = await faceapi.fetchImage(uri)
-    landmarks = await faceapi.detectLandmarks(currentImg)
+    landmarks = await faceapi.detectFaceLandmarks(currentImg)
     redraw()
   }
examples/examples-browser/views/bbtFaceRecognition.html

@@ -3,7 +3,6 @@
 <head>
   <script src="face-api.js"></script>
   <script src="js/commons.js"></script>
-  <script src="js/drawing.js"></script>
   <script src="js/faceDetectionControls.js"></script>
   <script src="js/imageSelectionControls.js"></script>
   <script src="js/bbt.js"></script>

@@ -159,17 +158,19 @@
   function drawFaceRecognitionResults(results) {
     const canvas = $('#overlay').get(0)
+    const inputImgEl = $('#inputImg').get(0)
+    faceapi.matchDimensions(canvas, inputImgEl)

     // resize detection and landmarks in case displayed image is smaller than
     // original size
-    resizedResults = resizeCanvasAndResults($('#inputImg').get(0), canvas, results)
+    const resizedResults = faceapi.resizeResults(results, inputImgEl)

-    const boxesWithText = resizedResults.map(({ detection, descriptor }) =>
-      new faceapi.BoxWithText(
-        detection.box,
-        faceMatcher.findBestMatch(descriptor).toString()
-      )
-    )
-    faceapi.drawDetection(canvas, boxesWithText)
+    resizedResults.forEach(({ detection, descriptor }) => {
+      const label = faceMatcher.findBestMatch(descriptor).toString()
+      const options = { label }
+      const drawBox = new faceapi.draw.DrawBox(detection.box, options)
+      drawBox.draw(canvas)
+    })
   }

   async function run() {
examples/examples-browser/views/faceDetection.html

@@ -3,7 +3,6 @@
 <head>
   <script src="face-api.js"></script>
   <script src="js/commons.js"></script>
-  <script src="js/drawing.js"></script>
   <script src="js/faceDetectionControls.js"></script>
   <script src="js/imageSelectionControls.js"></script>
   <link rel="stylesheet" href="styles.css">

@@ -148,7 +147,9 @@
     const results = await faceapi.detectAllFaces(inputImgEl, options)

-    drawDetections(inputImgEl, $('#overlay').get(0), results)
+    const canvas = $('#overlay').get(0)
+    faceapi.matchDimensions(canvas, inputImgEl)
+    faceapi.draw.drawDetections(canvas, faceapi.resizeResults(results, inputImgEl))
   }

   async function run() {
examples/examples-browser/views/faceExpressionRecognition.html

@@ -3,7 +3,6 @@
 <head>
   <script src="face-api.js"></script>
   <script src="js/commons.js"></script>
-  <script src="js/drawing.js"></script>
   <script src="js/faceDetectionControls.js"></script>
   <script src="js/imageSelectionControls.js"></script>
   <link rel="stylesheet" href="styles.css">

@@ -150,7 +149,14 @@
     const options = getFaceDetectorOptions()
     const results = await faceapi.detectAllFaces(inputImgEl, options).withFaceExpressions()

-    drawExpressions(inputImgEl, $('#overlay').get(0), results, thresh, true)
+    const canvas = $('#overlay').get(0)
+    faceapi.matchDimensions(canvas, inputImgEl)
+
+    const resizedResults = faceapi.resizeResults(results, inputImgEl)
+    const minConfidence = 0.05
+    faceapi.draw.drawDetections(canvas, resizedResults)
+    faceapi.draw.drawFaceExpressions(canvas, resizedResults, minConfidence)
   }

   async function run() {
examples/examples-browser/views/faceExtraction.html

@@ -150,9 +150,7 @@
   function displayExtractedFaces(faceImages) {
     const canvas = $('#overlay').get(0)
-    const { width, height } = $('#inputImg').get(0)
-    canvas.width = width
-    canvas.height = height
+    faceapi.matchDimensions(canvas, $('#inputImg').get(0))

     $('#facesContainer').empty()
     faceImages.forEach(canvas => $('#facesContainer').append(canvas))
examples/examples-browser/views/faceLandmarkDetection.html

@@ -3,7 +3,6 @@
 <head>
   <script src="face-api.js"></script>
   <script src="js/commons.js"></script>
-  <script src="js/drawing.js"></script>
   <script src="js/faceDetectionControls.js"></script>
   <script src="js/imageSelectionControls.js"></script>
   <link rel="stylesheet" href="styles.css">

@@ -162,7 +161,14 @@
     const results = await faceapi.detectAllFaces(inputImgEl, options).withFaceLandmarks()

-    drawLandmarks(inputImgEl, $('#overlay').get(0), results, withBoxes)
+    const canvas = $('#overlay').get(0)
+    faceapi.matchDimensions(canvas, inputImgEl)
+
+    const resizedResults = faceapi.resizeResults(results, inputImgEl)
+    if (withBoxes) {
+      faceapi.draw.drawDetections(canvas, resizedResults)
+    }
+    faceapi.draw.drawFaceLandmarks(canvas, resizedResults)
   }

   async function run() {
examples/examples-browser/views/faceRecognition.html

@@ -3,7 +3,6 @@
 <head>
   <script src="face-api.js"></script>
   <script src="js/commons.js"></script>
-  <script src="js/drawing.js"></script>
   <script src="js/faceDetectionControls.js"></script>
   <link rel="stylesheet" href="styles.css">
   <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">

@@ -205,11 +204,11 @@
   }

   async function updateReferenceImageResults() {
-    const imgEl = $('#refImg').get(0)
+    const inputImgEl = $('#refImg').get(0)
     const canvas = $('#refImgOverlay').get(0)

     const fullFaceDescriptions = await faceapi
-      .detectAllFaces(imgEl, getFaceDetectorOptions())
+      .detectAllFaces(inputImgEl, getFaceDetectorOptions())
       .withFaceLandmarks()
       .withFaceDescriptors()

@@ -221,16 +220,19 @@
     // from the detection results for the reference image
     faceMatcher = new faceapi.FaceMatcher(fullFaceDescriptions)

+    faceapi.matchDimensions(canvas, inputImgEl)
+
     // resize detection and landmarks in case displayed image is smaller than
     // original size
-    resizedResults = resizeCanvasAndResults(imgEl, canvas, fullFaceDescriptions)
+    const resizedResults = faceapi.resizeResults(fullFaceDescriptions, inputImgEl)

     // draw boxes with the corresponding label as text
     const labels = faceMatcher.labeledDescriptors
       .map(ld => ld.label)
-    const boxesWithText = resizedResults
-      .map(res => res.detection.box)
-      .map((box, i) => new faceapi.BoxWithText(box, labels[i]))
-    faceapi.drawDetection(canvas, boxesWithText)
+    resizedResults.forEach(({ detection, descriptor }) => {
+      const label = faceMatcher.findBestMatch(descriptor).toString()
+      const options = { label }
+      const drawBox = new faceapi.draw.DrawBox(detection.box, options)
+      drawBox.draw(canvas)
+    })
   }

   async function updateQueryImageResults() {

@@ -238,27 +240,25 @@
       return
     }

-    const imgEl = $('#queryImg').get(0)
+    const inputImgEl = $('#queryImg').get(0)
     const canvas = $('#queryImgOverlay').get(0)

     const results = await faceapi
-      .detectAllFaces(imgEl, getFaceDetectorOptions())
+      .detectAllFaces(inputImgEl, getFaceDetectorOptions())
       .withFaceLandmarks()
       .withFaceDescriptors()

+    faceapi.matchDimensions(canvas, inputImgEl)
+
     // resize detection and landmarks in case displayed image is smaller than
     // original size
-    resizedResults = resizeCanvasAndResults(imgEl, canvas, results)
+    const resizedResults = faceapi.resizeResults(results, inputImgEl)

-    // draw boxes with the corresponding label as text
-    const boxesWithText = resizedResults.map(({ detection, descriptor }) =>
-      new faceapi.BoxWithText(
-        detection.box,
-        // match each face descriptor to the reference descriptor
-        // with lowest euclidean distance and display the result as text
-        faceMatcher.findBestMatch(descriptor).toString()
-      )
-    )
-    faceapi.drawDetection(canvas, boxesWithText)
+    resizedResults.forEach(({ detection, descriptor }) => {
+      const label = faceMatcher.findBestMatch(descriptor).toString()
+      const options = { label }
+      const drawBox = new faceapi.draw.DrawBox(detection.box, options)
+      drawBox.draw(canvas)
+    })
   }

   async function updateResults() {
examples/examples-browser/views/videoFaceTracking.html

@@ -3,7 +3,6 @@
 <head>
   <script src="face-api.js"></script>
   <script src="js/commons.js"></script>
-  <script src="js/drawing.js"></script>
   <script src="js/faceDetectionControls.js"></script>
   <link rel="stylesheet" href="styles.css">
   <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">

@@ -169,18 +168,25 @@
     const ts = Date.now()

-    const faceDetectionTask = faceapi.detectAllFaces(videoEl, options)
-    const results = withFaceLandmarks
-      ? await faceDetectionTask.withFaceLandmarks()
-      : await faceDetectionTask
+    const drawBoxes = withBoxes
+    const drawLandmarks = withFaceLandmarks
+
+    let task = faceapi.detectAllFaces(videoEl, options)
+    task = withFaceLandmarks ? task.withFaceLandmarks() : task
+    const results = await task

     updateTimeStats(Date.now() - ts)

-    const drawFunction = withFaceLandmarks
-      ? drawLandmarks
-      : drawDetections
-    drawFunction(videoEl, $('#overlay').get(0), results, withBoxes)
+    const canvas = $('#overlay').get(0)
+    const dims = faceapi.matchDimensions(canvas, videoEl, true)
+
+    const resizedResults = faceapi.resizeResults(results, dims)
+    if (drawBoxes) {
+      faceapi.draw.drawDetections(canvas, resizedResults)
+    }
+    if (drawLandmarks) {
+      faceapi.draw.drawFaceLandmarks(canvas, resizedResults)
+    }

     setTimeout(() => onPlay(videoEl))
   }
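The video and webcam examples above all converge on the same per-frame loop. A condensed sketch of that loop, assuming the detector model is already loaded and that videoEl and canvas are the page's video element and overlay canvas (the names here are placeholders):

// Per-frame loop: size the overlay to the video's media dimensions (the
// third `true` argument), resize the detections to those dimensions, draw,
// then schedule the next frame.
async function onPlayLoop(videoEl, canvas) {
  const results = await faceapi.detectAllFaces(videoEl)

  const dims = faceapi.matchDimensions(canvas, videoEl, true)
  faceapi.draw.drawDetections(canvas, faceapi.resizeResults(results, dims))

  setTimeout(() => onPlayLoop(videoEl, canvas))
}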
examples/examples-browser/views/webcamFaceDetection.html

@@ -3,7 +3,6 @@
 <head>
   <script src="face-api.js"></script>
   <script src="js/commons.js"></script>
-  <script src="js/drawing.js"></script>
   <script src="js/faceDetectionControls.js"></script>
   <link rel="stylesheet" href="styles.css">
   <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">

@@ -159,7 +158,9 @@
     updateTimeStats(Date.now() - ts)

     if (result) {
-      drawDetections(videoEl, $('#overlay').get(0), [result])
+      const canvas = $('#overlay').get(0)
+      faceapi.matchDimensions(canvas, videoEl)
+      faceapi.draw.drawDetections(canvas, faceapi.resizeResults(results, videoEl))
     }

     setTimeout(() => onPlay())
examples/examples-browser/views/webcamFaceExpressionRecognition.html

@@ -3,7 +3,6 @@
 <head>
   <script src="face-api.js"></script>
   <script src="js/commons.js"></script>
-  <script src="js/drawing.js"></script>
   <script src="js/faceDetectionControls.js"></script>
   <link rel="stylesheet" href="styles.css">
   <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">

@@ -171,7 +170,15 @@
     updateTimeStats(Date.now() - ts)

     if (result) {
-      drawExpressions(videoEl, $('#overlay').get(0), [result], withBoxes)
+      const canvas = $('#overlay').get(0)
+      faceapi.matchDimensions(canvas, videoEl, true)
+
+      const resizedResult = faceapi.resizeResults(result, videoEl)
+      const minConfidence = 0.05
+      if (withBoxes) {
+        faceapi.draw.drawDetections(canvas, resizedResults)
+      }
+      faceapi.draw.drawFaceExpressions(canvas, resizedResults, minConfidence)
     }

     setTimeout(() => onPlay())
examples/examples-browser/views/webcamFaceLandmarkDetection.html

@@ -3,7 +3,6 @@
 <head>
   <script src="face-api.js"></script>
   <script src="js/commons.js"></script>
-  <script src="js/drawing.js"></script>
   <script src="js/faceDetectionControls.js"></script>
   <link rel="stylesheet" href="styles.css">
   <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css">
examples/examples-nodejs/faceDetection.ts

@@ -10,7 +10,7 @@ async function run() {
   const detections = await faceapi.detectAllFaces(img, faceDetectionOptions)

   const out = faceapi.createCanvasFromMedia(img) as any
-  faceapi.drawDetection(out, detections)
+  faceapi.draw.drawDetections(out, detections)

   saveFile('faceDetection.jpg', out.toBuffer('image/jpeg'))
   console.log('done, saved results to out/faceDetection.jpg')
examples/examples-nodejs/faceExpressionRecognition.ts

@@ -12,8 +12,8 @@ async function run() {
     .withFaceExpressions()

   const out = faceapi.createCanvasFromMedia(img) as any
-  faceapi.drawDetection(out, results.map(res => res.detection), { withScore: false })
-  faceapi.drawFaceExpressions(out, results.map(({ detection, expressions }) => ({ position: detection.box, expressions })))
+  faceapi.draw.drawDetections(out, results.map(res => res.detection))
+  faceapi.draw.drawFaceExpressions(out, results)

   saveFile('faceExpressionRecognition.jpg', out.toBuffer('image/jpeg'))
   console.log('done, saved results to out/faceExpressionRecognition.jpg')
examples/examples-nodejs/faceLandmarkDetection.ts

@@ -12,8 +12,8 @@ async function run() {
     .withFaceLandmarks()

   const out = faceapi.createCanvasFromMedia(img) as any
-  faceapi.drawDetection(out, results.map(res => res.detection))
-  faceapi.drawLandmarks(out, results.map(res => res.landmarks), { drawLines: true, color: 'red' })
+  faceapi.draw.drawDetections(out, results.map(res => res.detection))
+  faceapi.draw.drawFaceLandmarks(out, results.map(res => res.landmarks))

   saveFile('faceLandmarkDetection.jpg', out.toBuffer('image/jpeg'))
   console.log('done, saved results to out/faceLandmarkDetection.jpg')
examples/examples-nodejs/faceRecognition.ts

@@ -26,20 +26,21 @@ async function run() {
   const labels = faceMatcher.labeledDescriptors
     .map(ld => ld.label)
-  const refBoxesWithText = resultsRef
+  const refDrawBoxes = resultsRef
     .map(res => res.detection.box)
-    .map((box, i) => new faceapi.BoxWithText(box, labels[i]))
+    .map((box, i) => new faceapi.draw.DrawBox(box, { label: labels[i] }))

-  const outRef = faceapi.createCanvasFromMedia(referenceImage) as any
-  faceapi.drawDetection(outRef, refBoxesWithText)
-  saveFile('referenceImage.jpg', outRef.toBuffer('image/jpeg'))
+  const outRef = faceapi.createCanvasFromMedia(referenceImage)
+  refDrawBoxes.forEach(drawBox => drawBox.draw(outRef))
+  saveFile('referenceImage.jpg', (outRef as any).toBuffer('image/jpeg'))

-  const queryBoxesWithText = resultsQuery.map(res => {
+  const queryDrawBoxes = resultsQuery.map(res => {
     const bestMatch = faceMatcher.findBestMatch(res.descriptor)
-    return new faceapi.BoxWithText(res.detection.box, bestMatch.toString())
+    return new faceapi.draw.DrawBox(res.detection.box, { label: bestMatch.toString() })
   })

-  const outQuery = faceapi.createCanvasFromMedia(queryImage) as any
-  faceapi.drawDetection(outQuery, queryBoxesWithText)
-  saveFile('queryImage.jpg', outQuery.toBuffer('image/jpeg'))
+  const outQuery = faceapi.createCanvasFromMedia(queryImage)
+  queryDrawBoxes.forEach(drawBox => drawBox.draw(outQuery))
+  saveFile('queryImage.jpg', (outQuery as any).toBuffer('image/jpeg'))

   console.log('done, saved results to out/queryImage.jpg')
 }
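The Node.js examples make the same move as the browser ones: the top-level faceapi.drawDetection, faceapi.drawLandmarks, faceapi.drawFaceExpressions and faceapi.BoxWithText helpers give way to the faceapi.draw namespace. A standalone sketch of the updated flow, assuming the node-canvas monkey patch that the face-api.js Node examples rely on, weights in a local ./models directory, and placeholder file names (none of these specifics come from this commit):

// Sketch only: model choice, paths and file names are assumptions.
const canvas = require('canvas')
const faceapi = require('face-api.js')
const fs = require('fs')

// patch face-api.js to use node-canvas instead of browser DOM classes
const { Canvas, Image, ImageData } = canvas
faceapi.env.monkeyPatch({ Canvas, Image, ImageData })

async function run() {
  await faceapi.nets.ssdMobilenetv1.loadFromDisk('./models')
  await faceapi.nets.faceLandmark68Net.loadFromDisk('./models')

  const img = await canvas.loadImage('input.jpg')
  const results = await faceapi.detectAllFaces(img).withFaceLandmarks()

  // draw onto a canvas created from the input image and save it to disk
  const out = faceapi.createCanvasFromMedia(img)
  faceapi.draw.drawDetections(out, results.map(res => res.detection))
  faceapi.draw.drawFaceLandmarks(out, results.map(res => res.landmarks))
  fs.writeFileSync('out.jpg', out.toBuffer('image/jpeg'))
}

run().catch(console.error)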