Skip to content
Projects
Groups
Snippets
Help
This project
Loading...
Sign in / Register
Toggle navigation
F
face
Project
Overview
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
Иван Кубота
face
Commits
d3ddbb5d
Commit
d3ddbb5d
authored
Jul 03, 2018
by
vincent
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
add batch face recognition example + batch processing in allFaces can be enabled via flag
parent
1e2d2616
Show whitespace changes
Inline
Side-by-side
Showing
7 changed files
with
171 additions
and
14 deletions
+171
-14
commons.js
examples/public/commons.js
+4
-0
styles.css
examples/public/styles.css
+4
-0
server.js
examples/server.js
+1
-2
batchFaceRecognition.html
examples/views/batchFaceRecognition.html
+136
-0
detectAndRecognizeFaces.html
examples/views/detectAndRecognizeFaces.html
+14
-5
allFacesFactory.ts
src/allFacesFactory.ts
+10
-6
globalApi.ts
src/globalApi.ts
+2
-1
No files found.
examples/public/commons.js
View file @
d3ddbb5d
...
...
@@ -117,6 +117,10 @@ function renderNavBar(navbarId, exampleUri) {
{
uri
:
'batch_face_landmarks'
,
name
:
'Batch Face Landmarks'
},
{
uri
:
'batch_face_recognition'
,
name
:
'Batch Face Recognition'
}
]
...
...
examples/public/styles.css
View file @
d3ddbb5d
...
...
@@ -49,6 +49,10 @@
margin
:
20px
;
}
.button-sm
{
padding
:
0
10px
!important
;
}
#github-link
{
display
:
flex
!important
;
justify-content
:
center
;
...
...
examples/server.js
View file @
d3ddbb5d
...
...
@@ -25,8 +25,7 @@ app.get('/detect_and_draw_landmarks', (req, res) => res.sendFile(path.join(views
// Each example route simply serves the corresponding view file.
const exampleViews = {
  '/face_alignment': 'faceAlignment.html',
  '/detect_and_recognize_faces': 'detectAndRecognizeFaces.html',
  '/batch_face_landmarks': 'batchFaceLandmarks.html',
  '/batch_face_recognition': 'batchFaceRecognition.html',
}
Object.entries(exampleViews).forEach(([route, view]) => {
  app.get(route, (req, res) => res.sendFile(path.join(viewsDir, view)))
})
app
.
post
(
'/fetch_external_image'
,
async
(
req
,
res
)
=>
{
const
{
imageUrl
}
=
req
.
body
...
...
examples/views/batchFaceRecognition.html
0 → 100644
View file @
d3ddbb5d
<!DOCTYPE html>
<html>
<head>
<script
src=
"face-api.js"
></script>
<script
src=
"commons.js"
></script>
<link
rel=
"stylesheet"
href=
"styles.css"
>
<link
rel=
"stylesheet"
href=
"https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/css/materialize.css"
>
<script
type=
"text/javascript"
src=
"https://code.jquery.com/jquery-2.1.1.min.js"
></script>
<script
src=
"https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.2/js/materialize.min.js"
></script>
</head>
<body>
<div
id=
"navbar"
></div>
<div
class=
"center-content page-container"
>
<div>
<div
class=
"progress"
id=
"loader"
>
<div
class=
"indeterminate"
></div>
</div>
<div
class=
"row side-by-side"
>
<div
class=
"row"
>
<label
for=
"timeNoBatch"
>
Time for processing each face separately:
</label>
<input
disabled
value=
"-"
id=
"timeNoBatch"
type=
"text"
class=
"bold"
/>
</div>
<div
class=
"row"
>
<label
for=
"timeBatch"
>
Time for processing in Batch:
</label>
<input
disabled
value=
"-"
id=
"timeBatch"
type=
"text"
class=
"bold"
/>
</div>
</div>
<div
class=
"row side-by-side"
>
<div>
<label
for=
"numImages"
>
Num Images:
</label>
<input
id=
"numImages"
type=
"text"
class=
"bold"
value=
"32"
/>
</div>
<button
class=
"waves-effect waves-light btn"
onclick=
"measureTimingsAndDisplay()"
>
Ok
</button>
</div>
<div
class=
"row side-by-side"
>
<div
class=
"center-content"
>
<div
id=
"faceContainer"
></div>
</div>
</div>
</div>
</div>
<script>
// Test images the descriptors are computed for (loaded in run()).
let images = []
// Reference descriptors per class; presumably filled by initTrainDescriptorsByClass — see run().
let trainDescriptorsByClass = []
// Descriptors produced by the most recent timing run, rendered afterwards.
let descriptorsByFace = []
// How many of the loaded images to process, clamped to [0, 32] by the input handler.
let numImages = 32
// Matches with a distance above this threshold are labeled as unknown.
let maxDistance = 0.6
function onNumImagesChanged(e) {
  // Parse the requested number of images with an explicit decimal radix,
  // falling back to the default of 32 on unparseable input.
  // NOTE: an explicit "0" is falsy and therefore also falls back to 32.
  const val = parseInt(e.target.value, 10) || 32
  // Clamp to the [0, 32] range of available test images.
  numImages = Math.min(Math.max(val, 0), 32)
  // Write the sanitized value back into the input field.
  e.target.value = numImages
}
function displayTimeStats(timeNoBatch, timeBatch) {
  // Show both measured durations in their read-only input fields.
  const stats = [
    ['#timeNoBatch', timeNoBatch],
    ['#timeBatch', timeBatch],
  ]
  for (const [selector, ms] of stats) {
    $(selector).val(`${ms} ms`)
  }
}
function drawFaceRecognitionCanvas(img, descriptor) {
  // Render the face image onto a canvas, look up the closest training
  // descriptor and draw the predicted class label with its distance.
  const canvas = faceapi.createCanvasFromMedia(img)
  $('#faceContainer').append(canvas)

  const bestMatch = getBestMatch(trainDescriptorsByClass, descriptor)
  // Only accept the match when it is closer than the maxDistance threshold.
  // Fixed user-facing typo: 'unkown' -> 'unknown'.
  const label = bestMatch.distance < maxDistance ? bestMatch.className : 'unknown'
  const text = `${label} (${bestMatch.distance})`

  // Draw the label near the bottom left corner of the canvas.
  const x = 20, y = canvas.height - 20
  faceapi.drawText(
    canvas.getContext('2d'),
    x,
    y,
    text,
    Object.assign(faceapi.getDefaultDrawOptions(), { color: 'red', fontSize: 16 })
  )
}
async function runComputeFaceDescriptors(useBatchInput) {
  // Time how long computing descriptors for the first `numImages` images
  // takes, either as one batched call or one forward pass per image.
  const startedAt = Date.now()
  const inputs = images.slice(0, numImages)
  if (useBatchInput) {
    descriptorsByFace = await faceapi.computeFaceDescriptor(inputs)
  } else {
    descriptorsByFace = await Promise.all(
      inputs.map((img) => faceapi.computeFaceDescriptor(img))
    )
  }
  return Date.now() - startedAt
}
async function measureTimings() {
  // Run the unbatched variant first, then the batched one, and report both
  // timings. The two runs are deliberately sequential so they do not compete.
  const timings = {}
  timings.timeNoBatch = await runComputeFaceDescriptors(false)
  timings.timeBatch = await runComputeFaceDescriptors(true)
  return timings
}
async function measureTimingsAndDisplay() {
  // Measure both code paths, show the timings, then redraw a labeled canvas
  // for every computed descriptor.
  const timings = await measureTimings()
  displayTimeStats(timings.timeNoBatch, timings.timeBatch)

  $('#faceContainer').empty()
  for (const [i, descriptor] of descriptorsByFace.entries()) {
    drawFaceRecognitionCanvas(images[i], descriptor)
  }
}
async function run() {
  // Load the recognition model and compute the per-class reference descriptors.
  await faceapi.loadFaceRecognitionModel('/')
  trainDescriptorsByClass = await initTrainDescriptorsByClass(faceapi.recognitionNet, 1)
  $('#loader').hide()

  // Build the test image uris: 4 images per class, starting at idx 2
  // (lower-idx images are used as reference data, see the original comment).
  const urisPerClass = classes.map((clazz) =>
    Array.from(Array(4), (_, idx) => getFaceImageUri(clazz, idx + 2))
  )
  const imgUris = urisPerClass.reduce((flat, arr) => flat.concat(arr))

  images = await Promise.all(
    imgUris.map(async (uri) => faceapi.bufferToImage(await fetchImage(uri)))
  )

  // Warmup pass first, then the measured run (fire-and-forget; updates the DOM).
  await measureTimings()
  measureTimingsAndDisplay()
}
$(document).ready(function () {
  // Wire up the "Num Images" input, render the nav bar and start the demo.
  $('#numImages').on('change', onNumImagesChanged)
  renderNavBar('#navbar', 'batch_face_recognition')
  run()
})
</script>
</body>
</html>
\ No newline at end of file
examples/views/detectAndRecognizeFaces.html
View file @
d3ddbb5d
...
...
@@ -30,6 +30,10 @@
>
Ok
</button>
<p>
<input
type=
"checkbox"
id=
"useBatchProcessing"
onchange=
"onChangeUseBatchProcessing(event)"
/>
<label
for=
"useBatchProcessing"
>
Use Batch Processing
</label>
</p>
</div>
<div
class=
"row side-by-side"
>
<div
class=
"row"
>
...
...
@@ -37,13 +41,13 @@
<input
disabled
value=
"0.7"
id=
"minConfidence"
type=
"text"
class=
"bold"
>
</div>
<button
class=
"waves-effect waves-light btn"
class=
"waves-effect waves-light btn
button-sm
"
onclick=
"onDecreaseMinConfidence()"
>
<i
class=
"material-icons left"
>
-
</i>
</button>
<button
class=
"waves-effect waves-light btn"
class=
"waves-effect waves-light btn
button-sm
"
onclick=
"onIncreaseMinConfidence()"
>
<i
class=
"material-icons left"
>
+
</i>
...
...
@@ -53,13 +57,13 @@
<input
disabled
value=
"0.6"
id=
"maxDistance"
type=
"text"
class=
"bold"
>
</div>
<button
class=
"waves-effect waves-light btn"
class=
"waves-effect waves-light btn
button-sm
"
onclick=
"onDecreaseMaxDistance()"
>
<i
class=
"material-icons left"
>
-
</i>
</button>
<button
class=
"waves-effect waves-light btn"
class=
"waves-effect waves-light btn
button-sm
"
onclick=
"onIncreaseMaxDistance()"
>
<i
class=
"material-icons left"
>
+
</i>
...
...
@@ -70,9 +74,14 @@
<script>
let
maxDistance
=
0.6
let
minConfidence
=
0.7
let
useBatchProcessing
=
false
let
detectionNet
,
recognitionNet
,
landmarkNet
let
trainDescriptorsByClass
=
[]
function onChangeUseBatchProcessing(e) {
  // Mirror the checkbox state into the script-level flag that is later
  // passed to faceapi.allFaces.
  const isChecked = $(e.target).prop('checked')
  useBatchProcessing = isChecked
}
function
onIncreaseMinConfidence
()
{
minConfidence
=
Math
.
min
(
faceapi
.
round
(
minConfidence
+
0.1
),
1.0
)
$
(
'#minConfidence'
).
val
(
minConfidence
)
...
...
@@ -110,7 +119,7 @@
canvas
.
width
=
width
canvas
.
height
=
height
const
fullFaceDescriptions
=
(
await
faceapi
.
allFaces
(
inputImgEl
,
minConfidence
))
const
fullFaceDescriptions
=
(
await
faceapi
.
allFaces
(
inputImgEl
,
minConfidence
,
useBatchProcessing
))
.
map
(
fd
=>
fd
.
forSize
(
width
,
height
))
fullFaceDescriptions
.
forEach
(({
detection
,
descriptor
})
=>
{
...
...
src/allFacesFactory.ts
View file @
d3ddbb5d
...
...
@@ -13,18 +13,19 @@ export function allFacesFactory(
)
{
return
async
function
(
input
:
TNetInput
,
minConfidence
:
number
minConfidence
:
number
,
useBatchProcessing
:
boolean
=
false
):
Promise
<
FullFaceDescription
[]
>
{
const
detections
=
await
detectionNet
.
locateFaces
(
input
,
minConfidence
)
const
faceTensors
=
await
extractFaceTensors
(
input
,
detections
)
/**
const faceLandmarksByFace = await Promise.all(faceTensors.map(
const
faceLandmarksByFace
=
useBatchProcessing
?
await
landmarkNet
.
detectLandmarks
(
faceTensors
)
as
FaceLandmarks
[]
:
await
Promise
.
all
(
faceTensors
.
map
(
faceTensor
=>
landmarkNet
.
detectLandmarks
(
faceTensor
)
))
as
FaceLandmarks
[]
*/
const
faceLandmarksByFace
=
await
landmarkNet
.
detectLandmarks
(
faceTensors
)
as
FaceLandmarks
[]
faceTensors
.
forEach
(
t
=>
t
.
dispose
())
...
...
@@ -33,9 +34,12 @@ export function allFacesFactory(
)
const
alignedFaceTensors
=
await
extractFaceTensors
(
input
,
alignedFaceBoxes
)
const
descriptors
=
await
Promise
.
all
(
alignedFaceTensors
.
map
(
const
descriptors
=
useBatchProcessing
?
await
recognitionNet
.
computeFaceDescriptor
(
alignedFaceTensors
)
as
Float32Array
[]
:
await
Promise
.
all
(
alignedFaceTensors
.
map
(
faceTensor
=>
recognitionNet
.
computeFaceDescriptor
(
faceTensor
)
))
as
Float32Array
[]
alignedFaceTensors
.
forEach
(
t
=>
t
.
dispose
())
return
detections
.
map
((
detection
,
i
)
=>
...
...
src/globalApi.ts
View file @
d3ddbb5d
...
...
@@ -56,7 +56,8 @@ export function computeFaceDescriptor(
export
const
allFaces
:
(
input
:
tf
.
Tensor
|
NetInput
|
TNetInput
,
minConfidence
:
number
minConfidence
:
number
,
useBatchProcessing
?:
boolean
)
=>
Promise
<
FullFaceDescription
[]
>
=
allFacesFactory
(
detectionNet
,
landmarkNet
,
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment