Иван Кубота / face / Commits
Commit 89e9691e authored Jun 07, 2018 by vincent
move utility to package
parent 4fee52e6
Showing 10 changed files with 233 additions and 134 deletions
examples/public/commons.js            +4    -114
examples/public/styles.css            +3    -0
examples/server.js                    +1    -0
examples/views/faceDetection.html     +9    -6
examples/views/faceRecognition.html   +7    -5
examples/views/faceSimilarity.html    +7    -3
src/faceDetectionNet/index.ts         +2    -1
src/faceDetectionNet/types.ts         +10   -0
src/index.ts                          +5    -5
src/utils.ts                          +185  -0
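In short: the canvas and image helpers that previously lived in examples/public/commons.js move into the library as src/utils.ts and are re-exported from src/index.ts, so the example pages now call them through the library namespace. A minimal sketch of the resulting call flow in an example page, assuming the bundle is exposed in the browser as the global facerecognition (as the example views in this commit do) and that the detection net is constructed the same way the recognition net is in commons.js:

// sketch only - mirrors examples/views/faceDetection.html after this commit
async function runDetectionExample() {
  // load the detector weights and build the net (assumed to follow initFaceDetectionNet in commons.js)
  const res = await axios.get('face_detection_model.weights', { responseType: 'arraybuffer' })
  const net = facerecognition.faceDetectionNet(new Float32Array(res.data))

  // utilities are now called on the package namespace instead of local helpers
  const imageData = await facerecognition.mediaSrcToImageData(document.getElementById('img').src)
  const result = await net.locateFaces(imageData, 0.8)

  facerecognition.drawMediaToCanvas('overlay', 'img')
  facerecognition.drawDetection('overlay', result)
}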
examples/public/commons.js

...
@@ -12,17 +12,6 @@ async function fetchImage(uri) {
   return (await axios.get(uri, { responseType: 'blob' })).data
 }
 
-function round(num) {
-  return Math.floor(num * 100) / 100
-}
-
-function getElement(arg) {
-  if (typeof arg === 'string') {
-    return document.getElementById(arg)
-  }
-  return arg
-}
-
 async function initFaceDetectionNet() {
   const res = await axios.get('face_detection_model.weights', { responseType: 'arraybuffer' })
   const weights = new Float32Array(res.data)
...
@@ -35,109 +24,6 @@ async function initFaceRecognitionNet() {
   return facerecognition.faceRecognitionNet(weights)
 }
 
-function drawImgToCanvas(canvasArg, imgArg) {
-  const canvas = getElement(canvasArg)
-  const img = getElement(imgArg)
-  canvas.width = img.width
-  canvas.height = img.height
-  const ctx = canvas.getContext('2d')
-  ctx.drawImage(img, 0, 0, img.width, img.height)
-  return ctx
-}
-
-function imgSrcToImageData(src) {
-  return new Promise((resolve, reject) => {
-    const img = new Image()
-    img.onload = function() {
-      const ctx = drawImgToCanvas(document.createElement('canvas'), img)
-      resolve(ctx.getImageData(0, 0, img.width, img.height))
-    }
-    img.onerror = reject
-    img.src = src
-  })
-}
-
-function bufferToImgSrc(buf) {
-  return new Promise((resolve, reject) => {
-    const reader = new window.FileReader()
-    reader.onload = () => resolve(reader.result)
-    reader.onerror = reject
-    reader.readAsDataURL(buf)
-  })
-}
-
-async function bufferToImageData(buf) {
-  return imgSrcToImageData(await bufferToImgSrc(buf))
-}
-
-function drawBox(canvasArg, x, y, w, h, lineWidth = 2, color = 'blue') {
-  const canvas = getElement(canvasArg)
-  const ctx = canvas.getContext('2d')
-  ctx.strokeStyle = color
-  ctx.lineWidth = lineWidth
-  ctx.strokeRect(x, y, w, h)
-}
-
-function drawText(canvasArg, x, y, text, fontSize = 20, fontStyle = 'Georgia', color = 'blue') {
-  const canvas = getElement(canvasArg)
-  const ctx = canvas.getContext('2d')
-  ctx.fillStyle = color
-  ctx.font = fontSize + 'px ' + fontStyle
-  ctx.fillText(text, x, y)
-}
-
-function drawDetection(canvasArg, detection, options = {}) {
-  const canvas = getElement(canvasArg)
-  const detectionArray = Array.isArray(detection)
-    ? detection
-    : [detection]
-
-  detectionArray.forEach((det) => {
-    const { score, box } = det
-    const { left, right, top, bottom } = box
-    const { color, lineWidth = 2, fontSize = 20, fontStyle, withScore = true } = options
-
-    const padText = 2 + lineWidth
-    drawBox(canvas, left, top, right - left, bottom - top, lineWidth, color)
-    if (withScore) {
-      drawText(canvas, left + padText, top + (fontSize * 0.6) + padText, round(score), fontSize, fontStyle, color)
-    }
-  })
-}
-
 function renderNavBar(navbarId, exampleUri) {
   const examples = [
     {
...
@@ -145,6 +31,10 @@ function renderNavBar(navbarId, exampleUri) {
       name: 'Face Detection'
     },
     {
+      uri: 'face_detection_video',
+      name: 'Face Detection Video'
+    },
+    {
       uri: 'face_recognition',
       name: 'Face Recognition'
     },
...
examples/public/styles.css

...
@@ -17,6 +17,9 @@
   justify-content: center;
   align-items: center;
 }
 
+.side-by-side >* {
+  margin: 0 5px;
+}
 .bold {
   font-weight: bold;
...
examples/server.js

...
@@ -12,6 +12,7 @@ app.use(express.static(path.join(__dirname, './node_modules/axios/dist')))
 app.get('/', (req, res) => res.redirect('/face_detection'))
 
 app.get('/face_detection', (req, res) => res.sendFile(path.join(viewsDir, 'faceDetection.html')))
+app.get('/face_detection_video', (req, res) => res.sendFile(path.join(viewsDir, 'faceDetectionVideo.html')))
 app.get('/face_recognition', (req, res) => res.sendFile(path.join(viewsDir, 'faceRecognition.html')))
 app.get('/face_similarity', (req, res) => res.sendFile(path.join(viewsDir, 'faceSimilarity.html')))
...
examples/views/faceDetection.html

...
@@ -45,26 +45,29 @@
       let net, result
 
       function onIncreaseThreshold() {
-        minConfidence = Math.min(round(minConfidence + 0.1), 1.0)
+        minConfidence = Math.min(facerecognition.round(minConfidence + 0.1), 1.0)
         $('#minConfidence').val(minConfidence)
         updateResults()
       }
 
       function onDecreaseThreshold() {
-        minConfidence = Math.max(round(minConfidence - 0.1), 0.1)
+        minConfidence = Math.max(facerecognition.round(minConfidence - 0.1), 0.1)
         $('#minConfidence').val(minConfidence)
         updateResults()
       }
 
       async function updateResults() {
-        result = await net.locateFaces(await imgSrcToImageData($(`#img`).get(0).src), minConfidence)
-        drawImgToCanvas('overlay', 'img')
-        drawDetection('overlay', result)
+        result = await net.locateFaces(
+          await facerecognition.mediaSrcToImageData($(`#img`).get(0).src),
+          minConfidence
+        )
+        facerecognition.drawMediaToCanvas('overlay', 'img')
+        facerecognition.drawDetection('overlay', result)
       }
 
       async function onSelectionChanged(uri) {
         const imgBuf = await fetchImage(uri)
-        $(`#img`).get(0).src = await bufferToImgSrc(imgBuf)
+        $(`#img`).get(0).src = await facerecognition.bufferToImgSrc(imgBuf)
         updateResults()
       }
...
examples/views/faceRecognition.html

...
@@ -101,7 +101,7 @@
       function displayTimeStats(timeInMs) {
         $('#time').val(`${timeInMs} ms`)
-        $('#fps').val(`${round(1000 / timeInMs)}`)
+        $('#fps').val(`${facerecognition.round(1000 / timeInMs)}`)
       }
 
       function displayImage(src) {
...
@@ -111,7 +111,7 @@
       async function loadTrainingData(cb) {
         return await Promise.all(classes.map(
           async className => ({
-            imgData: await bufferToImageData(
+            imgData: await facerecognition.bufferToImageData(
               await fetchImage(getFaceImageUri(className, 1))
             ),
             className
...
@@ -123,7 +123,9 @@
         return trainDescriptorsByClass
           .map(
             ({ descriptor, className }) => ({
-              distance: round(facerecognition.euclideanDistance(descriptor, queryDescriptor)),
+              distance: facerecognition.round(
+                facerecognition.euclideanDistance(descriptor, queryDescriptor)
+              ),
               className
             })
           )
...
@@ -134,9 +136,9 @@
       async function next() {
         const imgBuf = await fetchImage(getFaceImageUri(classes[currClassIdx], currImageIdx))
         const imgEl = $('#face').get(0)
-        imgEl.src = await bufferToImgSrc(imgBuf)
-        const imageData = await imgSrcToImageData(imgEl.src)
+        imgEl.src = await facerecognition.bufferToImgSrc(imgBuf)
+        const imageData = await facerecognition.mediaSrcToImageData(imgEl.src)
         const ts = Date.now()
         const result = await net.forward(imageData)
...
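The { distance, className } pairs built above are plain numbers, so reducing them to a best match is a one-liner; a hedged sketch (the helper name is illustrative and not part of the example page):

// sketch only - pick the training class with the smallest euclidean distance
function getBestMatch(distancesByClass) {
  return distancesByClass
    .slice()
    .sort((a, b) => a.distance - b.distance)[0]  // { distance, className }
}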
examples/views/faceSimilarity.html

...
@@ -40,7 +40,9 @@
       let net, descriptors = { desc1: null, desc2: null }
 
       function updateResult() {
-        const distance = round(facerecognition.euclideanDistance(descriptors.desc1, descriptors.desc2))
+        const distance = facerecognition.round(
+          facerecognition.euclideanDistance(descriptors.desc1, descriptors.desc2)
+        )
         let text = distance
         let bgColor = '#ffffff'
         if (distance > threshold) {
...
@@ -52,13 +54,15 @@
       }
 
       async function computeDescriptorFromSrc(imgEl) {
-        return net.computeFaceDescriptor(await imgSrcToImageData(imgEl.src))
+        return net.computeFaceDescriptor(
+          await facerecognition.mediaSrcToImageData(imgEl.src)
+        )
       }
 
       async function onSelectionChanged(which, uri) {
         const imgBuf = await fetchImage(uri)
         const imgEl = $(`#face${which}`).get(0)
-        imgEl.src = await bufferToImgSrc(imgBuf)
+        imgEl.src = await facerecognition.bufferToImgSrc(imgBuf)
         descriptors[`desc${which}`] = await computeDescriptorFromSrc(imgEl)
       }
...
src/faceDetectionNet/index.ts

...
@@ -7,6 +7,7 @@ import { resizeLayer } from './resizeLayer';
 import { predictionLayer } from './predictionLayer';
 import { outputLayer } from './outputLayer';
 import { nonMaxSuppression } from './nonMaxSuppression';
+import { FaceDetectionNet } from './types';
 
 function fromData(input: number[]): tf.Tensor4D {
   const pxPerChannel = input.length / 3
...
@@ -78,7 +79,7 @@ export function faceDetectionNet(weights: Float32Array) {
     input: ImageData | ImageData[] | number[],
     minConfidence: number = 0.8,
     maxResults: number = 100,
-  ) {
+  ): Promise<FaceDetectionNet.Detection[]> {
     const imgTensor = getImgTensor(input)
     const [_, height, width] = imgTensor.shape
...
src/faceDetectionNet/types.ts

...
@@ -66,4 +66,14 @@ export namespace FaceDetectionNet {
     output_layer_params: OutputLayerParams
   }
 
+  export type Detection = {
+    score: number
+    box: {
+      top: number,
+      left: number,
+      right: number,
+      bottom: number
+    }
+  }
+
 }
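The new FaceDetectionNet.Detection type is what locateFaces now resolves to (see the return-type change in src/faceDetectionNet/index.ts above), so callers get a typed score and box. A minimal TypeScript sketch of consuming it (the function name and logging are illustrative only):

import { FaceDetectionNet } from './faceDetectionNet/types';

function logDetections(detections: FaceDetectionNet.Detection[]) {
  detections.forEach(({ score, box }) => {
    const { left, top, right, bottom } = box
    // width and height are derived from the box corners
    console.log(`score=${score}, box=${right - left}x${bottom - top} at (${left}, ${top})`)
  })
}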
src/index.ts

...
@@ -2,12 +2,12 @@ import { euclideanDistance } from './euclideanDistance';
 import { faceDetectionNet } from './faceDetectionNet';
 import { faceRecognitionNet } from './faceRecognitionNet';
 import { normalize } from './normalize';
-import * as tf from '@tensorflow/tfjs-core';
 
 export {
   euclideanDistance,
   faceDetectionNet,
   faceRecognitionNet,
-  normalize,
-  tf
+  normalize
 }
\ No newline at end of file
+
+export * from './utils'
\ No newline at end of file
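With export * from './utils', the helpers become part of the library's public surface rather than example-only code. A hedged sketch of consuming them from the entry point (the module specifier below is a placeholder for however the package is resolved or bundled in a given setup):

// placeholder import path - point it at src/index.ts or the published bundle as appropriate
import * as facerecognition from './src';

async function blobToImageData(blob: Blob): Promise<ImageData> {
  // bufferToImgSrc / bufferToImageData / mediaSrcToImageData are now re-exported utilities
  return facerecognition.bufferToImageData(blob)
}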
src/utils.ts (new file)

import { FaceDetectionNet } from './faceDetectionNet/types';

function getElement(arg: string | any) {
  if (typeof arg === 'string') {
    return document.getElementById(arg)
  }
  return arg
}

function getContext2dOrThrow(canvas: HTMLCanvasElement): CanvasRenderingContext2D {
  const ctx = canvas.getContext('2d')
  if (!ctx) {
    throw new Error('canvas 2d context is null')
  }
  return ctx
}

export function isFloat(num: number) {
  return num % 1 !== 0
}

export function round(num: number) {
  return Math.floor(num * 100) / 100
}

export function drawMediaToCanvas(
  canvasArg: string | HTMLCanvasElement,
  mediaArg: string | HTMLImageElement | HTMLVideoElement
): CanvasRenderingContext2D {
  const canvas = getElement(canvasArg)
  const media = getElement(mediaArg)
  if (!(canvas instanceof HTMLCanvasElement)) {
    throw new Error('drawMediaToCanvas - expected canvas to be of type: HTMLCanvasElement')
  }
  if (!(media instanceof HTMLImageElement || media instanceof HTMLVideoElement)) {
    throw new Error('drawMediaToCanvas - expected media to be of type: HTMLImageElement | HTMLVideoElement')
  }
  canvas.width = media.width
  canvas.height = media.height
  const ctx = getContext2dOrThrow(canvas)
  ctx.drawImage(media, 0, 0, media.width, media.height)
  return ctx
}

export function mediaToImageData(media: HTMLImageElement | HTMLVideoElement): ImageData {
  if (!(media instanceof HTMLImageElement || media instanceof HTMLVideoElement)) {
    throw new Error('mediaToImageData - expected media to be of type: HTMLImageElement | HTMLVideoElement')
  }
  const ctx = drawMediaToCanvas(document.createElement('canvas'), media)
  return ctx.getImageData(0, 0, media.width, media.height)
}

export function mediaSrcToImageData(src: string | HTMLImageElement | HTMLVideoElement): Promise<ImageData> {
  return new Promise((resolve, reject) => {
    if (typeof src !== 'string') {
      if (!(src instanceof HTMLImageElement || src instanceof HTMLVideoElement)) {
        return reject('mediaSrcToImageData - expected src to be of type: string | HTMLImageElement | HTMLVideoElement')
      }
      return resolve(mediaToImageData(src))
    }
    const img = new Image()
    img.onload = () => resolve(mediaToImageData(img))
    img.onerror = reject
    img.src = src
  })
}

export function bufferToImgSrc(buf: Blob): Promise<string> {
  return new Promise((resolve, reject) => {
    if (!(buf instanceof Blob)) {
      return reject('bufferToImgSrc - expected buf to be of type: Blob')
    }
    const reader = new FileReader()
    reader.onload = () => resolve(reader.result)
    reader.onerror = reject
    reader.readAsDataURL(buf)
  })
}

export async function bufferToImageData(buf: Blob): Promise<ImageData> {
  if (!(buf instanceof Blob)) {
    throw new Error('bufferToImageData - expected buf to be of type: Blob')
  }
  return mediaSrcToImageData(await bufferToImgSrc(buf))
}

export type DrawBoxOptions = {
  lineWidth: number
  color: string
}

export function drawBox(
  ctx: CanvasRenderingContext2D,
  x: number,
  y: number,
  w: number,
  h: number,
  options: DrawBoxOptions
) {
  ctx.strokeStyle = options.color
  ctx.lineWidth = options.lineWidth
  ctx.strokeRect(x, y, w, h)
}

export type DrawTextOptions = {
  fontSize: number
  fontStyle: string
  color: string
}

export function drawText(
  ctx: CanvasRenderingContext2D,
  x: number,
  y: number,
  text: string,
  options: DrawTextOptions
) {
  ctx.fillStyle = options.color
  ctx.font = `${options.fontSize}px ${options.fontStyle}`
  ctx.fillText(text, x, y)
}

export function drawDetection(
  canvasArg: string | HTMLCanvasElement,
  detection: FaceDetectionNet.Detection | FaceDetectionNet.Detection[],
  options?: DrawBoxOptions & DrawTextOptions & { withScore: boolean }
) {
  const canvas = getElement(canvasArg)
  if (!(canvas instanceof HTMLCanvasElement)) {
    throw new Error('drawBox - expected canvas to be of type: HTMLCanvasElement')
  }
  const detectionArray = Array.isArray(detection)
    ? detection
    : [detection]

  detectionArray.forEach((det) => {
    const { score, box } = det
    const { left, right, top, bottom } = box
    const {
      color = 'blue',
      lineWidth = 2,
      fontSize = 20,
      fontStyle = 'Georgia',
      withScore = true
    } = (options || {})

    const padText = 2 + lineWidth
    const ctx = getContext2dOrThrow(canvas)
    drawBox(
      ctx,
      left,
      top,
      right - left,
      bottom - top,
      { lineWidth, color }
    )
    if (withScore) {
      drawText(
        ctx,
        left + padText,
        top + (fontSize * 0.6) + padText,
        `${round(score)}`,
        { fontSize, fontStyle, color }
      )
    }
  })
}
\ No newline at end of file
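A short usage sketch for the drawing helpers above, assuming a canvas element with id 'overlay', an image with id 'img', and a detections array from locateFaces; the option values shown are simply the defaults drawDetection falls back to:

import { drawMediaToCanvas, drawDetection } from './utils';
import { FaceDetectionNet } from './faceDetectionNet/types';

function renderDetections(detections: FaceDetectionNet.Detection[]) {
  // mirror the source image onto the overlay canvas, then draw boxes and scores on top
  drawMediaToCanvas('overlay', 'img')
  drawDetection('overlay', detections, {
    lineWidth: 2,
    color: 'blue',
    fontSize: 20,
    fontStyle: 'Georgia',
    withScore: true
  })
}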