Иван Кубота / face / Commits / 2ae76364

Commit 2ae76364 authored Jun 09, 2018 by vincent
Parent: b23e6376

    NetInput to simplify api

Showing 10 changed files with 176 additions and 116 deletions (+176 -116)
  src/NetInput.ts                              +70   -0   (new file)
  src/faceDetectionNet/boxPredictionLayer.ts    +3   -4
  src/faceDetectionNet/index.ts                 +7  -82
  src/faceDetectionNet/predictionLayer.ts      +11   -7
  src/faceRecognitionNet/index.ts               +8   -6
  src/faceRecognitionNet/normalize.ts           +3   -5   (moved from src/normalize.ts)
  src/index.ts                                  +3   -5
  src/transformInputs.ts                       +42   -0   (new file)
  src/types.ts                                  +3   -0   (new file)
  src/utils.ts                                 +26   -7
src/NetInput.ts (new file, +70 -0)

```typescript
import { TMediaElement, TNetInput } from './types';
import { Dimensions, getContext2dOrThrow, getElement, getMediaDimensions } from './utils';

export class NetInput {
  private _canvases: HTMLCanvasElement[]

  constructor(mediaArg: TNetInput, dims?: Dimensions) {
    const mediaArgArray = Array.isArray(mediaArg)
      ? mediaArg
      : [mediaArg]

    if (!mediaArgArray.length) {
      throw new Error('NetInput - empty array passed as input')
    }

    const medias = mediaArgArray.map(getElement)

    medias.forEach((media, i) => {
      if (!(media instanceof HTMLImageElement || media instanceof HTMLVideoElement || media instanceof HTMLCanvasElement)) {
        const idxHint = Array.isArray(mediaArg) ? ` at input index ${i}:` : ''
        if (typeof mediaArgArray[i] === 'string') {
          throw new Error(`NetInput - ${idxHint} string passed, but could not resolve HTMLElement for element id`)
        }
        throw new Error(`NetInput - ${idxHint} expected media to be of type HTMLImageElement | HTMLVideoElement | HTMLCanvasElement, or to be an element id`)
      }
    })

    this._canvases = []
    medias.forEach(m => this.initCanvas(m, dims))
  }

  private initCanvas(media: TMediaElement, dims?: Dimensions) {
    if (media instanceof HTMLCanvasElement) {
      this._canvases.push(media)
      return
    }

    // if input is batch type, make sure every canvas has the same dimensions
    const { width, height } = this.dims || dims || getMediaDimensions(media)

    const canvas = document.createElement('canvas')
    canvas.width = width
    canvas.height = height
    getContext2dOrThrow(canvas).drawImage(media, 0, 0, width, height)
    this._canvases.push(canvas)
  }

  public get canvases(): HTMLCanvasElement[] {
    return this._canvases
  }

  public get width(): number {
    return (this._canvases[0] || {}).width
  }

  public get height(): number {
    return (this._canvases[0] || {}).height
  }

  public get dims(): Dimensions | null {
    const { width, height } = this
    return (width > 0 && height > 0) ? { width, height } : null
  }
}
```
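The class accepts anything covered by TNetInput: a media element, an element id, or an array of either, and draws each media element onto an internal canvas. A minimal usage sketch (the element id and the dimensions below are hypothetical, not part of the commit):

```typescript
import { NetInput } from './NetInput';

// 'face1' is a hypothetical element id assumed to exist in the page
const imgEl = document.getElementById('face1') as HTMLImageElement;
const canvasEl = document.createElement('canvas');

// single input, passed as an element reference
const single = new NetInput(imgEl);

// batch input mixing an element id and an element; non-canvas media
// is drawn onto an internal canvas via getContext2dOrThrow / drawImage
const batch = new NetInput(['face1', canvasEl], { width: 150, height: 150 });

console.log(batch.canvases.length); // 2
console.log(batch.dims);            // { width: 150, height: 150 } once the first canvas has non-zero size
```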
src/faceDetectionNet/boxPredictionLayer.ts (+3 -4)

```diff
@@ -16,8 +16,7 @@ function convWithBias(
 export function boxPredictionLayer(
   x: tf.Tensor4D,
-  params: FaceDetectionNet.BoxPredictionParams,
-  size: number
+  params: FaceDetectionNet.BoxPredictionParams
 ) {
   return tf.tidy(() => {
@@ -25,11 +24,11 @@ export function boxPredictionLayer(
     const boxPredictionEncoding = tf.reshape(
       convWithBias(x, params.box_encoding_predictor_params),
-      [batchSize, size, 1, 4]
+      [batchSize, -1, 1, 4]
     )
     const classPrediction = tf.reshape(
       convWithBias(x, params.class_predictor_params),
-      [batchSize, size, 3]
+      [batchSize, -1, 3]
     )

     return {
```
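The reshape targets now use -1 instead of the hard-coded size, letting tfjs-core infer that dimension from the tensor's element count, which is why the explicit size parameter could be dropped. A small standalone sketch of the equivalence (the shapes here are illustrative, not the actual layer shapes):

```typescript
import * as tf from '@tensorflow/tfjs-core';

const batchSize = 1;
// a dummy prediction tensor with 32 * 32 * 12 = 12288 values per batch element
const conv = tf.zeros([batchSize, 32, 32, 12]);

// before: the caller had to pass size = 3072 explicitly
const explicit = tf.reshape(conv, [batchSize, 3072, 1, 4]);
// after: -1 lets the backend derive the same dimension (12288 / (1 * 4) = 3072)
const inferred = tf.reshape(conv, [batchSize, -1, 1, 4]);

console.log(explicit.shape, inferred.shape); // both [1, 3072, 1, 4]
```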
src/faceDetectionNet/index.ts (+7 -82)

```diff
 import * as tf from '@tensorflow/tfjs-core';

-import { isFloat } from '../utils';
+import { NetInput } from '../NetInput';
+import { getImageTensor, padToSquare } from '../transformInputs';
+import { TNetInput } from '../types';
 import { extractParams } from './extractParams';
 import { FaceDetectionResult } from './FaceDetectionResult';
 import { mobileNetV1 } from './mobileNetV1';
@@ -9,81 +11,6 @@ import { outputLayer } from './outputLayer';
 import { predictionLayer } from './predictionLayer';
 import { resizeLayer } from './resizeLayer';

-function fromData(input: number[]): tf.Tensor4D {
-  const pxPerChannel = input.length / 3
-  const dim = Math.sqrt(pxPerChannel)
-  if (isFloat(dim)) {
-    throw new Error(`invalid input size: ${dim}x${dim}x3 (array length: ${input.length})`)
-  }
-  return tf.tensor4d(input as number[], [1, dim, dim, 3])
-}
-
-function fromImageData(input: ImageData[]) {
-  return tf.tidy(() => {
-    const idx = input.findIndex(data => !(data instanceof ImageData))
-    if (idx !== -1) {
-      throw new Error(`expected input at index ${idx} to be instanceof ImageData`)
-    }
-
-    const imgTensors = input
-      .map(data => tf.fromPixels(data))
-      .map(data => tf.expandDims(data, 0)) as tf.Tensor4D[]
-
-    return tf.cast(tf.concat(imgTensors, 0), 'float32')
-  })
-}
-
-function padToSquare(imgTensor: tf.Tensor4D): tf.Tensor4D {
-  return tf.tidy(() => {
-    const [_, height, width] = imgTensor.shape
-    if (height === width) {
-      return imgTensor
-    }
-
-    if (height > width) {
-      const pad = tf.fill([1, height, height - width, 3], 0) as tf.Tensor4D
-      return tf.concat([imgTensor, pad], 2)
-    }
-    const pad = tf.fill([1, width - height, width, 3], 0) as tf.Tensor4D
-    return tf.concat([imgTensor, pad], 1)
-  })
-}
-
-function getImgTensor(input: tf.Tensor | HTMLCanvasElement | ImageData | ImageData[] | number[]) {
-  return tf.tidy(() => {
-    if (input instanceof HTMLCanvasElement) {
-      return tf.cast(tf.expandDims(tf.fromPixels(input), 0), 'float32') as tf.Tensor4D
-    }
-
-    if (input instanceof tf.Tensor) {
-      const rank = input.shape.length
-      if (rank !== 3 && rank !== 4) {
-        throw new Error('input tensor must be of rank 3 or 4')
-      }
-      return tf.cast(rank === 3 ? tf.expandDims(input, 0) : input, 'float32') as tf.Tensor4D
-    }
-
-    const imgDataArray = input instanceof ImageData
-      ? [input]
-      : (
-        input[0] instanceof ImageData
-          ? input as ImageData[]
-          : null
-      )
-
-    return imgDataArray !== null
-      ? fromImageData(imgDataArray)
-      : fromData(input as number[])
-  })
-}
-
 export function faceDetectionNet(weights: Float32Array) {
   const params = extractParams(weights)
@@ -102,14 +29,14 @@ export function faceDetectionNet(weights: Float32Array) {
     })
   }

-  function forward(input: tf.Tensor | ImageData | ImageData[] | number[]) {
+  function forward(input: tf.Tensor | NetInput | TNetInput) {
     return tf.tidy(
-      () => forwardTensor(padToSquare(getImgTensor(input)))
+      () => forwardTensor(padToSquare(getImageTensor(input)))
     )
   }

   async function locateFaces(
-    input: tf.Tensor | HTMLCanvasElement | ImageData | ImageData[] | number[],
+    input: tf.Tensor | NetInput,
     minConfidence: number = 0.8,
     maxResults: number = 100,
   ): Promise<FaceDetectionResult[]> {
@@ -121,7 +48,7 @@ export function faceDetectionNet(weights: Float32Array) {
       scores: _scores
     } = tf.tidy(() => {
-      let imgTensor = getImgTensor(input)
+      let imgTensor = getImageTensor(input)
       const [_, height, width] = imgTensor.shape

       imgTensor = padToSquare(imgTensor)
@@ -140,9 +67,7 @@ export function faceDetectionNet(weights: Float32Array) {
     }

     // TODO find a better way to filter by minConfidence
-    //const ts = Date.now()
     const scoresData = Array.from(await scores.data())
-    //console.log('await data:', (Date.now() - ts))

     const iouThreshold = 0.5
     const indices = nonMaxSuppression(
```
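With the conversion helpers moved into transformInputs.ts, forward and locateFaces take the shared input union instead of raw pixel arrays. A hedged sketch of how the detection net might be called after this change, assuming the factory still exposes its locateFaces function and that the weights have already been fetched (the element id below is a placeholder, not from the commit):

```typescript
import { faceDetectionNet } from './faceDetectionNet';
import { NetInput } from './NetInput';

async function detect(weights: Float32Array) {
  const net = faceDetectionNet(weights);

  // an element id wrapped in a NetInput; a plain tensor would also be accepted
  const input = new NetInput('inputVideo'); // 'inputVideo' is a hypothetical element id
  const detections = await net.locateFaces(input, 0.8, 100); // minConfidence, maxResults

  detections.forEach(det => console.log(det));
  return detections;
}
```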
src/faceDetectionNet/predictionLayer.ts (+11 -7)

```diff
@@ -4,7 +4,11 @@ import { boxPredictionLayer } from './boxPredictionLayer';
 import { pointwiseConvLayer } from './pointwiseConvLayer';
 import { FaceDetectionNet } from './types';

-export function predictionLayer(x: tf.Tensor4D, conv11: tf.Tensor4D, params: FaceDetectionNet.PredictionLayerParams) {
+export function predictionLayer(
+  x: tf.Tensor4D,
+  conv11: tf.Tensor4D,
+  params: FaceDetectionNet.PredictionLayerParams
+) {
   return tf.tidy(() => {

     const conv0 = pointwiseConvLayer(x, params.conv_0_params, [1, 1])
@@ -16,12 +20,12 @@ export function predictionLayer(x: tf.Tensor4D, conv11: tf.Tensor4D, params: Fac
     const conv6 = pointwiseConvLayer(conv5, params.conv_6_params, [1, 1])
     const conv7 = pointwiseConvLayer(conv6, params.conv_7_params, [2, 2])

-    const boxPrediction0 = boxPredictionLayer(conv11, params.box_predictor_0_params, 3072)
-    const boxPrediction1 = boxPredictionLayer(x, params.box_predictor_1_params, 1536)
-    const boxPrediction2 = boxPredictionLayer(conv1, params.box_predictor_2_params, 384)
-    const boxPrediction3 = boxPredictionLayer(conv3, params.box_predictor_3_params, 96)
-    const boxPrediction4 = boxPredictionLayer(conv5, params.box_predictor_4_params, 24)
-    const boxPrediction5 = boxPredictionLayer(conv7, params.box_predictor_5_params, 6)
+    const boxPrediction0 = boxPredictionLayer(conv11, params.box_predictor_0_params)
+    const boxPrediction1 = boxPredictionLayer(x, params.box_predictor_1_params)
+    const boxPrediction2 = boxPredictionLayer(conv1, params.box_predictor_2_params)
+    const boxPrediction3 = boxPredictionLayer(conv3, params.box_predictor_3_params)
+    const boxPrediction4 = boxPredictionLayer(conv5, params.box_predictor_4_params)
+    const boxPrediction5 = boxPredictionLayer(conv7, params.box_predictor_5_params)

     const boxPredictions = tf.concat([
       boxPrediction0.boxPredictionEncoding,
```
src/faceRecognitionNet/index.ts (+8 -6)

```diff
 import * as tf from '@tensorflow/tfjs-core';

-import { normalize } from '../normalize';
+import { NetInput } from '../NetInput';
+import { getImageTensor, padToSquare } from '../transformInputs';
+import { TNetInput } from '../types';
 import { convDown } from './convLayer';
 import { extractParams } from './extractParams';
+import { normalize } from './normalize';
 import { residual, residualDown } from './residualLayer';

 export function faceRecognitionNet(weights: Float32Array) {
   const params = extractParams(weights)

-  function forward(input: number[] | ImageData) {
+  function forward(input: tf.Tensor | NetInput | TNetInput) {
     return tf.tidy(() => {

-      const x = normalize(input)
+      const x = normalize(padToSquare(getImageTensor(input)))

       let out = convDown(x, params.conv32_down)
       out = tf.maxPool(out, 3, 2, 'valid')
@@ -42,14 +44,14 @@ export function faceRecognitionNet(weights: Float32Array) {
     })
   }

-  const computeFaceDescriptor = async (input: number[] | ImageData) => {
+  const computeFaceDescriptor = async (input: tf.Tensor | NetInput | TNetInput) => {
     const result = forward(input)
     const data = await result.data()
     result.dispose()
     return data
   }

-  const computeFaceDescriptorSync = (input: number[] | ImageData) => {
+  const computeFaceDescriptorSync = (input: tf.Tensor | NetInput | TNetInput) => {
     const result = forward(input)
     const data = result.dataSync()
     result.dispose()
```
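computeFaceDescriptor and computeFaceDescriptorSync now take the same input union, so a face crop can be passed as a media element instead of a pre-built number[] or ImageData. A sketch under the assumption that the factory exposes computeFaceDescriptor and that the input is a 150x150 face crop (the size hard-coded in normalize.ts):

```typescript
import { faceRecognitionNet } from './faceRecognitionNet';

async function describeFace(weights: Float32Array, faceImg: HTMLImageElement) {
  const net = faceRecognitionNet(weights);

  // the element is converted internally via NetInput / getImageTensor
  const descriptor = await net.computeFaceDescriptor(faceImg);
  return descriptor; // typed array of embedding values
}
```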
src/normalize.ts → src/faceRecognitionNet/normalize.ts (+3 -5)

```diff
 import * as tf from '@tensorflow/tfjs-core';

-export function normalize(input: number[] | ImageData): tf.Tensor4D {
+export function normalize(x: tf.Tensor4D): tf.Tensor4D {
   return tf.tidy(() => {
     const avg_r = tf.fill([1, 150, 150, 1], 122.782);
     const avg_g = tf.fill([1, 150, 150, 1], 117.001);
     const avg_b = tf.fill([1, 150, 150, 1], 104.298);
     const avg_rgb = tf.concat([avg_r, avg_g, avg_b], 3)

-    const x = input instanceof ImageData
-      ? tf.cast(tf.reshape(tf.fromPixels(input), [1, 150, 150, 3]), 'float32')
-      : tf.tensor4d(input, [1, 150, 150, 3])
-
-    return tf.div(tf.sub(x, avg_rgb), tf.fill(x.shape, 256))
+    return tf.div(tf.sub(x, avg_rgb), tf.scalar(256))
   })
 }
```
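normalize is now a pure tensor-to-tensor operation: converting the raw input into a float tensor is the caller's job, and normalize only subtracts the per-channel means and divides by 256. A small sketch using the new helpers, assuming a canvas already holding a 150x150 face crop (the canvas itself is a stand-in, not part of the commit):

```typescript
import { normalize } from './faceRecognitionNet/normalize';
import { getImageTensor, padToSquare } from './transformInputs';

// hypothetical canvas assumed to contain a 150x150 face crop
const canvas = document.createElement('canvas');
canvas.width = 150;
canvas.height = 150;

const x = padToSquare(getImageTensor(canvas)); // [1, 150, 150, 3], float32
const normalized = normalize(x);               // (x - channel means) / 256
```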
src/index.ts (+3 -5)

```diff
 import * as tf from '@tensorflow/tfjs-core';

 import { euclideanDistance } from './euclideanDistance';
 import { faceDetectionNet } from './faceDetectionNet';
 import { faceRecognitionNet } from './faceRecognitionNet';
-import { normalize } from './normalize';
+import { NetInput } from './NetInput';

 export {
   euclideanDistance,
   faceDetectionNet,
   faceRecognitionNet,
-  normalize,
+  NetInput,
   tf
 }

 export * from './utils'
```
src/transformInputs.ts (new file, +42 -0)

```typescript
import * as tf from '@tensorflow/tfjs-core';

import { NetInput } from './NetInput';
import { TNetInput } from './types';

export function padToSquare(imgTensor: tf.Tensor4D): tf.Tensor4D {
  return tf.tidy(() => {
    const [_, height, width] = imgTensor.shape
    if (height === width) {
      return imgTensor
    }

    if (height > width) {
      const pad = tf.fill([1, height, height - width, 3], 0) as tf.Tensor4D
      return tf.concat([imgTensor, pad], 2)
    }
    const pad = tf.fill([1, width - height, width, 3], 0) as tf.Tensor4D
    return tf.concat([imgTensor, pad], 1)
  })
}

export function getImageTensor(input: tf.Tensor | NetInput | TNetInput): tf.Tensor4D {
  return tf.tidy(() => {
    if (input instanceof tf.Tensor) {
      const rank = input.shape.length
      if (rank !== 3 && rank !== 4) {
        throw new Error('input tensor must be of rank 3 or 4')
      }

      return (rank === 3 ? input.expandDims(0) : input).toFloat() as tf.Tensor4D
    }

    const netInput = input instanceof NetInput ? input : new NetInput(input)

    return tf.concat(
      netInput.canvases.map(canvas =>
        tf.fromPixels(canvas).expandDims(0).toFloat()
      )
    ) as tf.Tensor4D
  })
}
```
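padToSquare zero-pads the shorter side (extra columns when the image is taller than wide, extra rows when it is wider than tall), so downstream layers always see square input. A quick sketch of the resulting shapes:

```typescript
import * as tf from '@tensorflow/tfjs-core';
import { padToSquare } from './transformInputs';

const tall = tf.zeros([1, 200, 120, 3]) as tf.Tensor4D;
const wide = tf.zeros([1, 120, 200, 3]) as tf.Tensor4D;

console.log(padToSquare(tall).shape); // [1, 200, 200, 3] - zero columns appended on the right
console.log(padToSquare(wide).shape); // [1, 200, 200, 3] - zero rows appended at the bottom
```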
src/types.ts (new file, +3 -0)

```typescript
export type TMediaElement = HTMLImageElement | HTMLVideoElement | HTMLCanvasElement

export type TNetInputArg = string | TMediaElement

export type TNetInput = TNetInputArg | Array<TNetInputArg>
```
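The TNetInput union covers every form the NetInput constructor accepts. For example, all of the following type-check (the element id is hypothetical):

```typescript
import { TNetInput } from './types';

const byId: TNetInput = 'myImage'; // element id (hypothetical)
const byElement: TNetInput = document.createElement('canvas');
const batch: TNetInput = ['myImage', document.createElement('video')];
```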
src/utils.ts (+26 -7)

```diff
 import { FaceDetectionNet } from './faceDetectionNet/types';

-function getElement(arg: string | any) {
+export function getElement(arg: string | any) {
   if (typeof arg === 'string') {
     return document.getElementById(arg)
   }
   return arg
 }

-function getContext2dOrThrow(canvas: HTMLCanvasElement): CanvasRenderingContext2D {
+export function getContext2dOrThrow(canvas: HTMLCanvasElement): CanvasRenderingContext2D {
   const ctx = canvas.getContext('2d')
   if (!ctx) {
     throw new Error('canvas 2d context is null')
@@ -15,7 +15,7 @@ function getContext2dOrThrow(canvas: HTMLCanvasElement): CanvasRenderingContext2
   return ctx
 }

-function getMediaDimensions(media: HTMLImageElement | HTMLVideoElement) {
+export function getMediaDimensions(media: HTMLImageElement | HTMLVideoElement) {
   if (media instanceof HTMLVideoElement) {
     return { width: media.videoWidth, height: media.videoHeight }
   }
@@ -35,11 +35,11 @@ export type Dimensions = {
   height: number
 }

-export function drawMediaToCanvas(
+export function toNetInput(
   canvasArg: string | HTMLCanvasElement,
   mediaArg: string | HTMLImageElement | HTMLVideoElement,
   dims?: Dimensions
-): CanvasRenderingContext2D {
+): HTMLCanvasElement {
   const canvas = getElement(canvasArg)
   const media = getElement(mediaArg)
@@ -56,7 +56,7 @@ export function drawMediaToCanvas(
   const ctx = getContext2dOrThrow(canvas)
   ctx.drawImage(media, 0, 0, width, height)
-  return ctx
+  return canvas
 }

 export function mediaToImageData(media: HTMLImageElement | HTMLVideoElement, dims?: Dimensions): ImageData {
@@ -64,7 +64,8 @@ export function mediaToImageData(media: HTMLImageElement | HTMLVideoElement, dim
     throw new Error('mediaToImageData - expected media to be of type: HTMLImageElement | HTMLVideoElement')
   }

-  const ctx = drawMediaToCanvas(document.createElement('canvas'), media)
+  const canvas = toNetInput(document.createElement('canvas'), media)
+  const ctx = getContext2dOrThrow(canvas)
   const { width, height } = dims || getMediaDimensions(media)
   return ctx.getImageData(0, 0, width, height)
@@ -108,6 +109,24 @@ export async function bufferToImageData(buf: Blob): Promise<ImageData> {
   return mediaSrcToImageData(await bufferToImgSrc(buf))
 }

+export function bufferToImage(buf: Blob): Promise<HTMLImageElement> {
+  return new Promise((resolve, reject) => {
+    if (!(buf instanceof Blob)) {
+      return reject('bufferToImage - expected buf to be of type: Blob')
+    }
+
+    const reader = new FileReader()
+    reader.onload = () => {
+      const img = new Image()
+      img.onload = () => resolve(img)
+      img.onerror = reject
+      img.src = reader.result
+    }
+    reader.onerror = reject
+    reader.readAsDataURL(buf)
+  })
+}
+
 export type DrawBoxOptions = {
   lineWidth: number
   color: string
```
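The new bufferToImage helper wraps the FileReader/Image dance in a promise, so a fetched blob can be turned into an HTMLImageElement before handing it to NetInput. A hedged usage sketch (the URL and helper name loadAsNetInput are placeholders, not part of the commit):

```typescript
import { bufferToImage } from './utils';
import { NetInput } from './NetInput';

// url is a placeholder; any fetchable image works
async function loadAsNetInput(url: string) {
  const res = await fetch(url);
  const blob = await res.blob();
  const img = await bufferToImage(blob); // resolves once the image element has loaded
  return new NetInput(img);
}
```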