Unverified commit 123df714 by Vincent Mühler, committed by GitHub

Merge pull request #246 from javyxx/master

Update to tensorflowjs 1.0.1
Parents: 21ce7136, 4c7fb27f
@@ -35,12 +35,12 @@
   "author": "justadudewhohacks",
   "license": "MIT",
   "dependencies": {
-    "@tensorflow/tfjs-core": "0.14.2",
-    "tfjs-image-recognition-base": "^0.4.1",
+    "@tensorflow/tfjs-core": "1.0.1",
+    "tfjs-image-recognition-base": "^0.5.0",
     "tslib": "^1.9.3"
   },
   "devDependencies": {
-    "@tensorflow/tfjs-node": "^0.2.3",
+    "@tensorflow/tfjs-node": "^1.0.1",
     "@types/jasmine": "^3.3.8",
     "@types/node": "^10.12.18",
     "canvas": "2.0.1",
...
@@ -45,7 +45,7 @@ export class Mtcnn extends NeuralNetwork<NetParams> {
     const imgTensor = tf.tidy(() =>
       bgrToRgbTensor(
-        tf.expandDims(tf.fromPixels(inputCanvas)).toFloat() as tf.Tensor4D
+        tf.expandDims(tf.browser.fromPixels(inputCanvas)).toFloat() as tf.Tensor4D
       )
     )
...
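In tfjs-core 1.x the canvas/image helpers moved into the `tf.browser` namespace, which is what this hunk (and the test changes further down) track. A minimal sketch of the rename, assuming `inputCanvas` is any `HTMLCanvasElement`; the helper name is purely illustrative and not part of this PR:

```ts
import * as tf from '@tensorflow/tfjs-core';

// hypothetical helper, not part of this PR: canvas -> batched float tensor
function canvasToBatchedTensor(inputCanvas: HTMLCanvasElement): tf.Tensor4D {
  return tf.tidy(() =>
    // 0.x: tf.fromPixels(inputCanvas)
    // 1.x: the browser-only pixel reader lives under tf.browser
    tf.expandDims(tf.browser.fromPixels(inputCanvas)).toFloat() as tf.Tensor4D
  );
}
```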
@@ -28,9 +28,10 @@ function extractBoundingBoxes(
   // TODO: fix this!, maybe better to use tf.gather here
   const indices: Point[] = []
+  const scoresData = scoresTensor.arraySync();
   for (let y = 0; y < scoresTensor.shape[0]; y++) {
     for (let x = 0; x < scoresTensor.shape[1]; x++) {
-      if (scoresTensor.get(y, x) >= scoreThreshold) {
+      if (scoresData[y][x] >= scoreThreshold) {
         indices.push(new Point(x, y))
       }
     }
@@ -44,13 +45,14 @@ function extractBoundingBoxes(
       Math.round((idx.x * CELL_STRIDE + CELL_SIZE) / scale)
     )
-    const score = scoresTensor.get(idx.y, idx.x)
+    const score = scoresData[idx.y][idx.x]
+    const regionsData = regionsTensor.arraySync()
     const region = new MtcnnBox(
-      regionsTensor.get(idx.y, idx.x, 0),
-      regionsTensor.get(idx.y, idx.x, 1),
-      regionsTensor.get(idx.y, idx.x, 2),
-      regionsTensor.get(idx.y, idx.x, 3)
+      regionsData[idx.y][idx.x][0],
+      regionsData[idx.y][idx.x][1],
+      regionsData[idx.y][idx.x][2],
+      regionsData[idx.y][idx.x][3]
     )
     return {
...
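tfjs 1.x dropped the synchronous per-element `tensor.get(...)` accessor, so this hunk (and the stage-2/stage-3, SSD and NMS hunks below) read the whole tensor once with `arraySync()` and index the resulting nested array instead. A hedged, self-contained sketch of that pattern; function and variable names are illustrative only:

```ts
import * as tf from '@tensorflow/tfjs-core';

// scoresTensor is assumed to be a [rows, cols] Tensor2D, as in stage 1 above
function cellsAboveThreshold(scoresTensor: tf.Tensor2D, scoreThreshold: number): Array<[number, number]> {
  // 0.x: scoresTensor.get(y, x) read a single element per call
  // 1.x: download the tensor once into a number[][] and index it in JS
  const scoresData = scoresTensor.arraySync();
  const cells: Array<[number, number]> = [];
  for (let y = 0; y < scoresTensor.shape[0]; y++) {
    for (let x = 0; x < scoresTensor.shape[1]; x++) {
      if (scoresData[y][x] >= scoreThreshold) {
        cells.push([x, y]);
      }
    }
  }
  return cells;
}
```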
@@ -54,13 +54,15 @@ export async function stage2(
   )
   stats.stage2_nms = Date.now() - ts
-  const regions = indicesNms.map(idx =>
-    new MtcnnBox(
-      rnetOuts[indices[idx]].regions.get(0, 0),
-      rnetOuts[indices[idx]].regions.get(0, 1),
-      rnetOuts[indices[idx]].regions.get(0, 2),
-      rnetOuts[indices[idx]].regions.get(0, 3)
-    )
+  const regions = indicesNms.map(idx =>{
+    const regionsData = rnetOuts[indices[idx]].regions.arraySync()
+    return new MtcnnBox(
+      regionsData[0][0],
+      regionsData[0][1],
+      regionsData[0][2],
+      regionsData[0][3]
+    )
+  }
   )
   finalScores = indicesNms.map(idx => filteredScores[idx])
...
@@ -39,12 +39,14 @@ export async function stage3(
     .filter(c => c.score > scoreThreshold)
     .map(({ idx }) => idx)
-  const filteredRegions = indices.map(idx => new MtcnnBox(
-    onetOuts[idx].regions.get(0, 0),
-    onetOuts[idx].regions.get(0, 1),
-    onetOuts[idx].regions.get(0, 2),
-    onetOuts[idx].regions.get(0, 3)
-  ))
+  const filteredRegions = indices.map(idx => {
+    const regionsData = onetOuts[idx].regions.arraySync();
+    return new MtcnnBox(
+      regionsData[0][0],
+      regionsData[0][1],
+      regionsData[0][2],
+      regionsData[0][3]
+  )})
   const filteredBoxes = indices
     .map((idx, i) => inputBoxes[idx].calibrate(filteredRegions[i]))
   const filteredScores = indices.map(idx => scores[idx])
@@ -67,11 +69,13 @@ export async function stage3(
   finalBoxes = indicesNms.map(idx => filteredBoxes[idx])
   finalScores = indicesNms.map(idx => filteredScores[idx])
   points = indicesNms.map((idx, i) =>
-    Array(5).fill(0).map((_, ptIdx) =>
-      new Point(
-        ((onetOuts[idx].points.get(0, ptIdx) * (finalBoxes[i].width + 1)) + finalBoxes[i].left) ,
-        ((onetOuts[idx].points.get(0, ptIdx + 5) * (finalBoxes[i].height + 1)) + finalBoxes[i].top)
-      )
+    Array(5).fill(0).map((_, ptIdx) =>{
+      const pointsData = onetOuts[idx].points.arraySync()
+      return new Point(
+        ((pointsData[0][ptIdx] * (finalBoxes[i].width + 1)) + finalBoxes[i].left) ,
+        ((pointsData[0][ptIdx+5] * (finalBoxes[i].height + 1)) + finalBoxes[i].top)
+      )
+    }
     )
   )
 }
...
@@ -85,15 +85,16 @@ export class SsdMobilenetv1 extends NeuralNetwork<NetParams> {
     const padX = inputSize / reshapedDims.width
     const padY = inputSize / reshapedDims.height
+    const boxesData = boxes.arraySync()
     const results = indices
       .map(idx => {
         const [top, bottom] = [
-          Math.max(0, boxes.get(idx, 0)),
-          Math.min(1.0, boxes.get(idx, 2))
+          Math.max(0, boxesData[idx][0]),
+          Math.min(1.0, boxesData[idx][2])
         ].map(val => val * padY)
         const [left, right] = [
-          Math.max(0, boxes.get(idx, 1)),
-          Math.min(1.0, boxes.get(idx, 3))
+          Math.max(0, boxesData[idx][1]),
+          Math.min(1.0, boxesData[idx][3])
         ].map(val => val * padX)
         return new FaceDetection(
           scoresData[idx],
...
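Hoisting `boxes.arraySync()` above the `.map()` keeps the device-to-CPU download to a single call per forward pass. `arraySync()` blocks until the values are available; where blocking is undesirable, tfjs 1.x also exposes the promise-based `array()`, sketched below (not part of this PR, and the shape assumption is mine):

```ts
import * as tf from '@tensorflow/tfjs-core';

// assumption for illustration: boxes is a [numBoxes, 4] Tensor2D of box coordinates
async function readBoxes(boxes: tf.Tensor2D): Promise<number[][]> {
  // resolves with the same nested array arraySync() would return,
  // without blocking the main thread while the data is fetched
  return boxes.array();
}
```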
@@ -13,13 +13,13 @@ function depthwiseConvLayer(
   return tf.tidy(() => {
     let out = tf.depthwiseConv2d(x, params.filters, strides, 'same')
-    out = tf.batchNormalization<tf.Rank.R4>(
+    out = tf.batchNorm<tf.Rank.R4>(
       out,
       params.batch_norm_mean,
       params.batch_norm_variance,
-      epsilon,
+      params.batch_norm_offset,
       params.batch_norm_scale,
-      params.batch_norm_offset
+      epsilon
     )
     return tf.clipByValue(out, 0, 6)
...
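Besides the rename from `tf.batchNormalization` to `tf.batchNorm`, the argument order changed: in 1.x the call is `(x, mean, variance, offset, scale, varianceEpsilon)`, so `epsilon` moves to the end and `offset` comes before `scale`. A minimal sketch against the parameter names used in the hunk above; the interface and default epsilon are assumptions for illustration:

```ts
import * as tf from '@tensorflow/tfjs-core';

// mirrors the fields referenced in depthwiseConvLayer above; shapes are assumed
interface BatchNormParams {
  batch_norm_mean: tf.Tensor1D;
  batch_norm_variance: tf.Tensor1D;
  batch_norm_offset: tf.Tensor1D;
  batch_norm_scale: tf.Tensor1D;
}

function applyBatchNorm(x: tf.Tensor4D, params: BatchNormParams, epsilon = 0.001): tf.Tensor4D {
  // 0.x: tf.batchNormalization(x, mean, variance, epsilon, scale, offset)
  // 1.x: tf.batchNorm(x, mean, variance, offset, scale, epsilon)
  return tf.batchNorm<tf.Rank.R4>(
    x,
    params.batch_norm_mean,
    params.batch_norm_variance,
    params.batch_norm_offset,
    params.batch_norm_scale,
    epsilon
  );
}
```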
@@ -49,14 +49,15 @@ export function nonMaxSuppression(
 }
 function IOU(boxes: tf.Tensor2D, i: number, j: number) {
-  const yminI = Math.min(boxes.get(i, 0), boxes.get(i, 2))
-  const xminI = Math.min(boxes.get(i, 1), boxes.get(i, 3))
-  const ymaxI = Math.max(boxes.get(i, 0), boxes.get(i, 2))
-  const xmaxI = Math.max(boxes.get(i, 1), boxes.get(i, 3))
-  const yminJ = Math.min(boxes.get(j, 0), boxes.get(j, 2))
-  const xminJ = Math.min(boxes.get(j, 1), boxes.get(j, 3))
-  const ymaxJ = Math.max(boxes.get(j, 0), boxes.get(j, 2))
-  const xmaxJ = Math.max(boxes.get(j, 1), boxes.get(j, 3))
+  const boxesData = boxes.arraySync()
+  const yminI = Math.min(boxesData[i][0], boxesData[i][2])
+  const xminI = Math.min(boxesData[i][1], boxesData[i][3])
+  const ymaxI = Math.max(boxesData[i][0], boxesData[i][2])
+  const xmaxI = Math.max(boxesData[i][1], boxesData[i][3])
+  const yminJ = Math.min(boxesData[j][0], boxesData[j][2])
+  const xminJ = Math.min(boxesData[j][1], boxesData[j][3])
+  const ymaxJ = Math.max(boxesData[j][0], boxesData[j][2])
+  const xmaxJ = Math.max(boxesData[j][1], boxesData[j][3])
   const areaI = (ymaxI - yminI) * (xmaxI - xminI)
   const areaJ = (ymaxJ - yminJ) * (xmaxJ - xminJ)
   if (areaI <= 0 || areaJ <= 0) {
...
@@ -6,7 +6,7 @@ describe('extractFaceTensors', () => {
   let imgTensor: tf.Tensor3D
   beforeAll(async () => {
-    imgTensor = tf.fromPixels(createCanvasFromMedia(await loadImage('test/images/face1.png')))
+    imgTensor = tf.browser.fromPixels(createCanvasFromMedia(await loadImage('test/images/face1.png')))
   })
   describe('extracts tensors', () => {
...
@@ -57,7 +57,7 @@ describe('faceExpressionNet', () => {
   })
   it('computes face landmarks for batch of tf.Tensor3D', async () => {
-    const inputs = [imgElAngry, imgElSurprised].map(el => tf.fromPixels(createCanvasFromMedia(el)))
+    const inputs = [imgElAngry, imgElSurprised].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
     const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressionPrediction[][]
     expect(Array.isArray(results)).toBe(true)
@@ -80,7 +80,7 @@ describe('faceExpressionNet', () => {
   })
   it('computes face landmarks for batch of mixed inputs', async () => {
-    const inputs = [imgElAngry, tf.fromPixels(createCanvasFromMedia(imgElSurprised))]
+    const inputs = [imgElAngry, tf.browser.fromPixels(createCanvasFromMedia(imgElSurprised))]
     const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressionPrediction[][]
     expect(Array.isArray(results)).toBe(true)
@@ -125,7 +125,7 @@ describe('faceExpressionNet', () => {
   })
   it('single tf.Tensor3D', async () => {
-    const tensor = tf.fromPixels(createCanvasFromMedia(imgElAngry))
+    const tensor = tf.browser.fromPixels(createCanvasFromMedia(imgElAngry))
     await expectAllTensorsReleased(async () => {
       const outTensor = await faceExpressionNet.forwardInput(await toNetInput(tensor))
@@ -136,7 +136,7 @@ describe('faceExpressionNet', () => {
   })
   it('multiple tf.Tensor3Ds', async () => {
-    const tensors = [imgElAngry, imgElAngry, imgElAngry].map(el => tf.fromPixels(createCanvasFromMedia(el)))
+    const tensors = [imgElAngry, imgElAngry, imgElAngry].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
     await expectAllTensorsReleased(async () => {
       const outTensor = await faceExpressionNet.forwardInput(await toNetInput(tensors))
@@ -147,7 +147,7 @@ describe('faceExpressionNet', () => {
   })
   it('single batch size 1 tf.Tensor4Ds', async () => {
-    const tensor = tf.tidy(() => tf.fromPixels(createCanvasFromMedia(imgElAngry)).expandDims()) as tf.Tensor4D
+    const tensor = tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(imgElAngry)).expandDims()) as tf.Tensor4D
     await expectAllTensorsReleased(async () => {
       const outTensor = await faceExpressionNet.forwardInput(await toNetInput(tensor))
@@ -159,7 +159,7 @@ describe('faceExpressionNet', () => {
   it('multiple batch size 1 tf.Tensor4Ds', async () => {
     const tensors = [imgElAngry, imgElAngry, imgElAngry]
-      .map(el => tf.tidy(() => tf.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
+      .map(el => tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
     await expectAllTensorsReleased(async () => {
       const outTensor = await faceExpressionNet.forwardInput(await toNetInput(tensors))
@@ -186,7 +186,7 @@ describe('faceExpressionNet', () => {
   })
   it('single tf.Tensor3D', async () => {
-    const tensor = tf.fromPixels(createCanvasFromMedia(imgElAngry))
+    const tensor = tf.browser.fromPixels(createCanvasFromMedia(imgElAngry))
     await expectAllTensorsReleased(async () => {
       await faceExpressionNet.predictExpressions(tensor)
@@ -196,7 +196,7 @@ describe('faceExpressionNet', () => {
   })
   it('multiple tf.Tensor3Ds', async () => {
-    const tensors = [imgElAngry, imgElAngry, imgElAngry].map(el => tf.fromPixels(createCanvasFromMedia(el)))
+    const tensors = [imgElAngry, imgElAngry, imgElAngry].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
     await expectAllTensorsReleased(async () => {
@@ -207,7 +207,7 @@ describe('faceExpressionNet', () => {
   })
   it('single batch size 1 tf.Tensor4Ds', async () => {
-    const tensor = tf.tidy(() => tf.fromPixels(createCanvasFromMedia(imgElAngry)).expandDims()) as tf.Tensor4D
+    const tensor = tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(imgElAngry)).expandDims()) as tf.Tensor4D
     await expectAllTensorsReleased(async () => {
       await faceExpressionNet.predictExpressions(tensor)
@@ -218,7 +218,7 @@ describe('faceExpressionNet', () => {
   it('multiple batch size 1 tf.Tensor4Ds', async () => {
     const tensors = [imgElAngry, imgElAngry, imgElAngry]
-      .map(el => tf.tidy(() => tf.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
+      .map(el => tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
     await expectAllTensorsReleased(async () => {
       await faceExpressionNet.predictExpressions(tensors)
...
@@ -92,7 +92,7 @@ describe('faceLandmark68Net', () => {
   })
   it('computes face landmarks for batch of tf.Tensor3D', async () => {
-    const inputs = [imgEl1, imgEl2, imgElRect].map(el => tf.fromPixels(createCanvasFromMedia(el)))
+    const inputs = [imgEl1, imgEl2, imgElRect].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
     const faceLandmarkPositions = [
       faceLandmarkPositions1,
@@ -117,7 +117,7 @@ describe('faceLandmark68Net', () => {
   })
   it('computes face landmarks for batch of mixed inputs', async () => {
-    const inputs = [imgEl1, tf.fromPixels(createCanvasFromMedia(imgEl2)), tf.fromPixels(createCanvasFromMedia(imgElRect))]
+    const inputs = [imgEl1, tf.browser.fromPixels(createCanvasFromMedia(imgEl2)), tf.browser.fromPixels(createCanvasFromMedia(imgElRect))]
     const faceLandmarkPositions = [
       faceLandmarkPositions1,
@@ -164,7 +164,7 @@ describe('faceLandmark68Net', () => {
   })
   it('single tf.Tensor3D', async () => {
-    const tensor = tf.fromPixels(createCanvasFromMedia(imgEl1))
+    const tensor = tf.browser.fromPixels(createCanvasFromMedia(imgEl1))
     await expectAllTensorsReleased(async () => {
       const netInput = new NetInput([tensor])
@@ -176,7 +176,7 @@ describe('faceLandmark68Net', () => {
   })
   it('multiple tf.Tensor3Ds', async () => {
-    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.fromPixels(createCanvasFromMedia(el)))
+    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
     await expectAllTensorsReleased(async () => {
       const netInput = new NetInput(tensors)
@@ -188,7 +188,7 @@ describe('faceLandmark68Net', () => {
   })
   it('single batch size 1 tf.Tensor4Ds', async () => {
-    const tensor = tf.tidy(() => tf.fromPixels(createCanvasFromMedia(imgEl1)).expandDims()) as tf.Tensor4D
+    const tensor = tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(imgEl1)).expandDims()) as tf.Tensor4D
     await expectAllTensorsReleased(async () => {
       const outTensor = await faceLandmark68Net.forwardInput(await toNetInput(tensor))
@@ -200,7 +200,7 @@ describe('faceLandmark68Net', () => {
   it('multiple batch size 1 tf.Tensor4Ds', async () => {
     const tensors = [imgEl1, imgEl1, imgEl1]
-      .map(el => tf.tidy(() => tf.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
+      .map(el => tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
     await expectAllTensorsReleased(async () => {
       const outTensor = await faceLandmark68Net.forwardInput(await toNetInput(tensors))
@@ -227,7 +227,7 @@ describe('faceLandmark68Net', () => {
   })
   it('single tf.Tensor3D', async () => {
-    const tensor = tf.fromPixels(createCanvasFromMedia(imgEl1))
+    const tensor = tf.browser.fromPixels(createCanvasFromMedia(imgEl1))
     await expectAllTensorsReleased(async () => {
       await faceLandmark68Net.detectLandmarks(tensor)
@@ -237,7 +237,7 @@ describe('faceLandmark68Net', () => {
   })
   it('multiple tf.Tensor3Ds', async () => {
-    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.fromPixels(createCanvasFromMedia(el)))
+    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
     await expectAllTensorsReleased(async () => {
@@ -248,7 +248,7 @@ describe('faceLandmark68Net', () => {
   })
   it('single batch size 1 tf.Tensor4Ds', async () => {
-    const tensor = tf.tidy(() => tf.fromPixels(createCanvasFromMedia(imgEl1)).expandDims()) as tf.Tensor4D
+    const tensor = tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(imgEl1)).expandDims()) as tf.Tensor4D
     await expectAllTensorsReleased(async () => {
       await faceLandmark68Net.detectLandmarks(tensor)
@@ -259,7 +259,7 @@ describe('faceLandmark68Net', () => {
   it('multiple batch size 1 tf.Tensor4Ds', async () => {
     const tensors = [imgEl1, imgEl1, imgEl1]
-      .map(el => tf.tidy(() => tf.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
+      .map(el => tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
     await expectAllTensorsReleased(async () => {
       await faceLandmark68Net.detectLandmarks(tensors)
...
@@ -91,7 +91,7 @@ describe('faceLandmark68TinyNet', () => {
   })
   it('computes face landmarks for batch of tf.Tensor3D', async () => {
-    const inputs = [imgEl1, imgEl2, imgElRect].map(el => tf.fromPixels(createCanvasFromMedia(el)))
+    const inputs = [imgEl1, imgEl2, imgElRect].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
     const faceLandmarkPositions = [
       faceLandmarkPositions1,
@@ -116,7 +116,7 @@ describe('faceLandmark68TinyNet', () => {
   })
   it('computes face landmarks for batch of mixed inputs', async () => {
-    const inputs = [imgEl1, tf.fromPixels(createCanvasFromMedia(imgEl2)), tf.fromPixels(createCanvasFromMedia(imgElRect))]
+    const inputs = [imgEl1, tf.browser.fromPixels(createCanvasFromMedia(imgEl2)), tf.browser.fromPixels(createCanvasFromMedia(imgElRect))]
     const faceLandmarkPositions = [
       faceLandmarkPositions1,
@@ -164,7 +164,7 @@ describe('faceLandmark68TinyNet', () => {
   })
   it('single tf.Tensor3D', async () => {
-    const tensor = tf.fromPixels(createCanvasFromMedia(imgEl1))
+    const tensor = tf.browser.fromPixels(createCanvasFromMedia(imgEl1))
     await expectAllTensorsReleased(async () => {
       const netInput = new NetInput([tensor])
@@ -176,7 +176,7 @@ describe('faceLandmark68TinyNet', () => {
   })
   it('multiple tf.Tensor3Ds', async () => {
-    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.fromPixels(createCanvasFromMedia(el)))
+    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
     await expectAllTensorsReleased(async () => {
       const netInput = new NetInput(tensors)
@@ -188,7 +188,7 @@ describe('faceLandmark68TinyNet', () => {
   })
   it('single batch size 1 tf.Tensor4Ds', async () => {
-    const tensor = tf.tidy(() => tf.fromPixels(createCanvasFromMedia(imgEl1)).expandDims()) as tf.Tensor4D
+    const tensor = tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(imgEl1)).expandDims()) as tf.Tensor4D
     await expectAllTensorsReleased(async () => {
       const outTensor = await faceLandmark68TinyNet.forwardInput(await toNetInput(tensor))
@@ -200,7 +200,7 @@ describe('faceLandmark68TinyNet', () => {
   it('multiple batch size 1 tf.Tensor4Ds', async () => {
     const tensors = [imgEl1, imgEl1, imgEl1]
-      .map(el => tf.tidy(() => tf.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
+      .map(el => tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
     await expectAllTensorsReleased(async () => {
       const outTensor = await faceLandmark68TinyNet.forwardInput(await toNetInput(tensors))
@@ -227,7 +227,7 @@ describe('faceLandmark68TinyNet', () => {
   })
   it('single tf.Tensor3D', async () => {
-    const tensor = tf.fromPixels(createCanvasFromMedia(imgEl1))
+    const tensor = tf.browser.fromPixels(createCanvasFromMedia(imgEl1))
     await expectAllTensorsReleased(async () => {
       await faceLandmark68TinyNet.detectLandmarks(tensor)
@@ -237,7 +237,7 @@ describe('faceLandmark68TinyNet', () => {
   })
   it('multiple tf.Tensor3Ds', async () => {
-    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.fromPixels(createCanvasFromMedia(el)))
+    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
     await expectAllTensorsReleased(async () => {
@@ -248,7 +248,7 @@ describe('faceLandmark68TinyNet', () => {
   })
   it('single batch size 1 tf.Tensor4Ds', async () => {
-    const tensor = tf.tidy(() => tf.fromPixels(createCanvasFromMedia(imgEl1)).expandDims()) as tf.Tensor4D
+    const tensor = tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(imgEl1)).expandDims()) as tf.Tensor4D
     await expectAllTensorsReleased(async () => {
       await faceLandmark68TinyNet.detectLandmarks(tensor)
@@ -259,7 +259,7 @@ describe('faceLandmark68TinyNet', () => {
   it('multiple batch size 1 tf.Tensor4Ds', async () => {
     const tensors = [imgEl1, imgEl1, imgEl1]
-      .map(el => tf.tidy(() => tf.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
+      .map(el => tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
     await expectAllTensorsReleased(async () => {
       await faceLandmark68TinyNet.detectLandmarks(tensors)
...
@@ -60,7 +60,7 @@ describe('faceRecognitionNet', () => {
   })
   it('computes face descriptors for batch of tf.Tensor3D', async () => {
-    const inputs = [imgEl1, imgEl2, imgElRect].map(el => tf.fromPixels(el))
+    const inputs = [imgEl1, imgEl2, imgElRect].map(el => tf.browser.fromPixels(el))
     const faceDescriptors = [
       faceDescriptor1,
@@ -77,7 +77,7 @@ describe('faceRecognitionNet', () => {
   })
   it('computes face descriptors for batch of mixed inputs', async () => {
-    const inputs = [imgEl1, tf.fromPixels(imgEl2), tf.fromPixels(imgElRect)]
+    const inputs = [imgEl1, tf.browser.fromPixels(imgEl2), tf.browser.fromPixels(imgElRect)]
     const faceDescriptors = [
       faceDescriptor1,
@@ -116,7 +116,7 @@ describe('faceRecognitionNet', () => {
   })
   it('single tf.Tensor3D', async () => {
-    const tensor = tf.fromPixels(imgEl1)
+    const tensor = tf.browser.fromPixels(imgEl1)
     await expectAllTensorsReleased(async () => {
       const netInput = new NetInput([tensor])
@@ -128,7 +128,7 @@ describe('faceRecognitionNet', () => {
   })
   it('multiple tf.Tensor3Ds', async () => {
-    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.fromPixels(el))
+    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.browser.fromPixels(el))
     await expectAllTensorsReleased(async () => {
       const netInput = new NetInput(tensors)
@@ -140,7 +140,7 @@ describe('faceRecognitionNet', () => {
   })
   it('single batch size 1 tf.Tensor4Ds', async () => {
-    const tensor = tf.tidy(() => tf.fromPixels(imgEl1).expandDims()) as tf.Tensor4D
+    const tensor = tf.tidy(() => tf.browser.fromPixels(imgEl1).expandDims()) as tf.Tensor4D
     await expectAllTensorsReleased(async () => {
       const outTensor = await faceRecognitionNet.forwardInput(await toNetInput(tensor))
@@ -152,7 +152,7 @@ describe('faceRecognitionNet', () => {
   it('multiple batch size 1 tf.Tensor4Ds', async () => {
     const tensors = [imgEl1, imgEl1, imgEl1]
-      .map(el => tf.tidy(() => tf.fromPixels(el).expandDims())) as tf.Tensor4D[]
+      .map(el => tf.tidy(() => tf.browser.fromPixels(el).expandDims())) as tf.Tensor4D[]
     await expectAllTensorsReleased(async () => {
       const outTensor = await faceRecognitionNet.forwardInput(await toNetInput(tensors))
@@ -179,7 +179,7 @@ describe('faceRecognitionNet', () => {
   })
   it('single tf.Tensor3D', async () => {
-    const tensor = tf.fromPixels(imgEl1)
+    const tensor = tf.browser.fromPixels(imgEl1)
     await expectAllTensorsReleased(async () => {
       await faceRecognitionNet.computeFaceDescriptor(tensor)
@@ -189,7 +189,7 @@ describe('faceRecognitionNet', () => {
   })
   it('multiple tf.Tensor3Ds', async () => {
-    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.fromPixels(el))
+    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.browser.fromPixels(el))
     await expectAllTensorsReleased(async () => {
@@ -200,7 +200,7 @@ describe('faceRecognitionNet', () => {
   })
   it('single batch size 1 tf.Tensor4Ds', async () => {
-    const tensor = tf.tidy(() => tf.fromPixels(imgEl1).expandDims()) as tf.Tensor4D
+    const tensor = tf.tidy(() => tf.browser.fromPixels(imgEl1).expandDims()) as tf.Tensor4D
     await expectAllTensorsReleased(async () => {
       await faceRecognitionNet.computeFaceDescriptor(tensor)
@@ -211,7 +211,7 @@ describe('faceRecognitionNet', () => {
   it('multiple batch size 1 tf.Tensor4Ds', async () => {
     const tensors = [imgEl1, imgEl1, imgEl1]
-      .map(el => tf.tidy(() => tf.fromPixels(el).expandDims())) as tf.Tensor4D[]
+      .map(el => tf.tidy(() => tf.browser.fromPixels(el).expandDims())) as tf.Tensor4D[]
     await expectAllTensorsReleased(async () => {
       await faceRecognitionNet.computeFaceDescriptor(tensors)
...
@@ -15,7 +15,7 @@ describe('ssdMobilenetv1 - node', () => {
   const expectedScores = [0.54, 0.81, 0.97, 0.88, 0.84, 0.61]
   beforeAll(async () => {
-    imgTensor = tf.fromPixels(createCanvasFromMedia(await loadImage('test/images/faces.jpg')))
+    imgTensor = tf.browser.fromPixels(createCanvasFromMedia(await loadImage('test/images/faces.jpg')))
     expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedSsdBoxes)
   })
...
@@ -15,7 +15,7 @@ describe('tinyFaceDetector - node', () => {
   const expectedScores = [0.7, 0.82, 0.93, 0.86, 0.79, 0.84]
   beforeAll(async () => {
-    imgTensor = tf.fromPixels(createCanvasFromMedia(await loadImage('test/images/faces.jpg')))
+    imgTensor = tf.browser.fromPixels(createCanvasFromMedia(await loadImage('test/images/faces.jpg')))
     expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes)
   })
...