Unverified commit 123df714 by Vincent Mühler, committed by GitHub

Merge pull request #246 from javyxx/master

Update to tensorflowjs 1.0.1
parents 21ce7136 4c7fb27f
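
This merge applies the TensorFlow.js 1.0 API changes throughout the code base and tests: `tf.fromPixels` moved to `tf.browser.fromPixels`, per-element `tensor.get(...)` reads were replaced by a single `arraySync()` download plus plain array indexing, and `tf.batchNormalization` became `tf.batchNorm` with a different argument order. A minimal before/after sketch of the first two patterns (the `canvas` and `scoresTensor` names here are illustrative, not taken from the diff):

```ts
import * as tf from '@tensorflow/tfjs-core';

declare const canvas: HTMLCanvasElement;  // illustrative input, assumed to exist

// tfjs-core 0.x:  const img = tf.fromPixels(canvas)
// tfjs-core 1.x:  fromPixels now lives under the tf.browser namespace
const img = tf.browser.fromPixels(canvas);

// tfjs-core 0.x:  const score = scoresTensor.get(y, x)
// tfjs-core 1.x:  download the values once, then index the nested array
const scoresTensor = tf.zeros([3, 3]) as tf.Tensor2D;
const scoresData = scoresTensor.arraySync();   // number[][]
const score = scoresData[1][2];
```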
@@ -35,12 +35,12 @@
   "author": "justadudewhohacks",
   "license": "MIT",
   "dependencies": {
-    "@tensorflow/tfjs-core": "0.14.2",
-    "tfjs-image-recognition-base": "^0.4.1",
+    "@tensorflow/tfjs-core": "1.0.1",
+    "tfjs-image-recognition-base": "^0.5.0",
     "tslib": "^1.9.3"
   },
   "devDependencies": {
-    "@tensorflow/tfjs-node": "^0.2.3",
+    "@tensorflow/tfjs-node": "^1.0.1",
     "@types/jasmine": "^3.3.8",
     "@types/node": "^10.12.18",
     "canvas": "2.0.1",
@@ -45,7 +45,7 @@ export class Mtcnn extends NeuralNetwork<NetParams> {
     const imgTensor = tf.tidy(() =>
       bgrToRgbTensor(
-        tf.expandDims(tf.fromPixels(inputCanvas)).toFloat() as tf.Tensor4D
+        tf.expandDims(tf.browser.fromPixels(inputCanvas)).toFloat() as tf.Tensor4D
       )
     )
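
The hunk above only touches the `fromPixels` call; the surrounding `bgrToRgbTensor` helper is not part of this diff. For reference, swapping BGR and RGB on an NHWC tensor can be expressed by reversing the channel axis; a sketch, not the library's own implementation:

```ts
import * as tf from '@tensorflow/tfjs-core';

// Sketch: flip the channel order of an NHWC tensor (axis 3), which swaps BGR <-> RGB.
// face-api.js ships its own bgrToRgbTensor helper; this only illustrates the idea.
function bgrToRgb(t: tf.Tensor4D): tf.Tensor4D {
  return tf.tidy(() => tf.reverse(t, 3) as tf.Tensor4D);
}
```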
@@ -28,9 +28,10 @@ function extractBoundingBoxes(
   // TODO: fix this!, maybe better to use tf.gather here
   const indices: Point[] = []
+  const scoresData = scoresTensor.arraySync();
   for (let y = 0; y < scoresTensor.shape[0]; y++) {
     for (let x = 0; x < scoresTensor.shape[1]; x++) {
-      if (scoresTensor.get(y, x) >= scoreThreshold) {
+      if (scoresData[y][x] >= scoreThreshold) {
         indices.push(new Point(x, y))
       }
     }

@@ -44,13 +45,14 @@ function extractBoundingBoxes(
       Math.round((idx.x * CELL_STRIDE + CELL_SIZE) / scale)
     )
-    const score = scoresTensor.get(idx.y, idx.x)
+    const score = scoresData[idx.y][idx.x]
+    const regionsData = regionsTensor.arraySync()
     const region = new MtcnnBox(
-      regionsTensor.get(idx.y, idx.x, 0),
-      regionsTensor.get(idx.y, idx.x, 1),
-      regionsTensor.get(idx.y, idx.x, 2),
-      regionsTensor.get(idx.y, idx.x, 3)
+      regionsData[idx.y][idx.x][0],
+      regionsData[idx.y][idx.x][1],
+      regionsData[idx.y][idx.x][2],
+      regionsData[idx.y][idx.x][3]
     )
     return {
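
The same replacement pattern recurs in the MTCNN stages below: instead of calling `tensor.get(...)` inside nested loops, the tensor is downloaded once with `arraySync()` and then indexed as a plain nested array. A self-contained sketch of that idea (the function and the `Cell` type are illustrative, not part of face-api.js):

```ts
import * as tf from '@tensorflow/tfjs-core';

type Cell = { x: number, y: number };  // stands in for the Point class used in the diff

// Sketch: collect the cells of a 2D score map that reach a threshold,
// reading the tensor once via arraySync() rather than per element.
function cellsAboveThreshold(scores: tf.Tensor2D, threshold: number): Cell[] {
  const data = scores.arraySync();  // number[][], a single synchronous download
  const out: Cell[] = [];
  for (let y = 0; y < data.length; y++) {
    for (let x = 0; x < data[y].length; x++) {
      if (data[y][x] >= threshold) {
        out.push({ x, y });
      }
    }
  }
  return out;
}
```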
@@ -54,13 +54,15 @@ export async function stage2(
   )
   stats.stage2_nms = Date.now() - ts
-  const regions = indicesNms.map(idx =>
-    new MtcnnBox(
-      rnetOuts[indices[idx]].regions.get(0, 0),
-      rnetOuts[indices[idx]].regions.get(0, 1),
-      rnetOuts[indices[idx]].regions.get(0, 2),
-      rnetOuts[indices[idx]].regions.get(0, 3)
-    )
+  const regions = indicesNms.map(idx =>{
+    const regionsData = rnetOuts[indices[idx]].regions.arraySync()
+    return new MtcnnBox(
+      regionsData[0][0],
+      regionsData[0][1],
+      regionsData[0][2],
+      regionsData[0][3]
+    )
+  }
   )
   finalScores = indicesNms.map(idx => filteredScores[idx])
@@ -39,12 +39,14 @@ export async function stage3(
     .filter(c => c.score > scoreThreshold)
     .map(({ idx }) => idx)
-  const filteredRegions = indices.map(idx => new MtcnnBox(
-    onetOuts[idx].regions.get(0, 0),
-    onetOuts[idx].regions.get(0, 1),
-    onetOuts[idx].regions.get(0, 2),
-    onetOuts[idx].regions.get(0, 3)
-  ))
+  const filteredRegions = indices.map(idx => {
+    const regionsData = onetOuts[idx].regions.arraySync();
+    return new MtcnnBox(
+      regionsData[0][0],
+      regionsData[0][1],
+      regionsData[0][2],
+      regionsData[0][3]
+    )})
   const filteredBoxes = indices
     .map((idx, i) => inputBoxes[idx].calibrate(filteredRegions[i]))
   const filteredScores = indices.map(idx => scores[idx])

@@ -67,11 +69,13 @@ export async function stage3(
     finalBoxes = indicesNms.map(idx => filteredBoxes[idx])
     finalScores = indicesNms.map(idx => filteredScores[idx])
     points = indicesNms.map((idx, i) =>
-      Array(5).fill(0).map((_, ptIdx) =>
-        new Point(
-          ((onetOuts[idx].points.get(0, ptIdx) * (finalBoxes[i].width + 1)) + finalBoxes[i].left) ,
-          ((onetOuts[idx].points.get(0, ptIdx + 5) * (finalBoxes[i].height + 1)) + finalBoxes[i].top)
-        )
+      Array(5).fill(0).map((_, ptIdx) =>{
+        const pointsData = onetOuts[idx].points.arraySync()
+        return new Point(
+          ((pointsData[0][ptIdx] * (finalBoxes[i].width + 1)) + finalBoxes[i].left) ,
+          ((pointsData[0][ptIdx+5] * (finalBoxes[i].height + 1)) + finalBoxes[i].top)
+        )
+      }
       )
     )
   }
@@ -85,15 +85,16 @@ export class SsdMobilenetv1 extends NeuralNetwork<NetParams> {
     const padX = inputSize / reshapedDims.width
     const padY = inputSize / reshapedDims.height
+    const boxesData = boxes.arraySync()
     const results = indices
       .map(idx => {
         const [top, bottom] = [
-          Math.max(0, boxes.get(idx, 0)),
-          Math.min(1.0, boxes.get(idx, 2))
+          Math.max(0, boxesData[idx][0]),
+          Math.min(1.0, boxesData[idx][2])
         ].map(val => val * padY)
         const [left, right] = [
-          Math.max(0, boxes.get(idx, 1)),
-          Math.min(1.0, boxes.get(idx, 3))
+          Math.max(0, boxesData[idx][1]),
+          Math.min(1.0, boxesData[idx][3])
         ].map(val => val * padX)
         return new FaceDetection(
           scoresData[idx],
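
For context on the hunk above: the detector's boxes are normalized `[top, left, bottom, right]` values, which the surrounding code clamps to `[0, 1]` and rescales by the letterbox padding factors `padY`/`padX`. A standalone sketch of that mapping (the helper name and return shape are illustrative):

```ts
// Sketch: clamp a normalized [top, left, bottom, right] box to [0, 1] and
// rescale it by the padding factors, mirroring the mapping in the hunk above.
function scaleBox(box: number[], padX: number, padY: number) {
  const [top, bottom] = [Math.max(0, box[0]), Math.min(1.0, box[2])].map(val => val * padY);
  const [left, right] = [Math.max(0, box[1]), Math.min(1.0, box[3])].map(val => val * padX);
  return { top, left, width: right - left, height: bottom - top };
}
```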
@@ -13,13 +13,13 @@ function depthwiseConvLayer(
   return tf.tidy(() => {
     let out = tf.depthwiseConv2d(x, params.filters, strides, 'same')
-    out = tf.batchNormalization<tf.Rank.R4>(
+    out = tf.batchNorm<tf.Rank.R4>(
       out,
       params.batch_norm_mean,
       params.batch_norm_variance,
-      epsilon,
+      params.batch_norm_offset,
       params.batch_norm_scale,
-      params.batch_norm_offset
+      epsilon
     )
     return tf.clipByValue(out, 0, 6)
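
The swap of `epsilon` and `params.batch_norm_offset` above follows the new signature: `tf.batchNorm(x, mean, variance, offset?, scale?, varianceEpsilon?)`, whereas the old `tf.batchNormalization` took `(x, mean, variance, varianceEpsilon, scale?, offset?)`. A minimal sketch with illustrative shapes:

```ts
import * as tf from '@tensorflow/tfjs-core';

// Sketch: the tfjs 1.x batch-norm call; the shapes below are only for illustration.
const x = tf.zeros([1, 8, 8, 16]) as tf.Tensor4D;
const mean = tf.zeros([16]);
const variance = tf.ones([16]);
const offset = tf.zeros([16]);
const scale = tf.ones([16]);
const epsilon = 0.001;

// tfjs 0.x: tf.batchNormalization(x, mean, variance, epsilon, scale, offset)
// tfjs 1.x: offset and scale come before the epsilon
const out = tf.batchNorm<tf.Rank.R4>(x, mean, variance, offset, scale, epsilon);
```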
@@ -49,14 +49,15 @@ export function nonMaxSuppression(
 }
 function IOU(boxes: tf.Tensor2D, i: number, j: number) {
-  const yminI = Math.min(boxes.get(i, 0), boxes.get(i, 2))
-  const xminI = Math.min(boxes.get(i, 1), boxes.get(i, 3))
-  const ymaxI = Math.max(boxes.get(i, 0), boxes.get(i, 2))
-  const xmaxI = Math.max(boxes.get(i, 1), boxes.get(i, 3))
-  const yminJ = Math.min(boxes.get(j, 0), boxes.get(j, 2))
-  const xminJ = Math.min(boxes.get(j, 1), boxes.get(j, 3))
-  const ymaxJ = Math.max(boxes.get(j, 0), boxes.get(j, 2))
-  const xmaxJ = Math.max(boxes.get(j, 1), boxes.get(j, 3))
+  const boxesData = boxes.arraySync()
+  const yminI = Math.min(boxesData[i][0], boxesData[i][2])
+  const xminI = Math.min(boxesData[i][1], boxesData[i][3])
+  const ymaxI = Math.max(boxesData[i][0], boxesData[i][2])
+  const xmaxI = Math.max(boxesData[i][1], boxesData[i][3])
+  const yminJ = Math.min(boxesData[j][0], boxesData[j][2])
+  const xminJ = Math.min(boxesData[j][1], boxesData[j][3])
+  const ymaxJ = Math.max(boxesData[j][0], boxesData[j][2])
+  const xmaxJ = Math.max(boxesData[j][1], boxesData[j][3])
   const areaI = (ymaxI - yminI) * (xmaxI - xminI)
   const areaJ = (ymaxJ - yminJ) * (xmaxJ - xminJ)
   if (areaI <= 0 || areaJ <= 0) {
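
The `IOU` helper above reads the box corners from the tensor and, in code not shown in the hunk, combines the two areas with the intersection in the usual way. For reference, the full computation on plain `[ymin, xmin, ymax, xmax]` arrays, as a sketch:

```ts
// Sketch: intersection-over-union of two [ymin, xmin, ymax, xmax] boxes,
// the quantity the IOU helper above derives from the boxes tensor.
function iou(a: number[], b: number[]): number {
  const areaA = (a[2] - a[0]) * (a[3] - a[1]);
  const areaB = (b[2] - b[0]) * (b[3] - b[1]);
  if (areaA <= 0 || areaB <= 0) {
    return 0.0;
  }
  const yTop = Math.max(a[0], b[0]);
  const xLeft = Math.max(a[1], b[1]);
  const yBottom = Math.min(a[2], b[2]);
  const xRight = Math.min(a[3], b[3]);
  const intersection = Math.max(0, yBottom - yTop) * Math.max(0, xRight - xLeft);
  return intersection / (areaA + areaB - intersection);
}
```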
@@ -6,7 +6,7 @@ describe('extractFaceTensors', () => {
   let imgTensor: tf.Tensor3D
   beforeAll(async () => {
-    imgTensor = tf.fromPixels(createCanvasFromMedia(await loadImage('test/images/face1.png')))
+    imgTensor = tf.browser.fromPixels(createCanvasFromMedia(await loadImage('test/images/face1.png')))
   })
   describe('extracts tensors', () => {
@@ -57,7 +57,7 @@ describe('faceExpressionNet', () => {
   })
   it('computes face landmarks for batch of tf.Tensor3D', async () => {
-    const inputs = [imgElAngry, imgElSurprised].map(el => tf.fromPixels(createCanvasFromMedia(el)))
+    const inputs = [imgElAngry, imgElSurprised].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
     const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressionPrediction[][]
     expect(Array.isArray(results)).toBe(true)

@@ -80,7 +80,7 @@ describe('faceExpressionNet', () => {
   })
   it('computes face landmarks for batch of mixed inputs', async () => {
-    const inputs = [imgElAngry, tf.fromPixels(createCanvasFromMedia(imgElSurprised))]
+    const inputs = [imgElAngry, tf.browser.fromPixels(createCanvasFromMedia(imgElSurprised))]
     const results = await faceExpressionNet.predictExpressions(inputs) as FaceExpressionPrediction[][]
     expect(Array.isArray(results)).toBe(true)

@@ -125,7 +125,7 @@ describe('faceExpressionNet', () => {
   })
   it('single tf.Tensor3D', async () => {
-    const tensor = tf.fromPixels(createCanvasFromMedia(imgElAngry))
+    const tensor = tf.browser.fromPixels(createCanvasFromMedia(imgElAngry))
     await expectAllTensorsReleased(async () => {
       const outTensor = await faceExpressionNet.forwardInput(await toNetInput(tensor))

@@ -136,7 +136,7 @@ describe('faceExpressionNet', () => {
   })
   it('multiple tf.Tensor3Ds', async () => {
-    const tensors = [imgElAngry, imgElAngry, imgElAngry].map(el => tf.fromPixels(createCanvasFromMedia(el)))
+    const tensors = [imgElAngry, imgElAngry, imgElAngry].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
     await expectAllTensorsReleased(async () => {
       const outTensor = await faceExpressionNet.forwardInput(await toNetInput(tensors))

@@ -147,7 +147,7 @@ describe('faceExpressionNet', () => {
   })
   it('single batch size 1 tf.Tensor4Ds', async () => {
-    const tensor = tf.tidy(() => tf.fromPixels(createCanvasFromMedia(imgElAngry)).expandDims()) as tf.Tensor4D
+    const tensor = tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(imgElAngry)).expandDims()) as tf.Tensor4D
     await expectAllTensorsReleased(async () => {
       const outTensor = await faceExpressionNet.forwardInput(await toNetInput(tensor))

@@ -159,7 +159,7 @@ describe('faceExpressionNet', () => {
   it('multiple batch size 1 tf.Tensor4Ds', async () => {
     const tensors = [imgElAngry, imgElAngry, imgElAngry]
-      .map(el => tf.tidy(() => tf.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
+      .map(el => tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
     await expectAllTensorsReleased(async () => {
       const outTensor = await faceExpressionNet.forwardInput(await toNetInput(tensors))

@@ -186,7 +186,7 @@ describe('faceExpressionNet', () => {
   })
   it('single tf.Tensor3D', async () => {
-    const tensor = tf.fromPixels(createCanvasFromMedia(imgElAngry))
+    const tensor = tf.browser.fromPixels(createCanvasFromMedia(imgElAngry))
     await expectAllTensorsReleased(async () => {
       await faceExpressionNet.predictExpressions(tensor)

@@ -196,7 +196,7 @@ describe('faceExpressionNet', () => {
   })
   it('multiple tf.Tensor3Ds', async () => {
-    const tensors = [imgElAngry, imgElAngry, imgElAngry].map(el => tf.fromPixels(createCanvasFromMedia(el)))
+    const tensors = [imgElAngry, imgElAngry, imgElAngry].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
     await expectAllTensorsReleased(async () => {

@@ -207,7 +207,7 @@ describe('faceExpressionNet', () => {
   })
   it('single batch size 1 tf.Tensor4Ds', async () => {
-    const tensor = tf.tidy(() => tf.fromPixels(createCanvasFromMedia(imgElAngry)).expandDims()) as tf.Tensor4D
+    const tensor = tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(imgElAngry)).expandDims()) as tf.Tensor4D
     await expectAllTensorsReleased(async () => {
       await faceExpressionNet.predictExpressions(tensor)

@@ -218,7 +218,7 @@ describe('faceExpressionNet', () => {
   it('multiple batch size 1 tf.Tensor4Ds', async () => {
     const tensors = [imgElAngry, imgElAngry, imgElAngry]
-      .map(el => tf.tidy(() => tf.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
+      .map(el => tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
     await expectAllTensorsReleased(async () => {
       await faceExpressionNet.predictExpressions(tensors)
@@ -92,7 +92,7 @@ describe('faceLandmark68Net', () => {
   })
   it('computes face landmarks for batch of tf.Tensor3D', async () => {
-    const inputs = [imgEl1, imgEl2, imgElRect].map(el => tf.fromPixels(createCanvasFromMedia(el)))
+    const inputs = [imgEl1, imgEl2, imgElRect].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
     const faceLandmarkPositions = [
       faceLandmarkPositions1,

@@ -117,7 +117,7 @@ describe('faceLandmark68Net', () => {
   })
   it('computes face landmarks for batch of mixed inputs', async () => {
-    const inputs = [imgEl1, tf.fromPixels(createCanvasFromMedia(imgEl2)), tf.fromPixels(createCanvasFromMedia(imgElRect))]
+    const inputs = [imgEl1, tf.browser.fromPixels(createCanvasFromMedia(imgEl2)), tf.browser.fromPixels(createCanvasFromMedia(imgElRect))]
     const faceLandmarkPositions = [
       faceLandmarkPositions1,

@@ -164,7 +164,7 @@ describe('faceLandmark68Net', () => {
   })
   it('single tf.Tensor3D', async () => {
-    const tensor = tf.fromPixels(createCanvasFromMedia(imgEl1))
+    const tensor = tf.browser.fromPixels(createCanvasFromMedia(imgEl1))
     await expectAllTensorsReleased(async () => {
       const netInput = new NetInput([tensor])

@@ -176,7 +176,7 @@ describe('faceLandmark68Net', () => {
   })
   it('multiple tf.Tensor3Ds', async () => {
-    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.fromPixels(createCanvasFromMedia(el)))
+    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
     await expectAllTensorsReleased(async () => {
       const netInput = new NetInput(tensors)

@@ -188,7 +188,7 @@ describe('faceLandmark68Net', () => {
   })
   it('single batch size 1 tf.Tensor4Ds', async () => {
-    const tensor = tf.tidy(() => tf.fromPixels(createCanvasFromMedia(imgEl1)).expandDims()) as tf.Tensor4D
+    const tensor = tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(imgEl1)).expandDims()) as tf.Tensor4D
     await expectAllTensorsReleased(async () => {
       const outTensor = await faceLandmark68Net.forwardInput(await toNetInput(tensor))

@@ -200,7 +200,7 @@ describe('faceLandmark68Net', () => {
   it('multiple batch size 1 tf.Tensor4Ds', async () => {
     const tensors = [imgEl1, imgEl1, imgEl1]
-      .map(el => tf.tidy(() => tf.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
+      .map(el => tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
     await expectAllTensorsReleased(async () => {
       const outTensor = await faceLandmark68Net.forwardInput(await toNetInput(tensors))

@@ -227,7 +227,7 @@ describe('faceLandmark68Net', () => {
   })
   it('single tf.Tensor3D', async () => {
-    const tensor = tf.fromPixels(createCanvasFromMedia(imgEl1))
+    const tensor = tf.browser.fromPixels(createCanvasFromMedia(imgEl1))
     await expectAllTensorsReleased(async () => {
       await faceLandmark68Net.detectLandmarks(tensor)

@@ -237,7 +237,7 @@ describe('faceLandmark68Net', () => {
   })
   it('multiple tf.Tensor3Ds', async () => {
-    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.fromPixels(createCanvasFromMedia(el)))
+    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
     await expectAllTensorsReleased(async () => {

@@ -248,7 +248,7 @@ describe('faceLandmark68Net', () => {
   })
   it('single batch size 1 tf.Tensor4Ds', async () => {
-    const tensor = tf.tidy(() => tf.fromPixels(createCanvasFromMedia(imgEl1)).expandDims()) as tf.Tensor4D
+    const tensor = tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(imgEl1)).expandDims()) as tf.Tensor4D
     await expectAllTensorsReleased(async () => {
       await faceLandmark68Net.detectLandmarks(tensor)

@@ -259,7 +259,7 @@ describe('faceLandmark68Net', () => {
   it('multiple batch size 1 tf.Tensor4Ds', async () => {
     const tensors = [imgEl1, imgEl1, imgEl1]
-      .map(el => tf.tidy(() => tf.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
+      .map(el => tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
     await expectAllTensorsReleased(async () => {
       await faceLandmark68Net.detectLandmarks(tensors)
@@ -91,7 +91,7 @@ describe('faceLandmark68TinyNet', () => {
   })
   it('computes face landmarks for batch of tf.Tensor3D', async () => {
-    const inputs = [imgEl1, imgEl2, imgElRect].map(el => tf.fromPixels(createCanvasFromMedia(el)))
+    const inputs = [imgEl1, imgEl2, imgElRect].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
     const faceLandmarkPositions = [
       faceLandmarkPositions1,

@@ -116,7 +116,7 @@ describe('faceLandmark68TinyNet', () => {
   })
   it('computes face landmarks for batch of mixed inputs', async () => {
-    const inputs = [imgEl1, tf.fromPixels(createCanvasFromMedia(imgEl2)), tf.fromPixels(createCanvasFromMedia(imgElRect))]
+    const inputs = [imgEl1, tf.browser.fromPixels(createCanvasFromMedia(imgEl2)), tf.browser.fromPixels(createCanvasFromMedia(imgElRect))]
     const faceLandmarkPositions = [
       faceLandmarkPositions1,

@@ -164,7 +164,7 @@ describe('faceLandmark68TinyNet', () => {
   })
   it('single tf.Tensor3D', async () => {
-    const tensor = tf.fromPixels(createCanvasFromMedia(imgEl1))
+    const tensor = tf.browser.fromPixels(createCanvasFromMedia(imgEl1))
     await expectAllTensorsReleased(async () => {
       const netInput = new NetInput([tensor])

@@ -176,7 +176,7 @@ describe('faceLandmark68TinyNet', () => {
   })
   it('multiple tf.Tensor3Ds', async () => {
-    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.fromPixels(createCanvasFromMedia(el)))
+    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
     await expectAllTensorsReleased(async () => {
       const netInput = new NetInput(tensors)

@@ -188,7 +188,7 @@ describe('faceLandmark68TinyNet', () => {
   })
   it('single batch size 1 tf.Tensor4Ds', async () => {
-    const tensor = tf.tidy(() => tf.fromPixels(createCanvasFromMedia(imgEl1)).expandDims()) as tf.Tensor4D
+    const tensor = tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(imgEl1)).expandDims()) as tf.Tensor4D
     await expectAllTensorsReleased(async () => {
       const outTensor = await faceLandmark68TinyNet.forwardInput(await toNetInput(tensor))

@@ -200,7 +200,7 @@ describe('faceLandmark68TinyNet', () => {
   it('multiple batch size 1 tf.Tensor4Ds', async () => {
     const tensors = [imgEl1, imgEl1, imgEl1]
-      .map(el => tf.tidy(() => tf.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
+      .map(el => tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
     await expectAllTensorsReleased(async () => {
       const outTensor = await faceLandmark68TinyNet.forwardInput(await toNetInput(tensors))

@@ -227,7 +227,7 @@ describe('faceLandmark68TinyNet', () => {
   })
   it('single tf.Tensor3D', async () => {
-    const tensor = tf.fromPixels(createCanvasFromMedia(imgEl1))
+    const tensor = tf.browser.fromPixels(createCanvasFromMedia(imgEl1))
     await expectAllTensorsReleased(async () => {
       await faceLandmark68TinyNet.detectLandmarks(tensor)

@@ -237,7 +237,7 @@ describe('faceLandmark68TinyNet', () => {
   })
   it('multiple tf.Tensor3Ds', async () => {
-    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.fromPixels(createCanvasFromMedia(el)))
+    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.browser.fromPixels(createCanvasFromMedia(el)))
     await expectAllTensorsReleased(async () => {

@@ -248,7 +248,7 @@ describe('faceLandmark68TinyNet', () => {
   })
   it('single batch size 1 tf.Tensor4Ds', async () => {
-    const tensor = tf.tidy(() => tf.fromPixels(createCanvasFromMedia(imgEl1)).expandDims()) as tf.Tensor4D
+    const tensor = tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(imgEl1)).expandDims()) as tf.Tensor4D
     await expectAllTensorsReleased(async () => {
       await faceLandmark68TinyNet.detectLandmarks(tensor)

@@ -259,7 +259,7 @@ describe('faceLandmark68TinyNet', () => {
   it('multiple batch size 1 tf.Tensor4Ds', async () => {
     const tensors = [imgEl1, imgEl1, imgEl1]
-      .map(el => tf.tidy(() => tf.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
+      .map(el => tf.tidy(() => tf.browser.fromPixels(createCanvasFromMedia(el)).expandDims())) as tf.Tensor4D[]
     await expectAllTensorsReleased(async () => {
       await faceLandmark68TinyNet.detectLandmarks(tensors)
@@ -60,7 +60,7 @@ describe('faceRecognitionNet', () => {
   })
   it('computes face descriptors for batch of tf.Tensor3D', async () => {
-    const inputs = [imgEl1, imgEl2, imgElRect].map(el => tf.fromPixels(el))
+    const inputs = [imgEl1, imgEl2, imgElRect].map(el => tf.browser.fromPixels(el))
     const faceDescriptors = [
       faceDescriptor1,

@@ -77,7 +77,7 @@ describe('faceRecognitionNet', () => {
   })
   it('computes face descriptors for batch of mixed inputs', async () => {
-    const inputs = [imgEl1, tf.fromPixels(imgEl2), tf.fromPixels(imgElRect)]
+    const inputs = [imgEl1, tf.browser.fromPixels(imgEl2), tf.browser.fromPixels(imgElRect)]
     const faceDescriptors = [
       faceDescriptor1,

@@ -116,7 +116,7 @@ describe('faceRecognitionNet', () => {
   })
   it('single tf.Tensor3D', async () => {
-    const tensor = tf.fromPixels(imgEl1)
+    const tensor = tf.browser.fromPixels(imgEl1)
     await expectAllTensorsReleased(async () => {
       const netInput = new NetInput([tensor])

@@ -128,7 +128,7 @@ describe('faceRecognitionNet', () => {
   })
   it('multiple tf.Tensor3Ds', async () => {
-    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.fromPixels(el))
+    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.browser.fromPixels(el))
     await expectAllTensorsReleased(async () => {
       const netInput = new NetInput(tensors)

@@ -140,7 +140,7 @@ describe('faceRecognitionNet', () => {
   })
   it('single batch size 1 tf.Tensor4Ds', async () => {
-    const tensor = tf.tidy(() => tf.fromPixels(imgEl1).expandDims()) as tf.Tensor4D
+    const tensor = tf.tidy(() => tf.browser.fromPixels(imgEl1).expandDims()) as tf.Tensor4D
     await expectAllTensorsReleased(async () => {
       const outTensor = await faceRecognitionNet.forwardInput(await toNetInput(tensor))

@@ -152,7 +152,7 @@ describe('faceRecognitionNet', () => {
   it('multiple batch size 1 tf.Tensor4Ds', async () => {
     const tensors = [imgEl1, imgEl1, imgEl1]
-      .map(el => tf.tidy(() => tf.fromPixels(el).expandDims())) as tf.Tensor4D[]
+      .map(el => tf.tidy(() => tf.browser.fromPixels(el).expandDims())) as tf.Tensor4D[]
     await expectAllTensorsReleased(async () => {
       const outTensor = await faceRecognitionNet.forwardInput(await toNetInput(tensors))

@@ -179,7 +179,7 @@ describe('faceRecognitionNet', () => {
   })
   it('single tf.Tensor3D', async () => {
-    const tensor = tf.fromPixels(imgEl1)
+    const tensor = tf.browser.fromPixels(imgEl1)
     await expectAllTensorsReleased(async () => {
       await faceRecognitionNet.computeFaceDescriptor(tensor)

@@ -189,7 +189,7 @@ describe('faceRecognitionNet', () => {
   })
   it('multiple tf.Tensor3Ds', async () => {
-    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.fromPixels(el))
+    const tensors = [imgEl1, imgEl1, imgEl1].map(el => tf.browser.fromPixels(el))
     await expectAllTensorsReleased(async () => {

@@ -200,7 +200,7 @@ describe('faceRecognitionNet', () => {
   })
   it('single batch size 1 tf.Tensor4Ds', async () => {
-    const tensor = tf.tidy(() => tf.fromPixels(imgEl1).expandDims()) as tf.Tensor4D
+    const tensor = tf.tidy(() => tf.browser.fromPixels(imgEl1).expandDims()) as tf.Tensor4D
     await expectAllTensorsReleased(async () => {
       await faceRecognitionNet.computeFaceDescriptor(tensor)

@@ -211,7 +211,7 @@ describe('faceRecognitionNet', () => {
   it('multiple batch size 1 tf.Tensor4Ds', async () => {
     const tensors = [imgEl1, imgEl1, imgEl1]
-      .map(el => tf.tidy(() => tf.fromPixels(el).expandDims())) as tf.Tensor4D[]
+      .map(el => tf.tidy(() => tf.browser.fromPixels(el).expandDims())) as tf.Tensor4D[]
     await expectAllTensorsReleased(async () => {
       await faceRecognitionNet.computeFaceDescriptor(tensors)
@@ -15,7 +15,7 @@ describe('ssdMobilenetv1 - node', () => {
   const expectedScores = [0.54, 0.81, 0.97, 0.88, 0.84, 0.61]
   beforeAll(async () => {
-    imgTensor = tf.fromPixels(createCanvasFromMedia(await loadImage('test/images/faces.jpg')))
+    imgTensor = tf.browser.fromPixels(createCanvasFromMedia(await loadImage('test/images/faces.jpg')))
     expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedSsdBoxes)
   })
@@ -15,7 +15,7 @@ describe('tinyFaceDetector - node', () => {
   const expectedScores = [0.7, 0.82, 0.93, 0.86, 0.79, 0.84]
   beforeAll(async () => {
-    imgTensor = tf.fromPixels(createCanvasFromMedia(await loadImage('test/images/faces.jpg')))
+    imgTensor = tf.browser.fromPixels(createCanvasFromMedia(await loadImage('test/images/faces.jpg')))
     expectedFullFaceDescriptions = await assembleExpectedFullFaceDescriptions(expectedTinyFaceDetectorBoxes)
   })