Commit 2b794bc3 by vincent

added mtcnn e2e tests + check in final model weights

parent 1d133c54
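In short: the MTCNN end-to-end specs below exercise face detection plus 5-point landmark detection against test/images/faces.jpg, and the final model weights (plus manifest) are checked in. A minimal usage sketch of the API under test, assembled from the diff below (assumes a karma-style test server exposing 'base/weights' and 'base/test/images', and an async context):

// fetch a test image and run MTCNN on it, as the specs below do
const img = await (await fetch('base/test/images/faces.jpg')).blob()
const imgEl = await faceapi.bufferToImage(img)
const net = new faceapi.Mtcnn()
await net.load('base/weights')
const results = await net.forward(imgEl, { minFaceSize: 20 })
results.forEach(({ faceDetection, faceLandmarks }) => {
  // FaceDetection exposes getScore()/getBox(), FaceLandmarks5 exposes getPositions()
  console.log(faceDetection.getScore(), faceDetection.getBox(), faceLandmarks.getPositions())
})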
@@ -72,7 +72,7 @@ export class Mtcnn extends NeuralNetwork<NetParams> {
      scaleSteps
    } = Object.assign({}, getDefaultMtcnnForwardParams(), forwardParams)

-   const scales = scaleSteps || pyramidDown(minFaceSize, scaleFactor, [height, width])
+   const scales = (scaleSteps || pyramidDown(minFaceSize, scaleFactor, [height, width]))
      .filter(scale => {
        const sizes = getSizesForScale(scale, [height, width])
        return Math.min(sizes.width, sizes.height) > CELL_SIZE
...
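The one-line change in the Mtcnn hunk above is an operator-precedence fix: without the added parentheses, .filter() binds only to the pyramidDown(...) result, so a caller-supplied scaleSteps array bypassed the CELL_SIZE check entirely. A hypothetical standalone illustration of the difference (names and values invented for the example):

const provided: number[] | undefined = [0.9, 0.05]
const computed = [0.8, 0.4, 0.1]
const keep = (s: number) => s > 0.07
const before = provided || computed.filter(keep)   // -> [0.9, 0.05]: provided is returned unfiltered
const after = (provided || computed).filter(keep)  // -> [0.9]: the filter applies to either operand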
+ import * as tf from '@tensorflow/tfjs-core';
  import { NetInput } from '../../src/NetInput';
- import { bufferToImage, createCanvasFromMedia } from '../../src/utils';
+ import { bufferToImage } from '../../src/utils';
  import { expectAllTensorsReleased, tensor3D } from '../utils';

  describe('NetInput', () => {
    let imgEl: HTMLImageElement
...
  import * as faceapi from '../../../src';
- import { FaceDetection } from '../../../src/faceDetectionNet/FaceDetection';
- import { IRect } from '../../../src/Rect';
- import { describeWithNets, expectAllTensorsReleased, expectMaxDelta } from '../../utils';
+ import { describeWithNets, expectAllTensorsReleased, expectRectClose } from '../../utils';
-
- function expectRectClose(
-   result: IRect,
-   expectedBox: IRect,
-   maxDelta: number
- ) {
-   const { x, y, width, height } = result
-   expectMaxDelta(x, expectedBox.x, maxDelta)
-   expectMaxDelta(y, expectedBox.y, maxDelta)
-   expectMaxDelta(width, expectedBox.width, maxDelta)
-   expectMaxDelta(height, expectedBox.height, maxDelta)
- }

  const expectedBoxes = [
    { x: 48, y: 253, width: 104, height: 129 },
@@ -39,7 +25,7 @@ describe('faceDetectionNet', () => {
      const maxBoxDelta = 1

      it('scores > 0.8', async () => {
-       const detections = await faceDetectionNet.locateFaces(imgEl) as FaceDetection[]
+       const detections = await faceDetectionNet.locateFaces(imgEl) as faceapi.FaceDetection[]
        expect(detections.length).toEqual(3)
        detections.forEach((det, i) => {

@@ -51,7 +37,7 @@ describe('faceDetectionNet', () => {
      })

      it('scores > 0.5', async () => {
-       const detections = await faceDetectionNet.locateFaces(imgEl, 0.5) as FaceDetection[]
+       const detections = await faceDetectionNet.locateFaces(imgEl, 0.5) as faceapi.FaceDetection[]
        expect(detections.length).toEqual(6)
        detections.forEach((det, i) => {

@@ -70,7 +56,7 @@ describe('faceDetectionNet', () => {
      const maxBoxDelta = 5

      it('scores > 0.8', async () => {
-       const detections = await faceDetectionNet.locateFaces(imgEl) as FaceDetection[]
+       const detections = await faceDetectionNet.locateFaces(imgEl) as faceapi.FaceDetection[]
        expect(detections.length).toEqual(4)
        detections.forEach((det, i) => {

@@ -82,7 +68,7 @@ describe('faceDetectionNet', () => {
      })

      it('scores > 0.5', async () => {
-       const detections = await faceDetectionNet.locateFaces(imgEl, 0.5) as FaceDetection[]
+       const detections = await faceDetectionNet.locateFaces(imgEl, 0.5) as faceapi.FaceDetection[]
        expect(detections.length).toEqual(6)
        detections.forEach((det, i) => {

@@ -103,7 +89,7 @@ describe('faceDetectionNet', () => {
      await expectAllTensorsReleased(async () => {
        const res = await fetch('base/weights/uncompressed/face_detection_model.weights')
        const weights = new Float32Array(await res.arrayBuffer())
-       const net = faceapi.faceDetectionNet(weights)
+       const net = faceapi.createFaceDetectionNet(weights)
        net.dispose()
      })
    })
...
@@ -2,7 +2,6 @@ import * as tf from '@tensorflow/tfjs-core';
  import * as faceapi from '../../../src';
  import { isTensor3D } from '../../../src/commons/isTensor';
- import { FaceLandmarks } from '../../../src/faceLandmarkNet/FaceLandmarks';
  import { Point } from '../../../src/Point';
  import { Dimensions, TMediaElement } from '../../../src/types';
  import { expectMaxDelta, expectAllTensorsReleased, tensor3D, describeWithNets } from '../../utils';

@@ -43,7 +42,7 @@ describe('faceLandmarkNet', () => {
      it('computes face landmarks for squared input', async () => {
        const { width, height } = imgEl1
-       const result = await faceLandmarkNet.detectLandmarks(imgEl1) as FaceLandmarks
+       const result = await faceLandmarkNet.detectLandmarks(imgEl1) as faceapi.FaceLandmarks68
        expect(result.getImageWidth()).toEqual(width)
        expect(result.getImageHeight()).toEqual(height)
        expect(result.getShift().x).toEqual(0)

@@ -57,7 +56,7 @@ describe('faceLandmarkNet', () => {
      it('computes face landmarks for rectangular input', async () => {
        const { width, height } = imgElRect
-       const result = await faceLandmarkNet.detectLandmarks(imgElRect) as FaceLandmarks
+       const result = await faceLandmarkNet.detectLandmarks(imgElRect) as faceapi.FaceLandmarks68
        expect(result.getImageWidth()).toEqual(width)
        expect(result.getImageHeight()).toEqual(height)
        expect(result.getShift().x).toEqual(0)

@@ -75,7 +74,7 @@ describe('faceLandmarkNet', () => {
      it('computes face landmarks for squared input', async () => {
        const { width, height } = imgEl1
-       const result = await faceLandmarkNet.detectLandmarks(imgEl1) as FaceLandmarks
+       const result = await faceLandmarkNet.detectLandmarks(imgEl1) as faceapi.FaceLandmarks68
        expect(result.getImageWidth()).toEqual(width)
        expect(result.getImageHeight()).toEqual(height)
        expect(result.getShift().x).toEqual(0)

@@ -89,7 +88,7 @@ describe('faceLandmarkNet', () => {
      it('computes face landmarks for rectangular input', async () => {
        const { width, height } = imgElRect
-       const result = await faceLandmarkNet.detectLandmarks(imgElRect) as FaceLandmarks
+       const result = await faceLandmarkNet.detectLandmarks(imgElRect) as faceapi.FaceLandmarks68
        expect(result.getImageWidth()).toEqual(width)
        expect(result.getImageHeight()).toEqual(height)
        expect(result.getShift().x).toEqual(0)

@@ -113,7 +112,7 @@ describe('faceLandmarkNet', () => {
          faceLandmarkPositionsRect
        ]
-       const results = await faceLandmarkNet.detectLandmarks(inputs) as FaceLandmarks[]
+       const results = await faceLandmarkNet.detectLandmarks(inputs) as faceapi.FaceLandmarks68[]
        expect(Array.isArray(results)).toBe(true)
        expect(results.length).toEqual(3)
        results.forEach((result, batchIdx) => {

@@ -138,7 +137,7 @@ describe('faceLandmarkNet', () => {
          faceLandmarkPositionsRect
        ]
-       const results = await faceLandmarkNet.detectLandmarks(inputs) as FaceLandmarks[]
+       const results = await faceLandmarkNet.detectLandmarks(inputs) as faceapi.FaceLandmarks68[]
        expect(Array.isArray(results)).toBe(true)
        expect(results.length).toEqual(3)
        results.forEach((result, batchIdx) => {

@@ -163,7 +162,7 @@ describe('faceLandmarkNet', () => {
          faceLandmarkPositionsRect
        ]
-       const results = await faceLandmarkNet.detectLandmarks(tf.stack(inputs) as tf.Tensor4D) as FaceLandmarks[]
+       const results = await faceLandmarkNet.detectLandmarks(tf.stack(inputs) as tf.Tensor4D) as faceapi.FaceLandmarks68[]
        expect(Array.isArray(results)).toBe(true)
        expect(results.length).toEqual(2)
        results.forEach((result, batchIdx) => {

@@ -188,7 +187,7 @@ describe('faceLandmarkNet', () => {
          faceLandmarkPositionsRect
        ]
-       const results = await faceLandmarkNet.detectLandmarks(inputs) as FaceLandmarks[]
+       const results = await faceLandmarkNet.detectLandmarks(inputs) as faceapi.FaceLandmarks68[]
        expect(Array.isArray(results)).toBe(true)
        expect(results.length).toEqual(3)
        results.forEach((result, batchIdx) => {

@@ -214,7 +213,7 @@ describe('faceLandmarkNet', () => {
      await expectAllTensorsReleased(async () => {
        const res = await fetch('base/weights/uncompressed/face_landmark_68_model.weights')
        const weights = new Float32Array(await res.arrayBuffer())
-       const net = faceapi.faceLandmarkNet(weights)
+       const net = faceapi.createFaceLandmarkNet(weights)
        net.dispose()
      })
    })
...
@@ -141,7 +141,7 @@ describe('faceRecognitionNet', () => {
      await expectAllTensorsReleased(async () => {
        const res = await fetch('base/weights/uncompressed/face_recognition_model.weights')
        const weights = new Float32Array(await res.arrayBuffer())
-       const net = faceapi.faceRecognitionNet(weights)
+       const net = faceapi.createFaceRecognitionNet(weights)
        net.dispose()
      })
    })
...
import * as faceapi from '../../../src';
import { MtcnnResult } from '../../../src/mtcnn/types';
import { Point } from '../../../src/Point';
import { describeWithNets, expectAllTensorsReleased, expectPointClose, expectRectClose, expectMaxDelta } from '../../utils';
const expectedBoxes = [
{ x: 70, y: 21, width: 112, height: 112 },
{ x: 36, y: 250, width: 133, height: 132 },
{ x: 221, y: 43, width: 112, height: 111 },
{ x: 247, y: 231, width: 106, height: 107 },
{ x: 566, y: 67, width: 104, height: 104 },
{ x: 451, y: 176, width: 122, height: 122 }
]
const expectedFaceLandmarks = [
[new Point(117, 58), new Point(156, 63), new Point(141, 86), new Point(109, 98), new Point(147, 104)],
[new Point(82, 292), new Point(134, 304), new Point(104, 330), new Point(72, 342), new Point(120, 353)],
[new Point(261, 82), new Point(306, 83), new Point(282, 113), new Point(257, 124), new Point(306, 126)],
[new Point(277, 273), new Point(318, 273), new Point(295, 300), new Point(279, 311), new Point(316, 313)],
[new Point(607, 110), new Point(645, 115), new Point(626, 138), new Point(601, 144), new Point(639, 150)],
[new Point(489, 224), new Point(534, 223), new Point(507, 250), new Point(493, 271), new Point(530, 270)]
]
describe('mtcnn', () => {
let imgEl: HTMLImageElement
beforeAll(async () => {
const img = await (await fetch('base/test/images/faces.jpg')).blob()
imgEl = await faceapi.bufferToImage(img)
})
describeWithNets('uncompressed weights', { withMtcnn: { quantized: false } }, ({ mtcnn }) => {
function expectResults(
results: MtcnnResult[],
boxOrder: number[],
maxBoxDelta: number,
maxLandmarkPointsDelta: number
) {
results.forEach((result, i) => {
const { faceDetection, faceLandmarks } = result
expect(faceDetection instanceof faceapi.FaceDetection).toBe(true)
expect(faceLandmarks instanceof faceapi.FaceLandmarks5).toBe(true)
expectRectClose(faceDetection.getBox(), expectedBoxes[boxOrder[i]], maxBoxDelta)
faceLandmarks.getPositions().forEach((pt, j) => expectPointClose(pt, expectedFaceLandmarks[boxOrder[i]][j], maxLandmarkPointsDelta))
expectMaxDelta(faceDetection.getScore(), 0.99, 0.01)
})
}
it('minFaceSize = 20, finds all faces', async () => {
const forwardParams = {
minFaceSize: 20
}
const results = await mtcnn.forward(imgEl, forwardParams)
expect(results.length).toEqual(6)
expectResults(results, [0, 1, 2, 3, 4, 5], 1, 1)
})
it('minFaceSize = 80, finds all faces', async () => {
const forwardParams = {
minFaceSize: 80
}
const results = await mtcnn.forward(imgEl, forwardParams)
expect(results.length).toEqual(6)
expectResults(results, [0, 5, 3, 1, 2, 4], 12, 12)
})
it('all optional params passed, finds all faces', async () => {
const forwardParams = {
maxNumScales: 10,
scaleFactor: 0.8,
scoreThresholds: [0.8, 0.8, 0.9],
minFaceSize: 20
}
const results = await mtcnn.forward(imgEl, forwardParams)
expect(results.length).toEqual(6)
expectResults(results, [5, 1, 4, 3, 2, 0], 6, 10)
})
it('scale steps passed, finds all faces', async () => {
const forwardParams = {
scaleSteps: [0.6, 0.4, 0.2, 0.15, 0.1, 0.08, 0.02]
}
const results = await mtcnn.forward(imgEl, forwardParams)
expect(results.length).toEqual(6)
expectResults(results, [5, 1, 3, 0, 2, 4], 7, 15)
})
})
describe('no memory leaks', () => {
describe('NeuralNetwork, uncompressed model', () => {
it('disposes all param tensors', async () => {
await expectAllTensorsReleased(async () => {
const res = await fetch('base/weights/uncompressed/mtcnn_model.weights')
const weights = new Float32Array(await res.arrayBuffer())
const net = faceapi.createMtcnn(weights)
net.dispose()
})
})
})
describe('NeuralNetwork, quantized model', () => {
it('disposes all param tensors', async () => {
await expectAllTensorsReleased(async () => {
const net = new faceapi.Mtcnn()
await net.load('base/weights')
net.dispose()
})
})
})
})
})
\ No newline at end of file
- import { tf } from '../../src';
+ import * as tf from '@tensorflow/tfjs-core';
  import { NetInput } from '../../src/NetInput';
  import { toNetInput } from '../../src/toNetInput';
  import { bufferToImage, createCanvasFromMedia } from '../../src/utils';
...
  import * as tf from '@tensorflow/tfjs-core';
+ import { IRect } from '../build/Rect';
  import * as faceapi from '../src/';
  import { NeuralNetwork } from '../src/commons/NeuralNetwork';
+ import { IPoint } from '../src/';

  export function zeros(length: number): Float32Array {
    return new Float32Array(length)
@@ -25,6 +27,27 @@ export function tensor3D() {
    return tf.tensor3d([[[0]]])
  }

+ export function expectPointClose(
+   result: IPoint,
+   expectedPoint: IPoint,
+   maxDelta: number
+ ) {
+   const { x, y } = result
+   expectMaxDelta(x, expectedPoint.x, maxDelta)
+   expectMaxDelta(y, expectedPoint.y, maxDelta)
+ }
+
+ export function expectRectClose(
+   result: IRect,
+   expectedBox: IRect,
+   maxDelta: number
+ ) {
+   const { width, height } = result
+   expectPointClose(result, expectedBox, maxDelta)
+   expectMaxDelta(width, expectedBox.width, maxDelta)
+   expectMaxDelta(height, expectedBox.height, maxDelta)
+ }
  export type WithNetOptions = {
    quantized?: boolean
  }

@@ -33,12 +56,14 @@ export type InjectNetArgs = {
    faceDetectionNet: faceapi.FaceDetectionNet
    faceLandmarkNet: faceapi.FaceLandmarkNet
    faceRecognitionNet: faceapi.FaceRecognitionNet
+   mtcnn: faceapi.Mtcnn
  }

  export type DescribeWithNetsOptions = {
    withFaceDetectionNet?: WithNetOptions
    withFaceLandmarkNet?: WithNetOptions
    withFaceRecognitionNet?: WithNetOptions
+   withMtcnn?: WithNetOptions
  }
  async function loadNetWeights(uri: string): Promise<Float32Array> {

@@ -63,16 +88,17 @@ export function describeWithNets(
  ) {
    describe(description, () => {
      let faceDetectionNet: faceapi.FaceDetectionNet = new faceapi.FaceDetectionNet()
      let faceLandmarkNet: faceapi.FaceLandmarkNet = new faceapi.FaceLandmarkNet()
      let faceRecognitionNet: faceapi.FaceRecognitionNet = new faceapi.FaceRecognitionNet()
+     let mtcnn: faceapi.Mtcnn = new faceapi.Mtcnn()

      beforeAll(async () => {
        const {
          withFaceDetectionNet,
          withFaceLandmarkNet,
-         withFaceRecognitionNet
+         withFaceRecognitionNet,
+         withMtcnn
        } = options

        if (withFaceDetectionNet) {

@@ -93,15 +119,22 @@ export function describeWithNets(
            !withFaceRecognitionNet.quantized && 'face_recognition_model.weights'
          )
        }
+       if (withMtcnn) {
+         await initNet<faceapi.Mtcnn>(
+           mtcnn,
+           !withMtcnn.quantized && 'mtcnn_model.weights'
+         )
+       }
      })

      afterAll(() => {
        faceDetectionNet && faceDetectionNet.dispose()
        faceLandmarkNet && faceLandmarkNet.dispose()
        faceRecognitionNet && faceRecognitionNet.dispose()
+       mtcnn && mtcnn.dispose()
      })

-     specDefinitions({ faceDetectionNet, faceLandmarkNet, faceRecognitionNet })
+     specDefinitions({ faceDetectionNet, faceLandmarkNet, faceRecognitionNet, mtcnn })
    })
  }
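The expectPointClose/expectRectClose helpers added to the test utils above replace the per-file copy deleted from the faceDetectionNet spec. A hypothetical usage sketch with concrete values (the first argument stands in for a detection result from the specs above):

// passes: every coordinate is within maxDelta = 1 of the expected value
expectRectClose({ x: 48.4, y: 252.7, width: 104.2, height: 129.1 }, { x: 48, y: 253, width: 104, height: 129 }, 1)
expectPointClose({ x: 117.3, y: 57.8 }, new Point(117, 58), 1)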
[{"paths":["mtcnn_model-shard1"],"weights":[{"dtype":"float32","name":"pnet/conv1/weights","shape":[3,3,3,10]},{"dtype":"float32","name":"pnet/conv1/biases","shape":[10]},{"dtype":"float32","name":"pnet/prelu1_alpha","shape":[10]},{"dtype":"float32","name":"pnet/conv2/weights","shape":[3,3,10,16]},{"dtype":"float32","name":"pnet/conv2/biases","shape":[16]},{"dtype":"float32","name":"pnet/prelu2_alpha","shape":[16]},{"dtype":"float32","name":"pnet/conv3/weights","shape":[3,3,16,32]},{"dtype":"float32","name":"pnet/conv3/biases","shape":[32]},{"dtype":"float32","name":"pnet/prelu3_alpha","shape":[32]},{"dtype":"float32","name":"pnet/conv4-1/weights","shape":[1,1,32,2]},{"dtype":"float32","name":"pnet/conv4-1/biases","shape":[2]},{"dtype":"float32","name":"pnet/conv4-2/weights","shape":[1,1,32,4]},{"dtype":"float32","name":"pnet/conv4-2/biases","shape":[4]},{"dtype":"float32","name":"rnet/conv1/weights","shape":[3,3,3,28]},{"dtype":"float32","name":"rnet/conv1/biases","shape":[28]},{"dtype":"float32","name":"rnet/prelu1_alpha","shape":[28]},{"dtype":"float32","name":"rnet/conv2/weights","shape":[3,3,28,48]},{"dtype":"float32","name":"rnet/conv2/biases","shape":[48]},{"dtype":"float32","name":"rnet/prelu2_alpha","shape":[48]},{"dtype":"float32","name":"rnet/conv3/weights","shape":[2,2,48,64]},{"dtype":"float32","name":"rnet/conv3/biases","shape":[64]},{"dtype":"float32","name":"rnet/prelu3_alpha","shape":[64]},{"dtype":"float32","name":"rnet/fc1/weights","shape":[576,128]},{"dtype":"float32","name":"rnet/fc1/biases","shape":[128]},{"dtype":"float32","name":"rnet/prelu4_alpha","shape":[128]},{"dtype":"float32","name":"rnet/fc2-1/weights","shape":[128,2]},{"dtype":"float32","name":"rnet/fc2-1/biases","shape":[2]},{"dtype":"float32","name":"rnet/fc2-2/weights","shape":[128,4]},{"dtype":"float32","name":"rnet/fc2-2/biases","shape":[4]},{"dtype":"float32","name":"onet/conv1/weights","shape":[3,3,3,32]},{"dtype":"float32","name":"onet/conv1/biases","shape":[32]},{"dtype":"float32","name":"onet/prelu1_alpha","shape":[32]},{"dtype":"float32","name":"onet/conv2/weights","shape":[3,3,32,64]},{"dtype":"float32","name":"onet/conv2/biases","shape":[64]},{"dtype":"float32","name":"onet/prelu2_alpha","shape":[64]},{"dtype":"float32","name":"onet/conv3/weights","shape":[3,3,64,64]},{"dtype":"float32","name":"onet/conv3/biases","shape":[64]},{"dtype":"float32","name":"onet/prelu3_alpha","shape":[64]},{"dtype":"float32","name":"onet/conv4/weights","shape":[2,2,64,128]},{"dtype":"float32","name":"onet/conv4/biases","shape":[128]},{"dtype":"float32","name":"onet/prelu4_alpha","shape":[128]},{"dtype":"float32","name":"onet/fc1/weights","shape":[1152,256]},{"dtype":"float32","name":"onet/fc1/biases","shape":[256]},{"dtype":"float32","name":"onet/prelu5_alpha","shape":[256]},{"dtype":"float32","name":"onet/fc2-1/weights","shape":[256,2]},{"dtype":"float32","name":"onet/fc2-1/biases","shape":[2]},{"dtype":"float32","name":"onet/fc2-2/weights","shape":[256,4]},{"dtype":"float32","name":"onet/fc2-2/biases","shape":[4]},{"dtype":"float32","name":"onet/fc2-3/weights","shape":[256,10]},{"dtype":"float32","name":"onet/fc2-3/biases","shape":[10]}]}] 
[{"paths":["mtcnn_model-shard1"],"weights":[{"dtype":"float32","name":"pnet/conv1/weights","shape":[3,3,3,10]},{"dtype":"float32","name":"pnet/conv1/bias","shape":[10]},{"dtype":"float32","name":"pnet/prelu1_alpha","shape":[10]},{"dtype":"float32","name":"pnet/conv2/weights","shape":[3,3,10,16]},{"dtype":"float32","name":"pnet/conv2/bias","shape":[16]},{"dtype":"float32","name":"pnet/prelu2_alpha","shape":[16]},{"dtype":"float32","name":"pnet/conv3/weights","shape":[3,3,16,32]},{"dtype":"float32","name":"pnet/conv3/bias","shape":[32]},{"dtype":"float32","name":"pnet/prelu3_alpha","shape":[32]},{"dtype":"float32","name":"pnet/conv4_1/weights","shape":[1,1,32,2]},{"dtype":"float32","name":"pnet/conv4_1/bias","shape":[2]},{"dtype":"float32","name":"pnet/conv4_2/weights","shape":[1,1,32,4]},{"dtype":"float32","name":"pnet/conv4_2/bias","shape":[4]},{"dtype":"float32","name":"rnet/conv1/weights","shape":[3,3,3,28]},{"dtype":"float32","name":"rnet/conv1/bias","shape":[28]},{"dtype":"float32","name":"rnet/prelu1_alpha","shape":[28]},{"dtype":"float32","name":"rnet/conv2/weights","shape":[3,3,28,48]},{"dtype":"float32","name":"rnet/conv2/bias","shape":[48]},{"dtype":"float32","name":"rnet/prelu2_alpha","shape":[48]},{"dtype":"float32","name":"rnet/conv3/weights","shape":[2,2,48,64]},{"dtype":"float32","name":"rnet/conv3/bias","shape":[64]},{"dtype":"float32","name":"rnet/prelu3_alpha","shape":[64]},{"dtype":"float32","name":"rnet/fc1/weights","shape":[576,128]},{"dtype":"float32","name":"rnet/fc1/bias","shape":[128]},{"dtype":"float32","name":"rnet/prelu4_alpha","shape":[128]},{"dtype":"float32","name":"rnet/fc2_1/weights","shape":[128,2]},{"dtype":"float32","name":"rnet/fc2_1/bias","shape":[2]},{"dtype":"float32","name":"rnet/fc2_2/weights","shape":[128,4]},{"dtype":"float32","name":"rnet/fc2_2/bias","shape":[4]},{"dtype":"float32","name":"onet/conv1/weights","shape":[3,3,3,32]},{"dtype":"float32","name":"onet/conv1/bias","shape":[32]},{"dtype":"float32","name":"onet/prelu1_alpha","shape":[32]},{"dtype":"float32","name":"onet/conv2/weights","shape":[3,3,32,64]},{"dtype":"float32","name":"onet/conv2/bias","shape":[64]},{"dtype":"float32","name":"onet/prelu2_alpha","shape":[64]},{"dtype":"float32","name":"onet/conv3/weights","shape":[3,3,64,64]},{"dtype":"float32","name":"onet/conv3/bias","shape":[64]},{"dtype":"float32","name":"onet/prelu3_alpha","shape":[64]},{"dtype":"float32","name":"onet/conv4/weights","shape":[2,2,64,128]},{"dtype":"float32","name":"onet/conv4/bias","shape":[128]},{"dtype":"float32","name":"onet/prelu4_alpha","shape":[128]},{"dtype":"float32","name":"onet/fc1/weights","shape":[1152,256]},{"dtype":"float32","name":"onet/fc1/bias","shape":[256]},{"dtype":"float32","name":"onet/prelu5_alpha","shape":[256]},{"dtype":"float32","name":"onet/fc2_1/weights","shape":[256,2]},{"dtype":"float32","name":"onet/fc2_1/bias","shape":[2]},{"dtype":"float32","name":"onet/fc2_2/weights","shape":[256,4]},{"dtype":"float32","name":"onet/fc2_2/bias","shape":[4]},{"dtype":"float32","name":"onet/fc2_3/weights","shape":[256,10]},{"dtype":"float32","name":"onet/fc2_3/bias","shape":[10]}]}]
\ No newline at end of file \ No newline at end of file
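The manifest change above renames every "biases" entry to "bias" and the hyphenated branch layers (conv4-1, fc2-1, ...) to underscored names (conv4_1, fc2_1, ...); dtypes and shapes are unchanged. For reference, a TypeScript rendering of the entry shape as it appears in the JSON above (a sketch, not a type exported by the library):

interface WeightEntry {
  dtype: 'float32'
  name: string      // e.g. 'pnet/conv1/weights'
  shape: number[]   // e.g. [3, 3, 3, 10]
}
interface WeightManifest {
  paths: string[]   // e.g. ['mtcnn_model-shard1']
  weights: WeightEntry[]
}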