new

parents
{
"targets": [
{
"target_name": "face-detection",
"sources": [
"main.cpp",
"face.cpp",
"face.h"
],
"cflags!": [
"-fno-exceptions",
"-std=c++11",
"-stdlib=libc++"
],
"cflags_cc!": [
"-fno-exceptions",
"-fno-rtti"
],
"link_settings": {
"libraries": [
"-lopencv_core -lopencv_surface_matching -lopencv_objdetect -lopencv_imgproc -lrealsense"
]
},
"copies": [
{
"destination": "<(PRODUCT_DIR)",
"files": [
"haarcascade_frontalface_alt.xml",
"haarcascade_frontalface_default.xml",
"face-detection.js"
]
}
]
}
]
}
\ No newline at end of file
var d=require("./face-detection.node");
d.setWorkingDir(__dirname);
console.log('set wd');
module.exports=d;
\ No newline at end of file
// License: Apache 2.0. See LICENSE file in root directory.
// Copyright(c) 2015 Intel Corporation. All Rights Reserved.
//#include <face.h>
#include <librealsense/rs.hpp>
#include <opencv2/opencv.hpp>
//#include "opencv2/core/utility.hpp"
#include "opencv2/surface_matching.hpp"
#include "opencv2/surface_matching/ppf_helpers.hpp"
#include <opencv2/surface_matching/ppf_match_3d.hpp>
#include "face.h"
/*
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <chrono>
#include <vector>
#include <sstream>
#include <iostream>
#include <algorithm>*/
#include <unistd.h>
#include <stdio.h>
using namespace std;
using namespace cv;
using namespace ppf_match_3d;
String face_cascade_name = "";
String eyes_cascade_name = "";
cv::CascadeClassifier face_cascade;
cv::CascadeClassifier eyes_cascade;
struct facePosition {int x, y, width, height; };
using namespace FaceDetection;
int Loop(FaceDetection::Mode, string filename);
///
/// \brief Loads the Haar cascade classifier(s) from the given working directory.
/// \param wd directory containing the cascade XML files (set from JS via setWorkingDir)
void FaceDetector::Init(string wd)
{
    face_cascade_name = wd + "/haarcascade_frontalface_alt.xml";
    eyes_cascade_name = wd + "/haarcascade_eye_tree_eyeglasses.xml";
    // NOTE(review): eyes_cascade is never loaded even though its path is set — confirm intent.
    // Fix: the diagnostic now names the file that failed to load instead of a bare "Error loading".
    if (!face_cascade.load(face_cascade_name)) {
        printf("--(!)Error loading %s\n", face_cascade_name.c_str());
        return;
    }
}
/*
void FaceDetector::Start(std::function<void(int)>&& faceCallback) {
this->started = true;
this->Loop([faceCallback](int i)->void{
faceCallback(i);
});
}*/
///
/// \brief Captures a face with the RealSense camera and saves it as a PLY model.
/// \param filename destination PLY file path (Loop's return value is ignored here)
void FaceDetector::SaveFace(string filename)
{
Loop(TRAIN, filename);
}
///
/// \brief Matches the live camera face against a previously saved PLY model.
/// \param filename path of the stored PLY model
/// \return vote count of the best matching pose, or 0 when no match / on error
int FaceDetector::CheckFace(string filename)
{
    // Delegate straight to the capture/match loop in RECOGNIZE mode.
    return Loop(RECOGNIZE, filename);
}
///
/// \param frame
/// \param facePos
/// \return
bool detectAndDisplay( Mat frame, facePosition &facePos ) {
if (frame.empty())
return false;
std::vector<Rect> faces;
Mat frame_gray;
cvtColor(frame, frame_gray, CV_BGR2GRAY);
equalizeHist(frame_gray, frame_gray);
//-- Detect faces
face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(180, 180));
if (faces.size() > 0) {
//facePos=faces[0];
facePos.x = faces[0].x;
facePos.y = faces[0].y;
facePos.width = faces[0].width;
facePos.height = faces[0].height;
return true;
}
return false;
}
///
/// \brief RealSense capture loop shared by training and recognition.
///
/// Streams depth+color frames, runs the 2D Haar face detector every 15th frame,
/// collects the 3D depth points whose color-image projection falls inside an
/// ellipse around the detected face, and then either writes them out as a PLY
/// model (TRAIN) or matches them against a stored model (RECOGNIZE).
///
/// \param currentMode TRAIN to save a face model, RECOGNIZE to match one
/// \param filename PLY file to write (TRAIN) or read (RECOGNIZE)
/// \return best pose's vote count in RECOGNIZE mode; 0 after TRAIN or on error
int Loop(Mode currentMode, string filename)
{
try {
// Point-pair-feature detector; 1.0/3.0 is the relative sampling step.
ppf_match_3d::PPF3DDetector detector(1.0/3.0);
rs::context ctx;
if (ctx.get_device_count() == 0) throw std::runtime_error("No device detected. Is it plugged in?");
rs::device *device = ctx.get_device(0);
device->enable_stream(rs::stream::depth, rs::preset::best_quality);
device->enable_stream(rs::stream::color, rs::preset::best_quality);
device->enable_stream(rs::stream::infrared, rs::preset::best_quality);
device->start();
// Frame counter: the (expensive) 2D face detection only runs every 15th frame.
int trainClock = 0;
bool looping=true;
while (looping) {
if (device->is_streaming())
device->wait_for_frames();
trainClock += 1;
// Retrieve our images
const uint16_t *depth_image = (const uint16_t *) device->get_frame_data(rs::stream::depth);
const uint8_t *color_image = (const uint8_t *) device->get_frame_data(rs::stream::color);
// Retrieve camera parameters for mapping between depth and color
rs::intrinsics depth_intrin = device->get_stream_intrinsics(rs::stream::depth);
rs::extrinsics depth_to_color = device->get_extrinsics(rs::stream::depth, rs::stream::color);
rs::intrinsics color_intrin = device->get_stream_intrinsics(rs::stream::color);
float scale = (device->get_depth_scale());
bool faceDetected = false;
/////////////////////////////////////////////////////////////////////////////////////////////
facePosition facePos;
if (trainClock % 15 == 0) {
// Wrap the raw color buffer in a Mat without copying (CV_8UC3 = 8-bit BGR).
cv::Mat frame(color_intrin.height, color_intrin.width, CV_8UC3, (uchar *) color_image);
if (detectAndDisplay(frame, facePos)) {
faceDetected = true;
}
}
if (!faceDetected)
continue;
// Worst-case sized scratch buffer: one (x, y, z, nx, ny, nz) row per depth
// pixel. The normal columns are left at zero when filled in below.
cv::Mat protoModel(depth_intrin.width * depth_intrin.height, 6, CV_32F);
/////////////////////////////////////////////////////////////////////////////////////////////
int modelPointsCount = 0;
for (int dy = 0; dy < depth_intrin.height; ++dy) {
for (int dx = 0; dx < depth_intrin.width; ++dx) {
// Retrieve the 16-bit depth value and map it into a depth in meters
uint16_t depth_value = depth_image[dy * depth_intrin.width + dx];
float depth_in_meters = depth_value * scale;
// Skip over pixels with a depth value of zero, which is used to indicate no data
if (depth_value == 0) continue;
// Map from pixel coordinates in the depth image to pixel coordinates in the color image
rs::float2 depth_pixel = {(float) dx, (float) dy};
rs::float3 depth_point = depth_intrin.deproject(depth_pixel, depth_in_meters);
rs::float3 color_point = depth_to_color.transform(depth_point);
rs::float2 color_pixel = color_intrin.project(color_point);
// Use the color from the nearest color pixel, or pure white if this point falls outside the color image
const int cx = (int) std::round(color_pixel.x),
cy = (int) std::round(color_pixel.y);
// Keep only points whose projection lies inside an ellipse centered on the
// detected face box; 0.7/0.9 shrink the radii so the crop hugs the face.
float a = cx - (facePos.x + facePos.width / 2);
float b = cy - (facePos.y + facePos.height / 2);
float ry = (facePos.height / 2) * 0.9f;
float rx = (facePos.width / 2) * 0.7f;
bool insideEllipse = ((a * a) / (rx * rx) + (b * b) / (ry * ry)) <= 1;
if (insideEllipse) {
protoModel.at<float>(modelPointsCount, 0) = depth_point.x;
protoModel.at<float>(modelPointsCount, 1) = depth_point.y;
protoModel.at<float>(modelPointsCount, 2) = depth_point.z;
protoModel.at<float>(modelPointsCount, 3) = 0;
protoModel.at<float>(modelPointsCount, 4) = 0;
protoModel.at<float>(modelPointsCount, 5) = 0;
modelPointsCount += 1;
}
}
}
if(modelPointsCount==0)
continue;
// Compact copy containing only the rows that were actually filled in above.
cv::Mat model(modelPointsCount, 6, CV_32F);
for (int k = 0; k < modelPointsCount; k++) {
model.at<float>(k, 0) = protoModel.at<float>(k, 0);
model.at<float>(k, 1) = protoModel.at<float>(k, 1);
model.at<float>(k, 2) = protoModel.at<float>(k, 2);
model.at<float>(k, 3) = protoModel.at<float>(k, 3);
model.at<float>(k, 4) = protoModel.at<float>(k, 4);
model.at<float>(k, 5) = protoModel.at<float>(k, 5);
}
switch (currentMode) {
case TRAIN:
// Save the cropped face point cloud and leave the loop.
//detector.trainModel(model);
writePLY(model, filename.c_str());
looping=false;
break;
case RECOGNIZE:
// Load the stored model, train the PPF detector on it, and match the live
// crop; return the best pose's vote count on first success.
// NOTE(review): the detector is retrained from disk on every candidate
// frame, which looks expensive — confirm this is intended.
Mat origin = loadPLYSimple(filename.c_str(), 1);
detector.trainModel(origin);
vector<Pose3DPtr> results;
detector.match(model, results);
trainClock = 1;
if (results.size() > 0) {
device->stop();
return results[0]->numVotes;
}
break;
}
}
device->stop();
return 0;
}
catch (const rs::error &e) {
std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n "
<< e.what() << std::endl;
return 0;
}
catch (const std::exception &e) {
std::cerr << e.what() << std::endl;
return 0;
}
}
\ No newline at end of file
#pragma once  // fix: header previously had no include guard

#include <sstream>
#include <vector>
#include <functional>
#include <string>

namespace FaceDetection {

/// Operating mode for the capture loop.
enum Mode {
    TRAIN,      ///< capture a face and save it as a PLY model
    RECOGNIZE   ///< match live data against a previously saved model
};

/// Haar-cascade face detection combined with 3D surface matching
/// against a RealSense depth stream.
class FaceDetector {
public:
    /// Loads the cascade classifier XML files from the given working directory.
    void Init(std::string);
    /// Captures a face from the camera and writes it to the given PLY file.
    void SaveFace(std::string);
    /// Matches the live face against the stored model; returns match votes.
    int CheckFace(std::string);
private:
    Mode currentMode;  // NOTE(review): appears unused by the visible implementation
};

}
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
#include <node.h>
#include <uv.h>
#include "face.h"
#include <functional>
namespace FaceDetection
{
using namespace v8;
using v8::Function;
using v8::FunctionCallbackInfo;
using v8::Isolate;
using v8::HandleScope;
using v8::Local;
using v8::Null;
using v8::Symbol;
using v8::Object;
using v8::String;
using v8::Number;
using v8::Value;
using namespace std;
Handle<Function> cb;
uv_thread_t texample_thread;
uv_async_t faceCheckAsyncToken;
uv_async_t faceSaveAsyncToken;
uv_loop_t *loop = uv_default_loop();
// Per-call state shared between a JS-facing entry point (Save/Check), its
// worker thread, and the after-completion callback on the main loop.
struct faceRequest
{
Persistent<Function> callback; // JS callback invoked with the result
Isolate *isolate;              // isolate the callback belongs to
string filename;               // NOTE(review): never set or read in the visible code
int result;                    // value passed to the JS callback
};
FaceDetector *faceDetector = new FaceDetector();
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///
/// \brief Runs on the main loop after SaveWorker finishes; invokes the JS callback.
/// \param req async handle whose data field carries the heap-allocated faceRequest
void AfterSave(uv_async_t *req)
{
    auto request = (faceRequest *) req->data;
    auto isolate = request->isolate;
    HandleScope scope(isolate);
    const unsigned argc = 1;
    Local<Value> argv[argc] = {Number::New(isolate, request->result)};
    Local<Function>::New(isolate, request->callback)->
            Call(isolate->GetCurrentContext()->Global(), 1, argv);
    // Fix: release the persistent handle and the faceRequest allocated in Save();
    // previously both leaked on every invocation.
    request->callback.Reset();
    req->data = nullptr;
    delete request;
}
///
/// \brief Worker-thread entry: runs the blocking face capture, then signals the loop.
/// \note The "testFace" filename is hard-coded, and request->result is never set
///       here, so the JS callback receives the value the request was created with.
void SaveWorker(void *)
{
faceDetector->SaveFace("testFace");
uv_async_send(&faceSaveAsyncToken);
}
///
/// \brief JS binding: saveFace(callback). Captures a face on a worker thread and
///        invokes callback(result) on the main loop when done.
/// \param args args[0] must be a Function.
void Save(const FunctionCallbackInfo<Value> &args)
{
Isolate *isolate = args.GetIsolate();
faceRequest *request = new faceRequest();
cb = Local<Function>::Cast(args[0]);
request->callback.Reset(isolate, cb);
request->isolate = isolate;
faceSaveAsyncToken.data = request;
// NOTE(review): uv_async_init is re-run on the same global handle on every call;
// invoking saveFace more than once without closing the handle looks unsafe — confirm.
uv_async_init(loop, &faceSaveAsyncToken, AfterSave);
uv_thread_create(&texample_thread, SaveWorker, NULL);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///
/// \brief Runs on the main loop after CheckWorker finishes; invokes the JS callback
///        with the recognition result (vote count).
/// \param req async handle whose data field carries the heap-allocated faceRequest
void AfterCheck(uv_async_t *req)
{
    auto request = (faceRequest *) req->data;
    auto isolate = request->isolate;
    HandleScope scope(isolate);
    const unsigned argc = 1;
    Local<Value> argv[argc] = {Number::New(isolate, request->result)};
    Local<Function>::New(isolate, request->callback)->
            Call(isolate->GetCurrentContext()->Global(), 1, argv);
    // Fix: release the persistent handle and the faceRequest allocated in Check();
    // previously both leaked on every invocation.
    request->callback.Reset();
    req->data = nullptr;
    delete request;
}
///
/// \brief Worker-thread entry: runs the blocking recognition, stores the vote
///        count in the pending request, then signals the main loop.
/// \note The "testFace" filename is hard-coded, mirroring SaveWorker.
void CheckWorker(void *)
{
int res = faceDetector->CheckFace("testFace");
auto request = (faceRequest *) faceCheckAsyncToken.data;
request->result=res;
uv_async_send(&faceCheckAsyncToken);
}
///
/// \brief JS binding: checkFace(callback). Matches the live face on a worker
///        thread and invokes callback(voteCount) on the main loop when done.
/// \param args args[0] must be a Function.
void Check(const FunctionCallbackInfo<Value> &args)
{
Isolate *isolate = args.GetIsolate();
faceRequest *request = new faceRequest();
cb = Local<Function>::Cast(args[0]);
request->callback.Reset(isolate, cb);
request->isolate = isolate;
faceCheckAsyncToken.data = request;
// NOTE(review): same repeated uv_async_init concern as in Save() — confirm.
uv_async_init(loop, &faceCheckAsyncToken, AfterCheck);
uv_thread_create(&texample_thread, CheckWorker, NULL);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///
/// \param args
void SetWorkingDir(const FunctionCallbackInfo<Value> &args)
{
v8::String::Utf8Value param1(args[0]->ToString());
std::string foo = std::string(*param1);
faceDetector->Init(foo);
}
// Addon entry point: registers the exported methods on the `exports` object.
void init(Local<Object> exports, Local<Object> module)
{
NODE_SET_METHOD(exports, "saveFace", Save);
NODE_SET_METHOD(exports, "checkFace", Check);
NODE_SET_METHOD(exports, "setWorkingDir", SetWorkingDir);
}
NODE_MODULE(addon, init)
}
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment