
Android Face Landmark Detection ASM Demo (Active Shape Model on Android)



Face landmark detection on Android is very popular at the moment. I spent my graduate studies working on facial feature processing and used ASM (the Active Shape Model) for quite a while to extract basic facial landmarks, so here I port ASM to Android via JNI. The demo app also includes a few other image-processing examples.

Because the core algorithm of ASM (the Active Shape Model) is fairly involved, I will not describe it here. I have previously written a detailed introduction with the full derivation; interested readers can follow the link below:
ASM (Active Shape Model) Algorithm Explained

Next, the implementation of the app.
First, the project source code:
Android ASM Demo
The README of that repository explains in detail how to set up the runtime environment; please read it carefully.
The project uses both Android JNI development and OpenCV4Android, so configuration is fairly involved. For setting up Android JNI see: Android JNI; for using OpenCV on Android see: Android OpenCV.
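
For orientation, the sketch below shows one common way to bring OpenCV up on the Java side when it is packaged statically with the app. OpenCVLoader is part of the OpenCV Android SDK, while the OpenCvBootstrap class name is invented for illustration and is not part of the demo (the demo's own library loading appears in the NativeCode class further down):

package com.example.asm;

import org.opencv.android.OpenCVLoader;

public class OpenCvBootstrap {
    // Bring up the statically packaged OpenCV Java bindings before any
    // org.opencv.* class or native call is used. The JNI module itself is
    // loaded separately in the NativeCode class shown later in this post.
    public static void init() {
        if (!OpenCVLoader.initDebug()) {
            throw new RuntimeException("OpenCV initialization failed");
        }
    }
}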

There is quite a lot of code, so the best way to understand how the project works is to download the source and read through it.

First, the native C++ code. The code below is responsible for calling the C interface provided by stasm:

#include <jni.h>
#include <time.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>

#include <android/log.h>
#include "./stasm/stasm_lib.h"

using namespace cv;
using namespace std;

CascadeClassifier cascade;
bool init = false;
const String APP_DIR = "/data/data/com.example.asm/app_data/";

extern "C" {
/*
 * do Canny edge detect
 */
JNIEXPORT void JNICALL Java_com_example_asm_NativeCode_DoCanny(JNIEnv* env,
        jobject obj, jlong matSrc, jlong matDst, jdouble threshold1 = 50,
        jdouble threshold2 = 150, jint aperatureSize = 3) {

    Mat * img = (Mat *) matSrc;
    Mat * dst = (Mat *) matDst;

    // convert the source to grayscale first, then run Canny on the gray image
    Mat gray;
    cvtColor(*img, gray, COLOR_BGR2GRAY);
    Canny(gray, *dst, threshold1, threshold2, aperatureSize);
}

/*
 * face detection
 * matDst: face region
 * scaleFactor = 1.1
 * minNeighbors = 2
 * minSize = 30 * 30
 */
JNIEXPORT void JNICALL Java_com_example_asm_NativeCode_FaceDetect(JNIEnv* env,
        jobject obj, jlong matSrc, jlong matDst, jdouble scaleFactor, jint minNeighbors, jint minSize) {

    Mat * src = (Mat *) matSrc;
    Mat * dst = (Mat *) matDst;

    float factor = 0.3;
    Mat img;
    resize(*src, img, Size((*src).cols * factor, (*src).rows * factor));

    String cascadeFile = APP_DIR + "haarcascade_frontalface_alt2.xml";

    if (!init) {
        cascade.load(cascadeFile);
        init = true;
    }

    if (cascade.empty() != true) {
        vector<Rect> faces;
        cascade.detectMultiScale(img, faces, scaleFactor, minNeighbors, 0
                | CV_HAAR_FIND_BIGGEST_OBJECT
                | CV_HAAR_DO_ROUGH_SEARCH
                | CV_HAAR_SCALE_IMAGE, Size(minSize, minSize));

        for (int i = 0; i < faces.size(); i++) {
            Rect rect = faces[i];
            rect.x /= factor;
            rect.y /= factor;
            rect.width /= factor;
            rect.height /= factor;

            if (i == 0) {
                (*src)(rect).copyTo(*dst);
            }

            rectangle(*src, rect.tl(), rect.br(), Scalar(0, 255, 0, 255), 3);
        }
    }
}

/*
 *  do ASM
 *  error code:
 *  -1: illegal input Mat
 *  -2: ASM initialize error
 *  -3: no face detected
 */
JNIEXPORT jintArray JNICALL Java_com_example_asm_NativeCode_FindFaceLandmarks(
        JNIEnv* env, jobject, jlong matAddr, jfloat ratioW, jfloat ratioH) {
    const char * PATH = APP_DIR.c_str();

    clock_t StartTime = clock();
    jintArray arr = env->NewIntArray(2 * stasm_NLANDMARKS);
    jint *out = env->GetIntArrayElements(arr, 0);

    Mat img = *(Mat *) matAddr;
    cvtColor(img, img, COLOR_BGR2GRAY);

    if (!img.data) {
        out[0] = -1; // error code: -1(illegal input Mat)
        out[1] = -1;
        img.release();
        env->ReleaseIntArrayElements(arr, out, 0);
        return arr;
    }

    int foundface;
    float landmarks[2 * stasm_NLANDMARKS]; // x,y coords

    if (!stasm_search_single(&foundface, landmarks, (const char*) img.data,
            img.cols, img.rows, "", PATH)) {
        out[0] = -2; // error code: -2(ASM initialize failed)
        out[1] = -2;
        img.release();
        env->ReleaseIntArrayElements(arr, out, 0);
        return arr;
    }

    if (!foundface) {
        out[0] = -3; // error code: -3(no face found)
        out[1] = -3;
        img.release();
        env->ReleaseIntArrayElements(arr, out, 0);
        return arr;
    } else {
        for (int i = 0; i < stasm_NLANDMARKS; i++) {
            out[2 * i] = cvRound(landmarks[2 * i] * ratioW);
            out[2 * i + 1] = cvRound(landmarks[2 * i + 1] * ratioH);
        }
    }
    double TotalAsmTime = double(clock() - StartTime) / CLOCKS_PER_SEC;
    __android_log_print(ANDROID_LOG_INFO, "com.example.asm.native",
            "running in native code, Stasm Ver:%s Img:%dx%d ---> Time:%.3f secs.",
            stasm_VERSION, img.cols, img.rows, TotalAsmTime);

    img.release();
    env->ReleaseIntArrayElements(arr, out, 0);
    return arr;
}
}

The stasm sources are too long to list here; instead, here is Android.mk, the makefile for the JNI code on the Android side:

LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
OPENCV_CAMERA_MODULES:=off
OPENCV_INSTALL_MODULES:=on
OPENCV_LIB_TYPE:=STATIC
ifeq ($(wildcard $(OPENCV_MK_PATH)),)
#try to load OpenCV.mk from default install location
include /home/wesong/software/OpenCV-2.4.10-android-sdk/sdk/native/jni/OpenCV.mk
else
include $(OPENCV_MK_PATH)
endif
LOCAL_MODULE    := Native

FILE_LIST := $(wildcard $(LOCAL_PATH)/stasm/*.cpp) \
             $(wildcard $(LOCAL_PATH)/stasm/MOD_1/*.cpp)

LOCAL_SRC_FILES := Native.cpp \
                   $(FILE_LIST:$(LOCAL_PATH)/%=%)

LOCAL_LDLIBS +=  -llog -ldl
include $(BUILD_SHARED_LIBRARY)

# other library
include $(CLEAR_VARS)
LOCAL_MODULE := opencv_java-prebuild
LOCAL_SRC_FILES := libopencv_java.so
include $(PREBUILT_SHARED_LIBRARY)

One thing needs special attention: when building the JNI code on Ubuntu, the NDK deletes shared libraries that already exist in the output directory. This project needs to reference libopencv_java.so shipped with OpenCV4Android, yet every JNI build removed that .so again. The workaround is the small trick in the last part of the Android.mk above: declare libopencv_java.so as a prebuilt shared library so the NDK puts it back.

This confused me for a long time and cost a lot of time to track down; the problem does not occur on Windows.
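
Since the Java-side OpenCV classes (Mat, Imgproc, Utils and so on) are backed by libopencv_java.so, that prebuilt library also has to be loaded into the process alongside the JNI module. A minimal sketch of loading both explicitly is shown below; the LibraryLoader class name is illustrative and the demo may do this elsewhere:

package com.example.asm;

public class LibraryLoader {
    static {
        // Assumed loading order: OpenCV's shared runtime first,
        // then the Native module produced by the Android.mk above.
        System.loadLibrary("opencv_java");
        System.loadLibrary("Native");
    }
}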

Next is the Java class that declares and loads the native JNI code:

package com.example.asm;

public class NativeCode {
    static {
        System.loadLibrary("Native");
    }

    /*
     * Canny edge detect
     * threshold1 = 50
     * threshold2 = 150
     * aperatureSize = 3
     */
    public static native void DoCanny(long matAddr_src, long matAddr_dst, double threshold1,
            double threshold2, int aperatureSize);

    /*
     * do face detect
     * scaleFactor = 1.1
     * minNeighbors = 2
     * minSize = 30 (30 * 30)
     */
    public static native void FaceDetect(long matAddr_src, long matAddr_dst,
            double scaleFactor, int minNeighbors, int minSize);

    /*
     * do ASM
     * find landmarks
     */
    public static native int[] FindFaceLandmarks(long matAddr, float ratioW, float ratioH);
}
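
The jlong parameters in these declarations are the native addresses of OpenCV Mat objects, obtained on the Java side with Mat.getNativeObjAddr(); the same pattern applies to DoCanny and FaceDetect. Below is a minimal call sketch that also checks the error codes documented in the native code; the wrapper class and method names are illustrative, not part of the demo:

package com.example.asm;

import org.opencv.core.Mat;

public class NativeCallSketch {
    // Illustrative wrapper: pass the Mat's native address into the ASM call
    // and check the error codes (-1, -2, -3) written into the result array.
    public static int[] findLandmarks(Mat src) {
        int[] points = NativeCode.FindFaceLandmarks(src.getNativeObjAddr(), 1.0f, 1.0f);
        if (points.length >= 2 && points[0] < 0) {
            throw new IllegalStateException("stasm error code: " + points[0]);
        }
        return points;
    }
}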

Then comes the main program. It contains a number of tricks to keep the computation efficient on Android: ASM is computationally heavy and takes a significant amount of time on a phone, so it must not run on the UI thread.

In this app, the ASM landmark search runs in an AsyncTask:

    private class AsyncAsm extends AsyncTask<Mat, Void, List<Integer>> {
        private Context context;
        private Mat src;

        public AsyncAsm(Context context) {
            this.context = context;
        }

        @Override
        protected List<Integer> doInBackground(Mat... mat0) {
            List<Integer> list = new ArrayList<Integer>();
            Mat src = mat0[0];
            this.src = src;

            int[] points = NativeImageUtil.FindFaceLandmarks(src, 1, 1);
            for (int i = 0; i < points.length; i++) {
                list.add(points[i]);
            }

            return list;
        }

        // run on UI thread
        @Override
        protected void onPostExecute(List<Integer> list) {
            MainActivity.this.drawAsmPoints(this.src, list);
        }
    };
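
The task might be launched from MainActivity like this, for example when the user asks for ASM on the latest frame (a sketch; the actual trigger in the demo may differ):

// Inside MainActivity: run ASM asynchronously on the most recent camera frame
// (currentFrame is the Mat filled in by onPreviewFrame() below).
new AsyncAsm(MainActivity.this).execute(currentFrame);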

The main screen also performs face detection in real time; the detection runs on a separate thread:

    @Override
    public void onPreviewFrame(byte[] data, Camera camera) {
        Log.d(TAG, "onPreviewFrame");

        Size size = camera.getParameters().getPreviewSize();
        Bitmap bitmap = ImageUtils.yuv2bitmap(data, size.width, size.height);
        Mat src = new Mat();
        Utils.bitmapToMat(bitmap, src);
        src.copyTo(currentFrame);

        Log.d("com.example.asm.CameraPreview", "image size: w: " + src.width()
                + " h: " + src.height());

        // do canny
        Mat canny_mat = new Mat();
        Imgproc.Canny(src, canny_mat, Params.CannyParams.THRESHOLD1,
                Params.CannyParams.THRESHOLD2);
        Bitmap canny_bitmap = ImageUtils.mat2Bitmap(canny_mat);

        iv_canny.setImageBitmap(canny_bitmap);

        // do face detect in Thread
        faceDetectThread.assignTask(Params.DO_FACE_DETECT, src);
    }
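
The ImageUtils.yuv2bitmap helper used above is not reproduced in this post. The sketch below shows what such an NV21-to-Bitmap conversion typically looks like with the standard android.graphics.YuvImage API; it is an assumption about the helper, not the project's actual code:

package com.example.asm;

import java.io.ByteArrayOutputStream;

import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.ImageFormat;
import android.graphics.Rect;
import android.graphics.YuvImage;

public class YuvConversionSketch {
    // Convert an NV21 camera preview frame to a Bitmap via an in-memory JPEG.
    public static Bitmap yuv2bitmap(byte[] data, int width, int height) {
        YuvImage yuv = new YuvImage(data, ImageFormat.NV21, width, height, null);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        yuv.compressToJpeg(new Rect(0, 0, width, height), 90, out);
        byte[] jpeg = out.toByteArray();
        return BitmapFactory.decodeByteArray(jpeg, 0, jpeg.length);
    }
}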

The faceDetectThread used in onPreviewFrame() is defined as follows:

package com.example.asm;

import org.opencv.core.Mat;

import android.content.Context;
import android.os.Handler;
import android.os.Looper;
import android.os.Message;

public class FaceDetectThread extends Thread {
    private final String TAG = "com.example.asm.FaceDetectThread";

    private Context mContext;
    private Handler mHandler;

    private ImageUtils imageUtils;

    public FaceDetectThread(Context context) {
        mContext = context;
        imageUtils = new ImageUtils(context);
    }

    public void assignTask(int id, Mat src) {

        // do face detect
        if (id == Params.DO_FACE_DETECT) {
            Message msg = new Message();
            msg.what = Params.DO_FACE_DETECT;
            msg.obj = src;
            this.mHandler.sendMessage(msg);
        }
    }

    @Override
    public void run() {
        Looper.prepare();
        mHandler = new Handler() {
            @Override
            public void handleMessage(Message msg) {
                if (msg.what == Params.DO_FACE_DETECT) {
                    Mat detected = new Mat();
                    Mat face = new Mat();
                    Mat src = (Mat) msg.obj;
                    detected = imageUtils.detectFacesAndExtractFace(src, face);

                    Message uiMsg = new Message();
                    uiMsg.what = Params.FACE_DETECT_DONE;
                    uiMsg.obj = detected;
                    // send Message to UI
                    ((MainActivity) mContext).mHandler.sendMessage(uiMsg);
                }
            }
        };
        Looper.loop();
    }
}
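
For completeness, here is a sketch of how MainActivity might start this thread and consume the FACE_DETECT_DONE message. Only mHandler, faceDetectThread, Params.FACE_DETECT_DONE, ImageUtils.mat2Bitmap and FaceDetectThread come from the code above; the other names (the MainActivitySketch class and the iv_face ImageView) are illustrative stand-ins for the real MainActivity, which FaceDetectThread casts its context to:

package com.example.asm;

import org.opencv.core.Mat;

import android.app.Activity;
import android.os.Bundle;
import android.os.Handler;
import android.os.Looper;
import android.os.Message;
import android.widget.ImageView;

public class MainActivitySketch extends Activity {
    public Handler mHandler;
    private FaceDetectThread faceDetectThread;
    private ImageView iv_face; // illustrative view for the detected face

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);

        // Receive the result messages posted by FaceDetectThread.
        mHandler = new Handler(Looper.getMainLooper()) {
            @Override
            public void handleMessage(Message msg) {
                if (msg.what == Params.FACE_DETECT_DONE) {
                    Mat detected = (Mat) msg.obj;
                    iv_face.setImageBitmap(ImageUtils.mat2Bitmap(detected));
                }
            }
        };

        // Start the worker thread once; frames are handed to it in onPreviewFrame().
        faceDetectThread = new FaceDetectThread(this);
        faceDetectThread.start();
    }
}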

That is already quite a lot of code, so please refer to the full source for the details.
Below are a few screenshots of the app. They were taken on a Redmi 1S, which is not a particularly powerful phone, so please excuse the performance.
Thanks to Google for the Audrey Hepburn test photo; please credit the original source when reposting.

After the app starts:
(screenshot)
Since the ASM computation is fairly time-consuming, a button is provided that runs ASM on the most recently detected face. After the ASM computation:
(screenshot)

Tapping the fourth panel shows the image with the ASM landmark points:
(screenshot)

Tapping the second panel (the face-detection window) opens a face-detection Activity:
(screenshot)

Tapping the third panel opens a Canny edge-detection Activity:
(screenshot)

 
