Android Development (2): FaceMap, a Face Recognition Photo Album
This post covers an app I built as an undergraduate: a face recognition photo album. It mainly walks through the JNI layer and the business logic. The final code will be published on GitHub.
The Algorithm
Deep learning wasn't yet popular at the time, so I used the classic PCA approach: reduce the dimensionality and use the projection directly as the feature vector. Face detection uses OpenCV's Haar-cascade detector. By today's standards the performance is rather poor. I won't go into the algorithms here; if you're interested, see my other posts.
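For readers who want a rough picture anyway, the C++ side essentially boils down to the sketch below: run OpenCV's Haar cascade on a grayscale image, crop and normalize each detected face, and project the flattened pixels onto a PCA basis learned from the training faces. This is only an illustrative sketch against the OpenCV 2.4 C++ API, not the project's actual code; the 100×100 crop size, the detector parameters, and the helper names (detectFaces, trainPca, extractFeature) are all placeholders.

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>

// Detect faces with a Haar cascade and return fixed-size grayscale crops.
std::vector<cv::Mat> detectFaces(const cv::Mat& gray, cv::CascadeClassifier& cascade) {
    std::vector<cv::Rect> rects;
    cascade.detectMultiScale(gray, rects, 1.1, 3, 0, cv::Size(60, 60));

    std::vector<cv::Mat> faces;
    for (size_t i = 0; i < rects.size(); i++) {
        cv::Mat face;
        cv::resize(gray(rects[i]), face, cv::Size(100, 100));  // normalize crop size
        faces.push_back(face);
    }
    return faces;
}

// Learn a PCA ("eigenface") basis from training faces.
// trainRows: one flattened face per row, CV_32F.
cv::PCA trainPca(const cv::Mat& trainRows, int numComponents) {
    return cv::PCA(trainRows, cv::Mat(), cv::PCA::DATA_AS_ROW, numComponents);
}

// Project one face into the low-dimensional PCA space; the result is the feature vector.
cv::Mat extractFeature(const cv::PCA& pca, const cv::Mat& face) {
    cv::Mat row = face.reshape(1, 1);   // flatten to a 1 x (100*100) row vector
    row.convertTo(row, CV_32F);
    return pca.project(row);
}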
System Architecture
The system is split into two parts: the algorithm, and the UI plus business logic. Since the algorithm part is written in C++, it has to be wrapped behind Java's JNI interface. The UI uses a waterfall-flow (staggered grid) layout in a minimalist three-column style.
The business logic is:
1. The user opens the album, and the system's photos are loaded automatically;
2. The user taps a photo: if it has not been processed before, faces are detected and features are extracted; if it has, the stored face locations are displayed;
3. The user taps a face and labels it manually;
4. In the third column, photos are grouped automatically based on face recognition.
The drawback of this flow is that the user has to label faces by hand at the start; in hindsight, a clustering algorithm could probably replace that step.
Wrapping the Algorithm with JNI
Since I no longer do development, I've honestly forgotten how JNI code is written. So let me just paste the code:
package cn.edu.zju.srtp.facemap.algorithm;

import java.io.File;
import java.util.Vector;

import org.opencv.core.Mat;

import android.content.Context;
import android.util.Log;

public class NativeFaceRecognizer {

    public static final int LBPH_FACERECOGNIZER = 1;
    public static final int FISHER_FACERECOGNIZER = 2;
    public static final int EIGEN_FACERECOGNIZER = 3;

    private final static String TAG = "NativeFaceRecognizer";

    public final static String LBPH_FILENAME = "lbph_model.xml";
    public final static String FISHER_FILENAME = "fisher_model.xml";
    public final static String EIGEN_FILENAME = "eigen_model.xml";

    /** Records which kind of faceRecognizer has been created. */
    private int mState = 1;

    /** Pointer to the native faceRecognizer, with the initial value -1 (important). */
    private long mNativeObj = -1;

    private Context mContext;

    static {
        Log.d(TAG, "NativeFaceRecognizer static initial block enter");
        System.loadLibrary("opencv_java");
        System.loadLibrary("face_rec");
        Log.d(TAG, "NativeFaceRecognizer static initial block exit");
    }

    public NativeFaceRecognizer(Context context, int state) {
        mContext = context;
        mState = state;
        createFaceRecognizer();
        load();
    }

    /** Destroy the current model first, then create a FisherFaceRecognizer model. */
    public void createFisherFaceRecognizer() {
        destroy();
        mState = FISHER_FACERECOGNIZER;
        mNativeObj = nativeCreateFisherFaceRecognizer();
    }

    /** Destroy the current model first, then create an LBPHFaceRecognizer model. */
    public void createLBPHFaceRecognizer() {
        destroy();
        mState = LBPH_FACERECOGNIZER;
        mNativeObj = nativeCreateLBPHFaceRecognizer();
    }

    /** Destroy the current model first, then create an EigenFaceRecognizer model. */
    public void createEigenFaceRecognizer() {
        destroy();
        mState = EIGEN_FACERECOGNIZER;
        mNativeObj = nativeCreateEigenFaceRecognizer();
    }

    /**
     * Train the model. The input images should be grayscale,
     * and the number of input images and labels should be equal.
     */
    public void train(Vector<Mat> images, Vector<Integer> labels_vec) {
        if (mNativeObj == -1) {
            Log.d(TAG, "Error: no faceRecognizer for training");
            return;
        }
        if (images.size() != labels_vec.size()) {
            Log.d(TAG, "The number of input images and labels are not equal!");
            return;
        }
        // Collect the native addresses of the input Mats
        long[] imagesAddr = new long[images.size()];
        int i = 0;
        int[] labels = new int[labels_vec.size()];
        for (Mat image : images) {
            if (image.width() != image.height() || image.channels() != 1 || image.width() <= 0) {
                Log.d(TAG, "The input images for training are illegal!"
                        + " image.width=" + image.width() + " image.height=" + image.height()
                        + " image.channels=" + image.channels());
                return;
            }
            imagesAddr[i] = image.getNativeObjAddr();
            labels[i] = labels_vec.elementAt(i).intValue();
            i++;
        }
        nativeTrain(mNativeObj, imagesAddr, labels);
    }

    public int predict(Mat image) {
        if (mNativeObj == -1) {
            Log.d(TAG, "Error: no faceRecognizer for predict.");
            return -1;
        }
        return nativePredict(mNativeObj, image.getNativeObjAddr());
    }

    /** Only the LBPH faceRecognizer can be updated incrementally. */
    public void update(Vector<Mat> images, Vector<Integer> labels_vec) {
        if (mState != LBPH_FACERECOGNIZER) {
            Log.d(TAG, "Error: this kind of model cannot be updated.");
            return;
        }
        if (mNativeObj == -1) {
            Log.d(TAG, "Error: no faceRecognizer for model update.");
            createLBPHFaceRecognizer();
        }
        // Collect the native addresses of the input Mats
        long[] imagesAddr = new long[images.size()];
        int i = 0;
        int[] labels = new int[labels_vec.size()];
        for (Mat image : images) {
            if (image.width() != image.height() || image.channels() != 1 || image.width() <= 0) {
                Log.d(TAG, "The input images for training are illegal!"
                        + " image.width=" + image.width() + " image.height=" + image.height()
                        + " image.channels=" + image.channels());
                return;
            }
            imagesAddr[i] = image.getNativeObjAddr();
            labels[i] = labels_vec.elementAt(i).intValue();
            i++;
        }
        nativeUpdate(mNativeObj, imagesAddr, labels);
    }

    /** Save the model to a file in the app's private storage. */
    public void save() {
        if (mNativeObj == -1) {
            Log.d(TAG, "Error: no faceRecognizer to save");
            return;
        }
        String path = mContext.getFilesDir() + "/" + getFileName();
        nativeSave(mNativeObj, path);
    }

    public void load() {
        if (mNativeObj == -1) {
            Log.d(TAG, "Error: no faceRecognizer to load");
            return;
        }
        String path = mContext.getFilesDir() + "/" + getFileName();
        File f = new File(path);
        if (f.exists()) {
            try {
                nativeLoad(mNativeObj, path);
            } catch (Exception e) {
                Log.d(TAG, "load file alert: model doesn't exist.");
            }
        }
    }

    public void destroy() {
        if (mNativeObj != -1) {
            nativeDestroyObject(mNativeObj);
            mNativeObj = -1;
        } else {
            Log.d(TAG, "Cannot delete an illegal pointer");
        }
    }

    /** Delete the model file. */
    public Boolean clear() {
        String fileName = getFileName();
        File f = new File(mContext.getFilesDir() + "/" + fileName);
        if (f.exists()) {
            return mContext.deleteFile(fileName);
        }
        return false;
    }

    private String getFileName() {
        String fileName;
        if (mState == LBPH_FACERECOGNIZER) {
            fileName = LBPH_FILENAME;
        } else if (mState == FISHER_FACERECOGNIZER) {
            fileName = FISHER_FILENAME;
        } else if (mState == EIGEN_FACERECOGNIZER) {
            fileName = EIGEN_FILENAME;
        } else {
            return null;
        }
        return fileName;
    }

    private void createFaceRecognizer() {
        if (mState == LBPH_FACERECOGNIZER) {
            createLBPHFaceRecognizer();
        } else if (mState == FISHER_FACERECOGNIZER) {
            createFisherFaceRecognizer();
        } else if (mState == EIGEN_FACERECOGNIZER) {
            createEigenFaceRecognizer();
        }
    }

    public int getState() {
        return mState;
    }

    private static native long nativeCreateFisherFaceRecognizer();
    private static native long nativeCreateLBPHFaceRecognizer();
    private static native long nativeCreateEigenFaceRecognizer();
    private static native void nativeTrain(long thiz, long[] images, int[] labels);
    private static native void nativeUpdate(long thiz, long[] images, int[] labels);
    private static native int nativePredict(long thiz, long image);
    private static native void nativeSave(long thiz, String fileName);
    private static native void nativeLoad(long thiz, String fileName);
    private static native void nativeDestroyObject(long thiz);
}
I vaguely remember running into a few problems while writing this. The main one: the class I wrote in C++ has to be allocated dynamically, with its pointer stored on the Java side; Java then passes that pointer back into C++ on every call. It's a Java-centric way of structuring things, and I'm not sure whether there is a better approach.
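For completeness, the native half of this pattern might look roughly like the following: each nativeCreate... function heap-allocates the recognizer and returns its address to Java as a jlong; every later call casts that jlong back into a pointer, and nativeDestroyObject frees it. This is a sketch reconstructed against the OpenCV 2.4 contrib API (cv::createLBPHFaceRecognizer and friends), not the exact source that shipped with FaceMap:

// Sketch of the native side (not the original FaceMap source), OpenCV 2.4 contrib API.
#include <jni.h>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/contrib/contrib.hpp>  // FaceRecognizer lives in contrib in OpenCV 2.4

using namespace cv;

// Keep the smart pointer alive on the heap and hand its address to Java as a jlong.
struct RecognizerHolder {
    Ptr<FaceRecognizer> model;
};

extern "C" {

JNIEXPORT jlong JNICALL
Java_cn_edu_zju_srtp_facemap_algorithm_NativeFaceRecognizer_nativeCreateLBPHFaceRecognizer(
        JNIEnv*, jclass) {
    RecognizerHolder* holder = new RecognizerHolder();
    holder->model = createLBPHFaceRecognizer();
    return reinterpret_cast<jlong>(holder);   // Java stores this in mNativeObj
}

JNIEXPORT void JNICALL
Java_cn_edu_zju_srtp_facemap_algorithm_NativeFaceRecognizer_nativeTrain(
        JNIEnv* env, jclass, jlong thiz, jlongArray images, jintArray labels) {
    RecognizerHolder* holder = reinterpret_cast<RecognizerHolder*>(thiz);

    jsize n = env->GetArrayLength(images);
    jlong* imageAddrs = env->GetLongArrayElements(images, NULL);
    jint* labelPtr = env->GetIntArrayElements(labels, NULL);

    // Each jlong is the native address of an org.opencv.core.Mat
    std::vector<Mat> mats;
    std::vector<int> labelVec;
    for (jsize i = 0; i < n; i++) {
        mats.push_back(*reinterpret_cast<Mat*>(imageAddrs[i]));
        labelVec.push_back(labelPtr[i]);
    }
    holder->model->train(mats, labelVec);

    env->ReleaseLongArrayElements(images, imageAddrs, JNI_ABORT);
    env->ReleaseIntArrayElements(labels, labelPtr, JNI_ABORT);
}

JNIEXPORT void JNICALL
Java_cn_edu_zju_srtp_facemap_algorithm_NativeFaceRecognizer_nativeDestroyObject(
        JNIEnv*, jclass, jlong thiz) {
    delete reinterpret_cast<RecognizerHolder*>(thiz);  // free the object Java was holding
}

}  // extern "C"

As far as I know, this handle-passing style is also what OpenCV's own Java bindings use (Mat.getNativeObjAddr() is exactly such a handle), so while it feels clumsy, it seems to be the conventional way to keep native state alive across JNI calls.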
Image Zooming
To implement pinch-to-zoom I spent a whole morning in the library, mostly because it was fun. Youth.
package cn.edu.zju.srtp.facemap.album;

import java.util.ArrayList;
import java.util.Calendar;

import org.opencv.core.Rect;

import android.app.AlertDialog;
import android.content.Context;
import android.content.DialogInterface;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Matrix;
import android.graphics.Paint;
import android.text.format.Time;
import android.util.AttributeSet;
import android.util.Log;
import android.view.MotionEvent;
import android.view.View;
import android.widget.EditText;
import android.widget.Toast;

import cn.edu.zju.srtp.facemap.R;
import cn.edu.zju.srtp.facemap.algorithm.Face;
import cn.edu.zju.srtp.facemap.algorithm.FaceRecManagerSingleInstance;

public class ZoomImageView extends View {

    private static final String TAG = "ZoomImageView";

    /** Status constant: initial state */
    public static final int STATUS_INIT = 1;
    /** Status constant: the image is being enlarged */
    public static final int STATUS_ZOOM_OUT = 2;
    /** Status constant: the image is being shrunk */
    public static final int STATUS_ZOOM_IN = 3;
    /** Status constant: the image is being dragged */
    public static final int STATUS_MOVE = 4;

    /** Matrix used to translate and scale the image */
    private Matrix matrix = new Matrix();

    /** The Bitmap to display */
    private Bitmap sourceBitmap;

    /** Picture frame bitmap */
    private Bitmap borderBitmap;

    /** Current status: STATUS_INIT, STATUS_ZOOM_OUT, STATUS_ZOOM_IN or STATUS_MOVE */
    private int currentStatus;

    private ArrayList<Face> mFaces = new ArrayList<Face>();

    private int mInSampleSize = 1;

    private Paint mPaint = new Paint();

    private Bitmap mBackground;

    private Time mTime = new Time();

    private long mStartTime;

    private Face mFaceClicked;

    /** Width of the ZoomImageView control */
    private int width;
    /** Height of the ZoomImageView control */
    private int height;

    /** X coordinate of the midpoint between two fingers on the screen */
    private float centerPointX;
    /** Y coordinate of the midpoint between two fingers on the screen */
    private float centerPointY;

    /** Current width of the image; changes as the image is scaled */
    private float currentBitmapWidth;
    /** Current height of the image; changes as the image is scaled */
    private float currentBitmapHeight;

    /** X coordinate of the last finger move */
    private float lastXMove = -1;
    /** Y coordinate of the last finger move */
    private float lastYMove = -1;

    /** Horizontal distance moved by the finger */
    private float movedDistanceX;
    /** Vertical distance moved by the finger */
    private float movedDistanceY;

    /** Horizontal translation of the image in the matrix */
    private float totalTranslateX;
    /** Vertical translation of the image in the matrix */
    private float totalTranslateY;

    /** Total scale ratio of the image in the matrix */
    private float totalRatio;

    /** Scale ratio produced by the current finger gesture */
    private float scaledRatio;

    /** Scale ratio when the image is first laid out */
    private float initRatio;

    /** Distance between the two fingers on the previous event */
    private double lastFingerDis;

    /** Constructor; sets the current status to STATUS_INIT. */
    public ZoomImageView(Context context, AttributeSet attrs) {
        super(context, attrs);
        currentStatus = STATUS_INIT;
        mPaint.setColor(Color.WHITE);
        mPaint.setStrokeWidth(2);
        mPaint.setStyle(Paint.Style.STROKE);
        borderBitmap = BitmapFactory.decodeResource(getResources(), R.drawable.picture_frame_default);
    }

    /** Set the bitmap to display. */
    public void setImageBitmap(Bitmap bitmap) {
        sourceBitmap = bitmap;
        invalidate();
    }

    public void setInSampleSize(int inSampleSize) {
        mInSampleSize = inSampleSize;
    }

    @SuppressWarnings("unchecked")
    public void setFaces(ArrayList<Face> faces) {
        mFaces = (ArrayList<Face>) faces.clone();
        invalidate();
    }

    @Override
    protected void onLayout(boolean changed, int left, int top, int right, int bottom) {
        super.onLayout(changed, left, top, right, bottom);
        if (changed) {
            // Record the width and height of the ZoomImageView
            width = getWidth();
            height = getHeight();
        }
    }

    @Override
    public boolean onTouchEvent(MotionEvent event) {
        if (totalRatio > initRatio) {
            getParent().requestDisallowInterceptTouchEvent(true);
        } else {
            getParent().requestDisallowInterceptTouchEvent(false);
        }
        switch (event.getActionMasked()) {
        case MotionEvent.ACTION_POINTER_DOWN:
            if (event.getPointerCount() == 2) {
                // When two fingers are on the screen, record the distance between them
                lastFingerDis = distanceBetweenFingers(event);
            }
            break;
        case MotionEvent.ACTION_DOWN:
            // mTime.setToNow();
            mStartTime = Calendar.getInstance().getTimeInMillis();
            // fall through
        case MotionEvent.ACTION_MOVE:
            if (event.getPointerCount() == 1) {
                // A single finger moving on the screen means dragging
                float xMove = event.getX();
                float yMove = event.getY();
                if (lastXMove == -1 && lastYMove == -1) {
                    lastXMove = xMove;
                    lastYMove = yMove;
                }
                currentStatus = STATUS_MOVE;
                movedDistanceX = xMove - lastXMove;
                movedDistanceY = yMove - lastYMove;
                // Boundary check: the image must not be dragged out of the view
                if (totalTranslateX + movedDistanceX > 0) {
                    movedDistanceX = 0;
                } else if (width - (totalTranslateX + movedDistanceX) > currentBitmapWidth) {
                    movedDistanceX = 0;
                }
                if (totalTranslateY + movedDistanceY > 0) {
                    movedDistanceY = 0;
                } else if (height - (totalTranslateY + movedDistanceY) > currentBitmapHeight) {
                    movedDistanceY = 0;
                }
                // Trigger onDraw() to redraw the image
                invalidate();
                lastXMove = xMove;
                lastYMove = yMove;
            } else if (event.getPointerCount() == 2) {
                // Two fingers moving on the screen means zooming
                centerPointBetweenFingers(event);
                double fingerDis = distanceBetweenFingers(event);
                if (fingerDis > lastFingerDis) {
                    currentStatus = STATUS_ZOOM_OUT;
                } else {
                    currentStatus = STATUS_ZOOM_IN;
                }
                // Limit the scale: at most 4x, and no smaller than the initial ratio
                if ((currentStatus == STATUS_ZOOM_OUT && totalRatio < 4 * initRatio)
                        || (currentStatus == STATUS_ZOOM_IN && totalRatio > initRatio)) {
                    scaledRatio = (float) (fingerDis / lastFingerDis);
                    totalRatio = totalRatio * scaledRatio;
                    if (totalRatio > 4 * initRatio) {
                        totalRatio = 4 * initRatio;
                    } else if (totalRatio < initRatio) {
                        totalRatio = initRatio;
                    }
                    // Trigger onDraw() to redraw the image
                    invalidate();
                    lastFingerDis = fingerDis;
                }
            }
            break;
        case MotionEvent.ACTION_POINTER_UP:
            if (event.getPointerCount() == 2) {
                // Reset the temporary values when a finger leaves the screen
                lastXMove = -1;
                lastYMove = -1;
            }
            break;
        case MotionEvent.ACTION_UP:
            // Reset the temporary values when the finger leaves the screen
            lastXMove = -1;
            lastYMove = -1;
            // Time t = new Time();
            // t.setToNow();
            long t = Calendar.getInstance().getTimeInMillis();
            if (t - mStartTime >= 100) {
                break;
            }
            // Treat a short touch as a tap and check whether it hit a face rectangle
            float fX = event.getX();
            float fY = event.getY();
            mFaceClicked = null;
            for (Face f : mFaces) {
                Rect faceRect = f.getFaceRect();
                float left = faceRect.x * totalRatio / mInSampleSize + totalTranslateX;
                float top = faceRect.y * totalRatio / mInSampleSize + totalTranslateY;
                float right = (faceRect.x + faceRect.width) * totalRatio / mInSampleSize + totalTranslateX;
                float bottom = (faceRect.y + faceRect.height) * totalRatio / mInSampleSize + totalTranslateY;
                if (fX >= left && fX <= right && fY >= top && fY <= bottom) {
                    mFaceClicked = f;
                }
            }
            final Face face = mFaceClicked;
            if (face == null)
                break;
            invalidate();
            if (face.getPeopleName() == null) {
                // Unlabeled face: ask for a name
                final EditText mText = new EditText(getContext());
                AlertDialog.Builder builder = new AlertDialog.Builder(getContext());
                builder.setTitle("请输入名字")
                        .setIcon(android.R.drawable.ic_dialog_info)
                        .setView(mText)
                        .setPositiveButton("确定", new DialogInterface.OnClickListener() {
                            @Override
                            public void onClick(DialogInterface dialog, int which) {
                                FaceRecManagerSingleInstance.getInstance(getContext())
                                        .update(face.getFaceId(), mText.getText().toString());
                                invalidate();
                            }
                        })
                        .setNegativeButton("取消", null).show();
            } else {
                // Labeled face: ask the user to confirm or correct the name
                AlertDialog.Builder builder = new AlertDialog.Builder(getContext());
                builder.setMessage("这是 " + face.getPeopleName() + " 吗?")
                        .setIcon(android.R.drawable.ic_dialog_info)
                        .setPositiveButton("是", new DialogInterface.OnClickListener() {
                            @Override
                            public void onClick(DialogInterface dialog, int which) {
                                FaceRecManagerSingleInstance.getInstance(getContext())
                                        .update(face.getFaceId(), face.getPeopleName());
                                invalidate();
                            }
                        })
                        .setNegativeButton("不是", new DialogInterface.OnClickListener() {
                            @Override
                            public void onClick(DialogInterface dialog, int which) {
                                final EditText mText = new EditText(getContext());
                                AlertDialog.Builder builder = new AlertDialog.Builder(getContext());
                                builder.setTitle("请输入名字")
                                        .setIcon(android.R.drawable.ic_dialog_info)
                                        .setView(mText)
                                        .setPositiveButton("确定", new DialogInterface.OnClickListener() {
                                            @Override
                                            public void onClick(DialogInterface dialog, int which) {
                                                FaceRecManagerSingleInstance.getInstance(getContext())
                                                        .update(face.getFaceId(), mText.getText().toString());
                                                invalidate();
                                            }
                                        })
                                        .setNegativeButton("取消", null)
                                        .show();
                            }
                        }).show();
            }
            break;
        default:
            break;
        }
        return true;
    }

    public Bitmap getSrcBitmap() {
        return sourceBitmap;
    }

    private void drawFaces(Canvas canvas) {
        Log.d(TAG, "drawFaces");
        for (Face face : mFaces) {
            Rect faceRect = face.getFaceRect();
            float left = faceRect.x * totalRatio / mInSampleSize + totalTranslateX;
            float top = faceRect.y * totalRatio / mInSampleSize + totalTranslateY;
            float right = (faceRect.x + faceRect.width) * totalRatio / mInSampleSize + totalTranslateX;
            float bottom = (faceRect.y + faceRect.height) * totalRatio / mInSampleSize + totalTranslateY;
            if (!face.getVerified()) {
                mPaint.setColor(Color.GREEN);
            }
            if (face.equals(mFaceClicked)) {
                mPaint.setColor(Color.YELLOW);
                mFaceClicked = null;
            }
            canvas.drawRect(left, top, right, bottom, mPaint);
            if (face.getPeopleName() != null) {
                mPaint.setTextSize((float) ((right - left) * 0.15));
                mPaint.setStrokeWidth(0);
                canvas.drawText(face.getPeopleName(), left, top - 4, mPaint);
                mPaint.setStrokeWidth(2);
            }
            mPaint.setColor(Color.WHITE);
        }
    }

    /** Decide how to draw the image according to currentStatus. */
    @Override
    protected void onDraw(Canvas canvas) {
        try {
            super.onDraw(canvas);
            switch (currentStatus) {
            case STATUS_ZOOM_OUT:
            case STATUS_ZOOM_IN:
                zoom(canvas);
                break;
            case STATUS_MOVE:
                move(canvas);
                break;
            case STATUS_INIT:
                initBitmap(canvas);
            default:
                canvas.drawBitmap(sourceBitmap, matrix, null);
                drawFaces(canvas);
                break;
            }
        } catch (Exception e) {
            Log.e(TAG, "Error in onDraw:", e);
        }
    }

    /** Scale the image. */
    private void zoom(Canvas canvas) {
        matrix.reset();
        // Scale the image by the total ratio
        matrix.postScale(totalRatio, totalRatio);
        float scaledWidth = sourceBitmap.getWidth() * totalRatio;
        float scaledHeight = sourceBitmap.getHeight() * totalRatio;
        float translateX = 0f;
        float translateY = 0f;
        // If the current image width is smaller than the view width, scale horizontally around
        // the center of the view; otherwise around the midpoint between the two fingers
        if (currentBitmapWidth < width) {
            translateX = (width - scaledWidth) / 2f;
        } else {
            translateX = totalTranslateX * scaledRatio + centerPointX * (1 - scaledRatio);
            // Boundary check: the image must not drift off screen horizontally after scaling
            if (translateX > 0) {
                translateX = 0;
            } else if (width - translateX > scaledWidth) {
                translateX = width - scaledWidth;
            }
        }
        // If the current image height is smaller than the view height, scale vertically around
        // the center of the view; otherwise around the midpoint between the two fingers
        if (currentBitmapHeight < height) {
            translateY = (height - scaledHeight) / 2f;
        } else {
            translateY = totalTranslateY * scaledRatio + centerPointY * (1 - scaledRatio);
            // Boundary check: the image must not drift off screen vertically after scaling
            if (translateY > 0) {
                translateY = 0;
            } else if (height - translateY > scaledHeight) {
                translateY = height - scaledHeight;
            }
        }
        // Translate the image after scaling so that the focal point stays fixed
        matrix.postTranslate(translateX, translateY);
        totalTranslateX = translateX;
        totalTranslateY = translateY;
        currentBitmapWidth = scaledWidth;
        currentBitmapHeight = scaledHeight;
        canvas.drawBitmap(sourceBitmap, matrix, null);
        drawFaces(canvas);
    }

    /** Translate the image. */
    private void move(Canvas canvas) {
        matrix.reset();
        // Compute the total translation from the finger movement
        float translateX = totalTranslateX + movedDistanceX;
        float translateY = totalTranslateY + movedDistanceY;
        // Scale by the existing ratio first
        matrix.postScale(totalRatio, totalRatio);
        // Then translate by the moved distance
        matrix.postTranslate(translateX, translateY);
        totalTranslateX = translateX;
        totalTranslateY = translateY;
        canvas.drawBitmap(sourceBitmap, matrix, null);
        drawFaces(canvas);
    }

    /** Initialize the image: center it, and shrink it when it is larger than the view. */
    private void initBitmap(Canvas canvas) {
        if (sourceBitmap != null) {
            matrix.reset();
            int bitmapWidth = sourceBitmap.getWidth();
            int bitmapHeight = sourceBitmap.getHeight();
            if (bitmapWidth > width || bitmapHeight > height) {
                if (bitmapWidth - width > bitmapHeight - height) {
                    // The image is wider than the view: shrink it proportionally so it fits
                    float ratio = width / (bitmapWidth * 1.0f);
                    matrix.postScale(ratio, ratio);
                    float translateY = (height - (bitmapHeight * ratio)) / 2f;
                    // Translate vertically so the image is centered
                    matrix.postTranslate(0, translateY);
                    totalTranslateY = translateY;
                    totalRatio = initRatio = ratio;
                } else {
                    // The image is taller than the view: shrink it proportionally so it fits
                    float ratio = height / (bitmapHeight * 1.0f);
                    matrix.postScale(ratio, ratio);
                    float translateX = (width - (bitmapWidth * ratio)) / 2f;
                    // Translate horizontally so the image is centered
                    matrix.postTranslate(translateX, 0);
                    totalTranslateX = translateX;
                    totalRatio = initRatio = ratio;
                }
                currentBitmapWidth = bitmapWidth * initRatio;
                currentBitmapHeight = bitmapHeight * initRatio;
            } else {
                // The image is smaller than the view in both dimensions: just center it
                float translateX = (width - sourceBitmap.getWidth()) / 2f;
                float translateY = (height - sourceBitmap.getHeight()) / 2f;
                matrix.postTranslate(translateX, translateY);
                totalTranslateX = translateX;
                totalTranslateY = translateY;
                totalRatio = initRatio = 1f;
                currentBitmapWidth = bitmapWidth;
                currentBitmapHeight = bitmapHeight;
            }
            canvas.drawBitmap(sourceBitmap, matrix, null);
            drawFaces(canvas);
        }
    }

    /** Compute the distance between two fingers. */
    private double distanceBetweenFingers(MotionEvent event) {
        float disX = Math.abs(event.getX(0) - event.getX(1));
        float disY = Math.abs(event.getY(0) - event.getY(1));
        return Math.sqrt(disX * disX + disY * disY);
    }

    /** Compute the midpoint between two fingers. */
    private void centerPointBetweenFingers(MotionEvent event) {
        float xPoint0 = event.getX(0);
        float yPoint0 = event.getY(0);
        float xPoint1 = event.getX(1);
        float yPoint1 = event.getY(1);
        centerPointX = (xPoint0 + xPoint1) / 2;
        centerPointY = (yPoint0 + yPoint1) / 2;
    }
}
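The only non-obvious part of zoom() is the translation update that keeps the pinch focal point fixed. A screen point relates to an image point p by screen = translate + ratio · p. If the fingers' midpoint c must stay over the same image point while the ratio changes from r to r · s, then solving c = t_new + r · s · p with p = (c − t_old) / r gives

    t_new = t_old · s + c · (1 − s)

which is exactly the line translateX = totalTranslateX * scaledRatio + centerPointX * (1 - scaledRatio) in the code (and likewise for Y).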
Known Issues
The waterfall-flow view renders noticeably slowly, so there can be a long wait, and the app occasionally crashes.