OPENCV+JAVA实现人脸识别
程序员文章站
2022-06-19 23:42:54
本文实例为大家分享了java实现人脸识别的具体代码,供大家参考,具体内容如下
官方下载 ,以win7为例,下载opencv-2.4.13.3-vc14.exe
安装后...
本文实例为大家分享了java实现人脸识别的具体代码,供大家参考,具体内容如下
官方下载 ,以win7为例,下载opencv-2.4.13.3-vc14.exe
安装后,在build目录下 d:\opencv\build\java,获取opencv-2413.jar,copy至项目目录
同时需要dll文件 与 各 识别xml文件,进行不同特征的识别(人脸,侧脸,眼睛等)
dll目录:d:\opencv\build\java\x64\opencv_java2413.dll
xml目录:d:\opencv\sources\data\haarcascades\haarcascade_frontalface_alt.xml(目录中有各类识别文件)
项目结构:
具体代码:由于需要用到 opencv 的dll文件,故要么放在java library path 中,或放在jre lib 中,windows下可放在system32目录下,也可以在代码中动态加载,如下:
package opencv;

import org.opencv.core.*;
import org.opencv.core.Point;
import org.opencv.highgui.Highgui;
import org.opencv.imgproc.Imgproc;
import org.opencv.objdetect.CascadeClassifier;

import javax.imageio.ImageIO;
import javax.swing.*;
import java.awt.*;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;

/**
 * Face/eye detection demo built on the OpenCV 2.4.x Java bindings (Highgui API).
 *
 * <p>Loads the OpenCV native DLL at class-load time, detects faces and eyes
 * with Haar cascade classifiers, crops detected faces to 300x300 images, and
 * demonstrates a few AWT image-composition helpers (alpha fade, watermark,
 * merge onto a transparent canvas).
 *
 * <p>All cascade-XML and image paths are hard-coded Windows desktop paths;
 * adjust them before running.
 */
public class Test {

    static {
        // Dynamically load the OpenCV native library from <working dir>\opencv\x64\
        // instead of requiring it on java.library.path / system32.
        String opencvPath = System.getProperty("user.dir") + "\\opencv\\x64\\";
        System.load(opencvPath + Core.NATIVE_LIBRARY_NAME + ".dll");
    }

    /**
     * Builds the "cut" (cropped-face) output path from an input path by
     * inserting "cut" before the extension, e.g. {@code "zzl.jpg" -> "zzlcut.jpg"}.
     *
     * <p>NOTE(review): splitting on '.' keeps only the first two segments, so
     * paths containing additional dots (e.g. {@code "a.b.jpg"}) are mangled.
     *
     * @param filePath source image path
     * @return path with "cut" inserted before the extension
     */
    public static String getCutPath(String filePath) {
        String[] splitPath = filePath.split("\\.");
        return splitPath[0] + "cut" + "." + splitPath[1];
    }

    /**
     * Detects and crops the face in both images; the success branch is a
     * placeholder for a later face-comparison step.
     *
     * @param original path of the first image
     * @param target   path of the second image
     * @throws Exception if detection fails on either image
     */
    public static void process(String original, String target) throws Exception {
        String originalCut = getCutPath(original);
        String targetCut = getCutPath(target);
        if (detectFace(original, originalCut) && detectFace(target, targetCut)) {
            // TODO: compare the two cropped faces.
        }
    }

    /**
     * Detects a single face in {@code imagePath}, draws a green rectangle
     * around it, crops it, resizes the crop to 300x300 and writes it to
     * {@code outFile}.
     *
     * @param imagePath input image path
     * @param outFile   output path for the 300x300 face crop
     * @return {@code true} if a face was found and the crop written,
     *         {@code false} if no face was detected
     * @throws RuntimeException if more than one face is detected
     */
    public static boolean detectFace(String imagePath, String outFile) throws Exception {
        System.out.println("\nrunning detectfacedemo");
        // Build a face detector from the frontal-face Haar cascade shipped
        // with the OpenCV install.
        CascadeClassifier faceDetector = new CascadeClassifier(
                "c:\\users\\administrator\\desktop\\opencv\\haarcascade_frontalface_alt.xml");
        Mat image = Highgui.imread(imagePath);

        // Detect faces in the image.
        MatOfRect faceDetections = new MatOfRect();
        faceDetector.detectMultiScale(image, faceDetections);
        Rect[] rects = faceDetections.toArray();
        System.out.println(String.format("detected %s faces", rects.length));
        if (rects.length > 1) {
            throw new RuntimeException("超过一个脸");
        }
        if (rects.length == 0) {
            // BUGFIX: the original indexed rects[0] unconditionally and threw
            // ArrayIndexOutOfBoundsException when no face was found.
            return false;
        }

        // Frame the detected face in green, then crop and resize it.
        Rect rect = rects[0];
        Core.rectangle(image,
                new Point(rect.x - 2, rect.y - 2),
                new Point(rect.x + rect.width, rect.y + rect.height),
                new Scalar(0, 255, 0));
        Mat sub = image.submat(rect);
        Mat resized = new Mat();
        Imgproc.resize(sub, resized, new Size(300, 300)); // crop the face and save it
        return Highgui.imwrite(outFile, resized);
    }

    /**
     * Reads an image, overwrites every pixel's alpha channel with a fixed
     * translucency and writes the result as a PNG.
     *
     * @param imagePath input image path
     * @param outFile   output PNG path
     */
    public static void setAlpha(String imagePath, String outFile) {
        try {
            ImageIcon imageIcon = new ImageIcon(imagePath);
            BufferedImage bufferedImage = new BufferedImage(
                    imageIcon.getIconWidth(), imageIcon.getIconHeight(),
                    BufferedImage.TYPE_4BYTE_ABGR);
            Graphics2D g2d = (Graphics2D) bufferedImage.getGraphics();
            g2d.drawImage(imageIcon.getImage(), 0, 0, imageIcon.getImageObserver());

            // Force each pixel's alpha byte to (alpha + 1) = 101 of 255,
            // keeping the RGB bytes untouched.
            int alpha = 100;
            for (int y = bufferedImage.getMinY(); y < bufferedImage.getHeight(); y++) {
                for (int x = bufferedImage.getMinX(); x < bufferedImage.getWidth(); x++) {
                    int rgb = bufferedImage.getRGB(x, y);
                    rgb = ((alpha + 1) << 24) | (rgb & 0x00ffffff);
                    bufferedImage.setRGB(x, y, rgb);
                }
            }
            g2d.drawImage(bufferedImage, 0, 0, imageIcon.getImageObserver());
            // Write as PNG — the stdlib writer format that preserves alpha.
            ImageIO.write(bufferedImage, "png", new File(outFile));
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Draws image {@code b} over image {@code a} with the given opacity and
     * writes the composite to {@code outFile} as PNG.
     *
     * @param a       path of the base image
     * @param b       path of the overlay image
     * @param outFile output PNG path
     * @param alpha   overlay opacity, 0.0f (invisible) to 1.0f (opaque)
     * @throws IOException if reading or writing an image fails
     */
    private static void watermark(String a, String b, String outFile, float alpha) throws IOException {
        BufferedImage buffImg = ImageIO.read(new File(a));  // base image
        BufferedImage waterImg = ImageIO.read(new File(b)); // overlay image
        Graphics2D g2d = buffImg.createGraphics();
        // Blend the overlay into the base with the requested transparency.
        g2d.setComposite(AlphaComposite.getInstance(AlphaComposite.SRC_ATOP, alpha));
        g2d.drawImage(waterImg, 0, 0, waterImg.getWidth(), waterImg.getHeight(), null);
        g2d.dispose(); // release the native graphics context
        ImageIO.write(buffImg, "png", new File(outFile));
    }

    /**
     * Draws {@code image1} onto a transparent ARGB canvas of the same size
     * and writes the result as PNG. The pixel-merge of {@code image2} at
     * ({@code posW}, {@code posH}) was left commented out in the original and
     * is not performed; the parameters are kept for interface compatibility.
     *
     * @param image1     base image; defines the output size
     * @param image2     overlay image (currently unused — merge step disabled)
     * @param posW       x offset for the overlay (currently unused)
     * @param posH       y offset for the overlay (currently unused)
     * @param fileOutput destination PNG file
     * @return {@code true} if the PNG was written successfully
     */
    public static boolean mergeSimple(BufferedImage image1, BufferedImage image2,
                                      int posW, int posH, File fileOutput) {
        int w1 = image1.getWidth();
        int h1 = image1.getHeight();
        BufferedImage imageSaved = new BufferedImage(w1, h1, BufferedImage.TYPE_INT_ARGB);
        Graphics2D g2d = imageSaved.createGraphics();
        g2d.drawImage(image1, null, 0, 0);
        // BUGFIX: the original passed (w1, w2) — two widths — as the
        // compatible image's dimensions; the height argument must be h1.
        image1 = g2d.getDeviceConfiguration()
                .createCompatibleImage(w1, h1, Transparency.TRANSLUCENT);
        g2d.dispose();
        g2d = image1.createGraphics();

        boolean written = false;
        try {
            written = ImageIO.write(imageSaved, "png", fileOutput);
        } catch (IOException ie) {
            ie.printStackTrace();
        }
        return written;
    }

    /**
     * Demo entry point: detects eyes in a sample image with a Haar cascade,
     * prints the angle between the first two detections, frames every
     * detection in green and writes the annotated image to disk.
     */
    public static void main(String[] args) throws Exception {
        String a, d;
        a = "c:\\users\\administrator\\desktop\\opencv\\zzl.jpg";
        d = "c:\\users\\administrator\\desktop\\opencv\\cgx.jpg";
        // process(a, d);
        a = "c:\\users\\administrator\\desktop\\opencv\\zzlcut.jpg";
        d = "c:\\users\\administrator\\desktop\\opencv\\cgxcut.jpg";

        CascadeClassifier eyeDetector = new CascadeClassifier(
                "c:\\users\\administrator\\desktop\\opencv\\haarcascade_eye.xml");
        Mat image = Highgui.imread("c:\\users\\administrator\\desktop\\opencv\\gakki.jpg");

        // Detect eyes. NOTE(review): minSize == maxSize == 20x20 restricts
        // candidates to exactly 20x20 windows — confirm this is intentional.
        MatOfRect eyeDetections = new MatOfRect();
        eyeDetector.detectMultiScale(image, eyeDetections, 2.0, 1, 1,
                new Size(20, 20), new Size(20, 20));
        Rect[] rects = eyeDetections.toArray();
        if (rects.length < 2) {
            // BUGFIX: the original indexed rects[0]/rects[1] unconditionally
            // and crashed when fewer than two eyes were detected.
            System.out.println(String.format("detected %s eyes, need 2", rects.length));
            return;
        }
        Rect eyeA = rects[0];
        Rect eyeB = rects[1];
        System.out.println("a-中心坐标 " + eyeA.x + " and " + eyeA.y);
        System.out.println("b-中心坐标 " + eyeB.x + " and " + eyeB.y);

        // Angle of the line through the two eyes, in degrees.
        double dy = (eyeB.y - eyeA.y);
        double dx = (eyeB.x - eyeA.x);
        double len = Math.sqrt(dx * dx + dy * dy);
        System.out.println("dx is " + dx);
        System.out.println("dy is " + dy);
        System.out.println("len is " + len);
        double angle = Math.atan2(Math.abs(dy), Math.abs(dx)) * 180.0 / Math.PI;
        System.out.println("angle is " + angle);

        // Frame every detection in green and write the annotated image.
        for (Rect rect : rects) {
            Core.rectangle(image,
                    new Point(rect.x, rect.y),
                    new Point(rect.x + rect.width, rect.y + rect.height),
                    new Scalar(0, 255, 0));
        }
        String filename = "c:\\users\\administrator\\desktop\\opencv\\ouput.png";
        System.out.println(String.format("writing %s", filename));
        Highgui.imwrite(filename, image);
    }
}
最终效果:人脸旁有绿色边框,可以将绿色边框图片截取,生成人脸图
以上就是本文的全部内容,希望对大家的学习有所帮助,也希望大家多多支持本站。
上一篇: jquery选择器大全 全面详解jquery选择器
下一篇: C#代码实现-冒泡排序