基于OpenCV实现图像分割
程序员文章站
2022-03-03 23:45:07
本文实例为大家分享了基于opencv实现图像分割的具体代码,供大家参考,具体内容如下1、图像阈值化源代码:#include "opencv2/highgui/highgui.hpp"#include...
本文实例为大家分享了基于opencv实现图像分割的具体代码,供大家参考,具体内容如下
1、图像阈值化
源代码:
// 1. Image thresholding: interactive segmentation driven by two trackbars
//    (threshold value 0..255 and thresholding mode 0..4).
// NOTE(review): the published listing was case-mangled (mat / cv_thresh_binary /
// cvtcolor ...); identifiers restored to the real OpenCV API.
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>

using namespace std;
using namespace cv;

int thresholds = 50;  // current threshold value (trackbar-controlled)
int model = 2;        // current threshold mode  (trackbar-controlled)
Mat image, srcImage;  // loaded image and its grayscale version

// Trackbar callback: re-threshold the grayscale source with the currently
// selected mode and value, and show the result in the "分割" window.
void track(int, void*)
{
    // Map the "mode" trackbar position (0..4) onto OpenCV threshold types.
    // The original duplicated threshold() in a five-way if chain and also
    // ran one redundant THRESH_BINARY pass before it.
    static const int kTypes[5] = {
        THRESH_BINARY,      // 0: pixel > t ? 255 : 0
        THRESH_BINARY_INV,  // 1: inverted binary
        THRESH_TRUNC,       // 2: clamp at t
        THRESH_TOZERO,      // 3: keep pixels > t, zero the rest
        THRESH_TOZERO_INV   // 4: zero pixels > t, keep the rest
    };
    Mat result;
    threshold(srcImage, result, thresholds, 255, kTypes[model]);
    imshow("分割", result);
}

int main()
{
    image = imread("2.2.tif");
    if (!image.data) {
        return 0;  // image missing: nothing to do
    }
    cvtColor(image, srcImage, COLOR_BGR2GRAY);  // threshold() wants one channel
    namedWindow("分割", WINDOW_AUTOSIZE);
    // Label typo fixed: "阈a值:" -> "阈值:".
    createTrackbar("阈值:", "分割", &thresholds, 255, track);
    createTrackbar("模式:", "分割", &model, 4, track);
    track(thresholds, 0);  // render once before any user interaction
    track(model, 0);
    waitKey(0);
    return 0;
}
实现结果:
2、阈值处理
//阈值处理 #include "opencv2/core/core.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" using namespace cv; using namespace std; int main() { printf("键盘按键esc--退出程序"); mat g_srcimage = imread("1.tif",0); if(!g_srcimage.data) { printf("读取图片失败"); } imshow("原始图",g_srcimage); //大津法阈值分割显示 /*大津法,简称otsu.它是按图像的灰度特性,将图像分成背景 和目标2部分。背景和目标之间的类间方差越大,说明构成图像 的2部分的差别越大,当部分目标错分为背景或部分背景错分为 目标都会导致2部分差别变小。*/ mat otsuimage; threshold(g_srcimage,otsuimage,0,255,thresh_otsu);//0不起作用,可为任意阈值 imshow("otsuimage",otsuimage); //自适应分割并显示 mat adaptimage; //thresh_binary_inv:参数二值化取反 adaptivethreshold(g_srcimage,adaptimage,255,0,thresh_binary_inv,7,8); imshow("adaptimage",adaptimage); while(1) { int key; key = waitkey(20); if((char)key == 27) { break; } } }
效果图:
3、拉普拉斯检测
//laplacian检测 #include "opencv2/core/core.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" using namespace cv; using namespace std; /*,在只关心边缘的位置而不考虑其周围的象素灰度差值时比较合适。 laplace 算子对孤立象素的响应要比对边缘或线的响应要更强烈,因此 只适用于无噪声图象。存在噪声情况下,使用 laplacian 算子检测边 缘之前需要先进行低通滤波。*/ int main() { mat src,src_gray,dst,abs_dst; src = imread("1.jpg"); imshow("原始图像",src); //高斯滤波 gaussianblur(src,src,size(3,3),0,0,border_default); //转化为灰度图,输入只能为单通道 cvtcolor(src,src_gray,cv_bgr2gray); laplacian(src_gray,dst,cv_16s,3,1,0,border_default); convertscaleabs(dst,abs_dst); imshow("效果图laplace变换",abs_dst); waitkey(); return 0; }
效果图:
4、canny算法的边缘检测
源代码
// 4. Canny edge detection, with and without pre-smoothing.
// Hysteresis thresholding: a pixel above the high threshold is kept as an
// edge; below the low threshold it is discarded; in between it survives only
// when connected to a pixel above the high threshold.
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

using namespace cv;
using namespace std;

int main()
{
    Mat picture2 = imread("1.jpg");
    if (!picture2.data) {
        return -1;  // bug fix: original proceeded with an empty Mat
    }
    Mat picture2_1 = picture2.clone();
    imshow("【原始图】canny边缘检测", picture2);

    // First pass: Canny directly on the input. Thresholds written (low, high);
    // the original passed (150, 100) — Canny uses min/max of the pair
    // internally, so the result is identical.
    Mat newPicture2;
    Canny(picture2_1, newPicture2, 100, 150, 3);
    imshow("【效果图】canny边缘检测", newPicture2);

    // Second pass: gray -> 3x3 box blur (denoise) -> Canny, then use the
    // edge map as a mask to copy original pixels onto a black canvas.
    // (Unused locals grayimage/new_edge from the original were dropped.)
    Mat grayPicture2, edge;
    Mat dstImage;
    dstImage.create(picture2_1.size(), picture2_1.type());
    cvtColor(picture2_1, grayPicture2, COLOR_BGR2GRAY);
    blur(grayPicture2, edge, Size(3, 3));
    Canny(edge, edge, 3, 9, 3);
    dstImage = Scalar::all(0);          // all-black background
    picture2_1.copyTo(dstImage, edge);  // edge map acts as copy mask
    imshow("效果图canny边缘检测2", dstImage);

    waitKey();
    return 0;  // original omitted the return
}
效果图:
5、图像的分水岭算法
源代码:
// 5. Marker-based watershed segmentation.
// Draw rough marks on the image with the mouse, then press '1' or SPACE to
// run the watershed; '2' restores the original image; ESC quits.
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <climits>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

#define WINDOW_NAME1 "显示/操作窗口"
#define WINDOW_NAME2 "分水岭算法效果图"

Mat g_maskImage, g_srcImage;  // user-drawn marker mask / displayed image
Point prevPt(-1, -1);         // previous mouse position while dragging

static void showHelpText();
static void onMouse(int event, int x, int y, int flags, void*);

// Print usage instructions to the console.
static void showHelpText()
{
    // Fixed: the version macro is CV_VERSION (adjacent-literal concatenation).
    printf("当前使用的版本为:" CV_VERSION);
    printf("\n");
    printf("分水岭算法---点中图片进行鼠标或按键操作\n");
    printf("请先用鼠标在图片窗口中标记出大致的区域,\n然后再按键【1】或者【space】启动算法");
    printf("\n按键操作说明:\n"
           "键盘按键【1】或者【space】--运行的分水岭分割算法\n"
           "键盘按键【2】--回复原始图片\n"
           "键盘按键【esc】--退出程序\n");
}

// Mouse callback: while the left button is held, draw white marker strokes
// into both the mask (consumed by findContours) and the displayed image.
static void onMouse(int event, int x, int y, int flags, void*)
{
    if (x < 0 || x >= g_srcImage.cols || y < 0 || y >= g_srcImage.rows)
        return;  // ignore positions outside the image
    if (event == EVENT_LBUTTONUP || !(flags & EVENT_FLAG_LBUTTON))
        prevPt = Point(-1, -1);  // stroke finished
    else if (event == EVENT_LBUTTONDOWN)
        prevPt = Point(x, y);    // stroke starts here
    else if (event == EVENT_MOUSEMOVE && (flags & EVENT_FLAG_LBUTTON)) {
        Point pt(x, y);
        if (prevPt.x < 0)
            prevPt = pt;
        line(g_maskImage, prevPt, pt, Scalar::all(255), 5, 8, 0);
        line(g_srcImage, prevPt, pt, Scalar::all(255), 5, 8, 0);
        prevPt = pt;
        imshow(WINDOW_NAME1, g_srcImage);
    }
}

int main(int argc, char** argv)
{
    system("color a5");  // Windows console colors; harmless no-op elsewhere
    showHelpText();
    g_srcImage = imread("1.jpg", 1);
    if (!g_srcImage.data) {
        return -1;  // bug fix: original continued with an empty Mat
    }
    imshow(WINDOW_NAME1, g_srcImage);

    Mat srcImage, grayImage;
    g_srcImage.copyTo(srcImage);  // pristine copy, restored on key '2'
    cvtColor(g_srcImage, g_maskImage, COLOR_BGR2GRAY);
    // Gray back to 3-channel BGR: each channel carries the gray value, so it
    // still looks gray but can be blended with the colored watershed output.
    cvtColor(g_maskImage, grayImage, COLOR_GRAY2BGR);
    g_maskImage = Scalar::all(0);  // start with an empty (black) marker mask

    setMouseCallback(WINDOW_NAME1, onMouse, 0);

    while (1) {
        int c = waitKey(0);
        if ((char)c == 27)
            break;
        if ((char)c == '2') {  // restore the unmarked image
            g_maskImage = Scalar::all(0);
            srcImage.copyTo(g_srcImage);
            // bug fix: original opened a brand-new window named "image" here
            // instead of refreshing the operation window.
            imshow(WINDOW_NAME1, g_srcImage);
        }
        if ((char)c == '1' || (char)c == ' ') {
            int i, j, compCount = 0;
            vector<vector<Point> > contours;  // marker stroke outlines
            vector<Vec4i> hierarchy;          // contour topology
            findContours(g_maskImage, contours, hierarchy,
                         RETR_CCOMP, CHAIN_APPROX_SIMPLE);
            if (contours.empty())
                continue;  // no marks drawn yet

            // Label each top-level contour with a distinct positive id;
            // watershed() treats these labels as seed regions.
            Mat maskImage(g_maskImage.size(), CV_32S);
            maskImage = Scalar::all(0);
            for (int index = 0; index >= 0;
                 index = hierarchy[index][0], compCount++)
                drawContours(maskImage, contours, index,
                             Scalar::all(compCount + 1), -1, 8,
                             hierarchy, INT_MAX);
            if (compCount == 0)
                continue;

            // One random color per labeled region.
            vector<Vec3b> colorTab;
            for (i = 0; i < compCount; i++) {
                int b = theRNG().uniform(0, 255);
                int g = theRNG().uniform(0, 255);
                int r = theRNG().uniform(0, 255);
                colorTab.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r));
            }

            // Time the watershed run and report it.
            double dTime = (double)getTickCount();
            watershed(srcImage, maskImage);
            dTime = (double)getTickCount() - dTime;
            printf("\t处理时间=%gms\n", dTime * 1000. / getTickFrequency());

            // Paint the label image: -1 marks a watershed boundary (white),
            // valid labels get their region color, anything else is black.
            Mat watershedImage(maskImage.size(), CV_8UC3);
            for (i = 0; i < maskImage.rows; i++)
                for (j = 0; j < maskImage.cols; j++) {
                    int index = maskImage.at<int>(i, j);
                    if (index == -1)
                        watershedImage.at<Vec3b>(i, j) = Vec3b(255, 255, 255);
                    else if (index <= 0 || index > compCount)
                        watershedImage.at<Vec3b>(i, j) = Vec3b(0, 0, 0);
                    else
                        watershedImage.at<Vec3b>(i, j) = colorTab[index - 1];
                }

            // Blend 50/50 with the gray image for the final display.
            watershedImage = watershedImage * 0.5 + grayImage * 0.5;
            imshow(WINDOW_NAME2, watershedImage);
        }
    }
    waitKey();
    return 0;
}
效果图:
以上就是本文的全部内容,希望对大家的学习有所帮助,也希望大家多多支持。
上一篇: 如何用草根思维快速落地一些事情