OpenCV3学习
程序员文章站
2024-03-25 09:31:28
...
存个代码 先放着 以后再整理
/*#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
// Demo 1: load a still image from disk and display it.
int main() {
// Load the image (NOTE(review): no check that imread succeeded)
Mat img = imread("gao.jpg");
// Create a window named "Pic"
namedWindow("Pic");
// Show the image in the window
imshow("Pic", img);
// Wait up to 6000 ms, then the window closes automatically
waitKey(6000);
}
// Demo 2: capture frames from the default camera and display them.
// NOTE(review): a second main() — only one of the two demos can be
// compiled at a time if this block is ever uncommented.
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/core/core.hpp>
using namespace cv;
int main()
{
VideoCapture cap(0);
Mat frame;
while (1){
cap >> frame;
imshow("调用摄像头", frame);
waitKey(30);
}
return 0;
}*/
/*Image erosion: erode the source image with a 10x10 rectangular kernel.
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
int main()
{
Mat img = imread("gao.jpg");
imshow("【原始图】", img);
// 10x10 rectangular structuring element
Mat element = getStructuringElement(MORPH_RECT, Size(10, 10));
Mat dstimg;
erode(img, dstimg, element);
imshow("【效果图】", dstimg);
waitKey(0);
return 0;
}*/
/*Mean (averaging) blur with a 7x7 kernel.
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
int main()
{
Mat img = imread("gao.jpg");
imshow("【原始图】", img);
Mat dstimg;
blur(img, dstimg, Size(7, 7));//each output pixel is the mean of a 7x7 neighborhood
imshow("【效果图】", dstimg);
waitKey(0);
return 0;
}*/
/*Canny edge detection: grayscale, 3x3 pre-smoothing blur, then Canny.
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
int main()
{
Mat img = imread("gao.jpg");
imshow("【原始图】", img);
Mat dstimg;
dstimg.create(img.size(), img.type());//NOTE(review): allocated but unused — edges are shown directly
Mat grayimg, edge;
cvtColor(img, grayimg, COLOR_BGR2GRAY);
blur(grayimg, edge, Size(3, 3));//pre-smooth to reduce noise before edge detection
Canny(edge, edge, 3, 9, 3);//low threshold 3, high threshold 9, aperture size 3
imshow("【效果图】", edge);
waitKey(0);
return 0;
}*/
/*Read and play video: frames come from the default camera (device 0)
and are run through Canny edge detection before being displayed.
#include <opencv2/opencv.hpp>
using namespace cv;
int main()
{
VideoCapture cap;
cap.open(0);//device 0 = default camera
while (1) {
Mat frame;
cap >> frame;
//imshow("【视频】", frame);
Mat grayframe, edge;
cvtColor(frame, grayframe, COLOR_BGR2GRAY);
blur(grayframe, edge, Size(3, 3));//pre-smooth to reduce noise
Canny(edge, edge, 3, 9, 3);//low threshold 3, high threshold 9, aperture size 3
imshow("【效果视频】", edge);
//printf("OpenCV: OpenCV" CV_VERSION);
waitKey(30);
}
return 0;
}*/
/*Loading the same image with different imread flag values.
#include <opencv2/opencv.hpp>
using namespace cv;
int main()
{
Mat img1 = imread("gao.jpg", 2 | 4);//2|4 = IMREAD_ANYDEPTH | IMREAD_ANYCOLOR
Mat img2 = imread("gao.jpg", 0);//0 = IMREAD_GRAYSCALE
Mat img3 = imread("gao.jpg", 1999);//not a documented flag — presumably demonstrating that an arbitrary positive value still loads the image; verify against the imread docs
imshow("[1]", img1);
imshow("[2]", img2);
imshow("[3]", img3);
waitKey(0);
return 0;
}*/
/*Generate a translucent (alpha-channel) PNG image and save it.
#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;
// Fill a CV_8UC4 matrix with a gradient pattern:
// blue = max, green/red fade with column/row position,
// alpha = mean of the green and red values.
void createAlphaMat(Mat & mat)
{
for (int i = 0; i < mat.rows; i++) {
for (int j = 0; j < mat.cols; j++) {
Vec4b& rgba = mat.at<Vec4b>(i, j);
rgba[0] = UCHAR_MAX;
rgba[1] = saturate_cast<uchar>((float(mat.cols - j)) / ((float)mat.cols) * UCHAR_MAX);
rgba[2] = saturate_cast<uchar>((float(mat.rows - i)) / ((float)mat.rows) * UCHAR_MAX);
rgba[3] = saturate_cast<uchar>(0.5 * (rgba[1] + rgba[2]));
}
}
}
int main()
{
Mat mat(480, 640, CV_8UC4);
createAlphaMat(mat);
// Ask the PNG encoder for maximum compression (level 9).
vector<int>compression_params;
compression_params.push_back(IMWRITE_PNG_COMPRESSION);
compression_params.push_back(9);
try {
imwrite("透明Alpha值图.png", mat, compression_params);
imshow("生成的PNG图", mat);
fprintf(stdout, "PNG图片保存完成\n可查看");
}
// FIX: imwrite throws cv::Exception, which derives from std::exception,
// not std::runtime_error — the original catch(runtime_error&) could
// never catch it, so the handler was dead code.
catch(const cv::Exception& ex){
// FIX: the format string ended in "/n" (a literal slash-n); "\n" is the newline escape.
fprintf(stderr, "图像转换PNG出错:%s\n", ex.what());
return 1;
}
waitKey(0);
return 0;
}*/
/*Basic image blending: overlay a logo onto a ROI of the source image.
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
int main()
{
//load and show the base image
Mat img_gao = imread("gao.jpg");
namedWindow("【1.图】");
imshow("【1.图】", img_gao);
//basic blending: reload the base image and the logo
Mat img = imread("gao.jpg");
Mat logo = imread("logo.jpg");
namedWindow("【2.图】");
imshow("【2.图】", img);
namedWindow("【3.logo图】");
imshow("【3.logo图】", logo);
Mat imageROI;
imageROI = img(Rect(100, 200, logo.cols, logo.rows));//Rect(x, y, width, height); (x, y) is the top-left corner
//imageROI = img(Range(350, 350 + logo.rows), Range(800, 800 + logo.cols));
//the weighted sum writes through the ROI into img
addWeighted(imageROI, 0.9, logo, 0.2, 0, imageROI);
namedWindow("【4.图+logo】");
imshow("【4.图+logo】", img);
imwrite("write生成混合图.jpg", img);
waitKey(0);
return 0;
}*/
/*Create a trackbar that controls linear blending of two images.
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
#define WINDOW_NAME "【线性混合】"
const int g_nMaxAlphaValue = 100;//maximum alpha value
int g_nAlphaValueSlider;//variable bound to the slider
double g_dAlphaValue;
double g_dBetaValue;
//storage for the images
Mat g_srcImage1;
Mat g_srcImage2;
Mat g_dstImage;
//Trackbar callback: recompute the blend whenever the slider moves.
void on_Trackbar(int, void*)
{
//ratio of the current alpha value to the maximum
g_dAlphaValue =(double) g_nAlphaValueSlider / g_nMaxAlphaValue;
//beta is 1 minus the current alpha
g_dBetaValue = (1.0 - g_dAlphaValue);
//linear blend using alpha and beta:
//g(x) = (1 - α)f0(x) + αf1(x)
addWeighted(g_srcImage1, g_dAlphaValue, g_srcImage2, g_dBetaValue, 0, g_dstImage);
imshow(WINDOW_NAME, g_dstImage);
}
int main()
{
//the two images must have the same size
g_srcImage1 = imread("bar1.jpg");
g_srcImage2 = imread("bar2.jpg");
if (!g_srcImage1.data) { printf("请确认图片1是否存在。\n"); return -1; }
if (!g_srcImage2.data) { printf("请确认图片2是否存在。\n"); return -1; }
//initial slider position: 70
g_nAlphaValueSlider = 70;
//create the window
namedWindow(WINDOW_NAME, 1);//1 = window sized to the image, not user-resizable
//create the slider control inside the window
char TrackBarName[50];
sprintf_s(TrackBarName, "透明值 %d", g_nMaxAlphaValue);//NOTE(review): sprintf_s is MSVC-specific
createTrackbar(TrackBarName, WINDOW_NAME, &g_nAlphaValueSlider, g_nMaxAlphaValue, on_Trackbar);
//render the initial blend via the callback
on_Trackbar(g_nAlphaValueSlider, 0);
waitKey(0);
return 0;
}*/
/*Mouse interaction: drag with the left button to draw random-colored rectangles.
#include <opencv2/opencv.hpp>
using namespace cv;
#define WINDOW_NAME "【程序窗口】"
void on_MouseHandle(int event, int x, int y, int flags, void* param);
void DrawRectangle(cv::Mat&img, cv::Rect box);
void ShowHelpText();//NOTE(review): declared but never defined or called in this block
Rect g_rectangle;
bool g_bDrawingBox = false;
RNG g_rng(12345);//random number generator (fixed seed)
int main()
{
//1. prepare parameters
g_rectangle = Rect(-1, -1, 0, 0);
Mat srcImage(600, 800, CV_8UC3), tempImage;
srcImage.copyTo(tempImage);
g_rectangle = Rect(-1, -1, 0, 0);
srcImage = Scalar::all(0);
//2. install the mouse callback
namedWindow(WINDOW_NAME);
setMouseCallback(WINDOW_NAME, on_MouseHandle,(void*) &srcImage);
//3. main loop: redraw the in-progress rectangle while the drawing flag is set
while (1) {
srcImage.copyTo(tempImage);//copy the source into the temporary frame
//draw the rectangle being dragged, if any
if (g_bDrawingBox) DrawRectangle(tempImage, g_rectangle);
imshow(WINDOW_NAME, tempImage);
if (waitKey(10) == 27) break; //ESC exits the program
}
return 0;
}
//mouse callback
void on_MouseHandle(int event, int x, int y, int flags, void* param)
{
Mat& img = *(cv::Mat*) param;
switch (event){
case EVENT_MOUSEMOVE: {
//while dragging, track the current corner opposite the start point
if (g_bDrawingBox) {
g_rectangle.width = x - g_rectangle.x;
g_rectangle.height = y - g_rectangle.y;
}
break;
}
//left button pressed
case EVENT_LBUTTONDOWN: {
g_bDrawingBox = true;
g_rectangle = Rect(x, y, 0, 0);//record the starting point
break;
}
//left button released
case EVENT_LBUTTONUP: {
g_bDrawingBox = false;
//normalize a rectangle dragged up/left so width and height are positive
if (g_rectangle.width < 0) {
g_rectangle.x += g_rectangle.width;
g_rectangle.width *= -1;
}
if (g_rectangle.height < 0) {
g_rectangle.y += g_rectangle.height;
g_rectangle.height *= -1;
}
DrawRectangle(img, g_rectangle);
break;
}
}
}
//drawing helper
void DrawRectangle(cv::Mat& img, cv::Rect box)
{
//random color
rectangle(img, box.tl(), box.br(), Scalar(g_rng.uniform(0, 255), g_rng.uniform(0, 255), g_rng.uniform(0, 255)));
}
*/
/*Mat data structure basics: constructors, randu, and stream output.
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <stdio.h>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
//construct via the Mat constructor: 2x2, 3 channels, all pixels (0, 0, 255)
Mat A(2, 2, CV_8UC3, Scalar(0, 0, 255));
cout << A << endl;
//initialize a multi-dimensional Mat via the constructor (C/C++ style);
//matrices with more than 2 dimensions cannot be printed with <<
int sz[3] = { 2, 2, 2 };
Mat B(3, sz, CV_8UC(1), Scalar::all(0));
Mat r = Mat(10, 3, CV_8UC3);
randu(r, Scalar::all(0), Scalar::all(255));//fill with uniform random values in [0, 255)
cout << "r = " << endl << r << endl;
system("pause");//Windows-only: keep the console open
return 0;
}*/
/*Drawing primitives: ellipses, filled circles, a concave polygon,
rectangles and line segments, composing an "atom" and a chess-rook image.
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
using namespace cv;
#define WINDOW_WIDTH 600
#define WINDOW_NAME1 "【绘制图1】"
#define WINDOW_NAME2 "【绘制图2】"
//Draw same-sized ellipses at different rotation angles.
void DrawEllipse(Mat img, double angle)
{
int thickness = 2; //line width
int lineType = 8; //8-connected line type
ellipse(img,
Point(WINDOW_WIDTH / 2, WINDOW_WIDTH / 2), //ellipse center
Size(WINDOW_WIDTH / 4, WINDOW_WIDTH / 16), //axis sizes
angle, //rotation angle
0, 360, //full arc, 0-360 degrees
Scalar(255, 129, 0), //color
thickness,
lineType
);
}
//Draw a filled circle.
void DrawFilledCircle(Mat img, Point center)
{
int thickness = -1; //negative thickness = filled
int lineType = 8;
circle(img,
center,
WINDOW_WIDTH / 32,
Scalar(0, 0, 255),
thickness,
lineType
);
}
//Draw a concave polygon (the rook silhouette).
void DrawPolygon(Mat img)
{
int lineType = 8;
//vertex list
Point rookPoint[1][20];
//FIX: the first vertex was Point(WINDOW_WIDTH / 4, 7 * WINDOW_WIDTH) —
//missing the "/ 8" — putting it at y = 4200, far outside the 600x600
//canvas. It must mirror vertex [0][1] at 7/8 of the height.
rookPoint[0][0] = Point(WINDOW_WIDTH / 4, 7 * WINDOW_WIDTH / 8);
rookPoint[0][1] = Point(3 * WINDOW_WIDTH / 4, 7 * WINDOW_WIDTH / 8);
rookPoint[0][2] = Point(3 * WINDOW_WIDTH / 4, 13 * WINDOW_WIDTH / 16);
rookPoint[0][3] = Point(11 * WINDOW_WIDTH / 16, 13 * WINDOW_WIDTH / 16);
rookPoint[0][4] = Point(19 * WINDOW_WIDTH / 32, 3 * WINDOW_WIDTH / 8);
rookPoint[0][5] = Point(3 * WINDOW_WIDTH / 4, 3 * WINDOW_WIDTH / 8);
rookPoint[0][6] = Point(3 * WINDOW_WIDTH / 4, WINDOW_WIDTH / 8);
rookPoint[0][7] = Point(26 * WINDOW_WIDTH / 40, WINDOW_WIDTH / 8);
rookPoint[0][8] = Point(26 * WINDOW_WIDTH / 40, WINDOW_WIDTH / 4);
rookPoint[0][9] = Point(22 * WINDOW_WIDTH / 40, WINDOW_WIDTH / 4);
rookPoint[0][10] = Point(22 * WINDOW_WIDTH / 40, WINDOW_WIDTH / 8);
rookPoint[0][11] = Point(18 * WINDOW_WIDTH / 40, WINDOW_WIDTH / 8);
rookPoint[0][12] = Point(18 * WINDOW_WIDTH / 40, WINDOW_WIDTH / 4);
rookPoint[0][13] = Point(14 * WINDOW_WIDTH / 40, WINDOW_WIDTH / 4);
rookPoint[0][14] = Point(14 * WINDOW_WIDTH / 40, WINDOW_WIDTH / 8);
rookPoint[0][15] = Point(WINDOW_WIDTH / 4, WINDOW_WIDTH / 8);
rookPoint[0][16] = Point(WINDOW_WIDTH / 4, 3 * WINDOW_WIDTH / 8);
rookPoint[0][17] = Point(13 * WINDOW_WIDTH / 32, 3 * WINDOW_WIDTH / 8);
rookPoint[0][18] = Point(5 * WINDOW_WIDTH / 16, 13 * WINDOW_WIDTH / 16);
rookPoint[0][19] = Point(WINDOW_WIDTH / 4, 13 * WINDOW_WIDTH / 16);
const Point* ppt[1] = { rookPoint[0] };
int npt[] = { 20 };
fillPoly(img,
ppt, //array of vertex arrays
npt, //number of vertices per polygon
1, //number of polygons to draw
Scalar(255,255,255), //white
lineType
);
}
//Draw a line segment.
void DrawLine(Mat img, Point start, Point end)
{
int thickness = 2;
int lineType = 8;
line(img,
start,
end,
Scalar(0, 0, 0),
thickness,
lineType
);
}
int main()
{
//Mat img;// = Mat::zeros(WINDOW_WIDTH, WINDOW_WIDTH, CV_8UC3);
//img.create(WINDOW_WIDTH, WINDOW_WIDTH, CV_8UC3);
//DrawEllipse(img, 90);
//imshow("椭圆",img);
//waitKey(0);
Mat atomImage = Mat::zeros(WINDOW_WIDTH, WINDOW_WIDTH, CV_8UC3);
Mat rookImage = Mat::zeros(WINDOW_WIDTH, WINDOW_WIDTH, CV_8UC3);
//draw the "atom": four rotated ellipses
DrawEllipse(atomImage, 0);
DrawEllipse(atomImage, 45);
DrawEllipse(atomImage, 90);
DrawEllipse(atomImage, 135);
//plus the center dot
DrawFilledCircle(atomImage, Point(WINDOW_WIDTH / 2, WINDOW_WIDTH / 2));
//draw the combined rook image:
//polygon first
DrawPolygon(rookImage);
//then the base rectangle
rectangle(rookImage,
Point(0, 7 * WINDOW_WIDTH / 8),
Point(WINDOW_WIDTH, WINDOW_WIDTH),
Scalar(0, 255, 255),
-1,
8
);
//then the line segments
DrawLine(rookImage, Point(0, 15 * WINDOW_WIDTH / 16), Point(WINDOW_WIDTH, 15*WINDOW_WIDTH / 16));
DrawLine(rookImage, Point(WINDOW_WIDTH / 4, 7 * WINDOW_WIDTH / 8), Point(WINDOW_WIDTH / 4, WINDOW_WIDTH));
DrawLine(rookImage, Point(WINDOW_WIDTH / 2, 7 * WINDOW_WIDTH / 8), Point(WINDOW_WIDTH / 2, WINDOW_WIDTH));
DrawLine(rookImage, Point(3 * WINDOW_WIDTH / 4, 7 * WINDOW_WIDTH / 8), Point(3 * WINDOW_WIDTH / 4, WINDOW_WIDTH));
//show both canvases
imshow(WINDOW_NAME1, atomImage);
moveWindow(WINDOW_NAME1, 200, 200);//position the window on screen
imshow(WINDOW_NAME2, rookImage);
moveWindow(WINDOW_NAME2, 200 + WINDOW_WIDTH, 200);
waitKey(0);
return 0;
}*/
/* //Three ways to access pixels (color-space reduction / quantization demo).
//NOTE(review): the three colorReduce definitions below share one signature;
//only one of them can be compiled at a time — the others must stay commented
//out or be renamed.
#include <opencv2/opencv.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/core.hpp>
#include <iostream>
using namespace cv;
using namespace std;
void colorReduce(Mat& inputImage, Mat& outputImage, int div);
int main()
{
//load and show the source image
Mat srcImage = imread("gao.jpg");
imshow("原始图像", srcImage);
//allocate the output image
Mat dstImage;
dstImage.create(srcImage.rows, srcImage.cols, srcImage.type());
//record the start time
double time0 = static_cast<double>(getTickCount());
//run the color-space reduction
colorReduce(srcImage, dstImage, 64);
time0 = ((double)getTickCount() - time0) / getTickFrequency();
cout << "此方法所用时间:" << time0 << endl;
imshow("【效果图】", dstImage);
waitKey(0);
return 0;
}
//1) pointer access — measured at ~0.0016 s
void colorReduce(Mat& inputImage, Mat& outputImage, int div)
{
outputImage = inputImage.clone();
int rowNumber = inputImage.rows;
int colNumber = inputImage.cols * inputImage.channels();
for (int i = 0; i < rowNumber; i++) {
//the ptr template function returns the address of row i
uchar* data = outputImage.ptr<uchar>(i); //first element of row i
for (int j = 0; j < colNumber; j++) {
//quantize each channel value to the middle of its bucket
data[j] = data[j] / div * div + div / 2;
}
}
}
//2) iterator access — measured at ~0.067 s
void colorReduce(Mat& inputImage, Mat& outputImage, int div)
{
outputImage = inputImage.clone();
Mat_<Vec3b>::iterator it = outputImage.begin<Vec3b>();
Mat_<Vec3b>::iterator itend = outputImage.end<Vec3b>();
for (; it != itend; it++) {
(*it)[0] = (*it)[0] / div * div + div / 2;
(*it)[1] = (*it)[1] / div * div + div / 2;
(*it)[2] = (*it)[2] / div * div + div / 2;
}
}
//3) dynamic address access via at<Vec3b>(i, j) — measured at ~0.061 s
void colorReduce(Mat& inputImage, Mat& outputImage, int div)
{
outputImage = inputImage.clone();
int rowNumber = inputImage.rows;
int colNumber = inputImage.cols;
for (int i = 0; i < rowNumber; i++) {
for (int j = 0; j < colNumber; j++) {
outputImage.at<Vec3b>(i, j)[0] = outputImage.at<Vec3b>(i, j)[0] / div * div + div / 2;
outputImage.at<Vec3b>(i, j)[1] = outputImage.at<Vec3b>(i, j)[1] / div * div + div / 2;
outputImage.at<Vec3b>(i, j)[2] = outputImage.at<Vec3b>(i, j)[2] / div * div + div / 2;
}
}
}
*/
/*Region linear blending: blend logo.jpg into a ROI of the source image.
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
using namespace cv;
int main()
{
Mat srcImage = imread("gao.jpg");
Mat logo = imread("logo.jpg");
//ROI the size of the logo at (20, 20); writes through to srcImage
Mat imageROI = srcImage(Rect(20, 20, logo.cols, logo.rows));
//Mat mask = imread("logo.jpg", 0);
//logo.copyTo(imageROI, mask); //copy logo into imageROI only where the mask is non-zero
addWeighted(imageROI, 0.5, logo, 0.5, 0, imageROI);
namedWindow("【效果图】");
imshow("【效果图】", srcImage);
waitKey(0);
return 0;
}*/
/*Multi-channel blending: split the image into B/G/R channels and blend a
grayscale logo into one channel at a time.
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#include <vector>
using namespace cv;
using namespace std;
void MulitChannelBlending()
{
Mat srcImage;
Mat logoImage;
vector<Mat>channels;
srcImage = imread("gao.jpg");
logoImage = imread("logo.jpg",0);//load the logo as grayscale
split(srcImage, channels);
//blue channel (index 0 in BGR order)
Mat imageBlueChannel = channels.at(0);
//FIX: Rect takes (x, y, width, height); width corresponds to cols and
//height to rows. The original passed (rows, cols) — swapped — which is
//wrong for any non-square logo (the ROI and logo sizes would not match).
addWeighted(imageBlueChannel(Rect(20, 20, logoImage.cols, logoImage.rows)), 0.5, logoImage, 0.5,
0, imageBlueChannel(Rect(20, 20, logoImage.cols, logoImage.rows)));
merge(channels, srcImage);
namedWindow("【蓝色通道混合图】");
imshow("【蓝色通道混合图】", srcImage);
//reload and blend into the green channel (index 1)
srcImage = imread("gao.jpg");
logoImage = imread("logo.jpg", 0);
split(srcImage, channels);
imageBlueChannel = channels.at(1);
addWeighted(imageBlueChannel(Rect(20, 20, logoImage.cols, logoImage.rows)), 0.5, logoImage, 0.5,
0, imageBlueChannel(Rect(20, 20, logoImage.cols, logoImage.rows)));
merge(channels, srcImage);
namedWindow("【绿色通道混合图】");
imshow("【绿色通道混合图】", srcImage);
//reload and blend into the red channel (index 2)
srcImage = imread("gao.jpg");
logoImage = imread("logo.jpg", 0);
split(srcImage, channels);
imageBlueChannel = channels.at(2);
addWeighted(imageBlueChannel(Rect(20, 20, logoImage.cols, logoImage.rows)), 0.5, logoImage, 0.5,
0, imageBlueChannel(Rect(20, 20, logoImage.cols, logoImage.rows)));
merge(channels, srcImage);
namedWindow("【红色通道混合图】");
imshow("【红色通道混合图】", srcImage);
waitKey(0);
}
int main()
{
MulitChannelBlending();
return 0;
}*/
/*//Brightness and contrast adjustment via trackbars:
//dst(i,j)[k] = (contrast * 0.01) * src(i,j)[k] + brightness, saturated to uchar.
#include <opencv2/opencv.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/core.hpp>
#include <iostream>
using namespace cv;
using namespace std;
Mat srcImage = imread("gao.jpg");
//brightness and contrast slider values
int g_nBrightValue = 80;
int g_nContrastValue = 80;
//trackbar callback shared by both sliders
void on_ContrastAndBright(int, void*)
{
namedWindow("【原始图窗口】",1);
Mat dstImage = Mat::zeros(srcImage.size(), srcImage.type());
for (int i = 0; i < srcImage.rows; i++) {
for (int j = 0; j < srcImage.cols; j++) {
for (int k = 0; k < 3; k++) {
//saturate_cast clamps the result to the 0-255 uchar range
dstImage.at<Vec3b>(i, j)[k] =
saturate_cast<uchar> ((g_nContrastValue * 0.01) * srcImage.at<Vec3b>(i, j)[k] + g_nBrightValue);
}
}
}
imshow("【原始图窗口】", srcImage);
imshow("【效果图窗口】", dstImage);
}
int main()
{
namedWindow("【效果图窗口】",1);
//create the trackbars
createTrackbar("对比度", "【效果图窗口】", &g_nContrastValue, 300, on_ContrastAndBright);
createTrackbar("亮度", "【效果图窗口】", &g_nBrightValue, 200, on_ContrastAndBright);
//render the initial result (the second call repeats the same work)
on_ContrastAndBright(g_nContrastValue, 0);
on_ContrastAndBright(g_nBrightValue, 0);
while (char(waitKey(1)) != 'q') {}
return 0;
}*/
/*
//Discrete Fourier transform: show the log-magnitude spectrum of a grayscale image.
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/core.hpp>
using namespace std;
using namespace cv;
int main()
{
//read the image as grayscale
Mat srcImage = imread("gao.jpg", 0);
if (!srcImage.data) { printf("读取图像错误!"); return false; }
imshow("【原始图】", srcImage);
//pad the input up to the optimal DFT size
int m = getOptimalDFTSize(srcImage.rows);
int n = getOptimalDFTSize(srcImage.cols);
//initialize the added pixels to 0; no padding on the top or left side
Mat padded;
copyMakeBorder(srcImage, padded, 0, m - srcImage.rows, 0, n - srcImage.cols, BORDER_CONSTANT, Scalar::all(0));
//allocate storage for the DFT result (real and imaginary parts)
//and merge the planes array into one multi-channel matrix complexI
Mat planes[] = { Mat_<float>(padded),Mat::zeros(padded.size(),CV_32F) };
Mat complexI;
//the second argument is the number of input matrices
merge(planes, 2, complexI);
//perform the discrete Fourier transform
dft(complexI, complexI);
//convert the complex result to magnitude: log(1+sqrt(Re(DFT(I))^2+Im(DFT(I))^2))
//planes[0] = real part, planes[1] = imaginary part
split(complexI, planes);
//compute the magnitude
magnitude(planes[0], planes[1], planes[0]);
Mat magnitudeImage = planes[0];
//switch to a logarithmic scale
magnitudeImage += Scalar::all(1);
log(magnitudeImage, magnitudeImage);//natural logarithm
//The log transform is needed because the raw DFT magnitudes span a range
//too large for a display: high values would all turn white and small ones
//black, hiding detail. Mapping the linear scale to a logarithmic one,
//M = log(1 + M), makes the spectrum visible.
//crop and rearrange the spectrum quadrants:
//trim the spectrum if it has an odd number of rows or columns (x & -2 clears the lowest bit)
magnitudeImage = magnitudeImage(Rect(0, 0, magnitudeImage.cols&-2, magnitudeImage.rows&-2));
//rearrange the quadrants so that the origin lies at the image center
int cx = magnitudeImage.cols / 2;
int cy = magnitudeImage.rows / 2;
Mat q0(magnitudeImage, Rect(0, 0, cx, cy));//top-left ROI
Mat q1(magnitudeImage, Rect(cx, 0, cx, cy));//top-right ROI
Mat q2(magnitudeImage, Rect(0, cy, cx, cy));//bottom-left ROI
Mat q3(magnitudeImage, Rect(cx, cy, cx, cy));//bottom-right ROI
//swap quadrants (top-left with bottom-right)
Mat tmp;
q0.copyTo(tmp);
q3.copyTo(q0);
tmp.copyTo(q3);
//swap quadrants (top-right with bottom-left)
q1.copyTo(tmp);
q2.copyTo(q1);
tmp.copyTo(q2);
//normalize to floats in [0, 1] so the matrix can be displayed
normalize(magnitudeImage,magnitudeImage,0,1,NORM_MINMAX);
//show the result
imshow("【效果图】", magnitudeImage);
waitKey(0);
return 0;
}
*/
/*//Write structured data with cv::FileStorage. The title says XML, but the
//".yaml" extension makes FileStorage emit YAML; use ".xml" for XML output.
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <time.h>
using namespace std;
using namespace cv;
int main()
{
//open the storage for writing
FileStorage fs("test.yaml", FileStorage::WRITE);
//start writing entries
fs << "frameCount" << 5;
//time_t rawtime; time(&rawtime);
//To use localtime under MSVC: project properties -> C/C++ -> command line
//-> add "/D _CRT_SECURE_NO_WARNINGS".
//fs << "calibrationDate" << asctime(localtime(&rawtime));
Mat cameraMatrix = (Mat_<double>(3, 3) << 1000, 0, 320, 0, 1000, 240, 0, 0, 1);
Mat distCoeffs = (Mat_<double>(5, 1) << 0.1, 0.01, -0.001, 0, 0);
fs << "cameraMatrix" << cameraMatrix << "distCoeffs" << distCoeffs;
//write a sequence of three feature mappings
fs << "features" << "[";
for (int i = 0; i < 3; i++) {
int x = rand() % 640;
int y = rand() % 480;
uchar lbp = rand() % 256;
fs << "{:" << "x" << x << "y" << y << "lbp" << "[:";
//emit the 8 bits of the lbp byte, least-significant bit first
for (int j = 0; j < 8; j++) {
fs << ((lbp >> j) & 1);
}
fs << "]" << "}";
}
fs << "]";
fs.release();
printf("文件读写完毕\n");
getchar();
return 0;
}*/
/*
//Linear filtering: box, mean and Gaussian blur, each with a fixed 3x3 kernel.
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
using namespace std;
using namespace cv;
int main()
{
Mat srcImage = imread("gao.jpg");
namedWindow("【原始图】");
imshow("【原始图】",srcImage);
Mat dstImage;
//box filter; ddepth -1 keeps the source depth
boxFilter(srcImage, dstImage, -1, Size(3, 3));
namedWindow("【方框滤波效果图】");
imshow("【方框滤波效果图】", dstImage);
//mean (averaging) blur
blur(srcImage, dstImage, Size(3, 3));
namedWindow("【均值滤波效果图】");
imshow("【均值滤波效果图】", dstImage);
//Gaussian blur; sigma values of 0 are derived from the kernel size
GaussianBlur(srcImage, dstImage, Size(3, 3), 0, 0);
namedWindow("【高斯滤波效果图】");
imshow("【高斯滤波效果图】",dstImage);
waitKey(0);
return 0;
}*/
/*
//Control the three linear filters (box, Gaussian, mean) with trackbars.
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
using namespace std;
using namespace cv;
//slider-bound kernel-size parameters
int g_nboxFilterValue = 3;
int g_nGaussianBlurValue = 3;
int g_nMeanBlurValue = 3;
Mat srcImage = imread("gao.jpg");
Mat dstImage;
//box-filter trackbar callback; kernel side = slider value + 1
void on_boxFilter(int, void*)
{
boxFilter(srcImage, dstImage, -1, Size(g_nboxFilterValue + 1, g_nboxFilterValue + 1));
imshow("【方框滤波效果图】", dstImage);
}
//Gaussian-blur callback; 2*value + 1 keeps the kernel side odd, as GaussianBlur requires
void on_GaussianBlur(int, void*)
{
GaussianBlur(srcImage, dstImage, Size(g_nGaussianBlurValue*2 + 1, g_nGaussianBlurValue*2 + 1), 0, 0);
imshow("【高斯滤波效果图】", dstImage);
}
//mean-blur callback; Point(-1,-1) anchors the kernel at its center
void on_MeanBlur(int ,void*)
{
blur(srcImage, dstImage, Size(g_nMeanBlurValue + 1, g_nMeanBlurValue + 1),Point(-1,-1));
imshow("【均值滤波效果图】", dstImage);
}
int main()
{
//one window + trackbar per filter; each callback renders its own window
namedWindow("【方框滤波效果图】",1);
createTrackbar("内核值", "【方框滤波效果图】", &g_nboxFilterValue, 40, on_boxFilter);
on_boxFilter(g_nboxFilterValue,0);
imshow("【方框滤波效果图】",dstImage);
namedWindow("【高斯滤波效果图】", 1);
createTrackbar("内核值","【高斯滤波效果图】", &g_nGaussianBlurValue, 40, on_GaussianBlur);
on_GaussianBlur(g_nGaussianBlurValue, 0);
imshow("【高斯滤波效果图】", dstImage);
namedWindow("【均值滤波效果图】", 1);
createTrackbar("内核值", "【均值滤波效果图】", &g_nMeanBlurValue, 40, on_MeanBlur);
on_MeanBlur(g_nMeanBlurValue, 0);
imshow("【均值滤波效果图】", dstImage);
waitKey(0);
return 0;
}*/
上一篇: SLAM十四讲学习历程