OpenCV 3 / C++ Machine Learning: Logistic Regression
Logistic Regression
Logistic regression is a binary classification algorithm closely related to the Support Vector Machine (SVM). Like the SVM, it can be extended to multi-class problems; in OpenCV, logistic regression supports both binary and multi-class classification (by building multiple two-class classifiers). A logistic regression classifier can be trained with batch gradient descent or mini-batch gradient descent. In OpenCV, logistic regression is implemented by the cv::ml::LogisticRegression class.
In logistic regression we optimize the training parameters θ so that the hypothesis satisfies 0 ≤ h_θ(x) ≤ 1, where h_θ(x) = g(θᵀx) and g(z) = 1/(1 + e^(−z)) is the logistic, or sigmoid, function. For data from a binary classification problem with classes 0 and 1, a given data instance is assigned to class 1 if h_θ(x) ≥ 0.5 and to class 0 if h_θ(x) < 0.5.
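To make the decision rule concrete, here is a minimal stand-alone sketch (not part of the OpenCV sample below); it evaluates the sigmoid for a hypothetical score θᵀx and thresholds it at 0.5, with all values chosen purely for illustration:

#include <cmath>
#include <iostream>

// logistic (sigmoid) function g(z) = 1 / (1 + e^(-z))
static double sigmoid(double z)
{
    return 1.0 / (1.0 + std::exp(-z));
}

int main()
{
    double score = 0.8;                      // hypothetical value of theta^T * x
    double h = sigmoid(score);               // h_theta(x), lies in (0, 1)
    int predictedClass = (h >= 0.5) ? 1 : 0; // decision rule at threshold 0.5
    std::cout << "h = " << h << ", class = " << predictedClass << std::endl;
    return 0;
}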
In cv::ml::LogisticRegression, choosing the right parameters is essential for reducing the training error and achieving high training accuracy. The number of iterations and the learning rate determine how quickly a possible solution is reached, while regularization compensates for overfitting. The main settings are listed below (a small configuration sketch follows the list).
Learning rate: set with setLearningRate; it determines how quickly we approach the solution.
Number of iterations: set with setIterations; it is the number of steps the optimization algorithm takes.
Training method: specified with setTrainMethod. Logistic regression offers the following training methods:
LogisticRegression::BATCH      // batch gradient descent
LogisticRegression::MINI_BATCH // mini-batch gradient descent
If the training method is set to MINI_BATCH, the mini-batch size is the positive integer set with setMiniBatchSize.
Regularization: the regularization type is set with setRegularization. The available types are:
LogisticRegression::REG_DISABLE // regularization disabled
LogisticRegression::REG_L1      // L1 regularization
LogisticRegression::REG_L2      // L2 regularization
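As a rough sketch of how these setters fit together, a MINI_BATCH configuration might look as follows; the full BATCH example appears in the next section, and the numeric values here are placeholders rather than recommendations:

#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
using namespace cv;
using namespace cv::ml;

// Configure a logistic regression model for mini-batch gradient descent.
// All parameter values below are arbitrary examples.
Ptr<LogisticRegression> createMiniBatchLR()
{
    Ptr<LogisticRegression> lr = LogisticRegression::create();
    lr->setLearningRate(0.001);                          // step size of gradient descent
    lr->setIterations(100);                              // number of optimization steps
    lr->setRegularization(LogisticRegression::REG_L2);   // L2 regularization against overfitting
    lr->setTrainMethod(LogisticRegression::MINI_BATCH);  // mini-batch gradient descent
    lr->setMiniBatchSize(10);                            // samples per mini-batch
    return lr;
}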
Logistic Regression Example
OpenCV 3.1.0 sample logistic_regression.cpp:
Logistic regression classification of handwritten digits 0 and 1
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
#include <opencv2/highgui.hpp>
using namespace std;
using namespace cv;
using namespace cv::ml;
// Reshape each 1 x 784 sample row into a 28 x 28 image, stack them, and display.
static void showImage(const Mat &data, int columns, const String &name)
{
    Mat bigImage;
    for(int i = 0; i < data.rows; ++i)
    {
        bigImage.push_back(data.row(i).reshape(0, columns));
    }
    imshow(name, bigImage.t());
}

// Percentage of predictions that match the original labels.
static float calculateAccuracyPercent(const Mat &original, const Mat &predicted)
{
    return 100 * (float)countNonZero(original == predicted) / predicted.rows;
}
int main()
{
    const String filename = "E:/image/image/data/data01.xml";
    cout << "**********************************************************************" << endl;
    cout << filename
         << " contains digits 0 and 1 of 20 samples each, collected on an Android device" << endl;
    cout << "Each of the collected images are of size 28 x 28 re-arranged to 1 x 784 matrix"
         << endl;
    cout << "**********************************************************************" << endl;

    Mat data, labels;
    {
        cout << "loading the dataset...";
        FileStorage f;
        if(f.open(filename, FileStorage::READ))
        {
            f["datamat"] >> data;
            f["labelsmat"] >> labels;
            f.release();
        }
        else
        {
            cerr << "file can not be opened: " << filename << endl;
            return 1;
        }
        data.convertTo(data, CV_32F);
        labels.convertTo(labels, CV_32F);
        cout << "read " << data.rows << " rows of data" << endl;
    }

    Mat data_train, data_test;
    Mat labels_train, labels_test;
    for(int i = 0; i < data.rows; i++)
    {
        if(i % 2 == 0)
        {
            data_train.push_back(data.row(i));
            labels_train.push_back(labels.row(i));
        }
        else
        {
            data_test.push_back(data.row(i));
            labels_test.push_back(labels.row(i));
        }
    }
    cout << "training/testing samples count: " << data_train.rows << "/" << data_test.rows << endl;

    // display the training and test images
    showImage(data_train, 28, "train data");
    showImage(data_test, 28, "test data");

    // simple case with batch gradient descent
    cout << "training...";
    // create the model
    Ptr<LogisticRegression> lr1 = LogisticRegression::create();
    // learning rate
    lr1->setLearningRate(0.001);
    // number of iterations
    lr1->setIterations(10);
    // L2 regularization
    lr1->setRegularization(LogisticRegression::REG_L2);
    // training method
    lr1->setTrainMethod(LogisticRegression::BATCH);
    // mini-batch size (only used with MINI_BATCH)
    lr1->setMiniBatchSize(1);
    // train
    lr1->train(data_train, ROW_SAMPLE, labels_train);
    cout << "done!" << endl;

    // predict on the test data
    cout << "predicting...";
    Mat responses;
    lr1->predict(data_test, responses);
    cout << "done!" << endl;

    // show the prediction report
    cout << "original vs predicted:" << endl;
    labels_test.convertTo(labels_test, CV_32S);
    cout << labels_test.t() << endl;
    cout << responses.t() << endl;
    cout << "accuracy: " << calculateAccuracyPercent(labels_test, responses) << "%" << endl;

    // save the classifier
    const String saveFilename = "NewLR_Trained.xml";
    cout << "saving the classifier to " << saveFilename << endl;
    lr1->save(saveFilename);

    // load the classifier into a new object
    cout << "loading a new classifier from " << saveFilename << endl;
    Ptr<LogisticRegression> lr2 = StatModel::load<LogisticRegression>(saveFilename);

    // predict using the loaded classifier
    cout << "predicting the dataset using the loaded classifier...";
    Mat responses2;
    lr2->predict(data_test, responses2);
    cout << "done!" << endl;

    // compute the accuracy of the loaded classifier
    cout << labels_test.t() << endl;
    cout << responses2.t() << endl;
    cout << "accuracy: " << calculateAccuracyPercent(labels_test, responses2) << "%" << endl;

    waitKey(0);
    return 0;
}