Advanced Visual SLAM Theory and Practice: Handwritten VIO, Part 5
I. Monocular BA Solver
1) Code snippet of Problem::MakeHessian()
void Problem::MakeHessian() {
    TicToc t_h;
    // build the full H matrix directly
    ulong size = ordering_generic_;
    MatXX H(MatXX::Zero(size, size));
    VecX b(VecX::Zero(size));

    for (auto &edge : edges_) {
        edge.second->ComputeResidual();
        edge.second->ComputeJacobians();

        auto jacobians = edge.second->Jacobians();
        auto verticies = edge.second->Verticies();
        assert(jacobians.size() == verticies.size());
        for (size_t i = 0; i < verticies.size(); ++i) {
            auto v_i = verticies[i];
            if (v_i->IsFixed()) continue;    // a fixed vertex contributes nothing to the Hessian, i.e. its Jacobian is 0

            auto jacobian_i = jacobians[i];
            ulong index_i = v_i->OrderingId();
            ulong dim_i = v_i->LocalDimension();

            MatXX JtW = jacobian_i.transpose() * edge.second->Information();
            for (size_t j = i; j < verticies.size(); ++j) {
                auto v_j = verticies[j];
                if (v_j->IsFixed()) continue;

                auto jacobian_j = jacobians[j];
                ulong index_j = v_j->OrderingId();
                ulong dim_j = v_j->LocalDimension();
                assert(v_j->OrderingId() != -1);

                MatXX hessian = JtW * jacobian_j;
                // accumulate the information of all edges
                H.block(index_i, index_j, dim_i, dim_j).noalias() += hessian;
                if (j != i) {
                    // symmetric lower-triangular block
                    H.block(index_j, index_i, dim_j, dim_i).noalias() += hessian.transpose();
                }
            }
            b.segment(index_i, dim_i).noalias() -= JtW * edge.second->Residual();
        }
    }
    Hessian_ = H;
    b_ = b;
    t_hessian_cost_ += t_h.toc();

    // Eigen::JacobiSVD<Eigen::MatrixXd> svd(H, Eigen::ComputeThinU | Eigen::ComputeThinV);
    // std::cout << svd.singularValues() << std::endl;

    if (err_prior_.rows() > 0) {
        b_prior_ -= H_prior_ * delta_x_.head(ordering_poses_);    // update the prior residual
    }
    Hessian_.topLeftCorner(ordering_poses_, ordering_poses_) += H_prior_;
    b_.head(ordering_poses_) += b_prior_;

    delta_x_ = VecX::Zero(size);    // initial delta_x = 0_n
}
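To make the block layout of H and b concrete, here is a small standalone sketch with hypothetical dimensions (one 6-DoF pose, one 1-D inverse-depth landmark, a 2-D reprojection residual, and random Jacobians). It accumulates a single edge with the same J_i^T * W * J_j and -J_i^T * W * r pattern as the double loop above; it is an illustration, not part of the solver.

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
        // hypothetical ordering: pose block at index 0 (dim 6), inverse-depth landmark at index 6 (dim 1)
        const int dim_pose = 6, dim_lm = 1, size = dim_pose + dim_lm;
        Eigen::MatrixXd H = Eigen::MatrixXd::Zero(size, size);
        Eigen::VectorXd b = Eigen::VectorXd::Zero(size);

        // one visual edge: 2-D residual with Jacobians w.r.t. both vertices (random for illustration)
        Eigen::VectorXd residual = Eigen::VectorXd::Random(2);
        Eigen::MatrixXd J_pose = Eigen::MatrixXd::Random(2, dim_pose);  // d(residual)/d(pose)
        Eigen::MatrixXd J_lm   = Eigen::MatrixXd::Random(2, dim_lm);    // d(residual)/d(inverse depth)
        Eigen::MatrixXd info   = Eigen::MatrixXd::Identity(2, 2);       // information matrix W

        // same accumulation as MakeHessian(): H += J_i^T * W * J_j, b -= J_i^T * W * r
        Eigen::MatrixXd JtW = J_pose.transpose() * info;
        H.block(0, 0, dim_pose, dim_pose)        += JtW * J_pose;                    // pose-pose block
        H.block(0, dim_pose, dim_pose, dim_lm)   += JtW * J_lm;                      // pose-landmark block
        H.block(dim_pose, 0, dim_lm, dim_pose)   += (JtW * J_lm).transpose();        // symmetric lower block
        H.block(dim_pose, dim_pose, dim_lm, dim_lm) += J_lm.transpose() * info * J_lm;
        b.segment(0, dim_pose)      -= JtW * residual;
        b.segment(dim_pose, dim_lm) -= J_lm.transpose() * info * residual;

        std::cout << "H =\n" << H << "\nb = " << b.transpose() << std::endl;
        return 0;
    }

The pose-landmark product fills the off-diagonal block and its transpose fills the symmetric lower-triangular block, which is exactly what the j != i branch in MakeHessian() does.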
2) Code snippet of Problem::SolveLinearSystem()
void Problem::SolveLinearSystem() {
    if (problemType_ == ProblemType::GENERIC_PROBLEM) {
        // non-SLAM problems are solved directly
        // PCG solver
        MatXX H = Hessian_;
        for (ulong i = 0; i < Hessian_.cols(); ++i) {
            H(i, i) += currentLambda_;    // LM damping on the diagonal
        }
        // delta_x_ = PCGSolver(H, b_, H.rows() * 2);
        delta_x_ = H.ldlt().solve(b_);    // solve the damped system H * delta_x = b
    } else {
        // SLAM problems are solved with the Schur complement
        // step1: Schur marginalization --> Hpp, bpp
        int reserve_size = ordering_poses_;
        int marg_size = ordering_landmarks_;
        MatXX Hmm = Hessian_.block(reserve_size, reserve_size, marg_size, marg_size);
        MatXX Hpm = Hessian_.block(0, reserve_size, reserve_size, marg_size);
        MatXX Hmp = Hessian_.block(reserve_size, 0, marg_size, reserve_size);
        VecX bpp = b_.segment(0, reserve_size);
        VecX bmm = b_.segment(reserve_size, marg_size);

        // Hmm is block-diagonal, so its inverse can be computed block by block.
        // For inverse-depth landmarks the blocks are 1-D, so the inverse is just the
        // reciprocal of the diagonal entry; this can be exploited for speed.
        MatXX Hmm_inv(MatXX::Zero(marg_size, marg_size));
        for (auto landmarkVertex : idx_landmark_vertices_) {
            int idx = landmarkVertex.second->OrderingId() - reserve_size;
            int size = landmarkVertex.second->LocalDimension();
            Hmm_inv.block(idx, idx, size, size) = Hmm.block(idx, idx, size, size).inverse();
        }

        MatXX tempH = Hpm * Hmm_inv;
        H_pp_schur_ = Hessian_.block(0, 0, ordering_poses_, ordering_poses_) - tempH * Hmp;
        b_pp_schur_ = bpp - tempH * bmm;

        // step2: solve H_pp_schur * delta_x_pp = b_pp_schur for the poses
        VecX delta_x_pp(VecX::Zero(reserve_size));
        // PCG solver
        for (ulong i = 0; i < ordering_poses_; ++i) {
            H_pp_schur_(i, i) += currentLambda_;    // LM damping on the diagonal
        }
        int n = H_pp_schur_.rows() * 2;    // maximum number of PCG iterations
        delta_x_pp = PCGSolver(H_pp_schur_, b_pp_schur_, n);
        delta_x_.head(reserve_size) = delta_x_pp;

        // step3: back-substitute the landmark increments
        VecX delta_x_ll(marg_size);
        delta_x_ll = Hmm_inv * (bmm - Hmp * delta_x_pp);
        delta_x_.tail(marg_size) = delta_x_ll;
    }
}
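PCGSolver() is called above but its body is not included in this post. As a reference, a minimal sketch of a Jacobi-preconditioned conjugate gradient solver with the same assumed signature PCGSolver(A, b, maxIter) could look like the following; it is an illustrative sketch, not the course's reference implementation.

    #include <Eigen/Dense>

    // Preconditioned conjugate gradient with a Jacobi (diagonal) preconditioner.
    // Assumed interface: solve A * x = b for symmetric positive definite A, at most maxIter iterations.
    Eigen::VectorXd PCGSolver(const Eigen::MatrixXd &A, const Eigen::VectorXd &b, int maxIter) {
        Eigen::VectorXd x = Eigen::VectorXd::Zero(b.rows());
        Eigen::VectorXd M_inv = A.diagonal().cwiseInverse();   // Jacobi preconditioner: M = diag(A)
        Eigen::VectorXd r = b;                                  // residual of the initial guess x = 0
        Eigen::VectorXd z = M_inv.cwiseProduct(r);              // preconditioned residual
        Eigen::VectorXd p = z;                                  // search direction
        double rz_old = r.dot(z);
        double threshold = 1e-6 * b.norm();                     // stop when the residual norm is small enough
        for (int iter = 0; iter < maxIter && r.norm() > threshold; ++iter) {
            Eigen::VectorXd Ap = A * p;
            double alpha = rz_old / p.dot(Ap);                  // step length along p
            x += alpha * p;
            r -= alpha * Ap;
            z = M_inv.cwiseProduct(r);
            double rz_new = r.dot(z);
            p = z + (rz_new / rz_old) * p;                      // conjugate direction update
            rz_old = rz_new;
        }
        return x;
    }

The Schur-reduced pose system is typically symmetric positive definite once the LM damping term is added, so conjugate gradient converges without ever forming an explicit inverse; the diagonal preconditioner is a common, cheap choice.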
II. Sliding Window Algorithm Test
1) Code snippet of Problem::TestMarginalize()
void Problem::TestMarginalize() {
    // Add marg test
    int idx = 1;             // index of the variable to marginalize (the middle one)
    int dim = 1;             // dimension of the marginalized variable
    int reserve_size = 3;    // total dimension of all variables
    double delta1 = 0.1 * 0.1;
    double delta2 = 0.2 * 0.2;
    double delta3 = 0.3 * 0.3;

    int cols = 3;
    MatXX H_marg(MatXX::Zero(cols, cols));
    H_marg << 1. / delta1, -1. / delta1, 0,
              -1. / delta1, 1. / delta1 + 1. / delta2 + 1. / delta3, -1. / delta3,
              0., -1. / delta3, 1. / delta3;
    std::cout << "---------- TEST Marg: before marg------------" << std::endl;
    std::cout << H_marg << std::endl;

    // Homework: move the marginalized variable to the bottom-right corner
    /// preparation: move the marginalized block to the bottom right of H_marg
    // move row idx to the bottom of the matrix
    Eigen::MatrixXd temp_rows = H_marg.block(idx, 0, dim, reserve_size);
    Eigen::MatrixXd temp_botRows = H_marg.block(idx + dim, 0, reserve_size - idx - dim, reserve_size);
    H_marg.block(idx, 0, reserve_size - idx - dim, reserve_size) = temp_botRows;
    H_marg.block(reserve_size - dim, 0, dim, reserve_size) = temp_rows;

    // move column idx to the rightmost side of the matrix
    Eigen::MatrixXd temp_cols = H_marg.block(0, idx, reserve_size, dim);
    Eigen::MatrixXd temp_rightCols = H_marg.block(0, idx + dim, reserve_size, reserve_size - idx - dim);
    H_marg.block(0, idx, reserve_size, reserve_size - idx - dim) = temp_rightCols;
    H_marg.block(0, reserve_size - dim, reserve_size, dim) = temp_cols;
    std::cout << "---------- TEST Marg: after moving the variable to the bottom right ------------" << std::endl;
    std::cout << H_marg << std::endl;

    /// start marg: Schur complement
    double eps = 1e-8;
    int m2 = dim;                    // dimension of the marginalized block
    int n2 = reserve_size - dim;     // dimension of the remaining variables
    Eigen::MatrixXd Amm = 0.5 * (H_marg.block(n2, n2, m2, m2) + H_marg.block(n2, n2, m2, m2).transpose());
    Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> saes(Amm);
    // invert Amm via its eigen-decomposition, zeroing out eigenvalues below eps
    Eigen::MatrixXd Amm_inv = saes.eigenvectors() * Eigen::VectorXd(
            (saes.eigenvalues().array() > eps).select(saes.eigenvalues().array().inverse(), 0)).asDiagonal() *
            saes.eigenvectors().transpose();

    Eigen::MatrixXd Arm = H_marg.block(0, n2, n2, m2);
    Eigen::MatrixXd Amr = H_marg.block(n2, 0, m2, n2);
    Eigen::MatrixXd Arr = H_marg.block(0, 0, n2, n2);

    Eigen::MatrixXd tempB = Arm * Amm_inv;
    Eigen::MatrixXd H_prior = Arr - tempB * Amr;

    std::cout << "---------- TEST Marg: after marg------------" << std::endl;
    std::cout << H_prior << std::endl;
}
After compiling and running in the terminal, the program prints three matrices in sequence: the Hessian before marginalization, the Hessian after the variable has been moved to the bottom-right corner, and the prior after marginalization.
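As a sanity check, the expected values can be worked out by hand. With delta1 = 0.01, delta2 = 0.04 and delta3 = 0.09 we get 1/delta1 = 100, 1/delta2 = 25 and 1/delta3 ≈ 11.1111, so the printed matrices should be approximately

    before marg:
         100     -100        0
        -100  136.111 -11.1111
           0 -11.1111  11.1111

    after moving the variable to the bottom right:
         100        0     -100
           0  11.1111 -11.1111
        -100 -11.1111  136.111

    after marg (2x2 prior on the remaining variables):
     26.5306 -8.16327
    -8.16327  10.2041

Note that the marginalized prior couples the two remaining variables (the off-diagonal entry is non-zero) even though their block was zero before, which is exactly the fill-in effect introduced by marginalization.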