
[SLAM] VINS-Mono Explained: Backend Optimization (Code)


7.2 Code

At the end of processImage() in estimator.cpp, the code is as follows:

    else // solver_flag == NON_LINEAR: run the nonlinear optimization
    {
        solveOdometry();  //<--1
        // Boundary check: detect whether the system has failed; if so, reset the estimator
        if (failureDetection()) //<--2
        {
            ROS_WARN("failure detection!");
            failure_occur = 1;
            clearState();
            setParameter();
            return;
        }

        slideWindow(); // slide the window //<--3
        f_manager.removeFailures(); // remove points whose estimation failed //<--4
        // prepare output of VINS
        key_poses.clear(); // publish the key poses //<--5
        for (int i = 0; i <= WINDOW_SIZE; i++)
            key_poses.push_back(Ps[i]);

        last_R = Rs[WINDOW_SIZE]; //<--6
        last_P = Ps[WINDOW_SIZE];
        last_R0 = Rs[0];
        last_P0 = Ps[0];
    }
}


In this main flow, six things happen (markers 1-6), of which 1 and 3 are the two key steps. optimization() lives inside solveOdometry(), so the code optimizes first and marginalizes afterwards.

7.2.1 solveOdometry() and optimization() (the core!)

void Estimator::solveOdometry()
{
    if (frame_count < WINDOW_SIZE)
        return;
    if (solver_flag == NON_LINEAR)
    {
        f_manager.triangulate(Ps, tic, ric); // triangulate to get the depths of the latest features //<--1
        optimization();//<--2
    }
}

Two things happen here. The first is triangulating feature points from the current poses, which also appeared once during initialization, see 6.4.3.

The main focus is the optimization() function.
Part one:
(1) Nonlinear optimization
a. Declare the problem and introduce the robust kernel

ceres::Problem problem;
ceres::LossFunction *loss_function; // 1. robust kernel (loss function)
loss_function = new ceres::CauchyLoss(1.0);
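
As background (this is the Ceres definition of the loss, not code from this repo): CauchyLoss(a) replaces the squared residual $s$ with

$$\rho(s) = a^{2} \log\!\left(1 + \frac{s}{a^{2}}\right),$$

so with $a = 1$ large reprojection errors grow only logarithmically, which limits the influence of outlier feature matches on the solution.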

b. Adding the optimization variables X: poses

    for (int i = 0; i < WINDOW_SIZE + 1; i++) // all 11 frames, including the newest one
    {
        ceres::LocalParameterization *local_parameterization = new PoseLocalParameterization();
        problem.AddParameterBlock(para_Pose[i], 7, local_parameterization);
        problem.AddParameterBlock(para_SpeedBias[i], 9);
    }

All 11 poses in the window (WINDOW_SIZE + 1 frames) are added.
Two new data structures appear here, para_Pose[i] and para_SpeedBias[i]: Ceres works on raw double arrays, so the Eigen states are copied into them inside vector2double().

c. Adding the optimization variables X: camera-IMU extrinsics

for (int i = 0; i < NUM_OF_CAM; i++) // 7-dimensional camera-IMU extrinsics
{
    ceres::LocalParameterization *local_parameterization = new PoseLocalParameterization();
    problem.AddParameterBlock(para_Ex_Pose[i], 7, local_parameterization);
    if (!ESTIMATE_EXTRINSIC) // if the camera-IMU extrinsics do not need to be calibrated
        problem.SetParameterBlockConstant(para_Ex_Pose[i]); // keep this block constant
}

d. Adding the optimization variables X: camera-IMU time offset

if (ESTIMATE_TD) // 1-dimensional camera-IMU time offset
{
    problem.AddParameterBlock(para_Td[0], 1);
}

Why is the visual part of the state (the inverse depths) not among the explicitly added parameter blocks?

e. vector2double()
This assigns values to the parameter blocks, i.e. copies the Eigen states into the double arrays that Ceres operates on.
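
A minimal sketch of what that packing looks like (simplified from the VINS-Mono source; only the pose and speed-bias blocks are shown, and the exact code lives in Estimator::vector2double() in estimator.cpp):

// Sketch: copy the Eigen window states into the plain double arrays Ceres optimizes.
// para_Pose[i]      : [px, py, pz, qx, qy, qz, qw]                 (7 values)
// para_SpeedBias[i] : [vx, vy, vz, bax, bay, baz, bgx, bgy, bgz]   (9 values)
for (int i = 0; i <= WINDOW_SIZE; i++)
{
    para_Pose[i][0] = Ps[i].x();
    para_Pose[i][1] = Ps[i].y();
    para_Pose[i][2] = Ps[i].z();
    Quaterniond q(Rs[i]);     // rotation stored as a quaternion: x, y, z, w
    para_Pose[i][3] = q.x();
    para_Pose[i][4] = q.y();
    para_Pose[i][5] = q.z();
    para_Pose[i][6] = q.w();

    para_SpeedBias[i][0] = Vs[i].x();
    para_SpeedBias[i][1] = Vs[i].y();
    para_SpeedBias[i][2] = Vs[i].z();
    para_SpeedBias[i][3] = Bas[i].x();
    para_SpeedBias[i][4] = Bas[i].y();
    para_SpeedBias[i][5] = Bas[i].z();
    para_SpeedBias[i][6] = Bgs[i].x();
    para_SpeedBias[i][7] = Bgs[i].y();
    para_SpeedBias[i][8] = Bgs[i].z();
}
// para_Ex_Pose (extrinsics), para_Feature (inverse depths) and para_Td are filled the same way.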

f. Adding residuals: the marginalization (prior) residual

if (last_marginalization_info)
{
    MarginalizationFactor *marginalization_factor = new MarginalizationFactor(last_marginalization_info);
    problem.AddResidualBlock(marginalization_factor, NULL, last_marginalization_parameter_blocks);
}

Two new data structures appear here. On the first pass through this code there is no prior information yet, so this block is skipped; from the second pass onwards it runs. last_marginalization_info is assigned further down, in the marginalization part.

Data structure: last_marginalization_info
This class, MarginalizationInfo, is defined in marginalization_factor.h and is fairly involved.

class MarginalizationInfo
{
  public:
    ~MarginalizationInfo();
    int localSize(int size) const;
    int globalSize(int size) const;
    void addResidualBlockInfo(ResidualBlockInfo *residual_block_info); // add one residual block (its variables and which of them will be marginalized)
    void preMarginalize(); // evaluate each residual's jacobian and fill parameter_block_data
    void marginalize();    // build the Hessian and perform the Schur complement
    std::vector<double *> getParameterBlocks(std::unordered_map<long, double *> &addr_shift);

    std::vector<ResidualBlockInfo *> factors; // all residual (observation) terms involved in the marginalization
    int m, n; // m: total local size of the variables to be marginalized (those initially in parameter_block_idx); n: total local size of the variables to keep
    std::unordered_map<long, int> parameter_block_size; // <address of the block, global size>, e.g. [7, 9, 7, 9] for two pose/speed-bias pairs
    int sum_block_size;
    std::unordered_map<long, int> parameter_block_idx;  // <address of the block, local-size offset in the Hessian>; initially holds the blocks to be marginalized
    std::unordered_map<long, double *> parameter_block_data; // <address of the block, copy of its data>
    // All three maps are keyed by the block's memory address (cast to long); the values are,
    // respectively, the block's size, its offset in the Hessian, and a copy of its raw data.
    std::vector<int> keep_block_size; // global size
    std::vector<int> keep_block_idx;  // local size
    std::vector<double *> keep_block_data;
    // The keep_* members store, for the blocks that survive the marginalization, their sizes, offsets and data copies.
    Eigen::MatrixXd linearized_jacobians; // jacobian recovered from the prior information matrix after marginalization
    Eigen::VectorXd linearized_residuals; // residual recovered from the prior information matrix after marginalization
    const double eps = 1e-8;
};

The other new data structure is last_marginalization_parameter_blocks. It holds the state variables X that the prior matrix refers to.

Data structure: last_marginalization_parameter_blocks
    vector<double *> last_marginalization_parameter_blocks; // parameter blocks kept by the previous marginalization (the prior's variables)
It is assigned via vector<double *> parameter_blocks = marginalization_info->getParameterBlocks(addr_shift);

g. Adding residuals: IMU pre-integration residuals

for (int i = 0; i < WINDOW_SIZE; i++)
{
    int j = i + 1;
    if (pre_integrations[j]->sum_dt > 10.0) // skip pre-integrations that span too long a time
        continue;
    IMUFactor* imu_factor = new IMUFactor(pre_integrations[j]);
    problem.AddResidualBlock(imu_factor, NULL, para_Pose[i], para_SpeedBias[i], para_Pose[j], para_SpeedBias[j]);
}

h. Adding residuals: reprojection residuals
Note again that an IMU residual links two consecutive frames, whereas a visual residual does not (see 7.1.2-(2)). Looking at the code, each visual residual links two frames that observe the same feature: the first frame that saw it (imu_i) and one of the later frames that sees it again (imu_j). Also note that the Cauchy loss is applied only to these visual factors, while the IMU and prior factors were added with a NULL loss, presumably because feature matches can contain outliers but the IMU terms cannot.

int f_m_cnt = 0; // number of visual measurements used in the optimization
int feature_index = -1;
for (auto &it_per_id : f_manager.feature)
{ // iterate over every feature
    it_per_id.used_num = it_per_id.feature_per_frame.size();
    if (!(it_per_id.used_num >= 2 && it_per_id.start_frame < WINDOW_SIZE - 2))
        continue; // must be observed at least twice and first appear before the second-to-last frame

    ++feature_index; // index of this valid feature
    // frame that first observed the feature
    int imu_i = it_per_id.start_frame, imu_j = imu_i - 1;
    // normalized camera coordinates of the feature in that first frame
    Vector3d pts_i = it_per_id.feature_per_frame[0].point;

    for (auto &it_per_frame : it_per_id.feature_per_frame)
    { // iterate over every frame that observes this feature
        imu_j++;
        if (imu_i == imu_j)
            continue; // skip the first observation itself
        Vector3d pts_j = it_per_frame.point; // observation of the feature in frame imu_j
        if (ESTIMATE_TD) // with camera-IMU time offset estimation
        {
                ProjectionTdFactor *f_td = new ProjectionTdFactor(pts_i, pts_j, it_per_id.feature_per_frame[0].velocity, it_per_frame.velocity, it_per_id.feature_per_frame[0].cur_td, it_per_frame.cur_td, it_per_id.feature_per_frame[0].uv.y(), it_per_frame.uv.y());

                problem.AddResidualBlock(f_td, loss_function, para_Pose[imu_i], para_Pose[imu_j], para_Ex_Pose[0], para_Feature[feature_index], para_Td[0]);
        }
        else // without time offset estimation
        {
            ProjectionFactor *f = new ProjectionFactor(pts_i, pts_j);
            problem.AddResidualBlock(f, loss_function, para_Pose[imu_i], para_Pose[imu_j], para_Ex_Pose[0], para_Feature[feature_index]);
        }
        f_m_cnt++;
    }
}
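
A side note on why the feature velocities and uv.y() are passed into ProjectionTdFactor: the factor shifts each observation along its image-plane velocity to compensate for the camera-IMU time offset (the uv.y() term additionally compensates rolling shutter in the original implementation). Ignoring the rolling-shutter part, the compensated observations are roughly

$$\tilde{p}_i = p_i - (t_d - t_{d,i})\,v_i, \qquad \tilde{p}_j = p_j - (t_d - t_{d,j})\,v_j,$$

where $t_d$ is the offset being estimated, $t_{d,i}$ is the offset assumed when frame $i$ was processed, and $v_i$ is the feature's velocity on the normalized image plane; the reprojection error is then built from $\tilde{p}_i$ and $\tilde{p}_j$.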

i. Adding residuals: loop-closure (relocalization) residuals
When relocalization information is available, optimization() also adds the relocalization pose and the corresponding reprojection factors; that code is omitted here.

j. Solving

ceres::Solver::Options options;
options.linear_solver_type = ceres::DENSE_SCHUR;
options.trust_region_strategy_type = ceres::DOGLEG;
options.max_num_iterations = NUM_ITERATIONS;
if (marginalization_flag == MARGIN_OLD)
    options.max_solver_time_in_seconds = SOLVER_TIME * 4.0 / 5.0; // reduced budget, since marginalizing the old frame afterwards also takes time
else
    options.max_solver_time_in_seconds = SOLVER_TIME;
ceres::Solver::Summary summary;
ceres::Solve(options, &problem, &summary);

k. double2vector()
This writes the optimized double arrays back into the Eigen states. Two values deserve attention:

Vector3d origin_R0 = Utility::R2ypr(Rs[0]);
Vector3d origin_P0 = Ps[0];

They record the yaw and position of the first frame before the update. Yaw and global translation are unobservable for a visual-inertial system, so the optimizer is free to drift in these four degrees of freedom; double2vector() therefore rotates and shifts the optimized window so that the first frame keeps its original yaw and position, which keeps the states and the prior consistent across rounds.
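
A minimal sketch of that alignment step (paraphrased, not the exact VINS-Mono code; the real double2vector() also writes back velocities, biases, extrinsics, inverse depths and td, and handles a yaw-singularity case):

// Sketch: keep the first frame's yaw and position fixed after optimization.
Vector3d optimized_R0 = Utility::R2ypr(
    Quaterniond(para_Pose[0][6], para_Pose[0][3], para_Pose[0][4], para_Pose[0][5]).toRotationMatrix());
double y_diff = origin_R0.x() - optimized_R0.x();           // yaw drift introduced by the solver
Matrix3d rot_diff = Utility::ypr2R(Vector3d(y_diff, 0, 0)); // undo it with a rotation about gravity only

for (int i = 0; i <= WINDOW_SIZE; i++)
{
    // rotate every pose back by the yaw drift and re-anchor positions at origin_P0
    Rs[i] = rot_diff * Quaterniond(para_Pose[i][6], para_Pose[i][3],
                                   para_Pose[i][4], para_Pose[i][5]).normalized().toRotationMatrix();
    Ps[i] = rot_diff * Vector3d(para_Pose[i][0] - para_Pose[0][0],
                                para_Pose[i][1] - para_Pose[0][1],
                                para_Pose[i][2] - para_Pose[0][2]) + origin_P0;
    Vs[i] = rot_diff * Vector3d(para_SpeedBias[i][0], para_SpeedBias[i][1], para_SpeedBias[i][2]);
}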

This completes part one. I still have not fully worked out why the landmark inverse depths are not added explicitly. (One likely reason: Ceres automatically creates a parameter block for any pointer that first shows up in AddResidualBlock(), and the 1-dimensional inverse depth para_Feature[...] needs no LocalParameterization, so an explicit AddParameterBlock() call is simply not required.)

(2) Marginalization
Part two: marginalization. This part only builds the new prior; it does not solve anything itself. Solving is left to part one of the next optimization round. This is the hardest part to understand.
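
As a reminder of what marginalize() computes (this is the standard Schur complement, written out here for reference rather than copied from the repo): split the states into the blocks to be marginalized, $x_m$, and the blocks to be kept, $x_r$, so that the normal equations read

$$
\begin{bmatrix} H_{mm} & H_{mr} \\ H_{rm} & H_{rr} \end{bmatrix}
\begin{bmatrix} \delta x_m \\ \delta x_r \end{bmatrix}
=
\begin{bmatrix} b_m \\ b_r \end{bmatrix}.
$$

Eliminating $\delta x_m$ leaves a prior on the kept states,

$$
H^{*} = H_{rr} - H_{rm} H_{mm}^{-1} H_{mr}, \qquad
b^{*} = b_r - H_{rm} H_{mm}^{-1} b_m,
$$

and linearized_jacobians / linearized_residuals are recovered from $H^{*}$ and $b^{*}$ by an eigen-decomposition so that the prior can be handed back to Ceres as an ordinary residual block in the next round.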

l. marg_old
1) First, bring in what is left of the previous prior:
Since we are about to marginalize and construct a new prior H matrix, the information left over from the old prior has to be folded in first.

if (marginalization_flag == MARGIN_OLD)
{
    MarginalizationInfo *marginalization_info = new MarginalizationInfo();
    vector2double();
    // The prior is carried along round after round, not used only once.
    // If a previous marginalization result exists, its factor is added first.
    // The blocks dropped this round are para_Pose[0], para_SpeedBias[0] and the
    // inverse depths para_Feature[feature_index] of features that start in frame 0.
    if (last_marginalization_info)
    {
        vector<int> drop_set;
        for (int i = 0; i < static_cast<int>(last_marginalization_parameter_blocks.size()); i++)
        { // find which entries of last_marginalization_parameter_blocks belong to the oldest frame
            if (last_marginalization_parameter_blocks[i] == para_Pose[0] ||
                last_marginalization_parameter_blocks[i] == para_SpeedBias[0])
                drop_set.push_back(i);
        }
        // construct the new marginalization factor from the old prior
        MarginalizationFactor *marginalization_factor = new MarginalizationFactor(last_marginalization_info);
        ResidualBlockInfo *residual_block_info = new ResidualBlockInfo(marginalization_factor, NULL, last_marginalization_parameter_blocks, drop_set); // add the previous prior as one residual block
        marginalization_info->addResidualBlockInfo(residual_block_info);
    }

In this snippet, drop_set records the positions of the oldest frame's pose and speed-bias blocks inside last_marginalization_parameter_blocks, so those parts of the old prior are marked for marginalization rather than kept.
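
For reference, ResidualBlockInfo (declared alongside MarginalizationInfo in marginalization_factor.h; the listing below is abridged, so check the header for the exact members) is just a bundle of one cost function, its parameter blocks, and the indices of the blocks that will be marginalized:

struct ResidualBlockInfo
{
    ResidualBlockInfo(ceres::CostFunction *_cost_function, ceres::LossFunction *_loss_function,
                      std::vector<double *> _parameter_blocks, std::vector<int> _drop_set)
        : cost_function(_cost_function), loss_function(_loss_function),
          parameter_blocks(_parameter_blocks), drop_set(_drop_set) {}

    void Evaluate(); // evaluate the residual and jacobians, applying the loss function if there is one

    ceres::CostFunction *cost_function;     // the factor itself (prior, IMU or projection)
    ceres::LossFunction *loss_function;     // robust kernel, or NULL
    std::vector<double *> parameter_blocks; // every parameter block this factor touches
    std::vector<int> drop_set;              // indices into parameter_blocks of the blocks to marginalize

    std::vector<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> jacobians;
    Eigen::VectorXd residuals;
};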

2) Next, add the IMU information that is about to be marginalized:

if (pre_integrations[1]->sum_dt < 10.0)
{
    IMUFactor* imu_factor = new IMUFactor(pre_integrations[1]);

    // drop_set {0, 1}: marginalize the 0th and 1st blocks of this factor,
    // i.e. para_Pose[0] and para_SpeedBias[0]
    ResidualBlockInfo *residual_block_info = new ResidualBlockInfo(imu_factor, NULL, vector<double *>{para_Pose[0], para_SpeedBias[0], para_Pose[1], para_SpeedBias[1]}, vector<int>{0, 1});

    marginalization_info->addResidualBlockInfo(residual_block_info);
}

Clearly, it is the state of frame 0 that gets marginalized.

3) Then add the visual information to be marginalized:

{ // add the visual factors: only features whose first observation is in the oldest frame and that are observed at least twice
    int feature_index = -1;
    for (auto &it_per_id : f_manager.feature) // iterate over all features in the window
    {
        it_per_id.used_num = it_per_id.feature_per_frame.size(); // number of frames observing this feature
        if (!(it_per_id.used_num >= 2 && it_per_id.start_frame < WINDOW_SIZE - 2))
            continue; // must be observed at least twice and not start in the last two frames

        ++feature_index;

        int imu_i = it_per_id.start_frame, imu_j = imu_i - 1;
        if (imu_i != 0) // only features that start in the frame being marginalized
            continue;
        // normalized coordinates of the feature in its starting frame
        Vector3d pts_i = it_per_id.feature_per_frame[0].point;

        for (auto &it_per_frame : it_per_id.feature_per_frame)
        {
            imu_j++;
            if (imu_i == imu_j) // skip the starting observation itself
                continue;

            Vector3d pts_j = it_per_frame.point;
            if (ESTIMATE_TD)
            {
                ProjectionTdFactor *f_td = new ProjectionTdFactor(pts_i, pts_j, it_per_id.feature_per_frame[0].velocity, it_per_frame.velocity, it_per_id.feature_per_frame[0].cur_td, it_per_frame.cur_td, it_per_id.feature_per_frame[0].uv.y(), it_per_frame.uv.y());

                // drop_set {0, 3}: marginalize para_Pose[imu_i] (= para_Pose[0]) and para_Feature[feature_index]
                ResidualBlockInfo *residual_block_info = new ResidualBlockInfo(f_td, loss_function, vector<double *>{para_Pose[imu_i], para_Pose[imu_j], para_Ex_Pose[0], para_Feature[feature_index], para_Td[0]}, vector<int>{0, 3});

                marginalization_info->addResidualBlockInfo(residual_block_info);
            }
            else
            {
                ProjectionFactor *f = new ProjectionFactor(pts_i, pts_j);

                ResidualBlockInfo *residual_block_info = new ResidualBlockInfo(f, loss_function, vector<double *>{para_Pose[imu_i], para_Pose[imu_j], para_Ex_Pose[0], para_Feature[feature_index]}, vector<int>{0, 3});

                marginalization_info->addResidualBlockInfo(residual_block_info);
            }
        }
    }
}

In other words, every feature whose first observation is in frame 0 is folded into the prior: its inverse depth is marginalized together with frame 0 instead of being kept as an explicit state.

4) Combine the parameter blocks of the three kinds of ResidualBlockInfo into marginalization_info
preMarginalize() computes the residual and jacobian of every ResidualBlock and fills parameter_block_data, the container holding copies of the parameter blocks; marginalize() then performs the actual marginalization.

        marginalization_info->preMarginalize(); //<--1 evaluate all residuals/jacobians and copy the block data

        marginalization_info->marginalize();    //<--2 build the Hessian and apply the Schur complement

        //<--3 addr_shift maps each kept block to the slot it will occupy after slideWindow():
        //     frame i moves down to slot i-1 because frame 0 is being removed
        std::unordered_map<long, double *> addr_shift;
        for (int i = 1; i <= WINDOW_SIZE; i++)
        {
            addr_shift[reinterpret_cast<long>(para_Pose[i])] = para_Pose[i - 1];
            addr_shift[reinterpret_cast<long>(para_SpeedBias[i])] = para_SpeedBias[i - 1];
        }
        for (int i = 0; i < NUM_OF_CAM; i++) //<--4 the extrinsics keep their slot
            addr_shift[reinterpret_cast<long>(para_Ex_Pose[i])] = para_Ex_Pose[i];
        if (ESTIMATE_TD) //<--5 so does the time offset
        {
            addr_shift[reinterpret_cast<long>(para_Td[0])] = para_Td[0];
        }
        //<--6 the kept parameter blocks, expressed at their new (shifted) addresses
        vector<double*> parameter_blocks = marginalization_info->getParameterBlocks(addr_shift);

        if (last_marginalization_info) //<--7 free the old prior
            delete last_marginalization_info;
        last_marginalization_info = marginalization_info; //<--8 store the new prior for the next round
        last_marginalization_parameter_blocks = parameter_blocks;
    }

This completes marg_old.

m. marg_new
If the second newest frame is not a keyframe, its visual measurements are discarded (marginalized) while its IMU measurements are kept in the sliding window; the remaining steps mirror marg_old.

else // the second newest frame is not a keyframe: drop its visual measurements, keep its IMU measurements
{
    if (last_marginalization_info &&
        std::count(std::begin(last_marginalization_parameter_blocks), std::end(last_marginalization_parameter_blocks), para_Pose[WINDOW_SIZE - 1]))
    { // only needed if the existing prior actually involves the second newest frame's pose

        MarginalizationInfo *marginalization_info = new MarginalizationInfo();
        vector2double();
        if (last_marginalization_info)
        {
            vector<int> drop_set;
            for (int i = 0; i < static_cast<int>(last_marginalization_parameter_blocks.size()); i++)
            {
                if (last_marginalization_parameter_blocks[i] == para_Pose[WINDOW_SIZE - 1])
                    drop_set.push_back(i);
            }
            // construct the new marginalization factor
            MarginalizationFactor *marginalization_factor = new MarginalizationFactor(last_marginalization_info);

            ResidualBlockInfo *residual_block_info = new ResidualBlockInfo(marginalization_factor, NULL, last_marginalization_parameter_blocks, drop_set);

            marginalization_info->addResidualBlockInfo(residual_block_info);
        }

        marginalization_info->preMarginalize();
        marginalization_info->marginalize();
        
        std::unordered_map<long, double *> addr_shift;
        for (int i = 0; i <= WINDOW_SIZE; i++)
        {
            if (i == WINDOW_SIZE - 1) // the second newest frame is dropped, so it gets no new slot
                continue;
            else if (i == WINDOW_SIZE) // the newest frame slides into the dropped frame's slot
            {
                addr_shift[reinterpret_cast<long>(para_Pose[i])] = para_Pose[i - 1];
                addr_shift[reinterpret_cast<long>(para_SpeedBias[i])] = para_SpeedBias[i - 1];
            }
            else // all other frames keep their slots
            {
                addr_shift[reinterpret_cast<long>(para_Pose[i])] = para_Pose[i];
                addr_shift[reinterpret_cast<long>(para_SpeedBias[i])] = para_SpeedBias[i];
            }
        }
        for (int i = 0; i < NUM_OF_CAM; i++)
            addr_shift[reinterpret_cast<long>(para_Ex_Pose[i])] = para_Ex_Pose[i];
        if (ESTIMATE_TD)
        {
            addr_shift[reinterpret_cast<long>(para_Td[0])] = para_Td[0];
        }
        
        vector<double *> parameter_blocks = marginalization_info->getParameterBlocks(addr_shift);
        if (last_marginalization_info)
            delete last_marginalization_info;
        last_marginalization_info = marginalization_info;
        last_marginalization_parameter_blocks = parameter_blocks;           
    }
}

This completes optimization().

7.2.2 failureDetection()

bool Estimator::failureDetection()
{
    if (f_manager.last_track_num < 2)
    {
        // too few tracked features; this check is disabled in the released code
        //return true;
    }
    if (Bas[WINDOW_SIZE].norm() > 2.5) // accelerometer bias too large
        return true;

    if (Bgs[WINDOW_SIZE].norm() > 1.0) // gyroscope bias too large
        return true;

    Vector3d tmp_P = Ps[WINDOW_SIZE];
    if ((tmp_P - last_P).norm() > 5) // position jumped by more than 5 m between updates
        return true;

    if (abs(tmp_P.z() - last_P.z()) > 1) // vertical jump larger than 1 m
        return true;

    Matrix3d tmp_R = Rs[WINDOW_SIZE];
    Matrix3d delta_R = tmp_R.transpose() * last_R;
    Quaterniond delta_Q(delta_R);
    double delta_angle;
    delta_angle = acos(delta_Q.w()) * 2.0 / 3.14 * 180.0; // relative rotation in degrees
    if (delta_angle > 50)
    {
        // rotation jumped by more than 50 degrees; this check is also disabled
        //return true;
    }
    return false;
}

7.2.3 slideWindow()
See 8.2.

7.2.4 f_manager.removeFailures()

void FeatureManager::removeFailures()
{
    for (auto it = feature.begin(), it_next = feature.begin();
         it != feature.end(); it = it_next)
    {
        it_next++;
        if (it->solve_flag == 2) // solve_flag == 2: the feature's solved depth was invalid (negative)
            feature.erase(it);
    }
}