Analysis of the Fusion Component in Apollo 6.0 (fusion_component)

The fusion component (perception/onboard/component/fusion_component.cc) calls the Init and Process methods of app/obstacle_multi_sensor_fusion.cc to carry out the entire fusion pipeline.
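The call chain can be summarized with the sketch below. It is reconstructed from the code quoted later in this article; the base-class template, the smart-pointer types, and the omitted headers are assumptions, not a copy of the upstream fusion_component.h.

// Sketch of the call chain (not the upstream header; member types are assumed).
class FusionComponent : public cyber::Component<SensorFrameMessage> {
 public:
  bool Init() override;   // reads FusionComponentConfig and calls fusion_->Init(param)
  bool Proc(const std::shared_ptr<SensorFrameMessage>& message) override;
      // calls InternalProc(), which calls fusion_->Process(frame, &fused_objects)

 private:
  std::unique_ptr<fusion::ObstacleMultiSensorFusion> fusion_;                // fusion plugin
  std::shared_ptr<apollo::cyber::Writer<PerceptionObstacles>> writer_;       // fused output channel
  std::shared_ptr<apollo::cyber::Writer<SensorFrameMessage>> inner_writer_;  // visualization channel
};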

1. Flowcharts / Architecture

InternalProc flowchart

1. Does the incoming message carry an ErrorCode? If so, serialize an empty obstacle message with that error code and return.
2. Run the main fusion logic. If this frame is not from the main sensor, return here.
3. Use the HD map to filter out obstacles that are not of interest.
4. Build the visualization data and the obstacle output data.
5. Compute the latency and log it.

Flowchart of the HD-map obstacle filtering logic

3.1 Get the main-sensor-to-map (world) transform.
3.2 If the HD map is available and obstacle filtering is enabled, transform the ego position into the map coordinate system; otherwise the obstacles are passed through unchanged.
3.3 Fetch the HD map around the ego vehicle.
3.4 Update the filtered obstacles (this line is commented out in the source, so no filtering actually happens).

2. Method Walkthrough

  1. The Init method:
bool FusionComponent::Init() {
  FusionComponentConfig comp_config;
  if (!GetProtoConfig(&comp_config)) {
    return false;
  }
  AINFO << "Fusion Component Configs: " << comp_config.DebugString();

  // to load component configs
  fusion_method_ = comp_config.fusion_method();   // fusion algorithm name
  fusion_main_sensor_ = comp_config.fusion_main_sensor(); // main sensor that drives publishing
  object_in_roi_check_ = comp_config.object_in_roi_check(); // whether to filter obstacles against the HD-map ROI
  radius_for_roi_object_check_ = comp_config.radius_for_roi_object_check(); // radius of the HD-map ROI used for that check

  // init algorithm plugin
  ACHECK(InitAlgorithmPlugin()) << "Failed to init algorithm plugin."; // initializes the fusion plugin and the HD map
  writer_ = node_->CreateWriter<PerceptionObstacles>(
      comp_config.output_obstacles_channel_name());  // publishes the fused PerceptionObstacles
  inner_writer_ = node_->CreateWriter<SensorFrameMessage>(
      comp_config.output_viz_fused_content_channel_name()); // publishes SensorFrameMessage for visualization
  return true;
}

The InitAlgorithmPlugin method is shown below:

bool FusionComponent::InitAlgorithmPlugin() {
  fusion_.reset(new fusion::ObstacleMultiSensorFusion());
  fusion::ObstacleMultiSensorFusionParam param;
  param.main_sensor = fusion_main_sensor_;
  param.fusion_method = fusion_method_;
  ACHECK(fusion_->Init(param)) << "Failed to init ObstacleMultiSensorFusion"; // initialize the fusion plugin

  if (FLAGS_obs_enable_hdmap_input && object_in_roi_check_) { // only when HD-map input is enabled and the ROI check is requested
    hdmap_input_ = map::HDMapInput::Instance();
    ACHECK(hdmap_input_->Init()) << "Failed to init hdmap input."; // initialize the HD map singleton
  }
  AINFO << "Init algorithm successfully, onboard fusion: " << fusion_method_;
  return true;
}
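For reference, the interface of ObstacleMultiSensorFusion as it is used by this component can be sketched as follows. This is reconstructed from the call sites in this article, not copied from the upstream header, so the exact signatures are assumptions.

// Sketch of the plugin interface, inferred from how the component calls it.
namespace fusion {

struct ObstacleMultiSensorFusionParam {
  std::string main_sensor;    // name of the main sensor (typically the lidar)
  std::string fusion_method;  // name of the concrete fusion algorithm to use
};

class ObstacleMultiSensorFusion {
 public:
  // resolves and initializes the fusion algorithm named in param
  bool Init(const ObstacleMultiSensorFusionParam& param);
  // fuses one per-sensor frame into the global fused object list
  bool Process(const base::FrameConstPtr& frame,
               std::vector<base::ObjectPtr>* objects);
};

}  // namespace fusion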
  2. The Proc method:
bool FusionComponent::Proc(const std::shared_ptr<SensorFrameMessage>& message) {
  if (message->process_stage_ == ProcessStage::SENSOR_FUSION) { // message has already been through sensor fusion; skip it
    return true;
  }
  std::shared_ptr<PerceptionObstacles> out_message(new (std::nothrow)
                                                       PerceptionObstacles);
  std::shared_ptr<SensorFrameMessage> viz_message(new (std::nothrow)
                                                      SensorFrameMessage);
  bool status = InternalProc(message, out_message, viz_message);  // detailed below
  if (status) {
    // TODO(conver sensor id)
    if (message->sensor_id_ != fusion_main_sensor_) {      // only main-sensor frames are published
      AINFO << "Fusion receive from " << message->sensor_id_ << "not from "
            << fusion_main_sensor_ << ". Skip send.";
    } else {
      // Send("/apollo/perception/obstacles", out_message);
      writer_->Write(out_message);                        // publish the fused obstacle message
      AINFO << "Send fusion processing output message.";
      // send msg for visualization
      if (FLAGS_obs_enable_visualization) { 
        // Send("/apollo/perception/inner/PrefusedObjects", viz_message);
        inner_writer_->Write(viz_message);
      }
    }
  }
  return status;
}
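Only main-sensor frames are actually published. A downstream consumer would read the fused result from the configured obstacles channel roughly as sketched below; the channel name "/apollo/perception/obstacles" is taken from the commented-out Send call above, while the node name and the proto header path are assumptions based on Apollo 6.0's layout.

#include "cyber/common/log.h"
#include "cyber/cyber.h"
#include "modules/perception/proto/perception_obstacle.pb.h"

int main(int argc, char* argv[]) {
  apollo::cyber::Init(argv[0]);
  auto node = apollo::cyber::CreateNode("fusion_consumer_demo");  // example node name
  auto reader = node->CreateReader<apollo::perception::PerceptionObstacles>(
      "/apollo/perception/obstacles",  // output_obstacles_channel_name in the component config
      [](const std::shared_ptr<apollo::perception::PerceptionObstacles>& msg) {
        AINFO << "Received " << msg->perception_obstacle_size()
              << " fused obstacles at " << msg->header().timestamp_sec();
      });
  apollo::cyber::WaitForShutdown();
  return 0;
}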

Proc in turn calls the InternalProc method:

bool FusionComponent::InternalProc(
    const std::shared_ptr<SensorFrameMessage const>& in_message,
    std::shared_ptr<PerceptionObstacles> out_message,
    std::shared_ptr<SensorFrameMessage> viz_message) {
  {
    std::unique_lock<std::mutex> lock(s_mutex_);  // guard the shared sequence counter
    s_seq_num_++;
  }

  PERF_BLOCK_START();
  const double timestamp = in_message->timestamp_;  // message timestamp
  const uint64_t lidar_timestamp = in_message->lidar_timestamp_; // lidar timestamp
  std::vector<base::ObjectPtr> valid_objects;
  if (in_message->error_code_ != apollo::common::ErrorCode::OK) {  // 1. the upstream message carries an error
    if (!MsgSerializer::SerializeMsg(               // 1.1 serialize an empty PerceptionObstacles carrying the error code
            timestamp, lidar_timestamp, in_message->seq_num_, valid_objects,
            in_message->error_code_, out_message.get())) {
      AERROR << "Failed to gen PerceptionObstacles object.";
      return false;
    }
    if (FLAGS_obs_enable_visualization) {          // 1.2 mark the visualization message with the error
      viz_message->process_stage_ = ProcessStage::SENSOR_FUSION;
      viz_message->error_code_ = in_message->error_code_;
    }
    AERROR << "Fusion receive message with error code, skip it.";
    return true;
  }
  base::FramePtr frame = in_message->frame_; // extract the Frame from the SensorFrameMessage
  frame->timestamp = in_message->timestamp_; // update the timestamp; why here? in_message->timestamp_ does not appear to change dynamically

  std::vector<base::ObjectPtr> fused_objects;
  if (!fusion_->Process(frame, &fused_objects)) { // 2. run the multi-sensor fusion
    AERROR << "Failed to call fusion plugin.";
    return false;
  }
  PERF_BLOCK_END_WITH_INDICATOR("fusion_process", in_message->sensor_id_); // fusion finished

  if (in_message->sensor_id_ != fusion_main_sensor_) { // 3. non-main-sensor frames are fused but not published; return here
    return true;
  }

  Eigen::Matrix4d sensor2world_pose =
      in_message->frame_->sensor2world_pose.matrix(); // 4. sensor-to-world pose matrix of the main-sensor frame
  if (object_in_roi_check_ && FLAGS_obs_enable_hdmap_input) { // 5. HD-map input enabled and ROI filtering requested
    // get hdmap
    base::HdmapStructPtr hdmap(new base::HdmapStruct());
    if (hdmap_input_) {                     // 6. the HD map has been loaded
      base::PointD position;
      position.x = sensor2world_pose(0, 3);  // 6.1 position of the main sensor in the world (map) frame
      position.y = sensor2world_pose(1, 3);
      position.z = sensor2world_pose(2, 3);
      hdmap_input_->GetRoiHDMapStruct(position, radius_for_roi_object_check_, // 6.2 fetch the HD map around the main sensor
                                      hdmap);
      // TODO(use check)
      // ObjectInRoiSlackCheck(hdmap, fused_objects, &valid_objects); // 6.3 the ROI check/filter is not implemented (commented out)
      valid_objects.assign(fused_objects.begin(), fused_objects.end());
    } else {
      valid_objects.assign(fused_objects.begin(), fused_objects.end());
    }
  } else {
    valid_objects.assign(fused_objects.begin(), fused_objects.end());   // 7. copy the fused objects into valid_objects
  }
  PERF_BLOCK_END_WITH_INDICATOR("fusion_roi_check", in_message->sensor_id_); // HD-map ROI check finished

  // produce visualization msg
  if (FLAGS_obs_enable_visualization) {                   // 8. fill the visualization message
    viz_message->timestamp_ = in_message->timestamp_;
    viz_message->seq_num_ = in_message->seq_num_;
    viz_message->frame_ = base::FramePool::Instance().Get(); // 8.1 get a reusable frame from the base::FramePool object pool
    viz_message->frame_->sensor2world_pose =
        in_message->frame_->sensor2world_pose;
    viz_message->sensor_id_ = in_message->sensor_id_;
    viz_message->hdmap_ = in_message->hdmap_;
    viz_message->process_stage_ = ProcessStage::SENSOR_FUSION; // 8.2 mark the processing stage as SENSOR_FUSION
    viz_message->error_code_ = in_message->error_code_;
    viz_message->frame_->objects = fused_objects;              // 8.3 attach the fused objects
  }
  // produce pb output msg
  apollo::common::ErrorCode error_code = apollo::common::ErrorCode::OK;
  if (!MsgSerializer::SerializeMsg(timestamp, lidar_timestamp,          // 9. serialize valid_objects into the output protobuf
                                   in_message->seq_num_, valid_objects,
                                   error_code, out_message.get())) {
    AERROR << "Failed to gen PerceptionObstacles object.";
    return false;
  }
  PERF_BLOCK_END_WITH_INDICATOR("fusion_serialize_message",     // serialization finished
                                in_message->sensor_id_);

  const double cur_time = ::apollo::cyber::Clock::NowInSeconds(); // 10. get the current time, compute the latency, and log timing and object count
  const double latency = (cur_time - timestamp) * 1e3;
  AINFO << std::setprecision(16) << "FRAME_STATISTICS:Obstacle:End:msg_time["
        << timestamp << "]:cur_time[" << cur_time << "]:cur_latency[" << latency
        << "]:obj_cnt[" << valid_objects.size() << "]";
  AINFO << "publish_number: " << valid_objects.size() << " obj";
  return true;
}
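Because ObjectInRoiSlackCheck is commented out, valid_objects is currently just a copy of fused_objects even when the HD map is available. A filter at that spot could look roughly like the sketch below; IsPointInRoi is a hypothetical helper (not an existing Apollo function), and only obj->center is taken from base::Object.

// Hypothetical ROI filter: keep only objects whose center lies inside the
// HD-map region of interest. IsPointInRoi stands in for an actual
// point-in-polygon test against the road/junction polygons in hdmap.
void FilterObjectsByRoi(const base::HdmapStructConstPtr& hdmap,
                        const std::vector<base::ObjectPtr>& fused_objects,
                        std::vector<base::ObjectPtr>* valid_objects) {
  valid_objects->clear();
  valid_objects->reserve(fused_objects.size());
  for (const auto& obj : fused_objects) {
    if (IsPointInRoi(hdmap, obj->center)) {  // hypothetical helper
      valid_objects->push_back(obj);
    }
  }
}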

3. Key Data Structures

The input type of InternalProc is SensorFrameMessage, defined in perception/onboard/inner_component_messages/inner_component_messages.h:

class SensorFrameMessage {
 public:
  SensorFrameMessage() { type_name_ = "SensorFrameMessage"; }
  ~SensorFrameMessage() = default;
  std::string GetTypeName() { return type_name_; }
  SensorFrameMessage* New() const { return new SensorFrameMessage; }

 public:
  apollo::common::ErrorCode error_code_ = apollo::common::ErrorCode::OK; // error code, used for functional safety

  std::string sensor_id_; // sensor id
  double timestamp_ = 0.0; // message timestamp
  uint64_t lidar_timestamp_ = 0; // lidar timestamp (presumably kept separately because the main sensor is a hardware-triggered lidar)
  uint32_t seq_num_ = 0; // frame sequence number
  std::string type_name_; // type name
  base::HdmapStructConstPtr hdmap_; // HD map data

  base::FramePtr frame_; // frame data (/perception/base/frame.h)

  ProcessStage process_stage_ = ProcessStage::UNKNOWN_STAGE; // processing stage; see the enum below
};

enum class ProcessStage {
  LIDAR_PREPROCESS = 0,
  LIDAR_SEGMENTATION = 1,
  LIDAR_RECOGNITION = 2,
  STEREO_CAMERA_DETECTION = 3,
  MONOCULAR_CAMERA_DETECTION = 4,
  LONG_RANGE_RADAR_DETECTION = 5,
  SHORT_RANGE_RADAR_DETECTION = 6,
  ULTRASONIC_DETECTION = 7,
  SENSOR_FUSION = 8,
  UNKNOWN_STAGE = 9,
  PROCESSSTAGE_COUNT = 10,
  LIDAR_DETECTION = 11
};
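To put these fields in context, here is a sketch of how an upstream detection component might fill a SensorFrameMessage before it reaches fusion. The field names come from the class above; the function, its parameters, the example sensor name, and the Eigen::Affine3d pose type are hypothetical, chosen to match the .matrix() call seen in InternalProc.

// Hypothetical helper that packs one lidar detection result into a SensorFrameMessage.
std::shared_ptr<SensorFrameMessage> BuildLidarMessage(
    double timestamp, uint64_t lidar_timestamp, uint32_t seq_num,
    const Eigen::Affine3d& sensor2world_pose,
    const std::vector<base::ObjectPtr>& detected_objects) {
  auto message = std::make_shared<SensorFrameMessage>();
  message->timestamp_ = timestamp;                 // measurement time in seconds
  message->lidar_timestamp_ = lidar_timestamp;     // raw lidar timestamp
  message->seq_num_ = seq_num;                     // running frame counter
  message->sensor_id_ = "velodyne128";             // example value; must equal fusion_main_sensor_ to be published
  message->error_code_ = apollo::common::ErrorCode::OK;
  message->process_stage_ = ProcessStage::LIDAR_RECOGNITION;
  message->frame_ = base::FramePool::Instance().Get();  // reusable frame from the object pool
  message->frame_->timestamp = timestamp;
  message->frame_->sensor2world_pose = sensor2world_pose;
  message->frame_->objects = detected_objects;     // per-sensor detections to be fused
  return message;
}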

The first output parameter of InternalProc is a PerceptionObstacles protobuf message, which is filled by MsgSerializer in /perception/onboard/msg_serializer/msg_serializer.cc.
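From the call site in InternalProc, SerializeMsg has to fill the header, the error code, and one PerceptionObstacle per fused object. A rough sketch of that responsibility is shown below; it is not the upstream implementation, and FillObjectPb is a hypothetical stand-in for the real per-object conversion.

// Rough sketch of the serialization step, inferred from the call
// MsgSerializer::SerializeMsg(timestamp, lidar_timestamp, seq_num,
//                             valid_objects, error_code, out_message.get()).
bool SerializeMsgSketch(double timestamp, uint64_t lidar_timestamp,
                        uint32_t seq_num,
                        const std::vector<base::ObjectPtr>& objects,
                        apollo::common::ErrorCode error_code,
                        PerceptionObstacles* out) {
  auto* header = out->mutable_header();
  header->set_timestamp_sec(timestamp);         // time of the fused frame
  header->set_module_name("perception_obstacle");
  header->set_sequence_num(seq_num);
  header->set_lidar_timestamp(lidar_timestamp);
  out->set_error_code(error_code);
  for (const auto& obj : objects) {
    if (!FillObjectPb(*obj, out->add_perception_obstacle())) {  // hypothetical converter
      return false;
    }
  }
  return true;
}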

On the DreamView side, the fused obstacles are rendered by /apollo/modules/dreamview/frontend/src/renderer/obstacles.js; its constructor is shown below:

export default class PerceptionObstacles {
  constructor() {          // only the constructor is shown here
    this.textRender = new Text3D();
    this.arrows = []; // for indication of direction of moving obstacles
    this.ids = []; // for obstacle id labels
    this.solidCubes = []; // for obstacles with only length/width/height
    this.dashedCubes = []; // for obstacles with only length/width/height
    this.extrusionSolidFaces = []; // for obstacles with polygon points
    this.extrusionDashedFaces = []; // for obstacles with polygon points
    this.laneMarkers = []; // for lane markers
    this.icons = [];
    this.trafficCones = []; // for traffic cone meshes
    this.v2xCubes = [];
    this.v2xSolidFaces = [];

    this.arrowIdx = 0;
    this.cubeIdx = 0;
    this.extrusionFaceIdx = 0;
    this.iconIdx = 0;
    this.trafficConeIdx = 0;
    this.v2xCubeIdx = 0;
    this.v2xSolidFaceIdx = 0;
  }
}