A Summary of Common Struct Definitions in the Apollo 6.0 Fusion Module
The meanings given here are the author's guesses based on the code, and the post is updated continuously.
Common struct definitions and their meanings
Struct Frame
File link: frame.h
struct alignas(16) Frame {
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
Frame() { sensor2world_pose.setIdentity(); }
void Reset() {
timestamp = 0.0;
objects.clear();
sensor2world_pose.setIdentity();
sensor_info.Reset();
lidar_frame_supplement.Reset();
radar_frame_supplement.Reset();
camera_frame_supplement.Reset();
}
// @brief sensor information
SensorInfo sensor_info; // sensor information
double timestamp = 0.0; // frame timestamp
std::vector<std::shared_ptr<Object>> objects; // obstacles
Eigen::Affine3d sensor2world_pose; // time-varying sensor-to-world affine transform (possibly related to online calibration)
// sensor-specific frame supplements
LidarFrameSupplement lidar_frame_supplement; // each supplement mainly holds an on/off flag and a pointer to the raw sensor data
RadarFrameSupplement radar_frame_supplement;
CameraFrameSupplement camera_frame_supplement;
UltrasonicFrameSupplement ultrasonic_frame_supplement;
};
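As a side note, here is a minimal sketch (my own, not from frame.h) of how the sensor2world_pose can be used: it maps a point expressed in the sensor frame, such as an object's center, into world coordinates.
#include "Eigen/Geometry"

// Illustrative only: transform a detected object's center from the sensor
// frame into the world frame using the Frame's sensor2world_pose.
Eigen::Vector3d SensorCenterToWorld(const Eigen::Affine3d& sensor2world_pose,
                                    const Eigen::Vector3d& center_in_sensor) {
  // Affine3d applies the rotation and translation in one step.
  return sensor2world_pose * center_in_sensor;
}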
Struct SensorInfo
File link: sensor_meta.h
struct SensorInfo {
std::string name = "UNKNONW_SENSOR"; // sensor name
SensorType type = SensorType::UNKNOWN_SENSOR_TYPE; // sensor type
SensorOrientation orientation = SensorOrientation::FRONT; // sensor mounting orientation
std::string frame_id = "UNKNOWN_FRAME_ID"; // ?? frame id string (possibly related to the structure of the sensor data frames)
void Reset() {
name = "UNKNONW_SENSOR";
type = SensorType::UNKNOWN_SENSOR_TYPE;
orientation = SensorOrientation::FRONT;
frame_id = "UNKNOWN_FRAME_ID";
}
};
Struct Object
File link: object.h
struct alignas(16) Object {
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
Object();
std::string ToString() const;
void Reset();
// @brief object id per frame, required
int id = -1;
// @brief convex hull of the object, required
PointCloud<PointD> polygon;
// oriented boundingbox information
// @brief main direction of the object, required
Eigen::Vector3f direction = Eigen::Vector3f(1, 0, 0);
/*@brief the yaw angle, theta = 0.0 <=> direction(1, 0, 0),
currently roll and pitch are not considered,
make sure direction and theta are consistent, required
*/
float theta = 0.0f;
// @brief theta variance, required
float theta_variance = 0.0f;
// @brief center of the boundingbox (cx, cy, cz), required
Eigen::Vector3d center = Eigen::Vector3d(0, 0, 0);
// @brief covariance matrix of the center uncertainty, required
Eigen::Matrix3f center_uncertainty;
/* @brief size = [length, width, height] of boundingbox
length is the size of the main direction, required
*/
Eigen::Vector3f size = Eigen::Vector3f(0, 0, 0);
// @brief size variance, required
Eigen::Vector3f size_variance = Eigen::Vector3f(0, 0, 0);
// @brief anchor point, required
Eigen::Vector3d anchor_point = Eigen::Vector3d(0, 0, 0);
// @brief object type, required
ObjectType type = ObjectType::UNKNOWN;
// @brief probability for each type, required
std::vector<float> type_probs;
// @brief object sub-type, optional
ObjectSubType sub_type = ObjectSubType::UNKNOWN;
// @brief probability for each sub-type, optional
std::vector<float> sub_type_probs;
// @brief existence confidence, required
float confidence = 1.0f;
// tracking information
// @brief track id, required
// the id assigned by the sensor is recorded here
int track_id = -1;
// @brief velocity of the object, required
Eigen::Vector3f velocity = Eigen::Vector3f(0, 0, 0);
// @brief covariance matrix of the velocity uncertainty, required
Eigen::Matrix3f velocity_uncertainty;
// @brief if the velocity estimation is converged, true by default
bool velocity_converged = true;
// @brief velocity confidence, required
float velocity_confidence = 1.0f;
// @brief acceleration of the object, required
Eigen::Vector3f acceleration = Eigen::Vector3f(0, 0, 0);
// @brief covariance matrix of the acceleration uncertainty, required
Eigen::Matrix3f acceleration_uncertainty;
// @brief age of the tracked object, required
double tracking_time = 0.0;
// @brief timestamp of latest measurement, required
double latest_tracked_time = 0.0;
// @brief motion state of the tracked object, required
MotionState motion_state = MotionState::UNKNOWN;
// // Tailgating (trajectory of objects)
std::array<Eigen::Vector3d, 100> drops;
std::size_t drop_num = 0;
// // CIPV
bool b_cipv = false;
// @brief brake light, left-turn light and right-turn light score, optional
CarLight car_light;
// @brief sensor-specific object supplements, optional
LidarObjectSupplement lidar_supplement;
RadarObjectSupplement radar_supplement;
CameraObjectSupplement camera_supplement;
FusionObjectSupplement fusion_supplement;
// @debug feature to be used for semantic mapping
// std::shared_ptr<apollo::prediction::Feature> feature;
};
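The comment on theta asks that direction and theta stay consistent. A small illustrative helper (my own sketch, not part of object.h) under the stated convention theta = 0 <=> direction (1, 0, 0), with roll and pitch ignored:
#include <cmath>
#include "Eigen/Core"

// Illustrative only: derive the main direction from the yaw angle theta,
// keeping the two fields consistent in the x-y plane.
Eigen::Vector3f DirectionFromTheta(float theta) {
  return Eigen::Vector3f(std::cos(theta), std::sin(theta), 0.0f);
}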
Class SensorFrameMessage
File link: inner_component_messages.h
class SensorFrameMessage {
public:
SensorFrameMessage() { type_name_ = "SensorFrameMessage"; }
~SensorFrameMessage() = default;
std::string GetTypeName() { return type_name_; }
SensorFrameMessage* New() const { return new SensorFrameMessage; }
public:
apollo::common::ErrorCode error_code_ = apollo::common::ErrorCode::OK; // error code, used for functional safety
std::string sensor_id_; // sensor id
double timestamp_ = 0.0; // timestamp
uint64_t lidar_timestamp_ = 0; // ? lidar UTC time; the main frame is the lidar frame and it is hardware-triggered, so presumably this is recorded for that reason?
uint32_t seq_num_ = 0; // frame sequence number
std::string type_name_; // ? type name
base::HdmapStructConstPtr hdmap_; // HD map
base::FramePtr frame_; // Frame data (/perception/base/frame.h)
ProcessStage process_stage_ = ProcessStage::UNKNOWN_STAGE; // processing stage; see the enum below
};
enum class ProcessStage {
LIDAR_PREPROCESS = 0,
LIDAR_SEGMENTATION = 1,
LIDAR_RECOGNITION = 2,
STEREO_CAMERA_DETECTION = 3,
MONOCULAR_CAMERA_DETECTION = 4,
LONG_RANGE_RADAR_DETECTION = 5,
SHORT_RANGE_RADAR_DETECTION = 6,
ULTRASONIC_DETECTION = 7,
SENSOR_FUSION = 8,
UNKNOWN_STAGE = 9,
PROCESSSTAGE_COUNT = 10,
LIDAR_DETECTION = 11
};
Class PerceptionObstacles
File link: obstacles.js
Serialization function link: msg_serializer.cc
export default class PerceptionObstacles {
constructor() { // only the constructor is shown here
this.textRender = new Text3D();
this.arrows = []; // for indication of direction of moving obstacles
this.ids = []; // for obstacle id labels
this.solidCubes = []; // for obstacles with only length/width/height
this.dashedCubes = []; // for obstacles with only length/width/height
this.extrusionSolidFaces = []; // for obstacles with polygon points
this.extrusionDashedFaces = []; // for obstacles with polygon points
this.laneMarkers = []; // for lane markers
this.icons = [];
this.trafficCones = []; // for traffic cone meshes
this.v2xCubes = [];
this.v2xSolidFaces = [];
this.arrowIdx = 0;
this.cubeIdx = 0;
this.extrusionFaceIdx = 0;
this.iconIdx = 0;
this.trafficConeIdx = 0;
this.v2xCubeIdx = 0;
this.v2xSolidFaceIdx = 0;
}
}
Struct AssociationResult
File link: base_data_association.h
typedef std::pair<size_t, size_t> TrackMeasurmentPair;
struct AssociationResult {
std::vector<TrackMeasurmentPair> assignments; // note: assignments and the two unassigned_* vectors below store indices (subscripts), not ids
std::vector<size_t> unassigned_tracks;
std::vector<size_t> unassigned_measurements;
std::vector<double> track2measurements_dist;
std::vector<double> measurement2track_dist;
};
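To make the index-vs-id point concrete, a hypothetical consumer of an AssociationResult might look like the sketch below. The container types for tracks and measurements are assumptions rather than the actual fusion code; UpdateWithSensorObject is the Track method listed further down.
#include <vector>

// Illustrative only: 'assignments' pairs a track index with a measurement index;
// unassigned_tracks / unassigned_measurements likewise hold indices, not ids.
void ApplyAssignments(const AssociationResult& result,
                      std::vector<TrackPtr>* tracks,  // assumed container
                      const std::vector<SensorObjectPtr>& measurements) {
  for (const TrackMeasurmentPair& pair : result.assignments) {
    // pair.first indexes into *tracks, pair.second indexes into measurements
    (*tracks)[pair.first]->UpdateWithSensorObject(measurements[pair.second]);
  }
}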
Class SensorFrame
File link: sensor_frame.h
class SensorFrame {
public:
// Getter
inline double GetTimestamp() const { return header_->timestamp; }
inline bool GetPose(Eigen::Affine3d* pose) const {
if (pose == nullptr) {
AERROR << "pose is not available";
return false;
}
*pose = header_->sensor2world_pose;
return true;
}
inline std::vector<SensorObjectPtr>& GetForegroundObjects() { // key function
return foreground_objects_;
}
private:
std::vector<SensorObjectPtr> foreground_objects_; // foreground obstacles
std::vector<SensorObjectPtr> background_objects_; // background obstacles
// sensor-specific frame supplements
base::LidarFrameSupplement lidar_frame_supplement_;
base::RadarFrameSupplement radar_frame_supplement_;
base::CameraFrameSupplement camera_frame_supplement_;
SensorFrameHeaderPtr header_ = nullptr; // see below; identifies the sensor that detected these obstacles
};
struct SensorFrameHeader {
base::SensorInfo sensor_info; // sensor type, mounting orientation, and other sensor info
double timestamp = 0.0;
Eigen::Affine3d sensor2world_pose;
SensorFrameHeader() = default;
SensorFrameHeader(const base::SensorInfo& info, double ts,
const Eigen::Affine3d& pose)
: sensor_info(info), timestamp(ts), sensor2world_pose(pose) {}
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
} EIGEN_ALIGN16;
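The same header type appears again as frame_header_ in SensorObject below, so one header is presumably shared by a frame and every object extracted from it. A minimal construction sketch (the values are placeholders, not real calibration data):
#include "Eigen/Geometry"

// Illustrative only: build one header that a frame and its objects can share.
base::SensorInfo info;                               // filled in from config in real code
Eigen::Affine3d pose = Eigen::Affine3d::Identity();
SensorFrameHeaderPtr header(new SensorFrameHeader(info, 123.456, pose));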
Class SensorObject
File link: sensor_object.h
class SensorObject {
public:
// Getter
// @brief get frame timestamp which might be different with object timestamp
double GetTimestamp() const;
bool GetRelatedFramePose(Eigen::Affine3d* pose) const;
std::string GetSensorId() const;
base::SensorType GetSensorType() const;
inline std::shared_ptr<const base::Object> GetBaseObject() const {
return object_;
}
inline double GetInvisiblePeriod() const { return invisible_period_; }
inline void SetInvisiblePeriod(double period) { invisible_period_ = period; }
private:
FRIEND_TEST(SensorObjectTest, test);
std::shared_ptr<const base::Object> object_; // a single obstacle (spatial description)
double invisible_period_ = 0.0; // accumulated invisible time (tracking-time description)
std::shared_ptr<const SensorFrameHeader> frame_header_ = nullptr; // the sensor frame it came from (detection source)
};
Class FusedObject
File link: sensor_object.h
class FusedObject {
public:
inline double GetTimestamp() const { return object_->latest_tracked_time; }
inline std::shared_ptr<base::Object> GetBaseObject() { return object_; }
private:
std::shared_ptr<base::Object> object_;
};
Class Scene
File link: scene.h
class Scene {
public:
Scene();
~Scene();
inline std::vector<TrackPtr>& GetForegroundTracks() {
return foreground_tracks_;
}
inline const std::vector<TrackPtr>& GetForegroundTracks() const {
return foreground_tracks_;
}
inline std::vector<TrackPtr>& GetBackgroundTracks() {
return background_tracks_;
}
inline const std::vector<TrackPtr>& GetBackgroundTracks() const {
return background_tracks_;
}
void AddForegroundTrack(TrackPtr track);
void AddBackgroundTrack(TrackPtr track);
protected:
std::vector<TrackPtr> foreground_tracks_; // foreground tracks
std::vector<TrackPtr> background_tracks_; // background tracks
};
Class Track
File link: track.h
class Track { // defines a single track
public:
bool Initialize(SensorObjectPtr obj, bool is_background = false);
void Reset();
static size_t GenerateNewTrackId(); // simply increments; wraps back to 1 after reaching std::numeric_limits<unsigned int>::max()
void UpdateWithSensorObject(const SensorObjectPtr& obj); // important function
void UpdateWithoutSensorObject(const std::string& sensor_id,
double measurement_timestamp);
std::string DebugString() const;
protected:
// update state
void UpdateSupplementState(const SensorObjectPtr& src_object = nullptr);
void UpdateUnfusedState(const SensorObjectPtr& src_object);
SensorObjectConstPtr GetLatestSensorObject(
const SensorId2ObjectMap& objects) const;
void UpdateSensorObject(SensorId2ObjectMap* objects,
const SensorObjectPtr& obj);
void UpdateSensorObjectWithoutMeasurement(SensorId2ObjectMap* objects,
const std::string& sensor_id,
double measurement_timestamp,
double max_invisible_period);
void UpdateSensorObjectWithMeasurement(SensorId2ObjectMap* objects,
const std::string& sensor_id,
double measurement_timestamp,
double max_invisible_period);
void UpdateWithSensorObjectForBackground(const SensorObjectPtr& obj);
void UpdateWithoutSensorObjectForBackground(const std::string& sensor_id,
double measurement_timestamp);
protected:
// For example, if the left lidar (sensor_id = "lidar_left") sees an object, the right lidar
// (sensor_id = "lidar_right") also sees it, and both observations are associated with the same
// track, they are recorded in the three maps below. The choice of std::map over std::unordered_map
// may simply be because O(log N) lookup is fast enough for the handful of sensor ids involved.
SensorId2ObjectMap lidar_objects_; // typedef std::map<std::string, SensorObjectPtr> SensorId2ObjectMap;
SensorId2ObjectMap radar_objects_;
SensorId2ObjectMap camera_objects_;
FusedObjectPtr fused_object_ = nullptr; // the fused (trusted) obstacle state; internally holds a std::shared_ptr<base::Object> object_
double tracking_period_ = 0.0; // tracking duration
double existence_prob_ = 0.0; // existence probability
double toic_prob_ = 0.0; // ??
bool is_background_ = false;
bool is_alive_ = true;
size_t tracked_times_ = 0; // number of times tracked
private:
FRIEND_TEST(TrackTest, test);
static size_t s_track_idx_;
static double s_max_lidar_invisible_period_;
static double s_max_radar_invisible_period_;
static double s_max_camera_invisible_period_;
};
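GetLatestSensorObject's body is not shown above; a rough guess (my own sketch, not the Apollo implementation) at what scanning one of these per-sensor maps for the newest observation could look like:
// Illustrative only: scan one per-sensor map for the most recently updated object.
SensorObjectConstPtr FindLatest(const SensorId2ObjectMap& objects) {
  SensorObjectConstPtr latest = nullptr;
  for (const auto& id_and_obj : objects) { // key: sensor_id, value: SensorObjectPtr
    if (latest == nullptr ||
        id_and_obj.second->GetTimestamp() > latest->GetTimestamp()) {
      latest = id_and_obj.second;
    }
  }
  return latest;
}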
Base class BaseCameraModel and derived class PinholeCameraModel
File link: camera.h
class BaseCameraModel {
protected:
size_t image_width_ = 0; // image width
size_t image_height_ = 0; // image height
};
class PinholeCameraModel : public BaseCameraModel {
protected:
/* intrinsic matrix layout:
   fx  0  cx
    0  fy  cy
    0   0   1
*/
Eigen::Matrix3f intrinsic_params_; // camera intrinsic parameters
};
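With the intrinsics laid out as above, a point (X, Y, Z) in the camera frame projects to pixel coordinates u = fx*X/Z + cx, v = fy*Y/Z + cy. A small sketch of that projection (my own example, not the Apollo camera API):
#include "Eigen/Core"

// Illustrative only: pinhole projection of a camera-frame point to pixel coordinates.
Eigen::Vector2f ProjectToPixel(const Eigen::Matrix3f& intrinsic,
                               const Eigen::Vector3f& pt_in_camera) {
  Eigen::Vector3f uvw = intrinsic * pt_in_camera; // homogeneous image coordinates
  return Eigen::Vector2f(uvw.x() / uvw.z(), uvw.y() / uvw.z());
}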