For an analysis of the whole project, see the previous post:
https://blog.csdn.net/weixin_61294345/article/details/121679881
Here I share my annotations of two important source files:
1.frontend.cpp
//
// Created by gaoxiang on 19-5-2.
//
#include <opencv2/opencv.hpp>
#include "myslam/algorithm.h"
#include "myslam/backend.h"
#include "myslam/config.h"
#include "myslam/feature.h"
#include "myslam/frontend.h"
#include "myslam/g2o_types.h"
#include "myslam/map.h"
#include "myslam/viewer.h"
namespace myslam {
Frontend::Frontend() {
gftt_ =
cv::GFTTDetector::create(Config::Get<int>("num_features"), 0.01, 20);
num_features_init_ = Config::Get<int>("num_features_init");
num_features_ = Config::Get<int>("num_features");
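// both thresholds come from the YAML parameter file loaded elsewhere via Config::SetParameterFile()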
}
// Add a new frame and dispatch it according to the current tracking status
bool Frontend::AddFrame(myslam::Frame::Ptr frame) {
current_frame_ = frame;
switch (status_) {
case FrontendStatus::INITING:
/// stereo initialization
StereoInit();
break;
case FrontendStatus::TRACKING_GOOD:
case FrontendStatus::TRACKING_BAD:
/// tracking
Track();
break;
case FrontendStatus::LOST:
Reset();
break;
}
last_frame_ = current_frame_;
return true;
}
/// Tracking in the normal (non-initialization) state
bool Frontend::Track() {
// use the last frame's pose and the relative motion as the initial guess for the current pose
if (last_frame_) {
current_frame_->SetPose(relative_motion_ * last_frame_->Pose());
}
// match features against the last frame with LK optical flow
int num_track_last = TrackLastFrame();
// estimate the current pose with g2o graph optimization
tracking_inliers_ = EstimateCurrentPose();
// more than 50 tracked inliers: tracking is good
if (tracking_inliers_ > num_features_tracking_) {
// update the status flag
status_ = FrontendStatus::TRACKING_GOOD;
}
// between 20 and 50 inliers: tracking is poor
else if (tracking_inliers_ > num_features_tracking_bad_) {
// update the status flag
status_ = FrontendStatus::TRACKING_BAD;
}
// fewer than 20 inliers: tracking is considered lost
else {
// lost
status_ = FrontendStatus::LOST;
}
// insert the current frame as a keyframe if the tracking quality calls for it
InsertKeyframe();
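// Pose() stores T_cw, so relative_motion_ = T_cw(cur) * T_cw(last)^(-1); it is reused in
// Track() as the constant-velocity prior for the next frame's initial pose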
relative_motion_ = current_frame_->Pose() * last_frame_->Pose().inverse();
// push the current frame to the viewer
if (viewer_) viewer_->AddCurrentFrame(current_frame_);
return true;
}
/// Insert a keyframe
bool Frontend::InsertKeyframe() {
// still enough tracked inliers, no need for a new keyframe
if (tracking_inliers_ >= num_features_needed_for_keyframe_) {
return false;
}
// mark the current frame as a keyframe
current_frame_->SetKeyFrame();
// insert the keyframe into the map
map_->InsertKeyFrame(current_frame_);
LOG(INFO) << "Set frame " << current_frame_->id_ << " as keyframe "
<< current_frame_->keyframe_id_;
// register the new keyframe's features as observations of their map points
SetObservationsForKeyFrame();
// a new keyframe triggers fresh feature extraction and matching
// detect new keypoints in the current (left) image
DetectFeatures();
// match them in the right image with LK optical flow
FindFeaturesInRight();
// triangulate new map points
TriangulateNewPoints();
// trigger the backend to update the map
backend_->UpdateMap();
if (viewer_) viewer_->UpdateMap();
return true;
}
/// Register the keyframe's features as observations of their map points
void Frontend::SetObservationsForKeyFrame() {
for (auto &feat : current_frame_->features_left_) {
auto mp = feat->map_point_.lock();
if (mp) mp->AddObservation(feat);
}
}
/// Triangulate new map points from the current stereo pair
int Frontend::TriangulateNewPoints() {
std::vector<SE3> poses{camera_left_->pose(), camera_right_->pose()};
SE3 current_pose_Twc = current_frame_->Pose().inverse();
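// Pose() is T_cw, so current_pose_Twc maps triangulated points from the current (left)
// camera frame back into world coordinates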
int cnt_triangulated_pts = 0;
for (size_t i = 0; i < current_frame_->features_left_.size(); ++i) {
if (current_frame_->features_left_[i]->map_point_.expired() &&
current_frame_->features_right_[i] != nullptr) {
// the left feature has no associated map point and a right match exists: try to triangulate
std::vector<Vec3> points{
camera_left_->pixel2camera(
Vec2(current_frame_->features_left_[i]->position_.pt.x,
current_frame_->features_left_[i]->position_.pt.y)),
camera_right_->pixel2camera(
Vec2(current_frame_->features_right_[i]->position_.pt.x,
current_frame_->features_right_[i]->position_.pt.y))};
Vec3 pworld = Vec3::Zero();
if (triangulation(poses, points, pworld) && pworld[2] > 0) {
auto new_map_point = MapPoint::CreateNewMappoint();
pworld = current_pose_Twc * pworld;
new_map_point->SetPos(pworld);
new_map_point->AddObservation(
current_frame_->features_left_[i]);
new_map_point->AddObservation(
current_frame_->features_right_[i]);
current_frame_->features_left_[i]->map_point_ = new_map_point;
current_frame_->features_right_[i]->map_point_ = new_map_point;
map_->InsertMapPoint(new_map_point);
cnt_triangulated_pts++;
}
}
}
LOG(INFO) << "new landmarks: " << cnt_triangulated_pts;
return cnt_triangulated_pts;
}
/// Estimate the pose of the current frame (pose-only optimization)
int Frontend::EstimateCurrentPose() {
// graph optimization: a single pose vertex + multiple unary edges
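// each edge measures the reprojection error e = u_obs - pi(K * T_cw * P_w) of one feature,
// with the landmark P_w held fixed; only the pose T_cw is optimized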
typedef g2o::BlockSolver_6_3 BlockSolverType;
typedef g2o::LinearSolverDense<BlockSolverType::PoseMatrixType>
LinearSolverType;
auto solver = new g2o::OptimizationAlgorithmLevenberg(
g2o::make_unique<BlockSolverType>(
g2o::make_unique<LinearSolverType>()));
g2o::SparseOptimizer optimizer;
optimizer.setAlgorithm(solver);
// vertex: a single pose vertex
VertexPose *vertex_pose = new VertexPose();
vertex_pose->setId(0);
vertex_pose->setEstimate(current_frame_->Pose());
optimizer.addVertex(vertex_pose);
// camera intrinsics K
Mat33 K = camera_left_->K();
// edges: one unary edge for every feature associated with a map point
int index = 1;
std::vector<EdgeProjectionPoseOnly *> edges;
std::vector<Feature::Ptr> features;
for (size_t i = 0; i < current_frame_->features_left_.size(); ++i) {
auto mp = current_frame_->features_left_[i]->map_point_.lock();
if (mp) {
features.push_back(current_frame_->features_left_[i]);
EdgeProjectionPoseOnly *edge =
new EdgeProjectionPoseOnly(mp->pos_, K);
edge->setId(index);
edge->setVertex(0, vertex_pose);
edge->setMeasurement(
toVec2(current_frame_->features_left_[i]->position_.pt));
edge->setInformation(Eigen::Matrix2d::Identity());
edge->setRobustKernel(new g2o::RobustKernelHuber);
edges.push_back(edge);
optimizer.addEdge(edge);
index++;
}
}
// iteratively optimize and detect outliers
const double chi2_th = 5.991;
int cnt_outlier = 0;
for (int iteration = 0; iteration < 4; ++iteration) {
vertex_pose->setEstimate(current_frame_->Pose());
optimizer.initializeOptimization();
optimizer.optimize(10);
cnt_outlier = 0;
// count the outliers
for (size_t i = 0; i < edges.size(); ++i) {
auto e = edges[i];
if (features[i]->is_outlier_) {
e->computeError();
}
// chi2 threshold 5.991: the 95% quantile of the chi-square distribution with 2 DOF
if (e->chi2() > chi2_th) {
features[i]->is_outlier_ = true;  // mark as outlier
e->setLevel(1);  // level-1 edges are skipped in later optimization rounds
cnt_outlier++;
} else {
features[i]->is_outlier_ = false;
e->setLevel(0);  // keep the edge in the optimization
}
// once the outliers have been marked in the first rounds, remove the Huber kernel
// so the remaining inlier edges are optimized with the plain squared error
if (iteration == 2) {
e->setRobustKernel(nullptr);
}
}
}
LOG(INFO) << "Outlier/Inlier in pose estimating: " << cnt_outlier << "/"
<< features.size() - cnt_outlier;
// store the estimated pose and clear outlier associations
current_frame_->SetPose(vertex_pose->estimate());
LOG(INFO) << "Current Pose = \n" << current_frame_->Pose().matrix();
for (auto &feat : features) {
if (feat->is_outlier_) {
feat->map_point_.reset();
feat->is_outlier_ = false; // maybe we can still use it in future
}
}
// return the number of inliers
return features.size() - cnt_outlier;
}
// Track the last frame's features into the current frame
int Frontend::TrackLastFrame() {
// match the last frame's keypoints with LK optical flow
std::vector<cv::Point2f> kps_last, kps_current;
for (auto &kp : last_frame_->features_left_) {
if (kp->map_point_.lock()) {
// use the map point's projection into the current frame as the initial guess
auto mp = kp->map_point_.lock();
auto px =
camera_left_->world2pixel(mp->pos_, current_frame_->Pose());
kps_last.push_back(kp->position_.pt);
kps_current.push_back(cv::Point2f(px[0], px[1]));
} else {
kps_last.push_back(kp->position_.pt);
kps_current.push_back(kp->position_.pt);
}
}
std::vector<uchar> status;
Mat error;
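// OPTFLOW_USE_INITIAL_FLOW: the coordinates already stored in kps_current are used as
// the initial guesses for the pyramidal LK search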
cv::calcOpticalFlowPyrLK(
last_frame_->left_img_, current_frame_->left_img_, kps_last,
kps_current, status, error, cv::Size(11, 11), 3,
cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30,
0.01),
cv::OPTFLOW_USE_INITIAL_FLOW);
int num_good_pts = 0;
for (size_t i = 0; i < status.size(); ++i) {
if (status[i]) {
cv::KeyPoint kp(kps_current[i], 7);
Feature::Ptr feature(new Feature(current_frame_, kp));
feature->map_point_ = last_frame_->features_left_[i]->map_point_;
current_frame_->features_left_.push_back(feature);
num_good_pts++;
}
}
// log and return the number of successfully tracked points
LOG(INFO) << "Find " << num_good_pts << " in the last image.";
return num_good_pts;
}
/// Stereo initialization
bool Frontend::StereoInit() {
// detect keypoints in the left image of the first frame
int num_features_left = DetectFeatures();
// find their matches in the right image with LK optical flow
int num_coor_features = FindFeaturesInRight();
// fewer than num_features_init_ (100) stereo matches: initialization fails
if (num_coor_features < num_features_init_) {
return false;
}
bool build_map_success = BuildInitMap();
if (build_map_success) {
status_ = FrontendStatus::TRACKING_GOOD;
if (viewer_) {
// show the current frame
viewer_->AddCurrentFrame(current_frame_);
// refresh the map in the viewer
viewer_->UpdateMap();
}
return true;
}
return false;
}
/**
*Detect keypoints in the left image of the current frame
*return the number of detected keypoints
**/
int Frontend::DetectFeatures() {
cv::Mat mask(current_frame_->left_img_.size(), CV_8UC1, 255);
for (auto &feat : current_frame_->features_left_) {
// mask out a 20x20 region around each existing feature so GFTT does not re-detect there
cv::rectangle(mask, feat->position_.pt - cv::Point2f(10, 10),
feat->position_.pt + cv::Point2f(10, 10), 0, CV_FILLED);
}
// GFTT (good features to track) corner detection
std::vector<cv::KeyPoint> keypoints;
gftt_->detect(current_frame_->left_img_, keypoints, mask);
int cnt_detected = 0;
for (auto &kp : keypoints) {
// store each keypoint as a Feature attached to the current frame
current_frame_->features_left_.push_back(
Feature::Ptr(new Feature(current_frame_, kp)));
cnt_detected++;
}
// log and return the number of new keypoints
LOG(INFO) << "Detect " << cnt_detected << " new features";
return cnt_detected;
}
/**
*Find the corresponding features in the right image of the current frame
*return the number of successfully matched keypoints
**/
int Frontend::FindFeaturesInRight() {
// estimate the corresponding points in the right image with LK optical flow
std::vector<cv::Point2f> kps_left, kps_right;
for (auto &kp : current_frame_->features_left_) {
kps_left.push_back(kp->position_.pt);
// try to promote the weak_ptr: is this feature associated with a live map point?
auto mp = kp->map_point_.lock();
if (mp) {
// use the map point's projection as the initial guess
auto px =
camera_right_->world2pixel(mp->pos_, current_frame_->Pose());
kps_right.push_back(cv::Point2f(px[0], px[1]));
} else {
// otherwise use the same pixel coordinate as the initial guess
kps_right.push_back(kp->position_.pt);
}
}
std::vector<uchar> status;
Mat error;
// OpenCV pyramidal LK; OPTFLOW_USE_INITIAL_FLOW takes the values in kps_right as initial guesses
cv::calcOpticalFlowPyrLK(
current_frame_->left_img_, current_frame_->right_img_, kps_left,
kps_right, status, error, cv::Size(11, 11), 3,
cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30,
0.01),
cv::OPTFLOW_USE_INITIAL_FLOW);
int num_good_pts = 0;
// collect the matches and store them as right-image features
for (size_t i = 0; i < status.size(); ++i) {
if (status[i]) {
cv::KeyPoint kp(kps_right[i], 7);
Feature::Ptr feat(new Feature(current_frame_, kp));
feat->is_on_left_image_ = false;
current_frame_->features_right_.push_back(feat);
num_good_pts++;
} else {
current_frame_->features_right_.push_back(nullptr);
}
}
LOG(INFO) << "Find " << num_good_pts << " in the right image.";
return num_good_pts;
}
/**
 * Build the initial map from the first stereo pair
* @return true if succeed
*/
bool Frontend::BuildInitMap() {
std::vector<SE3> poses{camera_left_->pose(), camera_right_->pose()};
size_t cnt_init_landmarks = 0;
for (size_t i = 0; i < current_frame_->features_left_.size(); ++i) {
if (current_frame_->features_right_[i] == nullptr) continue;
// create landmarks by triangulation
std::vector<Vec3> points{
camera_left_->pixel2camera(
Vec2(current_frame_->features_left_[i]->position_.pt.x,
current_frame_->features_left_[i]->position_.pt.y)),
camera_right_->pixel2camera(
Vec2(current_frame_->features_right_[i]->position_.pt.x,
current_frame_->features_right_[i]->position_.pt.y))};
Vec3 pworld = Vec3::Zero();
// triangulate
if (triangulation(poses, points, pworld) && pworld[2] > 0) {
// create a new map point and store it
auto new_map_point = MapPoint::CreateNewMappoint();
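// the first frame defines the world origin (its pose is the identity), so the triangulated
// point is already expressed in world coordinates and needs no extra transform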
new_map_point->SetPos(pworld);
new_map_point->AddObservation(current_frame_->features_left_[i]);
new_map_point->AddObservation(current_frame_->features_right_[i]);
current_frame_->features_left_[i]->map_point_ = new_map_point;
current_frame_->features_right_[i]->map_point_ = new_map_point;
cnt_init_landmarks++;
map_->InsertMapPoint(new_map_point);
}
}
current_frame_->SetKeyFrame();
map_->InsertKeyFrame(current_frame_);
backend_->UpdateMap();
LOG(INFO) << "Initial map created with " << cnt_init_landmarks
<< " map points";
return true;
}
/// Tracking lost: reset
bool Frontend::Reset() {
LOG(INFO) << "Reset is not implemented. ";
return true;
}
} // namespace myslam
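Before moving on to the backend, here is a minimal sketch of how this frontend is typically driven. It follows the wiring used by visual_odometry.cpp in the original project, but it is only a sketch under that assumption: dataset/viewer construction is omitted, and identifiers such as dataset, map, backend and viewer come from the surrounding project rather than from frontend.cpp itself.
// Minimal driving loop (sketch): feed frames one by one; AddFrame() runs the state
// machine (INITING -> TRACKING_GOOD / TRACKING_BAD -> LOST) shown above.
auto frontend = std::make_shared<myslam::Frontend>();
frontend->SetMap(map);                 // shared with the backend
frontend->SetBackend(backend);
frontend->SetViewer(viewer);
frontend->SetCameras(dataset->GetCamera(0), dataset->GetCamera(1));
while (auto new_frame = dataset->NextFrame()) {   // nullptr once the sequence ends
    frontend->AddFrame(new_frame);
}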
2.backend.cpp:
//
// Created by gaoxiang on 19-5-2.
//
#include "myslam/backend.h"
#include "myslam/algorithm.h"
#include "myslam/feature.h"
#include "myslam/g2o_types.h"
#include "myslam/map.h"
#include "myslam/mappoint.h"
namespace myslam {
/// Constructor: start the backend optimization thread
Backend::Backend() {
backend_running_.store(true);
backend_thread_ = std::thread(std::bind(&Backend::BackendLoop, this));
}
void Backend::UpdateMap() {
std::unique_lock<std::mutex> lock(data_mutex_);
map_update_.notify_one();  /// wake up the blocked backend thread
}
/// Stop the backend thread
void Backend::Stop() {
backend_running_.store(false);
map_update_.notify_one();
backend_thread_.join();
}
void Backend::BackendLoop() {
while (backend_running_.load()) {
std::unique_lock<std::mutex> lock(data_mutex_);
map_update_.wait(lock);  // block until UpdateMap() or Stop() notifies
/// the backend only optimizes the active keyframes and landmarks
Map::KeyframesType active_kfs = map_->GetActiveKeyFrames();
Map::LandmarksType active_landmarks = map_->GetActiveMapPoints();
Optimize(active_kfs, active_landmarks);
}
}
void Backend::Optimize(Map::KeyframesType &keyframes,
Map::LandmarksType &landmarks) {
// solver setup
typedef g2o::BlockSolver_6_3 BlockSolverType;
typedef g2o::LinearSolverCSparse<BlockSolverType::PoseMatrixType>
LinearSolverType;
auto solver = new g2o::OptimizationAlgorithmLevenberg(
g2o::make_unique<BlockSolverType>(
g2o::make_unique<LinearSolverType>()));
g2o::SparseOptimizer optimizer;
optimizer.setAlgorithm(solver);
// add pose vertices, indexed by keyframe id
std::map<unsigned long, VertexPose *> vertices;
unsigned long max_kf_id = 0;
// the sliding window holds at most 7 active keyframes
for (auto &keyframe : keyframes) {
auto kf = keyframe.second;
VertexPose *vertex_pose = new VertexPose(); // camera vertex_pose
vertex_pose->setId(kf->keyframe_id_);
vertex_pose->setEstimate(kf->Pose());
optimizer.addVertex(vertex_pose);
if (kf->keyframe_id_ > max_kf_id) {
max_kf_id = kf->keyframe_id_;
}
vertices.insert({kf->keyframe_id_, vertex_pose});
}
// add landmark vertices, indexed by landmark id
std::map<unsigned long, VertexXYZ *> vertices_landmarks;
// intrinsics K and the left/right camera extrinsics
Mat33 K = cam_left_->K();
SE3 left_ext = cam_left_->pose();
SE3 right_ext = cam_right_->pose();
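// the extrinsics map points from the frame (body) coordinates into each camera's own
// coordinates; EdgeProjection composes them with the keyframe pose T_cw when projecting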
// edges
int index = 1;
double chi2_th = 5.991;  // robust kernel threshold: 95% quantile of chi-square with 2 DOF
std::map<EdgeProjection *, Feature::Ptr> edges_and_features;
// iterate over every landmark
for (auto &landmark : landmarks) {
if (landmark.second->is_outlier_) continue;  // skip outlier landmarks
unsigned long landmark_id = landmark.second->id_;
auto observations = landmark.second->GetObs();
// iterate over all observations of this landmark
for (auto &obs : observations) {
if (obs.lock() == nullptr) continue;
auto feat = obs.lock();
if (feat->is_outlier_ || feat->frame_.lock() == nullptr) continue;
auto frame = feat->frame_.lock();
EdgeProjection *edge = nullptr;
// build a binary edge between the landmark and the observing keyframe's pose
if (feat->is_on_left_image_) {
edge = new EdgeProjection(K, left_ext);
} else {
edge = new EdgeProjection(K, right_ext);
}
// if this landmark has no vertex in the optimizer yet, add one
if (vertices_landmarks.find(landmark_id) ==
vertices_landmarks.end()) {
VertexXYZ *v = new VertexXYZ;
v->setEstimate(landmark.second->Pos());
v->setId(landmark_id + max_kf_id + 1);  // offset landmark ids so they do not collide with pose vertex ids
v->setMarginalized(true);  // marginalize landmark vertices (Schur complement) for a faster solve
vertices_landmarks.insert({landmark_id, v});
optimizer.addVertex(v);
}
// add the edge: one edge for every (landmark, observing keyframe) pair
edge->setId(index);
edge->setVertex(0, vertices.at(frame->keyframe_id_)); // pose
edge->setVertex(1, vertices_landmarks.at(landmark_id)); // landmark
edge->setMeasurement(toVec2(feat->position_.pt));
edge->setInformation(Mat22::Identity());
auto rk = new g2o::RobustKernelHuber();
rk->setDelta(chi2_th);
edge->setRobustKernel(rk);
edges_and_features.insert({edge, feat});
optimizer.addEdge(edge);
index++;
}
}
// run the optimization, then remove outliers
optimizer.initializeOptimization();
optimizer.optimize(10);  // run 10 iterations
int cnt_outlier = 0, cnt_inlier = 0;
int iteration = 0;
while (iteration < 5) {
cnt_outlier = 0;
cnt_inlier = 0;
// classify each edge by the chi2 threshold
for (auto &ef : edges_and_features) {
if (ef.first->chi2() > chi2_th) {
cnt_outlier++;
} else {
cnt_inlier++;
}
}
// compute the inlier ratio
double inlier_ratio = cnt_inlier / double(cnt_inlier + cnt_outlier);
// if inliers are too few, loosen the threshold and check again
if (inlier_ratio > 0.5) {
break;
} else {
chi2_th *= 2;
iteration++;
}
}
// mark outlier features and detach them from their map points
for (auto &ef : edges_and_features) {
if (ef.first->chi2() > chi2_th) {
ef.second->is_outlier_ = true;
// remove this observation from the map point
ef.second->map_point_.lock()->RemoveObservation(ef.second);
} else {
ef.second->is_outlier_ = false;
}
}
LOG(INFO) << "Outlier/Inlier in optimization: " << cnt_outlier << "/"
<< cnt_inlier;
// write back the optimized keyframe poses and landmark positions
for (auto &v : vertices) {
keyframes.at(v.first)->SetPose(v.second->estimate());
}
for (auto &v : vertices_landmarks) {
landmarks.at(v.first)->SetPos(v.second->estimate());
}
}
} // namespace myslam
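For completeness, a minimal sketch of how the backend is wired up and shut down, again assuming the setup of the original project's visual_odometry.cpp (the map and the two Camera objects are created elsewhere):
// Sketch: constructing the Backend starts its optimization thread immediately.
auto backend = std::make_shared<myslam::Backend>();
backend->SetMap(map);                            // the same Map instance as the frontend
backend->SetCameras(camera_left, camera_right);  // needed for K and the extrinsics
// ... the frontend calls backend->UpdateMap() after every new keyframe ...
backend->Stop();                                 // notify and join the thread on shutdown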