Approach
- The first frame is the reference frame.
- For each subsequent frame:
  - find the epipolar direction,
  - search along the epipolar line,
  - take the candidate with the highest NCC score.
- Gaussian depth filter:
  - compute the uncertainty of the new measurement,
  - fuse it into the current estimate (Gaussian fusion); see the sketch after this list.
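Concretely, the filter keeps a per-pixel Gaussian N(mu, sigma2) over depth. Each new frame contributes a triangulated measurement, treated as another Gaussian; in the code below the measurement mean is depth_estimation and its variance is d_cov2. Multiplying the two Gaussians gives exactly the update performed in updateDepthFilter:

    mu_fuse     = (d_cov2 * mu + sigma2 * depth_estimation) / (sigma2 + d_cov2)
    sigma_fuse2 = (sigma2 * d_cov2) / (sigma2 + d_cov2)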
dense_mapping.cpp
#include <iostream>
#include <vector>
#include <fstream>
using namespace std;
#include <boost/timer.hpp>
#include <sophus/se3.hpp>
using Sophus::SE3d;
#include <Eigen/Core>
#include <Eigen/Geometry>
using namespace Eigen;
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
/**********************************************
* This program demonstrates dense depth estimation with a monocular
* camera under a known trajectory, using epipolar search + NCC matching.
* It corresponds to Section 12.2 of the book.
***********************************************/

// parameters
const int boarder = 20;      // border width
const int width = 640;       // image width
const int height = 480;      // image height
const double fx = 481.2f;    // camera intrinsics (REMODE test dataset)
const double fy = -480.0f;
const double cx = 319.5f;
const double cy = 239.5f;
const int ncc_window_size = 3;    // half window size for NCC
const int ncc_area = (2 * ncc_window_size + 1) * (2 * ncc_window_size + 1); // NCC window area
const double min_cov = 0.1;  // convergence criterion: minimum variance
const double max_cov = 10;   // divergence criterion: maximum variance
/// Read data from the REMODE dataset
bool readDatasetFiles(
    const string path,
    vector<string> &color_image_files,
    vector<SE3d> &poses,
    cv::Mat &ref_depth
);
/**
 * Update the depth estimate with a new image
 * @param ref        reference image
 * @param curr       current image
 * @param T_C_R      pose from the reference frame to the current frame
 * @param depth      depth mean
 * @param depth_cov2 depth variance
 * @return true on success
 **/
bool update(
    const Mat &ref,
    const Mat &curr,
    const SE3d &T_C_R,
    Mat &depth,
    Mat &depth_cov2
);
/**
 * @brief Epipolar search
 * @param ref                 reference image
 * @param curr                current image
 * @param T_C_R               pose from the reference frame to the current frame
 * @param pt_ref              position of the point in the reference image
 * @param depth_mu            depth mean
 * @param depth_cov           depth standard deviation (the caller passes the sqrt of the variance)
 * @param pt_curr             matched point in the current image
 * @param epipolar_direction  epipolar direction
 * @retval true on success
 */
bool epipolarSearch(
    const Mat &ref,
    const Mat &curr,
    const SE3d &T_C_R,
    const Vector2d &pt_ref,
    const double &depth_mu,
    const double &depth_cov,
    Vector2d &pt_curr,
    Vector2d &epipolar_direction
);
/**
 * @brief Update the depth filter
 * @param pt_ref              point in the reference image
 * @param pt_curr             point in the current image
 * @param T_C_R               pose from the reference frame to the current frame
 * @param epipolar_direction  epipolar direction
 * @param depth               depth mean
 * @param depth_cov2          depth variance
 * @retval true on success
 */
bool updateDepthFilter(
    const Vector2d &pt_ref,
    const Vector2d &pt_curr,
    const SE3d &T_C_R,
    const Vector2d &epipolar_direction,
    Mat &depth,
    Mat &depth_cov2
);
/**
 * Compute the NCC score
 * @param ref      reference image
 * @param curr     current image
 * @param pt_ref   point in the reference image
 * @param pt_curr  point in the current image
 * @retval NCC score
 */
double NCC(const Mat &ref, const Mat &curr, const Vector2d &pt_ref, const Vector2d &pt_curr);
// Bilinear grayscale interpolation
inline double getBilinearInterpolatedValue(const Mat &img, const Vector2d &pt) {
    uchar *d = &img.data[int(pt(1, 0)) * img.step + int(pt(0, 0))];
    double xx = pt(0, 0) - floor(pt(0, 0));
    double yy = pt(1, 0) - floor(pt(1, 0));
    return ((1 - xx) * (1 - yy) * double(d[0]) +
            xx * (1 - yy) * double(d[1]) +
            (1 - xx) * yy * double(d[img.step]) +
            xx * yy * double(d[img.step + 1])) / 255.0;
}
// ------------------------------------------------------------------
// small utilities
// show the estimated depth map
void plotDepth(const Mat &depth_truth, const Mat &depth_estimate);

// pixel coordinates to camera coordinates
inline Vector3d px2cam(const Vector2d px) {
    return Vector3d(
        (px(0, 0) - cx) / fx,
        (px(1, 0) - cy) / fy,
        1
    );
}
// camera coordinates to pixel coordinates
inline Vector2d cam2px(const Vector3d p_cam) {
    return Vector2d(
        p_cam(0, 0) * fx / p_cam(2, 0) + cx,
        p_cam(1, 0) * fy / p_cam(2, 0) + cy
    );
}

// check whether a point lies inside the image border
inline bool inside(const Vector2d &pt) {
    return pt(0, 0) >= boarder && pt(1, 0) >= boarder
           && pt(0, 0) + boarder < width && pt(1, 0) + boarder < height;
}
// show the epipolar match
void showEpipolarMatch(const Mat &ref, const Mat &curr, const Vector2d &px_ref, const Vector2d &px_curr);

// show the epipolar line
void showEpipolarLine(const Mat &ref, const Mat &curr, const Vector2d &px_ref, const Vector2d &px_min_curr,
                      const Vector2d &px_max_curr);

/// evaluate the depth estimate against the ground truth
void evaluateDepth(const Mat &depth_truth, const Mat &depth_estimate);
// ------------------------------------------------------------------
int main(int argc, char **argv) {
    if (argc != 2) {
        cout << "Usage: dense_mapping path_to_test_dataset" << endl;
        return -1;
    }

    // read data from the dataset
    vector<string> color_image_files;
    vector<SE3d> pose_TWC;
    Mat ref_depth;
    bool ret = readDatasetFiles(argv[1], color_image_files, pose_TWC, ref_depth);
    if (ret == false) {
        cout << "Reading image files failed!" << endl;
        return -1;
    }
    cout << "read total " << color_image_files.size() << " files." << endl;

    // first image
    Mat ref = imread(color_image_files[0], 0);  // gray-scale image
    SE3d pose_ref_TWC = pose_TWC[0];
    double init_depth = 3.0;  // initial depth value
    double init_cov2 = 3.0;   // initial variance
    Mat depth(height, width, CV_64F, init_depth);      // depth map
    Mat depth_cov2(height, width, CV_64F, init_cov2);  // variance of the depth map

    for (int index = 1; index < color_image_files.size(); index++) {
        cout << "*** loop " << index << " ***" << endl;
        Mat curr = imread(color_image_files[index], 0);
        if (curr.data == nullptr) continue;
        SE3d pose_curr_TWC = pose_TWC[index];
        SE3d pose_T_C_R = pose_curr_TWC.inverse() * pose_ref_TWC;  // T_C_R = T_C_W * T_W_R
        update(ref, curr, pose_T_C_R, depth, depth_cov2);
        evaluateDepth(ref_depth, depth);
        plotDepth(ref_depth, depth);
        imshow("image", curr);
        waitKey(1);
    }

    cout << "estimation returns, saving depth map ..." << endl;
    imwrite("depth.png", depth);
    cout << "done." << endl;
    return 0;
}
void evaluateDepth(const Mat &depth_truth, const Mat &depth_estimate) {
    double ave_depth_error = 0;     // average error
    double ave_depth_error_sq = 0;  // average squared error
    int cnt_depth_data = 0;
    for (int y = boarder; y < depth_truth.rows - boarder; y++)
        for (int x = boarder; x < depth_truth.cols - boarder; x++) {
            double error = depth_truth.ptr<double>(y)[x] - depth_estimate.ptr<double>(y)[x];
            ave_depth_error += error;
            ave_depth_error_sq += error * error;
            cnt_depth_data++;
        }
    ave_depth_error /= cnt_depth_data;
    ave_depth_error_sq /= cnt_depth_data;
    cout << "Average squared error = " << ave_depth_error_sq << ", average error: " << ave_depth_error << endl;
}
bool update(const Mat &ref, const Mat &curr, const SE3d &T_C_R, Mat &depth, Mat &depth_cov2) {
    for (int x = boarder; x < width - boarder; x++)
        for (int y = boarder; y < height - boarder; y++) {
            // iterate over every pixel
            if (depth_cov2.ptr<double>(y)[x] < min_cov ||
                depth_cov2.ptr<double>(y)[x] > max_cov)  // depth already converged or diverged
                continue;
            // search along the epipolar line for a match of (x, y)
            Vector2d pt_curr;
            Vector2d epipolar_direction;
            bool ret = epipolarSearch(
                ref,
                curr,
                T_C_R,
                Vector2d(x, y),
                depth.ptr<double>(y)[x],
                sqrt(depth_cov2.ptr<double>(y)[x]),
                pt_curr,
                epipolar_direction
            );
            if (ret == false)  // matching failed
                continue;
            // uncomment to show the match
            // showEpipolarMatch(ref, curr, Vector2d(x, y), pt_curr);
            // match succeeded: update the depth map
            updateDepthFilter(Vector2d(x, y), pt_curr, T_C_R, epipolar_direction, depth, depth_cov2);
        }
    return true;
}
bool readDatasetFiles(
    const string path,
    vector<string> &color_image_files,
    vector<SE3d> &poses,
    cv::Mat &ref_depth) {
    ifstream fin(path + "/first_200_frames_traj_over_table_input_sequence.txt");
    if (!fin) return false;

    while (!fin.eof()) {
        // data format: image file name, tx, ty, tz, qx, qy, qz, qw; note the poses are T_W_C, not T_C_W
        string image;
        fin >> image;
        double data[7];
        for (double &d : data) fin >> d;

        color_image_files.push_back(path + string("/images/") + image);
        poses.push_back(
            SE3d(Quaterniond(data[6], data[3], data[4], data[5]),
                 Vector3d(data[0], data[1], data[2]))
        );
        if (!fin.good()) break;
    }
    fin.close();

    // load reference depth
    fin.open(path + "/depthmaps/scene_000.depth");
    ref_depth = cv::Mat(height, width, CV_64F);
    if (!fin) return false;
    for (int y = 0; y < height; y++)
        for (int x = 0; x < width; x++) {
            double depth = 0;
            fin >> depth;
            ref_depth.ptr<double>(y)[x] = depth / 100.0;
        }
    return true;
}
bool epipolarSearch(
    const Mat &ref,
    const Mat &curr,
    const SE3d &T_C_R,
    const Vector2d &pt_ref,
    const double &depth_mu,
    const double &depth_cov,
    Vector2d &pt_curr,
    Vector2d &epipolar_direction) {
    // project the reference point P, at the current depth mean, into the current frame
    Vector3d f_ref = px2cam(pt_ref);
    f_ref.normalize();
    Vector3d P_ref = f_ref * depth_mu;              // the vector to P in the reference frame
    Vector2d px_mean_curr = cam2px(T_C_R * P_ref);  // pixel projected at the depth mean

    double d_min = depth_mu - 3 * depth_cov, d_max = depth_mu + 3 * depth_cov;
    if (d_min < 0.1) d_min = 0.1;
    Vector2d px_min_curr = cam2px(T_C_R * (f_ref * d_min));  // projection at the minimum depth
    Vector2d px_max_curr = cam2px(T_C_R * (f_ref * d_max));  // projection at the maximum depth

    Vector2d epipolar_line = px_max_curr - px_min_curr;  // epipolar line (as a segment)
    epipolar_direction = epipolar_line;                  // epipolar direction
    epipolar_direction.normalize();
    double half_length = 0.5 * epipolar_line.norm();     // half length of the epipolar segment
    if (half_length > 100) half_length = 100;            // do not search too far

    // search along the epipolar segment
    double best_ncc = -1.0;
    Vector2d best_px_curr;
    for (double l = -half_length; l <= half_length; l += 0.7) {  // step of 0.7, about sqrt(2)/2 pixels
        Vector2d px_curr = px_mean_curr + l * epipolar_direction;  // candidate point
        if (!inside(px_curr))
            continue;
        // compute the NCC between the candidate and the reference point
        double ncc = NCC(ref, curr, pt_ref, px_curr);
        if (ncc > best_ncc) {
            best_ncc = ncc;
            best_px_curr = px_curr;
        }
    }
    if (best_ncc < 0.85f)  // only trust matches with a high NCC score
        return false;
    pt_curr = best_px_curr;
    return true;
}
double NCC(const Mat &ref, const Mat &curr, const Vector2d &pt_ref, const Vector2d &pt_curr) {
    // zero-mean normalized cross-correlation
    // first compute the window means
    double mean_ref = 0, mean_curr = 0;
    vector<double> values_ref, values_curr;  // pixel values in the reference and current windows
    for (int x = -ncc_window_size; x <= ncc_window_size; x++)
        for (int y = -ncc_window_size; y <= ncc_window_size; y++) {
            double value_ref = double(ref.ptr<uchar>(int(y + pt_ref(1, 0)))[int(x + pt_ref(0, 0))]) / 255.0;
            mean_ref += value_ref;
            double value_curr = getBilinearInterpolatedValue(curr, pt_curr + Vector2d(x, y));
            mean_curr += value_curr;
            values_ref.push_back(value_ref);
            values_curr.push_back(value_curr);
        }
    mean_ref /= ncc_area;
    mean_curr /= ncc_area;

    // compute the zero-mean NCC
    double numerator = 0, denominator1 = 0, denominator2 = 0;
    for (int i = 0; i < values_ref.size(); i++) {
        double n = (values_ref[i] - mean_ref) * (values_curr[i] - mean_curr);
        numerator += n;
        denominator1 += (values_ref[i] - mean_ref) * (values_ref[i] - mean_ref);
        denominator2 += (values_curr[i] - mean_curr) * (values_curr[i] - mean_curr);
    }
    return numerator / sqrt(denominator1 * denominator2 + 1e-10);  // avoid a zero denominator
}
bool updateDepthFilter(const Vector2d &pt_ref,
                       const Vector2d &pt_curr,
                       const SE3d &T_C_R,
                       const Vector2d &epipolar_direction,
                       Mat &depth,
                       Mat &depth_cov2) {
    // triangulate to compute the depth
    SE3d T_R_C = T_C_R.inverse();
    Vector3d f_ref = px2cam(pt_ref);
    f_ref.normalize();
    Vector3d f_curr = px2cam(pt_curr);
    f_curr.normalize();

    // solve d_ref * f_ref = d_cur * (R_RC * f_curr) + t_RC for the two depths
    // by dotting both sides with f_ref and with f2
    Vector3d t = T_R_C.translation();
    Vector3d f2 = T_R_C.so3() * f_curr;
    Vector2d b = Vector2d(t.dot(f_ref), t.dot(f2));
    Matrix2d A;
    A(0, 0) = f_ref.dot(f_ref);
    A(0, 1) = -f_ref.dot(f2);
    A(1, 0) = -A(0, 1);
    A(1, 1) = -f2.dot(f2);
    Vector2d ans = A.inverse() * b;
    Vector3d xm = ans[0] * f_ref;       // position of P from the reference ray
    Vector3d xn = t + ans[1] * f2;      // position of P from the current ray
    Vector3d p_esti = (xm + xn) / 2.0;  // take the average of the two as P
    double depth_estimation = p_esti.norm();  // depth value

    // compute the uncertainty (assuming an error of one pixel along the epipolar line)
    Vector3d p = f_ref * depth_estimation;
    Vector3d a = p - t;
    double t_norm = t.norm();
    double a_norm = a.norm();
    double alpha = acos(f_ref.dot(t) / t_norm);
    double beta = acos(-a.dot(t) / (a_norm * t_norm));
    Vector3d f_curr_prime = px2cam(pt_curr + epipolar_direction);  // match perturbed by one pixel
    f_curr_prime.normalize();
    double beta_prime = acos(f_curr_prime.dot(-t) / t_norm);  // t is used in the camera frame here as well
    double gamma = M_PI - alpha - beta_prime;
    double p_prime = t_norm * sin(beta_prime) / sin(gamma);  // law of sines in the triangle O_ref-P-O_curr
    double d_cov = p_prime - depth_estimation;
    double d_cov2 = d_cov * d_cov;

    // Gaussian fusion
    double mu = depth.ptr<double>(int(pt_ref(1, 0)))[int(pt_ref(0, 0))];
    double sigma2 = depth_cov2.ptr<double>(int(pt_ref(1, 0)))[int(pt_ref(0, 0))];
    double mu_fuse = (d_cov2 * mu + sigma2 * depth_estimation) / (sigma2 + d_cov2);
    double sigma_fuse2 = (sigma2 * d_cov2) / (sigma2 + d_cov2);
    depth.ptr<double>(int(pt_ref(1, 0)))[int(pt_ref(0, 0))] = mu_fuse;
    depth_cov2.ptr<double>(int(pt_ref(1, 0)))[int(pt_ref(0, 0))] = sigma_fuse2;
    return true;
}
void plotDepth(const Mat &depth_truth, const Mat &depth_estimate) {
    imshow("depth_truth", depth_truth * 0.4);
    imshow("depth_estimate", depth_estimate * 0.4);
    imshow("depth_error", depth_truth - depth_estimate);
    waitKey(1);
}
void showEpipolarMatch(const Mat &ref, const Mat &curr, const Vector2d &px_ref, const Vector2d &px_curr) {
    Mat ref_show, curr_show;
    cv::cvtColor(ref, ref_show, cv::COLOR_GRAY2BGR);
    cv::cvtColor(curr, curr_show, cv::COLOR_GRAY2BGR);
    cv::circle(ref_show, cv::Point2f(px_ref(0, 0), px_ref(1, 0)), 5, cv::Scalar(0, 0, 250), 2);
    cv::circle(curr_show, cv::Point2f(px_curr(0, 0), px_curr(1, 0)), 5, cv::Scalar(0, 0, 250), 2);
    imshow("ref", ref_show);
    imshow("curr", curr_show);
    waitKey(1);
}

void showEpipolarLine(const Mat &ref, const Mat &curr, const Vector2d &px_ref, const Vector2d &px_min_curr,
                      const Vector2d &px_max_curr) {
    Mat ref_show, curr_show;
    cv::cvtColor(ref, ref_show, cv::COLOR_GRAY2BGR);
    cv::cvtColor(curr, curr_show, cv::COLOR_GRAY2BGR);
    cv::circle(ref_show, cv::Point2f(px_ref(0, 0), px_ref(1, 0)), 5, cv::Scalar(0, 255, 0), 2);
    cv::circle(curr_show, cv::Point2f(px_min_curr(0, 0), px_min_curr(1, 0)), 5, cv::Scalar(0, 255, 0), 2);
    cv::circle(curr_show, cv::Point2f(px_max_curr(0, 0), px_max_curr(1, 0)), 5, cv::Scalar(0, 255, 0), 2);
    cv::line(curr_show, Point2f(px_min_curr(0, 0), px_min_curr(1, 0)), Point2f(px_max_curr(0, 0), px_max_curr(1, 0)),
             Scalar(0, 255, 0), 1);
    imshow("ref", ref_show);
    imshow("curr", curr_show);
    waitKey(1);
}
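The 2x2 system solved in updateDepthFilter comes from the triangulation constraint d_ref * f_ref = d_cur * f2 + t, dotted with f_ref and with f2. Below is a minimal standalone sketch that checks the solve recovers a known 3D point; it is not part of the original program, the file name and the synthetic values of P and t are made up for illustration, and only Eigen is assumed.

// triangulation_check.cpp: standalone sanity check for the 2x2 triangulation solve
#include <iostream>
#include <Eigen/Dense>
using namespace Eigen;

int main() {
    // synthetic setup: a 3D point P seen from the reference camera at the origin
    // and a second camera at center t (identity rotation for simplicity)
    Vector3d P(0.5, -0.2, 3.0);
    Vector3d t(0.3, 0.0, 0.0);
    Vector3d f_ref = P.normalized();     // bearing of P in the reference frame
    Vector3d f2 = (P - t).normalized();  // bearing from the second camera, expressed in the reference frame

    // solve d_ref * f_ref = d_cur * f2 + t by dotting both sides with f_ref and f2,
    // exactly the system built in updateDepthFilter
    Matrix2d A;
    A(0, 0) = f_ref.dot(f_ref);
    A(0, 1) = -f_ref.dot(f2);
    A(1, 0) = -A(0, 1);
    A(1, 1) = -f2.dot(f2);
    Vector2d b(t.dot(f_ref), t.dot(f2));
    Vector2d d = A.inverse() * b;

    Vector3d xm = d[0] * f_ref;   // point on the reference ray
    Vector3d xn = t + d[1] * f2;  // point on the second ray
    std::cout << "estimated P: " << ((xm + xn) / 2.0).transpose()
              << "  (ground truth: " << P.transpose() << ")" << std::endl;
    return 0;
}

Since the two synthetic rays intersect exactly, the printed estimate should match P up to floating-point error; in the real program the rays are skew and the midpoint of the two closest points is used instead.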
CMakeLists.txt
cmake_minimum_required(VERSION 2.8)
project(dense_mapping)
set(CMAKE_BUILD_TYPE "Release")
set(CMAKE_CXX_FLAGS "-std=c++14 -march=native -O3")
list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)
include_directories("/usr/include/eigen3")
# OpenCV
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})
# Sophus
find_package(Sophus REQUIRED)
include_directories(${Sophus_INCLUDE_DIRS})
# fmt (required by recent versions of Sophus)
find_package(FMT REQUIRED)

add_executable(dense_mapping dense_mapping.cpp)
target_link_libraries(dense_mapping ${Sophus_LIBRARIES} ${OpenCV_LIBS} fmt::fmt)
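Assuming the REMODE test dataset has been downloaded, the usual CMake flow (mkdir build, cd build, cmake .., make) produces the dense_mapping executable, which is then run with the dataset directory as its single argument, matching the usage string printed by main: ./dense_mapping path_to_test_dataset.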