
mono_inertial_euroc.cc


This walkthrough starts from the main function; lines marked with ## are source code.

First check that at least 5 command-line arguments are supplied; otherwise the input is invalid and the program returns immediately. A correct invocation looks like this:

./Examples/Monocular-Inertial/mono_inertial_euroc  ./Vocabulary/ORBvoc.txt ./Examples/Monocular-Inertial/EuRoC.yaml ${dir}/MH01 ./Examples/Monocular-Inertial/EuRoC_TimeStamps/MH01.txt dataset-MH01_monoi  
##    

if(argc < 5)
{
    cerr << endl << "Usage: ./mono_inertial_euroc path_to_vocabulary path_to_settings path_to_sequence_folder_1 path_to_times_file_1 (path_to_image_folder_2 path_to_times_file_2 ... path_to_image_folder_N path_to_times_file_N) " << endl;
    return 1;
}

 

Compute how many sequences will be processed:

## const int num_seq = (argc-3)/2;
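For the invocation shown earlier argc is 6, so num_seq = (6-3)/2 = 1; with more sequences the folder/timestamp argument pairs simply repeat, and argv[2*seq+3] / argv[2*seq+4] pick out the pair for sequence seq. A minimal standalone sketch (a hypothetical helper program, not part of the original file) that prints this mapping:

    // Hypothetical sketch: how argv indices map to sequence folders and timestamp files for
    // ./mono_inertial_euroc VOC YAML (SEQ_FOLDER TIMES_FILE)... [OUTPUT_NAME]
    #include <iostream>

    int main(int argc, char **argv)
    {
        const int num_seq = (argc - 3) / 2;   // e.g. argc == 6  ->  num_seq == 1
        for (int seq = 0; seq < num_seq; seq++)
        {
            std::cout << "sequence " << seq
                      << ": folder = argv[" << 2 * seq + 3 << "]"
                      << ", times = argv[" << 2 * seq + 4 << "]" << std::endl;
        }
        return 0;
    }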

Load the image data and IMU data (with their timestamps) for each sequence:

##
for (seq = 0; seq < num_seq; seq++)
{
    // Data path, points at the argument ${dir}/MH01
    string pathSeq(argv[(2*seq) + 3]);
    // Timestamp file path, ./Examples/Monocular-Inertial/EuRoC_TimeStamps/MH01.txt
    string pathTimeStamps(argv[(2*seq) + 4]);

    string pathCam0 = pathSeq + "/mav0/cam0/data";
    string pathImu = pathSeq + "/mav0/imu0/data.csv";

    // Load the images and their timestamps into vstrImageFilenames and vTimestampsCam
    LoadImages(pathCam0, pathTimeStamps, vstrImageFilenames[seq], vTimestampsCam[seq]);
    // Load the accelerometer and gyroscope measurements and their timestamps
    LoadIMU(pathImu, vTimestampsImu[seq], vAcc[seq], vGyro[seq]);

    // Number of images in this sequence
    nImages[seq] = vstrImageFilenames[seq].size();
    // With several sequences, also accumulate the total number of images
    tot_images += nImages[seq];
    // Number of IMU measurements in this sequence
    nImu[seq] = vTimestampsImu[seq].size();

    // Find first imu to be considered, supposing imu measurements start first
    // For each sequence, find the first IMU timestamp that is larger than the first image timestamp:
    //   image sequence   - - - - - -
    //   IMU sequence    - - - - - -    find the first stamp after the first image
    while(vTimestampsImu[seq][first_imu[seq]] <= vTimestampsCam[seq][0])
        first_imu[seq]++;
    // Step back one IMU sample so the later preintegration still covers the first image
    first_imu[seq]--; // first imu measurement to be considered
}
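The alignment loop above simply skips IMU samples recorded before the first image, then steps back by one so that preintegration still covers it. A small self-contained sketch of that logic with made-up timestamps (the variable names mirror the ones above, but the data is invented):

    #include <iostream>
    #include <vector>

    int main()
    {
        // Made-up timestamps in seconds: the IMU runs at a higher rate and starts earlier.
        std::vector<double> vTimestampsImu = {0.000, 0.005, 0.010, 0.015, 0.020, 0.025};
        std::vector<double> vTimestampsCam = {0.012, 0.062};

        // Skip IMU samples with t <= first image timestamp ...
        int first_imu = 0;
        while (vTimestampsImu[first_imu] <= vTimestampsCam[0])
            first_imu++;
        // ... then step back one sample so preintegration covers the first image.
        first_imu--;

        // Prints index 2 (t = 0.010), the last IMU sample before the first image.
        std::cout << "first IMU index to use: " << first_imu
                  << " (t = " << vTimestampsImu[first_imu] << ")" << std::endl;
        return 0;
    }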

Initialize the SLAM system:

##
    // Create SLAM system. It initializes all system threads and gets ready to process frames.
    //argv[1] = ./Vocabulary/ORBvoc.txt
    //argv[2] = ./Examples/Monocular-Inertial/EuRoC.yaml
    ORB_SLAM3::System SLAM(argv[1],argv[2],ORB_SLAM3::System::IMU_MONOCULAR, true);

Step into the ORB_SLAM3::System constructor above; only the key parts are shown:

##
/**
 * @brief System constructor; it also starts the other threads
 * @param strVocFile      vocabulary file path, ./Vocabulary/ORBvoc.txt
 * @param strSettingsFile settings file path, ./Examples/Monocular-Inertial/EuRoC.yaml
 * @param sensor          sensor type
 * @param bUseViewer      whether to use the viewer, true by default
 * @param initFr          id of the initialization frame, 0 by default
 * @param strSequence     sequence name, used by the tracking and local mapping threads, std::string() by default
 */
System::System(const string &strVocFile, const string &strSettingsFile, const eSensor sensor,
               const bool bUseViewer, const int initFr, const string &strSequence):
    mSensor(sensor), mpViewer(static_cast<Viewer*>(NULL)), mbReset(false), mbResetActiveMap(false),
    mbActivateLocalizationMode(false), mbDeactivateLocalizationMode(false), mbShutDown(false)
{
    // Read the settings file; strSettingsFile is ./Examples/Monocular-Inertial/EuRoC.yaml
    cv::FileStorage fsSettings(strSettingsFile.c_str(), cv::FileStorage::READ);

    // Check the settings file version; different versions are handled differently
    cv::FileNode node = fsSettings["File.version"];
    if(!node.empty() && node.isString() && node.string() == "1.0")
    {
        settings_ = new Settings(strSettingsFile,mSensor); // step into this function, TODO1
        // File names for saving/loading the map; not present in the default settings file,
        // but can be added later for localization
        mStrLoadAtlasFromFile = settings_->atlasLoadFile();
        mStrSaveAtlasToFile = settings_->atlasSaveFile();
    }

    // Multi-map management is new in ORB-SLAM3; the Atlas identifier is handled here
    bool loadedAtlas = false;
    if(mStrLoadAtlasFromFile.empty())
    {
        // Create a new ORB vocabulary
        mpVocabulary = new ORBVocabulary();
        // Load the pre-trained ORB vocabulary
        bool bVocLoad = mpVocabulary->loadFromTextFile(strVocFile);
        // Create the keyframe database
        mpKeyFrameDatabase = new KeyFrameDatabase(*mpVocabulary);
        // Create the multi-map atlas; 0 means the initial keyframe id is 0
        mpAtlas = new Atlas(0); // step into this function, TODO2
    }

    // If the sensor type includes an IMU, set the mbIsInertial flag to true
    if (mSensor==IMU_STEREO || mSensor==IMU_MONOCULAR || mSensor==IMU_RGBD)
        mpAtlas->SetInertialSensor();

    // Create the tracking thread (runs in the main thread)
    mpTracker = new Tracking(this, mpVocabulary, mpFrameDrawer, mpMapDrawer,
                             mpAtlas, mpKeyFrameDatabase, strSettingsFile, mSensor, settings_, strSequence); // step into this function, TODO3

    // Create and start the local mapping thread
    mpLocalMapper = new LocalMapping(this, mpAtlas, mSensor==MONOCULAR || mSensor==IMU_MONOCULAR,
                                     mSensor==IMU_MONOCULAR || mSensor==IMU_STEREO || mSensor==IMU_RGBD, strSequence); // analyzed later
    mptLocalMapping = new thread(&ORB_SLAM3::LocalMapping::Run,mpLocalMapper);
    mpLocalMapper->mInitFr = initFr; // id of the initialization frame, 0 by default

    // Depth threshold for the farthest 3D map points; beyond it the triangulation is
    // probably inaccurate, so such points are discarded
    if(settings_)
        mpLocalMapper->mThFarPoints = settings_->thFarPoints(); // 20
    else
        mpLocalMapper->mThFarPoints = fsSettings["thFarPoints"];

    // Create and start the loop closing thread
    mpLoopCloser = new LoopClosing(mpAtlas, mpKeyFrameDatabase, mpVocabulary, mSensor!=MONOCULAR, activeLC); // analyzed later
    mptLoopClosing = new thread(&ORB_SLAM3::LoopClosing::Run, mpLoopCloser);

    // Set the pointers between threads
    mpTracker->SetLocalMapper(mpLocalMapper);
    mpTracker->SetLoopClosing(mpLoopCloser);
    mpLocalMapper->SetTracker(mpTracker);
    mpLocalMapper->SetLoopCloser(mpLoopCloser);
    mpLoopCloser->SetTracker(mpTracker);
    mpLoopCloser->SetLocalMapper(mpLocalMapper);
}
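The local mapping and loop closing workers above are plain std::thread objects bound to a member function of the freshly created object. A minimal standalone sketch of that pattern, with a hypothetical Worker class standing in for LocalMapping / LoopClosing:

    #include <thread>
    #include <iostream>

    // Hypothetical worker class standing in for LocalMapping / LoopClosing.
    class Worker
    {
    public:
        void Run() { std::cout << "worker thread running" << std::endl; }
    };

    int main()
    {
        Worker *pWorker = new Worker();
        // Same pattern as: new thread(&ORB_SLAM3::LocalMapping::Run, mpLocalMapper);
        std::thread *pt = new std::thread(&Worker::Run, pWorker);
        pt->join();
        delete pt;
        delete pWorker;
        return 0;
    }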

Step into the Settings constructor, TODO1:

##
// configFile = ./Examples/Monocular-Inertial/EuRoC.yaml
Settings::Settings(const std::string &configFile, const int &sensor) :
    bNeedToUndistort_(false), bNeedToRectify_(false), bNeedToResize1_(false), bNeedToResize2_(false)
{
    // Open settings file ./Examples/Monocular-Inertial/EuRoC.yaml
    cv::FileStorage fSettings(configFile, cv::FileStorage::READ);

    // Read the intrinsics and distortion parameters of camera 1
    readCamera1(fSettings); // step into this function, TODO1-1

    // For a stereo rig, also read the second camera's configuration
    if (sensor_ == System::STEREO || sensor_ == System::IMU_STEREO)
    {
        readCamera2(fSettings);
    }

    // Read image information
    readImageInfo(fSettings); // step into this function, TODO1-2

    // Read IMU parameters
    readIMU(fSettings);

    if (sensor_ == System::RGBD || sensor_ == System::IMU_RGBD)
    {
        // Read RGB-D configuration
        readRGBD(fSettings); // step into this function, TODO1-3
    }

    // Read the ORB feature extraction parameters
    readORB(fSettings);

    if (bNeedToRectify_)
    {
        precomputeRectificationMaps();
    }
}
readCamera1(fSettings); // step into this function, TODO1-1
void Settings::readCamera1(cv::FileStorage &fSettings)
{
    bool found;

    // Camera type
    string cameraModel = readParameter<string>(fSettings, "Camera.type", found);

    vector<float> vCalibration;

    // Pinhole camera model
    if (cameraModel == "PinHole")
    {
        cameraType_ = PinHole;

        // Camera intrinsics
        float fx = readParameter<float>(fSettings, "Camera1.fx", found);
        float fy = readParameter<float>(fSettings, "Camera1.fy", found);
        float cx = readParameter<float>(fSettings, "Camera1.cx", found);
        float cy = readParameter<float>(fSettings, "Camera1.cy", found);
        vCalibration = {fx, fy, cx, cy};

        // Build the pinhole camera model
        calibration1_ = new Pinhole(vCalibration);

        // Distortion parameters
        vPinHoleDistorsion1_[0] = readParameter<float>(fSettings, "Camera1.k1", found);
        vPinHoleDistorsion1_[1] = readParameter<float>(fSettings, "Camera1.k2", found);
        vPinHoleDistorsion1_[2] = readParameter<float>(fSettings, "Camera1.p1", found);
        vPinHoleDistorsion1_[3] = readParameter<float>(fSettings, "Camera1.p2", found);
    }
    else if (cameraModel == "KannalaBrandt8") // fisheye / wide-angle lens model
    {
        cameraType_ = KannalaBrandt;

        // Read intrinsic parameters
        float fx = readParameter<float>(fSettings, "Camera1.fx", found);
        float fy = readParameter<float>(fSettings, "Camera1.fy", found);
        float cx = readParameter<float>(fSettings, "Camera1.cx", found);
        float cy = readParameter<float>(fSettings, "Camera1.cy", found);

        float k0 = readParameter<float>(fSettings, "Camera1.k1", found);
        float k1 = readParameter<float>(fSettings, "Camera1.k2", found);
        float k2 = readParameter<float>(fSettings, "Camera1.k3", found);
        float k3 = readParameter<float>(fSettings, "Camera1.k4", found);
        vCalibration = {fx, fy, cx, cy, k0, k1, k2, k3};

        calibration1_ = new KannalaBrandt8(vCalibration);
        originalCalib1_ = new KannalaBrandt8(vCalibration);

        if (sensor_ == System::STEREO || sensor_ == System::IMU_STEREO)
        {
            int colBegin = readParameter<int>(fSettings, "Camera1.overlappingBegin", found);
            int colEnd = readParameter<int>(fSettings, "Camera1.overlappingEnd", found);
            // Record the first and last columns of the overlapping area
            vector<int> vOverlapping = {colBegin, colEnd};
            static_cast<KannalaBrandt8 *>(calibration1_)->mvLappingArea = vOverlapping;
        }
    }
}
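readParameter is used throughout these read* functions: it looks up a key in the cv::FileStorage, reports through the found flag, aborts on a missing required key, and returns a default for a missing optional key. A hedged sketch of what such a helper can look like (an illustration of the behaviour described here, not a verbatim copy of the ORB-SLAM3 implementation):

    #include <opencv2/core.hpp>
    #include <cstdlib>
    #include <iostream>
    #include <string>

    // Illustrative helper: read a typed parameter from the settings file.
    // Missing and required  -> print an error and abort.
    // Missing and optional  -> found = false, return a default-constructed value.
    template <typename T>
    T readParameter(cv::FileStorage &fSettings, const std::string &name,
                    bool &found, const bool required = true)
    {
        cv::FileNode node = fSettings[name];
        if (node.empty())
        {
            if (required)
            {
                std::cerr << name << " required parameter does not exist" << std::endl;
                std::exit(-1);
            }
            found = false;
            return T();
        }
        found = true;
        return static_cast<T>(node);
    }

    int main()
    {
        cv::FileStorage fSettings("./Examples/Monocular-Inertial/EuRoC.yaml", cv::FileStorage::READ);
        bool found;
        // Required key: aborts if missing.  Optional key: found tells whether it was present.
        float fx = readParameter<float>(fSettings, "Camera1.fx", found);
        int newHeight = readParameter<int>(fSettings, "Camera.newHeight", found, false);
        std::cout << "fx = " << fx << ", newHeight found: " << found
                  << " (value " << newHeight << ")" << std::endl;
        return 0;
    }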
readImageInfo(fSettings); // step into this function, TODO1-2
void Settings::readImageInfo(cv::FileStorage &fSettings)
{
    bool found;

    // Read the original camera resolution
    int originalRows = readParameter<int>(fSettings, "Camera.height", found);
    int originalCols = readParameter<int>(fSettings, "Camera.width", found);
    originalImSize_.width = originalCols;
    originalImSize_.height = originalRows;

    // newImSize_ is a cv::Size
    newImSize_ = originalImSize_;

    // Read the new image height (optional parameter)
    int newHeigh = readParameter<int>(fSettings, "Camera.newHeight", found, false);
    // If the parameter is present, the image has to be resized
    if (found)
    {
        bNeedToResize1_ = true;
        newImSize_.height = newHeigh;

        if (!bNeedToRectify_)
        {
            // Update calibration
            // New height divided by original height; e.g. if the original height is 4
            // and the new height is 2, scaleRowFactor is 0.5
            float scaleRowFactor = (float)newImSize_.height / (float)originalImSize_.height;
            // Scale fy and cy: the intrinsics map camera-frame points to image coordinates,
            // and the v (y, row) direction is adjusted uniformly:
            //   v = fy * Y/Z + cy now maps the camera-frame point onto the resized image
            calibration1_->setParameter(calibration1_->getParameter(1) * scaleRowFactor, 1);
            calibration1_->setParameter(calibration1_->getParameter(3) * scaleRowFactor, 3);

            if ((sensor_ == System::STEREO || sensor_ == System::IMU_STEREO) && cameraType_ != Rectified)
            {
                calibration2_->setParameter(calibration2_->getParameter(1) * scaleRowFactor, 1);
                calibration2_->setParameter(calibration2_->getParameter(3) * scaleRowFactor, 3);
            }
        }
    }

    // Read the new image width (optional parameter)
    int newWidth = readParameter<int>(fSettings, "Camera.newWidth", found, false);
    // If the parameter is present, the image has to be resized
    if (found)
    {
        bNeedToResize1_ = true;
        newImSize_.width = newWidth;

        if (!bNeedToRectify_)
        {
            // Update calibration
            float scaleColFactor = (float)newImSize_.width / (float)originalImSize_.width;
            // Scale fx and cx: the u (column) direction is adjusted uniformly
            calibration1_->setParameter(calibration1_->getParameter(0) * scaleColFactor, 0);
            calibration1_->setParameter(calibration1_->getParameter(2) * scaleColFactor, 2);

            if ((sensor_ == System::STEREO || sensor_ == System::IMU_STEREO) && cameraType_ != Rectified)
            {
                calibration2_->setParameter(calibration2_->getParameter(0) * scaleColFactor, 0);
                calibration2_->setParameter(calibration2_->getParameter(2) * scaleColFactor, 2);

                if (cameraType_ == KannalaBrandt)
                {
                    static_cast<KannalaBrandt8 *>(calibration1_)->mvLappingArea[0] *= scaleColFactor;
                    static_cast<KannalaBrandt8 *>(calibration1_)->mvLappingArea[1] *= scaleColFactor;
                    static_cast<KannalaBrandt8 *>(calibration2_)->mvLappingArea[0] *= scaleColFactor;
                    static_cast<KannalaBrandt8 *>(calibration2_)->mvLappingArea[1] *= scaleColFactor;
                }
            }
        }
    }

    // Camera frame rate
    fps_ = readParameter<int>(fSettings, "Camera.fps", found);
    // Whether images are in RGB order
    bRGB_ = (bool)readParameter<int>(fSettings, "Camera.RGB", found);
}
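The scaling rule above keeps the projection consistent with the resized image: in the calibration vector, indices 0..3 are fx, fy, cx, cy; rows scale fy and cy, columns scale fx and cx. A small worked example with made-up numbers (not the EuRoC calibration):

    #include <iostream>

    int main()
    {
        // Made-up original intrinsics and image size.
        float fx = 400.f, fy = 400.f, cx = 320.f, cy = 240.f;
        int origW = 640, origH = 480;
        int newW = 320, newH = 240;          // the image will be resized to half size

        float scaleColFactor = (float)newW / (float)origW;   // 0.5
        float scaleRowFactor = (float)newH / (float)origH;   // 0.5

        // Columns (u direction) scale fx and cx, rows (v direction) scale fy and cy,
        // so that u = fx*X/Z + cx and v = fy*Y/Z + cy still land on the resized image.
        fx *= scaleColFactor;  cx *= scaleColFactor;   // 200, 160
        fy *= scaleRowFactor;  cy *= scaleRowFactor;   // 200, 120

        std::cout << "fx=" << fx << " fy=" << fy << " cx=" << cx << " cy=" << cy << std::endl;
        return 0;
    }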
readRGBD(fSettings); // step into this function, TODO1-3
void Settings::readRGBD(cv::FileStorage &fSettings)
{
    bool found;

    depthMapFactor_ = readParameter<float>(fSettings, "RGBD.DepthMapFactor", found);
    thDepth_ = readParameter<float>(fSettings, "Stereo.ThDepth", found);
    b_ = readParameter<float>(fSettings, "Stereo.b", found);
    bf_ = b_ * calibration1_->getParameter(0); // bf = focal length times baseline.
    // A note on disparity: d = uL - uR, the difference of the x coordinates of the same point
    // in the left and right images; the depth of a 3D point is z = f*b/d, so disparity is
    // inversely proportional to depth z.
}
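Plugging numbers into z = f*b/d makes the inverse relation between disparity and depth concrete. A tiny standalone sketch with made-up stereo parameters (not from any real calibration):

    #include <iostream>

    int main()
    {
        // Made-up stereo parameters: focal length in pixels and baseline in metres.
        const float f = 435.f;      // fx
        const float b = 0.11f;      // baseline
        const float bf = f * b;     // what the code above stores as bf_

        // Disparity d = uL - uR; depth z = f*b/d, so a larger disparity means a closer point.
        for (float d : {5.f, 10.f, 20.f})
            std::cout << "d = " << d << " px  ->  z = " << bf / d << " m" << std::endl;
        return 0;
    }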
mpAtlas = new Atlas(0); // step into this function, TODO2
// Initialize the map atlas
Atlas::Atlas(int initKFid) : mnLastInitKFidMap(initKFid), mHasViewer(false)
{
    mpCurrentMap = static_cast<Map *>(NULL);
    CreateNewMap();//TODO2-1
}

CreateNewMap(); // step into this function, TODO2-1

/**
 * @brief Create a new map. If the currently active map is valid, store it as an inactive map first and then create the new one; otherwise the new map can be created directly.
 * 
 */
void Atlas::CreateNewMap()
{
    // Lock the atlas
    unique_lock<mutex> lock(mMutexAtlas);
    // If the currently active map is valid, store it as an inactive map before creating the new one
    if (mpCurrentMap)
    {
        // mnLastInitKFidMap is the id of the first keyframe of the current map when it was created;
        // it is one more than the largest keyframe id of the previous map
        if (!mspMaps.empty() && mnLastInitKFidMap < mpCurrentMap->GetMaxKFid())
            mnLastInitKFidMap = mpCurrentMap->GetMaxKFid() + 1;
        // Store the current map; internally this just marks mIsInUse as false
        mpCurrentMap->SetStoredMap();
    }
    mpCurrentMap = new Map(mnLastInitKFidMap);  // Create the new map
    mpCurrentMap->SetCurrentMap();              // Set it as the active map
    mspMaps.insert(mpCurrentMap);               // Insert it into the atlas
}
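The bookkeeping above guarantees that keyframe ids never overlap across maps: a new map starts at the previous map's largest keyframe id plus one. A short illustration of that rule with made-up ids (not ORB-SLAM3 code):

    #include <iostream>

    int main()
    {
        // Made-up example: the previous map was created at keyframe id 0 and its
        // largest keyframe id is 120, so the next map must start at 121.
        int mnLastInitKFidMap = 0;    // id the current map was created with
        int maxKFidCurrentMap = 120;  // largest keyframe id in the current map

        if (mnLastInitKFidMap < maxKFidCurrentMap)
            mnLastInitKFidMap = maxKFidCurrentMap + 1;

        std::cout << "first keyframe id of the new map: " << mnLastInitKFidMap << std::endl;
        return 0;
    }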
mpTracker = new Tracking(this, mpVocabulary, mpFrameDrawer, mpMapDrawer,
                             mpAtlas, mpKeyFrameDatabase, strSettingsFile, mSensor, settings_, strSequence); // step into this function, TODO3
/**
 * @brief Tracking thread constructor
 * @param pSys           pointer to the System
 * @param pVoc           ORB vocabulary
 * @param pFrameDrawer   frame drawer
 * @param pMapDrawer     map drawer
 * @param pAtlas         atlas
 * @param pKFDB          keyframe database
 * @param strSettingPath path to the settings file
 * @param sensor         sensor type
 * @param settings       settings object
 * @param _strSeqName    sequence name, unused
 */
Tracking::Tracking(System *pSys, ORBVocabulary* pVoc, FrameDrawer *pFrameDrawer, MapDrawer *pMapDrawer,
    Atlas *pAtlas, KeyFrameDatabase* pKFDB, const string &strSettingPath, const int sensor, Settings* settings, const string &_nameSeq)
    : mState(NO_IMAGES_YET), mSensor(sensor), mTrackedFr(0), mbStep(false),
    mbOnlyTracking(false), mbMapUpdated(false), mbVO(false), mpORBVocabulary(pVoc), mpKeyFrameDB(pKFDB),
    mbReadyToInitializate(false), mpSystem(pSys), mpViewer(NULL), bStepByStep(false),
    mpFrameDrawer(pFrameDrawer), mpMapDrawer(pMapDrawer), mpAtlas(pAtlas), mnLastRelocFrameId(0), time_recently_lost(5.0),
    mnInitialFrameId(0), mbCreatedMap(false), mnFirstFrameId(0), mpCamera2(nullptr), mpLastKeyFrame(static_cast<KeyFrame*>(NULL))
{
    // Load camera parameters from settings file
    // Step 1: load the camera parameters from the settings file
    if(settings){
        newParameterLoader(settings);
    }
    else{
        cv::FileStorage fSettings(strSettingPath, cv::FileStorage::READ);

        bool b_parse_cam = ParseCamParamFile(fSettings);
        if(!b_parse_cam)
        {
            std::cout << "*Error with the camera parameters in the config file*" << std::endl;
        }

        // Load ORB parameters
        bool b_parse_orb = ParseORBParamFile(fSettings);
        if(!b_parse_orb)
        {
            std::cout << "*Error with the ORB parameters in the config file*" << std::endl;
        }

        bool b_parse_imu = true;
        if(sensor==System::IMU_MONOCULAR || sensor==System::IMU_STEREO || sensor==System::IMU_RGBD)
        {
            b_parse_imu = ParseIMUParamFile(fSettings);
            if(!b_parse_imu)
            {
                std::cout << "*Error with the IMU parameters in the config file*" << std::endl;
            }

            mnFramesToResetIMU = mMaxFrames;
        }

        if(!b_parse_cam || !b_parse_orb || !b_parse_imu)
        {
            std::cerr << "**ERROR in the config file, the format is not correct**" << std::endl;
            try
            {
                throw -1;
            }
            catch(exception &e)
            {

            }
        }
    }

    initID = 0; lastID = 0;
    mbInitWith3KFs = false;
    mnNumDataset = 0;

    // Iterate over the cameras in the atlas and print their types
    vector<GeometricCamera*> vpCams = mpAtlas->GetAllCameras();
    std::cout << "There are " << vpCams.size() << " cameras in the atlas" << std::endl;
    for(GeometricCamera* pCam : vpCams)
    {
        std::cout << "Camera " << pCam->GetId();
        if(pCam->GetType() == GeometricCamera::CAM_PINHOLE)
        {
            std::cout << " is pinhole" << std::endl;
        }
        else if(pCam->GetType() == GeometricCamera::CAM_FISHEYE)
        {
            std::cout << " is fisheye" << std::endl;
        }
        else
        {
            std::cout << " is unknown" << std::endl;
        }
    }

#ifdef REGISTER_TIMES
    vdRectStereo_ms.clear();
    vdResizeImage_ms.clear();
    vdORBExtract_ms.clear();
    vdStereoMatch_ms.clear();
    vdIMUInteg_ms.clear();
    vdPosePred_ms.clear();
    vdLMTrack_ms.clear();
    vdNewKF_ms.clear();
    vdTrackTotal_ms.clear();
#endif
}

 



 

