loam_livox mapping + 3D_detect implementation
1. Overall workflow of loam_livox + 3D_detect:
2. Building and installing loam_livox:
Machine environment: Ubuntu 18.04.6 LTS, ROS Melodic
Python 3.6 / Python 2.7
gcc/g++: gcc (Ubuntu 9.4.0-1ubuntu1~18.04) 9.4.0
Dependencies: Eigen3 (3.3.7), Ceres (1.14.0), PCL (1.9.0)
loam_livox repository: https://github.com/hku-mars/loam_livox.git
Install the ROS dependencies:
sudo apt-get install ros-melodic-cv-bridge ros-melodic-tf ros-melodic-message-filters ros-melodic-image-transport
In my understanding the camera part can be ignored; read_camera.cpp can simply be excluded from the build in the CMake file later on.
Clone the repository and build it:
First change line 5 of CMakeLists.txt to C++17: set(CMAKE_CXX_FLAGS "-std=c++17")
If find_package fails, check that the library versions match and whether multiple versions of the same library coexist in your environment.
cd ~/catkin_ws/src
git clone https://github.com/hku-mars/loam_livox.git
cd ../
catkin_make
source ~/catkin_ws/devel/setup.bash
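After the build, the mapping side is started from the launch files shipped with the repository, for example (launch file names may differ between versions, so check the launch/ directory of your checkout; the bag name below is a placeholder):
roslaunch loam_livox livox.launch
rosbag play YOUR_LIVOX_DATA.bag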
3. Implementing the 3D_detect node (3D bounding boxes from point clouds, based on OpenPCDet)
From the node/topic graph of the original pipeline, the 3D_detect node can simply subscribe to the point cloud published by the laser_mapping node and run its processing on that. References:
https://github.com/Cram3r95/**
https://github.com/Kin_Zhang/**
Rough workflow:
Define a processing class that holds the config file, the weight file, the variables it needs, the detect/run function, and so on:
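The class relies on OpenPCDet's Python API plus a few helpers taken from the referenced ROS wrappers (Draw3DBox, get_xyz_points, RATE_VIZ, move_lidar_center, ...). As a rough sketch, the top of such a node would import something like the following; DemoDataset is the small dataset class from OpenPCDet's tools/demo.py, and the wrapper-specific names are assumptions based on the repos linked above:

import time
from pathlib import Path

import numpy as np
import rospy
import ros_numpy
import torch
from sensor_msgs.msg import PointCloud2
from visualization_msgs.msg import MarkerArray

# OpenPCDet API
from pcdet.config import cfg, cfg_from_yaml_file
from pcdet.models import build_network, load_data_to_gpu
from pcdet.utils import common_utils

# DemoDataset lives in OpenPCDet's tools/demo.py (tools/ must be on PYTHONPATH)
from demo import DemoDataset

# Assumed wrapper-specific helpers/constants (see the referenced repos):
# Draw3DBox, get_xyz_points, bc (terminal colours), RATE_VIZ,
# move_lidar_center, threshold, sub_lidar_topic, inference_time_list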
class Processor_ROS:
    def __init__(self, config_path, model_path):
        self.points = None
        self.config_path = config_path
        self.model_path = model_path
        self.device = None
        self.net = None
        self.voxel_generator = None
        self.inputs = None
        self.pub_rviz = None
        self.no_frame_id = True
        self.rate = RATE_VIZ

    def set_pub_rviz(self, box3d_pub, marker_frame_id='camera_init'):
        # Draw3DBox is the RViz helper that turns predicted boxes into MarkerArray messages
        self.pub_rviz = Draw3DBox(box3d_pub, marker_frame_id, self.rate)

    def set_viz_frame_id(self, marker_frame_id):
        self.pub_rviz.set_frame_id(marker_frame_id)

    def initialize(self):
        self.read_config()

    def read_config(self):
        # Load the OpenPCDet model config and build the network from it
        config_path = self.config_path
        cfg_from_yaml_file(self.config_path, cfg)
        self.logger = common_utils.create_logger()
        # DemoDataset is only used for its pre-processing pipeline;
        # root_path just has to point at some .bin sample
        self.demo_dataset = DemoDataset(
            dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False,
            root_path=Path("/home/kin/workspace/OpenPCDet/tools/000002.bin"),
            ext='.bin')
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.net = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES),
                                 dataset=self.demo_dataset)
        print("Model path: ", self.model_path)
        self.net.load_params_from_file(filename=self.model_path, logger=self.logger, to_cpu=True)
        self.net = self.net.to(self.device).eval()

    def get_template_prediction(self, num_samples):
        # Empty KITTI-style prediction template, filled in by run()
        ret_dict = {
            'name': np.zeros(num_samples), 'truncated': np.zeros(num_samples),
            'occluded': np.zeros(num_samples), 'alpha': np.zeros(num_samples),
            'bbox': np.zeros([num_samples, 4]), 'dimensions': np.zeros([num_samples, 3]),
            'location': np.zeros([num_samples, 3]), 'rotation_y': np.zeros(num_samples),
            'score': np.zeros(num_samples), 'boxes_lidar': np.zeros([num_samples, 7])
        }
        return ret_dict
Create a subscriber and a publisher (the lidar topic to subscribe to is configured in the yaml file):
sub_ = rospy.Subscriber(sub_lidar_topic[0], PointCloud2, rslidar_callback, queue_size=10, buff_size=2**24)
pub_rviz = rospy.Publisher('detect_3dbox',MarkerArray, queue_size=5)
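Putting the pieces together, the node's entry point might look roughly like this sketch; the config/checkpoint paths and the node name are placeholders, and sub_lidar_topic is assumed to have been read from the yaml config:

if __name__ == "__main__":
    # Placeholder paths -- point these at your own OpenPCDet config and checkpoint
    config_path = "cfgs/kitti_models/pointpillar.yaml"
    model_path = "pointpillar.pth"

    proc_1 = Processor_ROS(config_path, model_path)
    proc_1.initialize()                      # loads the config and the network weights

    rospy.init_node('detect_3d_node')
    sub_ = rospy.Subscriber(sub_lidar_topic[0], PointCloud2, rslidar_callback,
                            queue_size=10, buff_size=2**24)
    pub_rviz = rospy.Publisher('detect_3dbox', MarkerArray, queue_size=5)
    proc_1.set_pub_rviz(pub_rviz)            # wraps the publisher in the Draw3DBox helper

    rospy.spin()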
Whenever the subscribed topic receives a ROS point cloud message, the callback rslidar_callback(msg) is invoked:
def rslidar_callback(msg):
    select_boxs, select_types = [], []
    if proc_1.no_frame_id:
        # Use the frame_id of the incoming cloud (camera_init from loam_livox) for the markers
        proc_1.set_viz_frame_id(msg.header.frame_id)
        print(f"{bc.OKGREEN} setting marker frame id to lidar: {msg.header.frame_id} {bc.ENDC}")
        proc_1.no_frame_id = False

    frame = msg.header.seq  # frame id -> not timestamp
    msg_cloud = ros_numpy.point_cloud2.pointcloud2_to_array(msg, squeeze=True)
    np_p = get_xyz_points(msg_cloud, True)
    scores, dt_box_lidar, types, pred_dict = proc_1.run(np_p, frame)

    # Keep only detections above the confidence threshold
    for i, score in enumerate(scores):
        if score > threshold:
            select_boxs.append(dt_box_lidar[i])
            select_types.append(pred_dict['name'][i])

    if len(select_boxs) > 0:
        # publish only the kept boxes, with their matching labels
        proc_1.pub_rviz.publish_3dbox(np.array(select_boxs), -1, select_types)
        print_str = f"Frame id: {frame}. Prediction results: \n"
        for i in range(len(pred_dict['name'])):
            # accumulate one line per prediction
            print_str += f"Type: {pred_dict['name'][i]:.3s} Prob: {scores[i]:.2f}\n"
        print("Detected vehicles / pedestrians:")
        print(print_str)
    else:
        print(f"\n{bc.FAIL} No confident prediction in this time stamp {bc.ENDC}\n")
        print(" -------------------------- no detection results! ------------------------------ ")
pointcloud2_to_array() from ros_numpy.point_cloud2 converts the received ROS PointCloud2 message into a numpy structured array, and get_xyz_points() then extracts the x, y, z and intensity fields from it.
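get_xyz_points() is a small helper from the referenced wrapper code; a typical implementation (only a sketch, assuming the incoming cloud carries x/y/z/intensity fields) looks like this:

def get_xyz_points(cloud_array, remove_nans=True, dtype=np.float32):
    """Flatten a structured point array into an (N, 4) [x, y, z, intensity] array."""
    if remove_nans:
        # drop points whose coordinates are NaN/inf
        mask = (np.isfinite(cloud_array['x']) &
                np.isfinite(cloud_array['y']) &
                np.isfinite(cloud_array['z']))
        cloud_array = cloud_array[mask]
    points = np.zeros(cloud_array.shape + (4,), dtype=dtype)
    points[..., 0] = cloud_array['x']
    points[..., 1] = cloud_array['y']
    points[..., 2] = cloud_array['z']
    points[..., 3] = cloud_array['intensity']
    return points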
A run function is then defined in the processing class:
def run(self, points, frame):
    t_t = time.time()
    num_features = 4  # x, y, z, intensity
    self.points = points.reshape([-1, num_features])

    timestamps = np.empty((len(self.points), 1))
    timestamps[:] = frame
    # self.points = np.append(self.points, timestamps, axis=1)

    # Shift the cloud along x so it falls inside the detection range the model expects
    self.points[:, 0] += move_lidar_center

    input_dict = {
        'points': self.points,
        'frame_id': frame,
    }
    # Reuse OpenPCDet's pre-processing (voxelization etc.) and batch collation
    data_dict = self.demo_dataset.prepare_data(data_dict=input_dict)
    data_dict = self.demo_dataset.collate_batch([data_dict])
    load_data_to_gpu(data_dict)

    torch.cuda.synchronize()
    t = time.time()
    pred_dicts, _ = self.net.forward(data_dict)
    torch.cuda.synchronize()
    inference_time = time.time() - t
    inference_time_list.append(inference_time)
    mean_inference_time = sum(inference_time_list) / len(inference_time_list)

    # Move the predictions back to the CPU
    boxes_lidar = pred_dicts[0]["pred_boxes"].detach().cpu().numpy()
    scores = pred_dicts[0]["pred_scores"].detach().cpu().numpy()
    types = pred_dicts[0]["pred_labels"].detach().cpu().numpy()

    pred_boxes = np.copy(boxes_lidar)
    pred_dict = self.get_template_prediction(scores.shape[0])
    pred_dict['name'] = np.array(cfg.CLASS_NAMES)[types - 1]
    pred_dict['score'] = scores
    pred_dict['boxes_lidar'] = pred_boxes

    return scores, boxes_lidar, types, pred_dict
Using the trained model, run() predicts a confidence score for each candidate object; finally the boxes above the threshold are drawn and published to RViz through the Draw3DBox helper attached in set_pub_rviz() (see the class listing above).
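For reference, what a publish_3dbox-style helper does internally is roughly the following: every predicted box (x, y, z, dx, dy, dz, yaw) becomes a CUBE marker in a MarkerArray (and if the cloud was shifted by move_lidar_center before inference, the x coordinate has to be shifted back accordingly). This is only a minimal sketch of the idea, not the actual Draw3DBox implementation:

import math
from visualization_msgs.msg import Marker, MarkerArray

def boxes_to_marker_array(boxes, frame_id='camera_init', lifetime=0.1):
    """Turn an (N, 7) array of [x, y, z, dx, dy, dz, yaw] boxes into cube markers."""
    marker_array = MarkerArray()
    for i, box in enumerate(boxes):
        m = Marker()
        m.header.frame_id = frame_id
        m.header.stamp = rospy.Time.now()
        m.id = i
        m.type = Marker.CUBE
        m.action = Marker.ADD
        # box centre
        m.pose.position.x, m.pose.position.y, m.pose.position.z = box[0], box[1], box[2]
        # yaw-only orientation as a quaternion
        m.pose.orientation.z = math.sin(box[6] / 2.0)
        m.pose.orientation.w = math.cos(box[6] / 2.0)
        # box size
        m.scale.x, m.scale.y, m.scale.z = box[3], box[4], box[5]
        # semi-transparent green
        m.color.r, m.color.g, m.color.b, m.color.a = 0.0, 1.0, 0.0, 0.5
        m.lifetime = rospy.Duration(lifetime)
        marker_array.markers.append(m)
    return marker_array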
The above is only my current understanding while learning; I still have to study the detailed implementation of the detection node, so please take this as a reference and a study note.
Finally, a screenshot of the result: