1 Introduction

I have recently been working on the extrinsic calibration between a Livox LiDAR and an RGB camera. Most of the open-source methods I found online are built on ROS, and since I am not very familiar with ROS, I first wrote an initial version in plain Python. The full code is below; I will tidy it up when I have time. The camera intrinsics were calibrated with the MATLAB camera calibration toolbox. The rough idea is: take the point cloud of the calibration board, remap its left-right and up-down coordinates to image columns and rows, convert the depth values to gray values to obtain a grayscale image, run traditional image processing on that image to find the circle centers, and then invert the mapping to recover those centers' coordinates in the LiDAR frame.
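To make the coordinate bookkeeping concrete, here is a minimal sketch of that forward/inverse mapping. The millimeter scaling and the Y/Z axis flips follow the full code in section 2, but the helper names lidar_to_pixel / pixel_to_lidar are made up here for illustration and do not appear in the script:

import numpy as np

def lidar_to_pixel(points_m, scale=1000.0):
    # Project board points (X forward, Y left, Z up, in meters) onto a Y-Z image
    # plane; the depth X becomes the gray value. Also return what is needed to invert.
    pts = np.asarray(points_m, dtype=np.float64) * scale      # work in millimeters
    x_min = pts[:, 0].min()
    x_range = max(np.ptp(pts[:, 0]), 1e-6)                    # guard against a zero depth span
    y_max, z_max = pts[:, 1].max(), pts[:, 2].max()
    w = int(y_max - pts[:, 1].min()) + 1
    h = int(z_max - pts[:, 2].min()) + 1
    img = np.zeros((h, w), dtype=np.uint8)
    cols = (y_max - pts[:, 1]).astype(int)                    # left-right -> columns
    rows = (z_max - pts[:, 2]).astype(int)                    # up-down    -> rows
    img[rows, cols] = (255 * (pts[:, 0] - x_min) / x_range).astype(np.uint8)
    return img, (x_min, x_range, y_max, z_max, scale)

def pixel_to_lidar(col, row, gray, params):
    # Invert the mapping for one detected circle center.
    x_min, x_range, y_max, z_max, scale = params
    x = gray * x_range / 255.0 + x_min                        # gray value -> depth (mm)
    y = y_max - col
    z = z_max - row
    return np.array([x, y, z]) / scale                        # back to meters

In the full script the same bookkeeping is inlined rather than factored out: the forward direction lives in projecto2D() and the inverse lives inside cal_rgbd().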

2 Code

import os
import warnings

import pandas as pd
import pcl
import open3d as o3d
import numpy as np
import cv2 as cv

warnings.filterwarnings("ignore")

# camera intrinsics and distortion coefficients (calibrated with the MATLAB toolbox)
rgb_mtx = np.array([[164.9671, 0., 334.1256],
                    [0., 167.1601, 219.2284],
                    [0., 0., 1.]])
rgb_dist = np.array([-0.0844, 0.0065, -2.2149e-04, -1.1157e-04, 2.8005e-04]).reshape((1, 5))
# rgb_mtx = np.array([[120, 0., 640],
#                     [0., 120, 360],
#                     [0., 0., 1.]])
# rgb_dist = np.zeros((5, 1), dtype=np.float64)


def find_lidar_blobs(input_img, show=False):
    input_img = 255 - input_img
    params = cv.SimpleBlobDetector_Params()
    params.minThreshold = 5
    # params.maxThreshold = 5
    params.blobColor = 0
    # Filter by Area.
    params.filterByArea = True
    params.minArea = 400
    params.maxArea = 21000
    # Filter by Circularity
    params.filterByCircularity = True
    params.minCircularity = 0.1
    # Filter by Convexity
    params.filterByConvexity = True
    params.minConvexity = 0.87
    # Filter by Inertia
    params.filterByInertia = True
    params.minInertiaRatio = 0.1
    detector = cv.SimpleBlobDetector_create(params)
    # keypoints is a list; each element is a cv2.KeyPoint,
    # which holds the blob diameter (size) and the center position (pt)
    keypoints = detector.detect(input_img)
    # keypoints = [kp for kp in keypoints if 72 <= kp.size <= 88]
    img_with_keypoints = cv.drawKeypoints(input_img, keypoints, np.array([]), (0, 255, 0),
                                          cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    # size_list = [kp.size for kp in keypoints]
    if show:
        cv.namedWindow("Keypoints", 0)
        cv.imshow('Keypoints', img_with_keypoints)
        cv.waitKey(0)
        cv.destroyAllWindows()
    return keypoints


def filter_raw_pcl(data_path, color_change):
    ori_pcl_data = pd.read_csv(data_path, low_memory=False)
    x_condition = (ori_pcl_data['X'] > 0.6) & (ori_pcl_data['X'] < 2)     # longitudinal (forward)
    y_condition = (ori_pcl_data['Y'] > -0.6) & (ori_pcl_data['Y'] < 0.5)  # lateral
    z_condition = (ori_pcl_data['Z'] > 0.3) & (ori_pcl_data['Z'] < 3)     # height
    ref_condition = (ori_pcl_data['Reflectivity'] > 0) & (ori_pcl_data['Reflectivity'] < 15)
    # ref_condition = (ori_pcl_data['Reflectivity'] > 10)
    filtered_data = ori_pcl_data[x_condition & y_condition & z_condition & ref_condition]
    # filtered_data = ori_pcl_data[x_condition & y_condition & z_condition]
    if color_change:
        ref_min = filtered_data['Reflectivity'].min()
        ref_max = filtered_data['Reflectivity'].max()
        filtered_data['reflectance_normalized'] = (filtered_data['Reflectivity'] - ref_min) / (ref_max - ref_min)
    return filtered_data


def show_pcl(data):
    point_cloud = o3d.geometry.PointCloud()
    # display according to the input type
    if isinstance(data, pd.DataFrame):
        point_cloud.points = o3d.utility.Vector3dVector(data[['X', 'Y', 'Z']].values)
    else:
        point_cloud.points = o3d.utility.Vector3dVector(data)
    o3d.visualization.draw_geometries([point_cloud])


def sac_plane(valid_data_df):
    valid_data = valid_data_df[['X', 'Y', 'Z']].values.astype('float32')
    cloud = pcl.PointCloud(valid_data)
    seg = cloud.make_segmenter()
    seg.set_optimize_coefficients(True)
    seg.set_model_type(pcl.SACMODEL_PLANE)
    seg.set_method_type(pcl.SAC_RANSAC)
    seg.set_distance_threshold(0.01)
    inliers, coefficients = seg.segment()
    guess = np.expand_dims(np.array(coefficients), axis=1)
    # signed distance of every point to the fitted plane (kept for debugging)
    res = np.dot(valid_data, guess[:3, :]) + guess[3, 0]
    plane_cloud = cloud.extract(inliers, negative=False)
    plane_cloud_arr = plane_cloud.to_array()
    return plane_cloud_arr, coefficients


def projecto2D(filter_data):
    projected_points = filter_data * 1000  # meters -> millimeters
    x_min, x_max = projected_points[:, 0].min(), projected_points[:, 0].max()
    z_min, z_max = projected_points[:, 2].min(), projected_points[:, 2].max()
    y_min, y_max = projected_points[:, 1].min(), projected_points[:, 1].max()
    x_range = x_max - x_min
    # create the single-channel depth image
    image_width = int(y_max - y_min)
    image_height = int(z_max - z_min)
    # TODO: depth is currently stored as uint8; switch to float16 later
    rgb_image = np.zeros((image_height, image_width, 1), dtype=np.uint8)
    # map each point's depth into the image
    for point in projected_points:
        y, z = int(y_max - point[1]), int(z_max - point[2])
        if 0 <= y < image_width and 0 <= z < image_height:
            rgb_image[z, y] = int(255 * (point[0] - x_min) / x_range)
    cv.imwrite("demo.jpg", rgb_image)
    return [x_min, x_range, y_max, z_max], rgb_image


def cal_rgbd(img_path, xyz, show=False):
    if isinstance(img_path, str):
        ori_img = cv.imread(img_path, cv.IMREAD_UNCHANGED)
    else:
        ori_img = img_path
    img_show = cv.cvtColor(ori_img, cv.COLOR_GRAY2BGR)
    # binarize the image
    ret, smoothed_img = cv.threshold(ori_img, 15, 255, cv.THRESH_BINARY)
    kernel = np.ones((5, 5), np.uint8)
    smoothed_img = cv.dilate(smoothed_img, kernel, iterations=1)
    if show:
        cv.namedWindow('threshold', 0)
        cv.imshow("threshold", smoothed_img)
        while True:
            key = cv.waitKey()
            if key == 27:
                break
    # Gaussian blur
    smoothed_img = cv.GaussianBlur(smoothed_img, (5, 5), 0)
    keypoints = find_lidar_blobs(smoothed_img, show=show)
    gridcell_list = []
    for keypoint in keypoints:
        x, z = int(keypoint.pt[0]), int(keypoint.pt[1])
        valid_point_num = 0
        value_sum = 0
        for i in range(10):
            for j in range(10):
                # depth-based filtering
                # TODO: the depth values are still off; they decrease from left to right
                if ori_img[z + j, x + i] > 30:
                    valid_point_num += 1
                    value_sum += ori_img[z + j, x + i]
        if valid_point_num == 0:
            continue
        mean_value = value_sum / valid_point_num
        # map the gray value back to the original depth
        mean_value_trans = mean_value * xyz[1] / 255 + xyz[0]
        # print("mean_value_trans", mean_value_trans)
        if isinstance(mean_value_trans, np.ndarray):
            mean_value_trans = mean_value_trans.item()
        # xyz = [x_min, x_range, y_max, z_max], see projecto2D()
        x_trans = xyz[2] - x
        z_trans = xyz[3] - z
        if z_trans > 400:  # additional height filtering in the vertical direction
            gridcell_list.append([mean_value_trans, x_trans, z_trans])
            cv.circle(img_show, (x, z), 1, (0, 0, 255))
    if show:
        cv.namedWindow("img_show", 0)
        cv.imshow('img_show', img_show)
        while True:
            key = cv.waitKey()
            if key == 27:
                break
    return np.array(gridcell_list, dtype=np.float64)


def add_world_point(circle_dist=8, boardh=12, boardw=4):
    world_points = []
    for i in range(boardh):
        for j in range(boardw):
            if i % 2 == 0:
                x = circle_dist * (2 * j)
            else:
                x = circle_dist * (2 * j + 1)
            y = circle_dist * i
            world_points.append([x, y, 0])
    return np.array(world_points, dtype=np.float32)


def find_rgb_blobs(input_img, show=False):
    params = cv.SimpleBlobDetector_Params()
    params.minThreshold = 5
    # params.maxThreshold = 5
    # params.blobColor = 0
    # Filter by Area.
    params.filterByArea = True
    params.minArea = 20
    params.maxArea = 500
    # Filter by Circularity
    params.filterByCircularity = True
    params.minCircularity = 0.1
    # Filter by Convexity
    params.filterByConvexity = True
    params.minConvexity = 0.87
    # Filter by Inertia
    params.filterByInertia = True
    params.minInertiaRatio = 0.1
    detector = cv.SimpleBlobDetector_create(params)
    keypoints = detector.detect(input_img)
    img_with_keypoints = cv.drawKeypoints(input_img, keypoints, np.array([]), (0, 255, 0),
                                          cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    # size_list = [kp.size for kp in keypoints]
    if show:
        cv.namedWindow("Keypoints", 0)
        cv.imshow('Keypoints', img_with_keypoints)
        cv.waitKey(0)
        cv.destroyAllWindows()
    return keypoints


def cal_rgb(rgb_img_path, show=False):
    rgb_cell_list = []
    rgb_img = cv.imread(rgb_img_path)
    gray = cv.cvtColor(rgb_img, cv.COLOR_BGR2GRAY)
    # hard-coded region of interest: black out everything outside the board
    rgb_img[0:100, :] = 0
    rgb_img[:, 0:250] = 0
    rgb_img[250:480, :] = 0
    rgb_img[:, 400:] = 0
    keypoints = find_rgb_blobs(rgb_img, show=show)
    for keypoint in keypoints:
        x, y = int(keypoint.pt[0]), int(keypoint.pt[1])
        rgb_cell_list.append([x, y])
    rgb_cell_list = np.array(rgb_cell_list)
    ori = np.copy(rgb_cell_list)
    # sort the RGB centers strictly top-to-bottom, left-to-right
    ori[:, 1] = np.round(ori[:, 1] / 8) * 8
    indices = np.lexsort((ori[:, 0], ori[:, 1]))
    rgb_cell_list = rgb_cell_list[indices]
    # word_points = add_world_point()
    # if show:
    #     i = 0
    #     for point in rgb_cell_list:
    #         cv.putText(rgb_img, "{}".format(i), (point[0], point[1]), cv.FONT_HERSHEY_TRIPLEX,
    #                    thickness=1, fontScale=0.5, color=(0, 255, 0))
    #         i = i + 1
    #     cv.namedWindow("img_show_whole", 0)
    #     cv.imshow('img_show_whole', rgb_img)
    #     while True:
    #         key = cv.waitKey()
    #         if key == 27:
    #             break
    # rgb_cell_list = np.array(rgb_cell_list, dtype=np.float32).reshape(1, -1, 2)
    # word_points = np.array(word_points).reshape(1, -1, 3)
    # ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(word_points, rgb_cell_list, gray.shape[::-1], None, None)
    # if ret:
    #     print(mtx, dist)
    rgb_cell_list = rgb_cell_list.tolist()[:12]
    return np.array(rgb_cell_list, dtype=np.float64)


def getRT(world_points, gridcell_list, mtx, dist):
    _, rvec, tvec = cv.solvePnP(world_points, gridcell_list, mtx, dist, flags=cv.SOLVEPNP_ITERATIVE)
    rotation_m, _ = cv.Rodrigues(rvec)  # Rodrigues transform: rotation vector -> rotation matrix
    # sanity check: R^T * R should be the identity
    RT = np.transpose(rotation_m)
    shouldBeIdentity = np.dot(RT, rotation_m)
    I = np.identity(3, dtype=rotation_m.dtype)
    n = np.linalg.norm(I - shouldBeIdentity)
    assert n < 1e-6
    return rotation_m, tvec


def main():
    show = True
    data_path = os.path.expanduser('~/cxx_project/lidar_rgb_calib/data/0221/2024-02-21_18-06-03.Csv')
    filter_data = filter_raw_pcl(data_path, False)
    # show_pcl(filter_data)
    calib_board, conf = sac_plane(filter_data)
    xyz, depth_img = projecto2D(calib_board)
    depth_cell_list = cal_rgbd(depth_img, xyz, show=show)
    depth_ori = np.copy(depth_cell_list)
    # sort the depth centers top-to-bottom, left-to-right to match the RGB ordering
    depth_ori[:, 2] = np.round(depth_ori[:, 2] / 60) * 60
    indices = np.lexsort((-depth_ori[:, 1], -depth_ori[:, 2]))
    depth_cell_list = depth_cell_list[indices] / 1000  # millimeters -> meters
    # depth_cell_list = depth_cell_list[:, [0, 2, 1]]
    print("depth_cell_list:", depth_cell_list)
    # cv.imread does not expand '~', so do it explicitly
    rgb_img_path = os.path.expanduser('~/cxx_project/lidar_rgb_calib/data/0221/BIAODINGBAN/undistort_img/52.jpg')
    img = cv.imread(rgb_img_path)
    rgb_cell_list = cal_rgb(rgb_img_path, show=show)
    print("rgb_cell_list:", rgb_cell_list)
    R, T = getRT(depth_cell_list, rgb_cell_list, rgb_mtx, rgb_dist)
    print("R:{}, T:{}".format(R, T))
    image_points, _ = cv.projectPoints(calib_board, R, T, rgb_mtx, rgb_dist)
    for point in image_points:
        x, y = point[0]
        cv.circle(img, (int(x), int(y)), radius=1, color=(0, 255, 0), thickness=-1)
    # if show:
    cv.namedWindow("res", 0)
    cv.imshow('res', img)
    cv.waitKey(0)
    # cv.destroyAllWindows()


if __name__ == "__main__":
    main()
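main() only checks the result visually by reprojecting the whole board plane onto the image. As a quick numeric sanity check (a sketch reusing the variables already computed in main() above, not part of the original script), the detected LiDAR circle centers can be reprojected with the estimated extrinsics and compared against the circle centers detected in the RGB image:

    # inside main(), after getRT(): per-point and RMS reprojection error in pixels
    proj, _ = cv.projectPoints(depth_cell_list, R, T, rgb_mtx, rgb_dist)
    err = np.linalg.norm(proj.reshape(-1, 2) - rgb_cell_list, axis=1)
    print("per-point reprojection error (px):", np.round(err, 2))
    print("RMS reprojection error (px):", np.sqrt(np.mean(err ** 2)))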