"""
|
||
本模块包含了坐标系转换相关的功能类和函数,主要实现以下功能:
|
||
1. 像素坐标系到机械臂基座坐标系的转换
|
||
2. 相机坐标系到机械臂末端坐标系的转换
|
||
3. 机械臂末端坐标系到机械臂基座坐标系的转换
|
||
4. 坐标系转换过程中的深度处理和验证
|
||
"""
|
||
|
||
import copy
import cv2
import numpy as np
import math
import yaml
from pathlib import Path
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import CubicSpline

from tools.log import CustomLogger

try:
    from .vector import SimpleNormalVector
except ImportError:
    from vector import SimpleNormalVector

class CoordinateTransformer:
    def __init__(self, intrinsics, color_image, depth_image, position, quaternion, vtxdb=None):
        """
        Initialize the coordinate transformer.

        Args:
            intrinsics: camera intrinsic parameters
            color_image: color image
            depth_image: depth image
            position: robot-arm position
            quaternion: robot-arm orientation quaternion
            vtxdb: optional configuration store used to read calibration values
        """
        # Basic parameter initialization
        self.logger = CustomLogger(propagate=True)
        self.intrinsics = intrinsics
        self.color_image = color_image
        self.depth_image = depth_image
        self.color_visualize = None
        self.position = position
        self.quaternion = quaternion
        self.insert_queue = False

        # Coordinate range initialization
        self.x_range = (-0.3, 0.5)
        self.y_range = (-0.5, 1.7)
        self.z_range = (0.65, 1.0)

        # Manual offsets in the camera coordinate system
        self.camera_offset_x = 0  # millimeters
        self.camera_offset_y = 0  # millimeters
        self.camera_offset_z = 0  # millimeters

        self.vtxdb = vtxdb

        # Initialize camera parameters
        self._init_camera_params()

        # Initialize the normal-vector estimator
        self._init_pose_vector()
        self.camera_matrix = None
        self.camera_trans = None

        # Read hand-eye calibration and offsets from the config store
        # (guarded, since vtxdb defaults to None and the fallback calibration is used then)
        if self.vtxdb is not None:
            self.camera_matrix = np.array(self.vtxdb.get("robot_config", "camera.camera_matrix"))
            self.camera_trans = np.array(self.vtxdb.get("robot_config", "camera.camera_trans"))

            camera_offset = self.vtxdb.get("robot_config", "camera.camera_offset")
            if camera_offset:
                self.set_camera_offset(camera_offset[0], camera_offset[1], camera_offset[2])

    def _init_camera_params(self):
        """Initialize camera parameters, read from the configuration file."""
        file_path = Path('/home/jsfb/jsfb_ws/global_config/camera_config.yaml')

        # Default parameters
        default_params = {
            'bias_x': 20,
            'bias_y': 20,
            'camera_Upper_limit': 55,
            'camera_Lower_limit': 404
        }

        if not file_path.exists():
            self._set_camera_params(default_params)
            self.logger.log_warning("Camera config file not found, using default parameters")
            return

        try:
            with open(file_path, 'r') as file:
                camera_config = yaml.safe_load(file)
                if camera_config.get('camera') == 'new':
                    params = {
                        'bias_x': 20,
                        'bias_y': 0,
                        'camera_Upper_limit': 30,
                        'camera_Lower_limit': 375
                    }
                    self._set_camera_params(params)
                    self.logger.log_info("Using new camera parameter configuration")
                elif camera_config.get('camera') == 'old':
                    self._set_camera_params(default_params)
                    self.logger.log_info("Using old camera parameter configuration")
                else:
                    self._set_camera_params(default_params)
                    self.logger.log_warning("Unknown camera type, using default parameters")
        except yaml.YAMLError as e:
            self._set_camera_params(default_params)
            self.logger.log_error(f"Error reading camera config file: {e}")

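    # A minimal sketch of the camera_config.yaml layout that the parsing above expects;
    # this is inferred from the code, and the real file may carry additional keys:
    #
    #   camera: new    # or "old"; any other value falls back to the default parameters
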
    def _set_camera_params(self, params):
        """Set camera parameters."""
        self.bias_x = params['bias_x']
        self.bias_y = params['bias_y']
        self.camera_Upper_limit = params['camera_Upper_limit']
        self.camera_Lower_limit = params['camera_Lower_limit']

        self.logger.log_info(f"Camera parameters set: bias_x={self.bias_x}, bias_y={self.bias_y}")
        self.logger.log_info(f"Camera limits set: Upper={self.camera_Upper_limit}, Lower={self.camera_Lower_limit}")

    def _init_pose_vector(self):
        """Initialize the normal-vector estimator."""
        # # Previous PoseVector-based implementation, kept for reference:
        # # Save the depth image as an .npy file
        # output_npy_path = 'aucpuncture2point/configs/using_img/result_depth.npy'
        # np.save(output_npy_path, self.depth_image)

        # self.vector = PoseVector(
        #     output_npy_path,
        #     self.intrinsics,
        #     self.x_range,
        #     self.y_range,
        #     self.z_range,
        #     'aucpuncture2point/configs/using_img/color.png',
        #     is_human_mask=False
        # )
        self.vector = SimpleNormalVector(
            'aucpuncture2point/configs/using_img/depth.png',
            'aucpuncture2point/configs/using_img/color.png',
            self.intrinsics
        )

    def pixel_to_robotarm(self, target_pixel_points_list, number):
        """
        Convert pixel coordinates to coordinates in the robot-arm base frame.

        Args:
            target_pixel_points_list: list of target pixel points
            number: image index, used when saving the visualization result

        Returns:
            final_to_arm_point: list of converted robot-arm coordinate points, or None if validation fails
        """
        # Validate the input points
        if not self._validate_points(target_pixel_points_list):
            print("target pixel is not valid")
            return None

        # Process each pixel point
        points, points_fxl, processed_image = self._process_pixel_points(target_pixel_points_list)
        if self.insert_queue:
            return None
        print(f"-------------{number}")
        # Save the visualization result
        self._save_visualization(processed_image, number)

        # Compute the final coordinates
        return self._calculate_final_coordinates(points, points_fxl)

    def _validate_points(self, points):
        """Validate the input points."""
        if points[0][1] <= self.camera_Upper_limit:
            self.logger.log_error(f"The first input point has a y pixel at or below {self.camera_Upper_limit}, where there is no depth; prompt the user to lie down a bit further")
            return False

        # Clamp the y coordinate to the allowed range
        for point in points:
            if point[1] > self.camera_Lower_limit:
                point[1] = self.camera_Lower_limit

        return True

    # def _process_pixel_points(self, target_pixel_points_list):
    #     """Process the list of pixel points (previous implementation, kept for reference)."""
    #     color_image = copy.deepcopy(self.color_image)
    #     depth_image = copy.deepcopy(self.depth_image)
    #     points = []
    #     points_fxl = []
    #     last_depth = 800

    #     for coordinate in target_pixel_points_list:
    #         x, y = int(round(coordinate[0])), int(round(coordinate[1]))
    #         depth = self._get_depth(x, y, depth_image, last_depth)

    #         # Draw the processed point on the image
    #         cv2.circle(color_image, (x*2, y*2), 3, (51, 102, 255), 4)

    #         # Coordinate conversion
    #         camera_coordinate = self.deproject_pixel_to_point(self.intrinsics, (x, y), depth)
    #         arm_coordinate = self.calculate_arm_coords(camera_coordinate)

    #         points.append({"coordinates": arm_coordinate, "label": ''})
    #         points_fxl.append({"coordinates": camera_coordinate/1000.0, "label": ''})

    #     return points, points_fxl, color_image

    def smooth_curve_with_spline(self, points):
        """Smooth a curve using cubic-spline interpolation."""
        points = np.array(points)
        x = points[:, 0]
        y = points[:, 1]

        # Parameterize the curve on t in [0, 1] and fit one spline per axis
        t = np.linspace(0, 1, len(points))
        cs_x = CubicSpline(t, x)
        cs_y = CubicSpline(t, y)

        t_dense = np.linspace(0, 1, 300)
        x_smooth = cs_x(t_dense)
        y_smooth = cs_y(t_dense)

        smooth_points = np.array([x_smooth, y_smooth]).T.astype(np.int32)
        return smooth_points

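    # Hypothetical usage sketch for smooth_curve_with_spline (pixel values made up):
    # a handful of scaled pixel points yields a dense 300x2 int32 polyline for cv2.polylines.
    #
    #   smooth = transformer.smooth_curve_with_spline([[100, 200], [150, 260], [220, 250]])
    #   assert smooth.shape == (300, 2)
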
    def _process_pixel_points(self, target_pixel_points_list):
        """Process the list of pixel points."""
        color_image = copy.deepcopy(self.color_image)
        depth_image = copy.deepcopy(self.depth_image)
        if self.color_visualize is not None:
            color_visualize = copy.deepcopy(self.color_visualize)
        else:
            color_visualize = color_image
        points = []
        points_fxl = []
        last_depth = 800

        # Get the sizes of the original image and the visualization image
        h_orig, w_orig = color_image.shape[:2]
        h_visual, w_visual = color_visualize.shape[:2]  # color_visualize is a class attribute and may differ in size

        # Compute the scaling factors
        scale_x = w_visual / w_orig
        scale_y = h_visual / h_orig
        draw_points = []
        for coordinate in target_pixel_points_list:
            if self.insert_queue:
                # Abort early; return a tuple so the caller's unpacking still works
                return None, None, None
            x, y = int(round(coordinate[0])), int(round(coordinate[1]))
            depth = self._get_depth(x, y, depth_image, last_depth)

            # Draw the point on color_visualize (scaled to the visualization size)
            x_scaled = int(round(x * scale_x))
            y_scaled = int(round(y * scale_y))
            if len(target_pixel_points_list) < 2:  # only one point: draw it directly
                cv2.circle(color_visualize, (x_scaled, y_scaled), 3, (47, 255, 173), 4)  # BGR 173 255 47
            else:
                draw_points.append([x_scaled, y_scaled])
                # print(f"draw points----{(x_scaled, y_scaled)}")

            # Coordinate conversion
            camera_coordinate = self.deproject_pixel_to_point(self.intrinsics, (x, y), depth)

            offset_camera_coordinate = copy.deepcopy(camera_coordinate)

            # Apply the manual offsets in the camera coordinate system
            offset_camera_coordinate[0] += self.camera_offset_x  # X-axis offset
            offset_camera_coordinate[1] += self.camera_offset_y  # Y-axis offset
            offset_camera_coordinate[2] += self.camera_offset_z  # Z-axis offset

            arm_coordinate = self.calculate_arm_coords(offset_camera_coordinate)

            points.append({"coordinates": arm_coordinate, "label": ''})
            points_fxl.append({"coordinates": camera_coordinate / 1000.0, "label": ''})

        # Draw the spline-interpolated curve on the visualization image
        if len(draw_points) > 0:
            # image_curve_spline = np.zeros((h_visual, h_visual, 3), dtype=np.uint8)
            smooth_points_spline = self.smooth_curve_with_spline(draw_points)
            points_array_spline = smooth_points_spline.reshape((-1, 1, 2))
            cv2.polylines(color_visualize, [points_array_spline], isClosed=False, color=(47, 255, 173), thickness=2, lineType=cv2.LINE_AA)

        return points, points_fxl, color_visualize  # return the annotated visualization image

    def _save_visualization(self, image, number):
        """Save the visualization result."""
        # cv2.imwrite('aucpuncture2point/configs/using_img/Pixel_Visualization.jpg', image)
        cv2.imwrite(f"aucpuncture2point/configs/using_img/Pixel_Visualization_{number}.jpg", image)

    def _calculate_final_coordinates(self, points, points_fxl):
        """Compute the final coordinates."""
        # Compute the normal vectors
        normals_data = self.vector.run(points_fxl, visualize=True)

        # Merge the results
        combined_results = []
        for i, data in enumerate(normals_data):
            normal_arm = self.calculate_arm_normal(data)
            # List concatenation: [x, y, z] + [nx, ny, nz] -> a 6-element row consumed by calculate_target_Euler
            normals_position = points[i]["coordinates"] + normal_arm["transformed_normal"]
            combined_results.append({
                "label": points[i]["label"],
                "normals_position": normals_position
            })

        # Compute the final robot-arm coordinates
        point = self.calculate_target_Euler(combined_results)
        return [item['position'] for item in point]

    def _get_depth(self, x, y, depth_image, last_depth):
        """Get a depth value, handling invalid (zero) depth readings."""
        if depth_image[y, x] != 0:
            return depth_image[y, x]

        # Fall back to the median of non-zero depths in a local window, then to the last valid depth
        non_zero_depths = depth_image[max(0, y-5):y+5, max(0, x-5):x+5]
        non_zero_depths = non_zero_depths[non_zero_depths != 0]
        return np.median(non_zero_depths) if non_zero_depths.size > 0 else last_depth

    def deproject_pixel_to_point(self, intrinsics, pixel, depth):
        """Convert a pixel coordinate and depth value into a 3D point in the camera coordinate system."""
        x, y = pixel
        fx, fy = intrinsics['fx'], intrinsics['fy']
        cx, cy = intrinsics['cx'], intrinsics['cy']

        # Pinhole back-projection
        X = (x - cx) * depth / fx
        Y = (y - cy) * depth / fy
        Z = depth

        return np.array([X, Y, Z])

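    # Worked example of the pinhole back-projection above, using made-up intrinsics:
    # with fx = fy = 600, cx = 320, cy = 240, pixel (420, 300) and depth 800 mm,
    #   X = (420 - 320) * 800 / 600 ≈ 133.3 mm
    #   Y = (300 - 240) * 800 / 600 = 80.0 mm
    #   Z = 800 mm
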
    # def calculate_arm_coords(self, camera_coordinate):
    #     """Convert camera coordinates to robot-arm coordinates (previous implementation, kept for reference)."""
    #     # Camera coordinate system to robot-arm end-effector coordinate system
    #     arm_coordinate = [
    #         (camera_coordinate[1] - self.bias_y) / 1000,  # x
    #         (camera_coordinate[0] - self.bias_x) / 1000,  # y
    #         (-camera_coordinate[2] - 172) / 1000          # z
    #     ]

    #     # Robot-arm end-effector coordinate system to base coordinate system
    #     return (R.from_quat(self.quaternion).as_matrix() @ arm_coordinate + self.position).tolist()

    def calculate_arm_coords(self, camera_coordinate):
        """
        Convert camera coordinates to robot-arm base coordinates.

        :param camera_coordinate: list, coordinate in the camera coordinate system (millimeters)
        :return: list, coordinate in the robot-arm base coordinate system (meters)
        """
        camera_coordinate = np.array(camera_coordinate)
        # Camera coordinate system -> robot-arm end-effector coordinate system
        arm_coordinate = [0, 0, 0]
        if self.camera_matrix is not None and self.camera_trans is not None:
            camera_matrix = copy.deepcopy(self.camera_matrix)
            camera_trans = copy.deepcopy(self.camera_trans)
        else:
            # Fallback hand-eye calibration (rotation matrix and translation in meters)
            camera_matrix = np.array([[-0.04169144, 0.99888175, -0.02229508],
                                      [0.99912694, 0.04174075, 0.00175093],
                                      [0.00267958, -0.02220262, -0.9997499]])
            camera_trans = np.array([-0.00209053, -0.01021561, -0.16877656])
        arm_coordinate = camera_matrix @ (camera_coordinate / 1000) + camera_trans
        # Robot-arm end-effector coordinate system -> robot-arm base coordinate system
        targetPosition = R.from_quat(self.quaternion).as_matrix() @ arm_coordinate + self.position
        return targetPosition.tolist()

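    # The chain implemented above, written out in one line (p_cam in millimeters, result in meters):
    #   p_base = R(quaternion) @ (camera_matrix @ (p_cam / 1000) + camera_trans) + position
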
    def calculate_arm_normal(self, data):
        """Convert a normal vector from the camera coordinate system to the robot-arm base coordinate system."""
        normal_vector = data['normal']

        # Camera coordinate system -> robot-arm end-effector coordinate system (axis swap and flip)
        arm_normal = [normal_vector[1], normal_vector[0], -normal_vector[2]]

        # Robot-arm end-effector coordinate system -> base coordinate system
        transformed_normal = R.from_quat(self.quaternion).as_matrix() @ arm_normal

        return {
            'label': data['label'],
            'transformed_normal': transformed_normal.tolist()
        }

    def calculate_target_Euler(self, points):
        """Compute the target Euler angles."""
        converted_points = []

        for point_data in points:
            point = np.array(point_data['normals_position'], dtype=np.float64).reshape(1, -1)
            temp_point = np.zeros((1, 6), dtype=np.float64)

            # Copy the position coordinates
            temp_point[:, :3] = point[:, :3]

            # Compute the Euler angles from the normal components
            # temp_point[0][3] = -math.asin(point[0][4])
            # temp_point[0][4] = -math.atan2(-point[0][3], point[0][5])
            # temp_point[0][5] = 0.0

            temp_point[0][3] = -math.asin(point[0][4])
            temp_point[0][4] = math.atan2(point[0][3], point[0][5])
            temp_point[0][5] = -np.pi / 2

            # Keep the angle within [-pi, pi]
            if temp_point[0][5] > np.pi:
                temp_point[0][5] -= 2 * np.pi

            converted_points.append({
                'label': point_data['label'],
                'position': temp_point[0].tolist()
            })

        return converted_points

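    # Reading of the Euler extraction above (an interpretation inferred from the code, not from robot docs):
    # point[0][3:6] holds the normal components (nx, ny, nz) appended in _calculate_final_coordinates;
    # roll = -asin(ny) and pitch = atan2(nx, nz) orient the tool according to that normal,
    # while yaw is pinned to -pi/2.
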
    def set_camera_offset(self, offset_x=0, offset_y=0, offset_z=0):
        """
        Set manual offsets in the camera coordinate system, clamped to ±50 mm.

        Args:
            offset_x: X-axis offset in millimeters, clamped to ±50 mm
            offset_y: Y-axis offset in millimeters, clamped to ±50 mm
            offset_z: Z-axis offset in millimeters, clamped to ±50 mm
        """
        # Clamp the offsets to the ±50 mm range
        offset_x = max(-50, min(50, offset_x))
        offset_y = max(-50, min(50, offset_y))
        offset_z = max(-50, min(50, offset_z))

        self.camera_offset_x = offset_x
        self.camera_offset_y = offset_y
        self.camera_offset_z = offset_z
        self.logger.log_info(f"Camera-frame offsets set: X={offset_x}mm, Y={offset_y}mm, Z={offset_z}mm")