MassageRobot_Dobot/Massage/MassageControl/tools/full_auto_visual_calibration.py

import cv2
import os
from datetime import datetime
import math
import numpy as np
import sys
import random
from pathlib import Path
import time
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import CubicSpline
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
sys.path.append(str(Path(__file__).resolve().parent.parent))
sys.path.append('/home/jsfb/jsfb_ws/MassageRobot_aubo/Massage/MassageControl')
from hardware.remote_cam import ToolCamera
from hardware.aubo_C5 import AuboC5
np.set_printoptions(precision=8, suppress=True)
# Termination criteria for sub-pixel corner refinement: stop after at most 30 iterations or when the correction falls below 0.001.
criteria = (cv2.TERM_CRITERIA_MAX_ITER | cv2.TERM_CRITERIA_EPS, 30, 0.001)
# Calibration helper class. Workflow:
# -- connect to the AuboC5 arm
# -- connect to the ToolCamera
# -- move to the initial pose and compute the next capture poses
# -- run the hand-eye calibration
# -- print the result
class Calibration:
def __init__(self, board_size, square_size, file_folder):
"""
        Initialise the calibration helper.
        board_size: number of inner chessboard corners, as (width, height)
        square_size: edge length of one square, in metres
        file_folder: directory in which captured images are saved
"""
self.board_size = board_size
self.square_size = square_size
self.objp = self._create_object_points()
self.image_points = []
self.robot_poses = []
self.poselist = []
self.cam = ToolCamera(host='127.0.0.1')
self.cam.start()
self.file_address = file_folder
rgb_image, depth_image, camera_intrinsics = self.cam.get_latest_frame()
cv2.imshow("rgb1",rgb_image)
cv2.waitKey(0)
if rgb_image is None :
print(f'===============相机连接失败请检查并重试==============')
else:
print(f'===============相机连接成功==============')
self.size = rgb_image.shape[:2]
self.intrinsics = camera_intrinsics
self.arm = AuboC5()
self.arm.init()
self.obj_points = []
print(f'===============机械臂连接成功==============')
def _create_object_points(self):
"""创建标定板的3D点"""
objp = np.zeros((self.board_size[0] * self.board_size[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:self.board_size[0], 0:self.board_size[1]].T.reshape(-1, 2) * self.square_size
return objp
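    # Worked example (illustrative): with board_size=(9, 6) and square_size=0.02 the grid above
    # contains 54 points, running (0,0,0), (0.02,0,0), ..., (0.16,0,0), then (0,0.02,0), ...
    # on the Z=0 plane, i.e. the same row-major order in which findChessboardCorners reports corners.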
def collect_data(self):
        # Move to the initial photo pose.
# move = [(90) * (np.pi / 180), 4.23 * (np.pi / 180), 124.33 * (np.pi / 180), 61.56 * (np.pi / 180), -89.61 * (np.pi / 180), 0 * (np.pi / 180)]
move = [(89.74) * (np.pi / 180), 11.21 * (np.pi / 180), 97.12 * (np.pi / 180), 29.06 * (np.pi / 180), -89.61 * (np.pi / 180), 0 * (np.pi / 180)]
code = self.arm.move_joint(move,4,wait=True)
if code == -1:
print("运动到拍照位置失败")
# time.sleep(3)
        # Capture a frame.
rgb, depth, intrinsics = self.cam.get_latest_frame()
cv2.imshow("rgb2",rgb)
cv2.waitKey(0)
if self.check_corners_add_sample() is False:
print("初始位置下没有找到标定板")
rgb, depth, intrinsics = self.cam.get_latest_frame()
pose = self.arm.robot_rpc_client.getRobotInterface(self.arm.robot_name).getRobotState().getTcpPose()
cv2.imshow("not right",rgb)
cv2.waitKey(0)
rgb, depth, intrinsics = self.cam.get_latest_frame()
gray = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, self.board_size, None)
robot_pose = self.get_pose_homomat()
print(f"EE pose:{robot_pose}")
center = self.init_chessboard_center(rgb)
print(f"current center EE: {center}")
        ''' Z-axis dithering '''
        # Trajectory points generated in the end-effector (EE) frame.
trajectory, rpy_values = self.generate_cyclic_trajectory_above_center(center)
self.visualization(center,trajectory,rpy_values)
homo_trajectory = np.hstack((trajectory, np.ones((len(trajectory), 1))))
tcp_pose = self.get_pose_homomat()
        # Transform the generated trajectory from the EE frame into the base (world) frame.
trajectory = (tcp_pose @ homo_trajectory.T).T[:, :3]
rpy_angles = []
for tool_point in trajectory:
vector_center_to_tool = tool_point - center
roll, pitch, yaw = self.get_roll_pitch_yaw(vector_center_to_tool)
rpy_angles.append((roll, pitch, yaw))
rpy_angles = np.array(rpy_angles)
pose = self.concatenate_path_and_rpy(trajectory,rpy_angles )
        # Chessboard centre expressed in the world frame.
center_homo = np.append(center, 1).reshape(4, 1)
tcp_pose = self.get_pose_homomat()
center_world_homo = tcp_pose @ center_homo # 4x4 *4x1 =4x1
center_world = center_world_homo[:3].flatten()
print(f"world center: {center_world} \n top pose:\n {pose[:5]}")
lenOfResult = self.traversal_position_direction(pose)
        ''' Z-axis dithering '''
assert lenOfResult > 20, "没有收集到足够多的数据来完成精确的标定"
def generate_cyclic_trajectory_above_center(self,center, num_steps=12, x_amplitude=0.12, z_amplitude=0.20, frequency=4, min_z_offset=0.60,step_z=0.01):
"""
        Generate a periodic, wave-like trajectory whose points all stay above the centre point.
"""
trajectory = []
rpy_values = []
y_constant = center[1]
x_values = np.linspace(-x_amplitude, x_amplitude, num_steps)
y_values = [center[1]+0.05,center[1]+0.15]
# y_values = [center[1]+0.05,center[1]+0.15,center[1]+0.30]
# y_values = [center[1]+0.15,center[1]+0.03,center[1]+0.06,center[1]+0.10,center[1]+0.15]
# y_values = [center[1],center[1],center[1]]
for i in range(len(y_values)):
y = y_values[i]
for x in x_values:
                # Use |sin| so that Z always stays above center[2].
z = z_amplitude * np.abs(np.sin(frequency * x * np.pi)) + center[2] + min_z_offset + step_z*i
tool_point = np.array([x + center[0], y, z])
trajectory.append(tool_point)
                # Vector pointing from the centre towards the trajectory point.
vector_center_to_tool = tool_point - center
roll, pitch, yaw = self.get_roll_pitch_yaw(vector_center_to_tool)
rpy_values.append((roll, pitch, yaw))
print(f"above part center:{center}\n {trajectory[:5]}")
return np.array(trajectory), np.array(rpy_values)
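    # The height profile used above is
    #   z(x) = z_amplitude * |sin(frequency * x * pi)| + center[2] + min_z_offset + step_z * i
    # so every point stays at least min_z_offset above the board centre, and each successive
    # Y row (index i) is lifted by a further step_z.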
def get_roll_pitch_yaw(self,vector):
"""
        Compute roll, pitch, yaw (in radians) of the direction from the origin along the given vector.
        Assumes Z points forward, Y points up and X points to the right.
"""
x, y, z = vector
roll = math.atan2(y, z)
pitch = math.atan2(-x, math.sqrt(y**2 + z**2))
        yaw = 0  # Assume no yaw about the pointing direction; adjust if the application requires it.
return roll, pitch, yaw
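    # Quick sanity check of the convention above (illustrative values):
    #   vector = (0, 0, 1) -> roll = 0,       pitch = 0        (pointing straight along +Z)
    #   vector = (0, 1, 1) -> roll = 45 deg,  pitch = 0
    #   vector = (1, 0, 1) -> roll = 0,       pitch = -45 deg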
def concatenate_path_and_rpy(self,path, rpy_angles):
"""
Concatenates a list of positions [x, y, z] with a corresponding list of RPY angles [roll, pitch, yaw].
Args:
path (list of lists): A list where each inner list contains [x, y, z].
rpy_angles (list of lists or tuples): A list where each inner list or tuple contains [roll, pitch, yaw].
Returns:
list of lists: A new list where each inner list is a concatenation of the
corresponding position and RPY angles: [x, y, z, roll, pitch, yaw].
"""
if len(path) != len(rpy_angles):
raise ValueError("The lengths of the path and rpy_angles lists must be the same.")
combined_list = []
for i in range(len(path)):
position = path[i]
orientation = rpy_angles[i]
# combined = np.concatenate((position,orientation))
combined = np.hstack((position.flatten(), orientation))
combined_list.append(combined)
print(f"combined xyz|rpy :{combined}")
return combined_list
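    # Example (illustrative):
    #   concatenate_path_and_rpy([np.array([0.1, 0.2, 0.3])], [(0.0, 0.1, 0.0)])
    #   -> [array([0.1, 0.2, 0.3, 0.0, 0.1, 0.0])]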
def check_corners_add_sample(self):
rgb, depth, intrinsics = self.cam.get_latest_frame()
gray = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, self.board_size)
pic_name = self.file_address + "/"+str(len(self.image_points)) + ".jpg"
if ret:
robot_pose = self.get_pose_homomat()
self.add_sample(gray,corners,robot_pose)
print(f"save path as: {pic_name}")
            # Draw the detected corners (optional, for visualisation).
# cv2.drawChessboardCorners(rgb, self.board_size, corners, ret)
cv2.imwrite(pic_name, rgb)
return True
cv2.imwrite(pic_name, rgb)
return False
def add_sample(self, gray, corners,robot_pose):
"""
        Add one calibration sample.
        gray: grayscale image in which the chessboard was detected
        corners: chessboard corners returned by cv2.findChessboardCorners
        robot_pose: 4x4 homogeneous TCP pose of the robot arm
"""
        # Refine the detected corners to sub-pixel accuracy.
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
# if self._validate_corners(corners2):
        if corners2 is not None:
self.obj_points.append(self.objp)
self.image_points.append(corners2)
self.robot_poses.append(robot_pose)
print(f"insert {robot_pose}")
def get_pose_homomat(self):
pose = self.arm.robot_rpc_client.getRobotInterface(self.arm.robot_name).getRobotState().getTcpPose()
x, y, z, rx, ry, rz = pose
r = R.from_euler('xyz', [rx, ry, rz])
rotation_matrix = r.as_matrix()
translation_vector = np.array([x, y, z]).reshape(3, 1)
        homo_mat = np.eye(4)  # start from a 4x4 identity matrix
homo_mat[:3, :3] = rotation_matrix
homo_mat[:3, 3] = translation_vector.flatten()
return homo_mat
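    # The matrix returned above packs the TCP pose as [[R, t], [0, 1]], so a homogeneous point
    # expressed in the tool frame maps into the base frame via p_base = homo_mat @ p_tool.
    # Note the assumption that getTcpPose() reports (x, y, z, rx, ry, rz) with 'xyz' Euler
    # angles in radians; a different controller convention would require changing from_euler.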
def camera_calib(self):
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(self.obj_points, self.image_points, (self.size[1],self.size[0]) , None, None)
if ret:
print("内参矩阵:\n", mtx) # 内参数矩阵
print("畸变系数:\n", dist) # 畸变系数 distortion cofficients = (k_1,k_2,p_1,p_2,k_3)
print("++++++++++相机标定完成++++++++++++++")
else:
print("-----------相机标定失败--------------")
return rvecs, tvecs
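    # The rvecs/tvecs returned here are the per-view board->camera transforms (one Rodrigues
    # rotation vector and one translation per captured image); calibrate() reuses them as the
    # "target to camera" inputs of cv2.calibrateHandEye.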
def split_robot_pose(self):
"""
        Split the stored robot poses (4x4 homogeneous matrices) into R_arm and t_arm.
        Uses:
            self.robot_poses: list of 4x4 homogeneous matrices collected during sampling.
        Returns:
            R_arm: list of 3x3 numpy rotation matrices.
            t_arm: list of 3x1 numpy translation vectors.
"""
R_arm = []
t_arm = []
for pose_matrix in self.robot_poses:
            # 1. Extract the 3x3 rotation matrix.
rotation_matrix = pose_matrix[:3, :3]
R_arm.append(rotation_matrix)
            # 2. Extract the translation vector.
            translation_vector = pose_matrix[:3, 3]  # translation part of the homogeneous matrix (length-3 array)
            translation_vector = translation_vector.reshape(3, 1)  # reshape into a 3x1 column vector
t_arm.append(translation_vector)
return R_arm, t_arm
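    # The lists produced here are the gripper->base ("hand") poses that cv2.calibrateHandEye
    # expects as its first two arguments in calibrate().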
def calibrate(self):
rvecs, tvecs = self.camera_calib()
r_arm, t_arm= self.split_robot_pose()
rvecs_array = np.array(rvecs)
tvecs_array = np.array(tvecs)
t_arma = np.array(t_arm)
r_arma = np.array(r_arm)
print(rvecs_array.shape)
print(tvecs_array.shape)
print(t_arma.shape)
print(r_arma.shape)
R, t = cv2.calibrateHandEye(r_arma, t_arma, rvecs_array, tvecs_array, cv2.CALIB_HAND_EYE_HORAUD)
print("+++++++++++手眼标定完成+++++++++++++++")
return R, t
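    # cv2.calibrateHandEye solves AX = XB (Horaud's method here) and returns the camera->gripper
    # rotation R and translation t. A minimal sketch of assembling them into one homogeneous
    # matrix (illustrative only, not part of the original flow):
    #   T_cam2gripper = np.eye(4)
    #   T_cam2gripper[:3, :3] = R
    #   T_cam2gripper[:3, 3] = np.asarray(t).flatten()
    #   # a camera-frame point p then maps to the tool frame via T_cam2gripper @ [*p, 1]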
def init_chessboard_center(self,image):
'''
        Return the chessboard centre expressed in the EE (tool) frame.
'''
intrinsics = self.intrinsics
        # Detect the chessboard to obtain its pose in the image.
ret, corners = cv2.findChessboardCorners(image, self.board_size)
if not ret: raise ValueError("标定板未检测到")
        # Compute the chessboard centre in the camera frame.
obj_pts = np.array([[i*self.square_size, j*self.square_size, 0] for i in range(self.board_size[0]) for j in range(self.board_size[1])], dtype=np.float32)
print(intrinsics)
camera_matrix = np.array([
[intrinsics['fx'], 0, intrinsics['cx']],
[0, intrinsics['fy'], intrinsics['cy']],
[0, 0, 1]
], dtype=np.float32)
dist_coeffs = np.array([
intrinsics['distortion_coeffs']['k1'],
intrinsics['distortion_coeffs']['k2'],
intrinsics['distortion_coeffs']['p1'],
intrinsics['distortion_coeffs']['p2'],
intrinsics['distortion_coeffs']['k3']
        ], dtype=np.float32)  # keep the dtype consistent as well
print("obj_pts shape:", obj_pts.shape)
print("corners shape:", corners.shape)
ret, rvec, tvec = cv2.solvePnP(obj_pts, corners, camera_matrix, dist_coeffs)
        # Assume the chessboard's own frame is the reference: no extra rotation or translation.
        rvec_initial = np.zeros((3, 1), dtype=np.float32)  # initial rotation vector
        tvec_initial = np.zeros((3, 1), dtype=np.float32)  # initial translation vector
        # Initial transform (chessboard to its own reference frame).
initial_transform = np.eye(4)
initial_transform[:3, :3], _ = cv2.Rodrigues(rvec_initial)
initial_transform[:3, 3] = tvec_initial.flatten()
print(f"board itselt {initial_transform}")
# PnP 结果(初始坐标系到相机)
cam2initial = np.eye(4)
cam2initial[:3, :3], _ = cv2.Rodrigues(rvec)
cam2initial[:3, 3] = tvec.flatten()
        # Compose the final transform: chessboard -> reference frame -> camera.
board2cam = initial_transform @ cam2initial
print(f"board2cam\n{board2cam}")
        # Inverse of the hand-eye result: the fixed transform from camera to TCP/tool.
cam2tool = np.eye(4)
cam2tool[:3, :3] = np.array([[0.0, 1.0 , 0.0],
[1.0 , 0.0 , 0.0],
[0.0, 0.0 , -1.0]])
cam2tool[:3, 3] = np.array([0.00209053, 0.01021561, -0.16877656]).flatten()
        ### Locate the chessboard centre.
camera_center = None
chessboard_center_object = self.get_chessboard_center_object_points()
if chessboard_center_object is not None:
# Convert to homogeneous coordinates
chessboard_center_homogeneous = np.append(chessboard_center_object, 1)
# Transform the center to camera coordinates
camera_center_homogeneous = cam2tool@ board2cam @ chessboard_center_homogeneous
camera_center = camera_center_homogeneous[:3]
return camera_center
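    # Transform chain used above: solvePnP gives the board->camera transform, and cam2tool
    # (hard-coded, presumably from a previous hand-eye result) takes camera coordinates into
    # the tool/EE frame, so the value returned is the board centre in the EE frame.
    # collect_data() later lifts it into the base frame with the current TCP pose.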
def get_chessboard_center_object_points(self):
rows, cols = self.board_size
obj_pts = self.objp
if obj_pts is None or len(obj_pts) != rows * cols:
return None
# Get the range of x and y indices
x_indices = obj_pts[:, 0].flatten()
y_indices = obj_pts[:, 1].flatten()
center_x_index = (np.max(x_indices) + np.min(x_indices)) / 2.0
center_y_index = (np.max(y_indices) + np.min(y_indices)) / 2.0
center_z = 0 # Assuming the chessboard is on the Z=0 plane in its frame
return np.array([center_x_index, center_y_index, center_z])
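    # Example (illustrative): for a 9x6 board with 20 mm squares the object points span
    # 0..0.16 m in x and 0..0.10 m in y, so the centre returned is (0.08, 0.05, 0.0).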
def generate_perturbed_joint_angles_rad(self,original_angle):
"""
        Args:
            original_angle (list or tuple): six joint angles, in radians (joint 6 is forced to 0).
        Returns:
            list of numpy.ndarray: seven joint configurations (in radians):
            - group 1: the original angles;
            - groups 2-5: joint 4 perturbed by +-rand4 combined with joint 5 perturbed by +-rand5;
            - groups 6-7: the same perturbations at half amplitude.
"""
original_angle = np.array(original_angle)
if original_angle.shape != (6,):
raise ValueError("输入的角度列表必须包含6个元素。")
original_angle[5] = 0.0
        # Generate random perturbations.
        rand5 = random.uniform(-20*np.pi/180, 20*np.pi/180)  # perturbation for joint 5; range can be tuned
        rand4 = random.uniform(-10*np.pi/180, 10*np.pi/180)  # perturbation for joint 4; range can be tuned
angle_array = []
        # 1. Original angles.
        angle_array.append(original_angle.copy())
        # 2. Joint 4 - rand4, joint 5 - rand5.
        angle_c = original_angle.copy()
        angle_c[3] -= rand4
        angle_c[4] -= rand5
        angle_array.append(angle_c)
        # 3. Joint 4 + rand4, joint 5 + rand5.
        angle_d = original_angle.copy()
        angle_d[3] += rand4
        angle_d[4] += rand5
        angle_array.append(angle_d)
        # 4. Joint 4 + rand4, joint 5 - rand5.
        angle_e = original_angle.copy()
        angle_e[3] += rand4
        angle_e[4] -= rand5
        angle_array.append(angle_e)
        # 5. Joint 4 - rand4, joint 5 + rand5.
        angle_f = original_angle.copy()
        angle_f[3] -= rand4
        angle_f[4] += rand5
        angle_array.append(angle_f)
        # 6. Joint 4 - rand4/2, joint 5 + rand5/2.
        angle_g = original_angle.copy()
        angle_g[3] -= rand4/2
        angle_g[4] += rand5/2
        angle_array.append(angle_g)
        # 7. Joint 4 + rand4/2, joint 5 - rand5/2.
        angle_h = original_angle.copy()
        angle_h[3] += rand4/2
        angle_h[4] -= rand5/2
        angle_array.append(angle_h)
        # Degree-to-radian conversion kept for reference (not used: the inputs are already in radians).
# modified_angles_rad = np.array([np.deg2rad(angles) for angles in angle_array])
# import pdb
# pdb.set_trace()
return angle_array
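    # Example (illustrative): calling this with the current joint reading yields seven joint
    # vectors, the original plus the +-rand4 / +-rand5 combinations on joints 4 and 5, which
    # traversal_position_direction() then visits one after another to capture extra samples.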
def traversal_position_direction(self,path):
for pose in path:
current_pose = self.arm.robot_rpc_client.getRobotInterface(self.arm.robot_name).getRobotState().getTcpPose()
print(f" elipse path is {pose}\n current pose is {current_pose}" )
code = self.arm.move_line( pose )
if code == -1:
print(f"运动到拍照位置失败:{pose}")
else:
joint = self.arm.robot_rpc_client.getRobotInterface(self.arm.robot_name).getRobotState().getJointPositions()
angle_list = self.generate_perturbed_joint_angles_rad(joint)
print(f"\n {angle_list}\n Angles Position ")
for joint in angle_list:
print(f"current joint: {joint}")
code = self.arm.move_joint(joint,4,wait=True)
if code != -1:
print(f"move success")
self.check_corners_add_sample()
return len(self.robot_poses)
def visualization(self,center,trajectory,rpy_values):
        # Visualisation.
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(111, projection='3d')
        # Plot the centre point.
ax.scatter(*center, color='r', marker='o', s=100, label='Center Point')
        # Plot the trajectory.
ax.plot(trajectory[:, 0], trajectory[:, 1], trajectory[:, 2], color='b', label='Tool Trajectory')
        # Annotate RPY values at a subset of the trajectory points.
num_samples_rpy = 20
indices = np.linspace(0, len(rpy_values) - 1, num_samples_rpy, dtype=int)
for i in indices:
tool_point = trajectory[i]
roll, pitch, yaw = rpy_values[i]
vector = tool_point - center
            # Anchor the arrow at tool_point.
ax.quiver(tool_point[0], tool_point[1], tool_point[2],
vector[0], vector[1], vector[2],
length=0.2, color='g', alpha=0.7, label='Vector Center to Tool' if i == indices[0] else "")
            # Place the text label next to tool_point.
ax.text(tool_point[0] + 0.05, tool_point[1] + 0.05, tool_point[2] + 0.05,
f'RPY:({math.degrees(roll):.2f},{math.degrees(pitch):.2f},{math.degrees(yaw):.2f})',
color='g', fontsize=8)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_title('Cyclic Trajectory Above Center Point')
ax.legend()
ax.view_init(azim=-120, elev=30)
plt.grid(True)
plt.show()
        # Print the RPY values (example, disabled).
# print("\nRoll, Pitch, Yaw values (degrees) along the trajectory:")
# for i, (r, p, y) in enumerate(rpy_values):
# if i % 10 == 0: # Print every 10th value
# print(f"Step {i}: Roll: {math.degrees(r):.2f}, Pitch: {math.degrees(p):.2f}, Yaw: {math.degrees(y):.2f}")
if __name__ == '__main__':
    # Calibration board size: number of inner corners along (width, height).
calibration_board_size = [9,6]
calibration_board_square_size = 0.020 #unit - meter
systemPath = "/home/jsfb/Documents/"
now = datetime.now()
date_time_str = now.strftime("%d_%H_%M")
folder_name = f"calibration_data_{date_time_str}"
systemPath+=folder_name
os.makedirs(systemPath, exist_ok=True)
calib = Calibration(calibration_board_size,calibration_board_square_size,systemPath)
calib.collect_data()
r, t = calib.calibrate()
print("旋转矩阵:")
print(r)
print("平移向量:")
print(t)
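    # A possible follow-up (sketch, not part of the original script): persist the result so other
    # tools can load it, e.g.
    #   T = np.eye(4); T[:3, :3] = r; T[:3, 3] = np.asarray(t).flatten()
    #   np.save(os.path.join(systemPath, "hand_eye_cam2tool.npy"), T)   # file name is illustrative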
# arm = AuboC5()
# arm.init()
# aubo_algorithm = arm.robot_rpc_client.getRobotInterface(arm.robot_name).getRobotAlgorithm().inverseKinematics()
# # 查看 rpc_client 对象的所有属性和方法
# print(dir(aubo_algorithm))