import cv2
import numpy as np
import open3d as o3d


def display_rgb_point_cloud(rgb_image, depth_image, camera_intrinsics):
    """
    Display an RGB point cloud: convert the color image and depth image into a colored point cloud and show it.

    Args:
        rgb_image (np.ndarray): Color image (BGR, as loaded by cv2.imread).
        depth_image (np.ndarray): Depth image, assumed to be in millimetres.
        camera_intrinsics (dict): Camera intrinsics with keys 'fx', 'fy', 'cx', 'cy'.

    Returns:
        None
    """
    # Get the sizes of the color and depth images
    rgb_h, rgb_w = rgb_image.shape[:2]
    depth_h, depth_w = depth_image.shape[:2]
    print("RGB image size: {}x{}".format(rgb_w, rgb_h))
    print("Depth image size: {}x{}".format(depth_w, depth_h))

    # Compute the centre-crop offsets (assumes the color image is at least as large as the depth image)
    start_x = (rgb_w - depth_w) // 2
    start_y = (rgb_h - depth_h) // 2

    # Crop the color image to match the depth image size
    rgb_image_cropped = rgb_image[start_y:start_y + depth_h, start_x:start_x + depth_w]
    # OpenCV loads images in BGR order; swap channels to RGB so Open3D renders correct colors
    rgb_image_cropped = cv2.cvtColor(rgb_image_cropped, cv2.COLOR_BGR2RGB)

    # Convert depth to float and from millimetres to metres (assuming the depth image is stored in millimetres)
    depth_image = depth_image.astype(np.float32) / 1000.0

    # Accumulators for point positions and colors
    points = []
    colors = []

    # Camera intrinsics
    fx = camera_intrinsics['fx']
    fy = camera_intrinsics['fy']
    cx = camera_intrinsics['cx']
    cy = camera_intrinsics['cy']

    # Back-project every pixel with a valid depth into a 3D point
    for v in range(depth_h):
        for u in range(depth_w):
            z = depth_image[v, u]
            if z > 0:  # skip invalid (zero) depth
                x = (u - cx) * z / fx
                y = (v - cy) * z / fy
                points.append([x, y, z])
                colors.append(rgb_image_cropped[v, u] / 255.0)  # normalize colors to [0, 1]

    # Convert the point and color lists to NumPy arrays
    points = np.array(points)
    colors = np.array(colors)

    # Build the Open3D point cloud
    point_cloud = o3d.geometry.PointCloud()
    point_cloud.points = o3d.utility.Vector3dVector(points)
    point_cloud.colors = o3d.utility.Vector3dVector(colors)

    # Display the point cloud
    o3d.visualization.draw_geometries([point_cloud])


# Camera intrinsics of the RGB-D sensor
intrinsics = {'fx': 454.114136, 'fy': 454.114136, 'cx': 325.119751, 'cy': 235.403824}
# Load the color image (BGR) and the depth image (kept in its original bit depth)
color_image = cv2.imread('/home/jsfb/jsfb_ws/MassageRobot_Dobot/Massage/aucpuncture2point/configs/using_img/color.png')
depth_img = cv2.imread('/home/jsfb/jsfb_ws/MassageRobot_Dobot/Massage/aucpuncture2point/configs/using_img/depth.png', cv2.IMREAD_UNCHANGED)
display_rgb_point_cloud(color_image, depth_img, intrinsics)
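

# --- Alternative: vectorized back-projection via Open3D's built-in RGBD pipeline ---
# A minimal sketch, not part of the original script, showing how the same colored point
# cloud could be produced without the per-pixel Python loop. It assumes the depth PNG is
# 16-bit millimetres and reuses the same centre-crop as above. The helper name
# `display_rgb_point_cloud_o3d` and the 3 m truncation distance are assumptions.
def display_rgb_point_cloud_o3d(rgb_image, depth_image, camera_intrinsics):
    depth_h, depth_w = depth_image.shape[:2]
    # Centre-crop the color image to the depth resolution, as in the loop-based version
    start_x = (rgb_image.shape[1] - depth_w) // 2
    start_y = (rgb_image.shape[0] - depth_h) // 2
    rgb_cropped = rgb_image[start_y:start_y + depth_h, start_x:start_x + depth_w]
    rgb_cropped = cv2.cvtColor(rgb_cropped, cv2.COLOR_BGR2RGB)  # Open3D expects RGB order

    # Wrap the NumPy arrays as Open3D images and combine them into an RGBD image
    color_o3d = o3d.geometry.Image(np.ascontiguousarray(rgb_cropped))
    depth_o3d = o3d.geometry.Image(depth_image.astype(np.uint16))
    rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
        color_o3d, depth_o3d,
        depth_scale=1000.0,   # depth stored in millimetres -> metres
        depth_trunc=3.0,      # drop points farther than 3 m (assumed working range)
        convert_rgb_to_intensity=False)

    # Back-project all valid pixels at once using the pinhole intrinsics
    intrinsic = o3d.camera.PinholeCameraIntrinsic(
        depth_w, depth_h,
        camera_intrinsics['fx'], camera_intrinsics['fy'],
        camera_intrinsics['cx'], camera_intrinsics['cy'])
    cloud = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd, intrinsic)
    o3d.visualization.draw_geometries([cloud])


# Example usage (commented out so the script still opens only one viewer window):
# display_rgb_point_cloud_o3d(color_image, depth_img, intrinsics)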