From d44b28d50d33de52874bb1c6f410c107c6aeb421 Mon Sep 17 00:00:00 2001 From: Jay Paek Date: Fri, 19 Dec 2025 14:07:49 -0800 Subject: [PATCH] Combined pointcloud transforms to prevent rounding errors Equivalent numerical computation but more robust to rounding error if translation values are large (in consideration of UTM coordinates). --- python-sdk/nuscenes/nuscenes.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/python-sdk/nuscenes/nuscenes.py b/python-sdk/nuscenes/nuscenes.py index e6427087..668137e8 100644 --- a/python-sdk/nuscenes/nuscenes.py +++ b/python-sdk/nuscenes/nuscenes.py @@ -889,13 +889,20 @@ def map_pointcloud_to_image(self, # Second step: transform from ego to the global frame. poserecord = self.nusc.get('ego_pose', pointsensor['ego_pose_token']) - pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix) - pc.translate(np.array(poserecord['translation'])) + sensor_timestamp_ego_pose = np.eye(4) + sensor_timestamp_ego_pose[:3, :3] = Quaternion(poserecord['rotation']).rotation_matrix + sensor_timestamp_ego_pose[:3, 3] = np.array(poserecord['translation']) # Third step: transform from global into the ego vehicle frame for the timestamp of the image. poserecord = self.nusc.get('ego_pose', cam['ego_pose_token']) - pc.translate(-np.array(poserecord['translation'])) - pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix.T) + image_timestamp_ego_pose = np.eye(4) + image_timestamp_ego_pose[:3, :3] = Quaternion(poserecord['rotation']).rotation_matrix + image_timestamp_ego_pose[:3, 3] = np.array(poserecord['translation']) + + # Compute the transformation matrix from point sensor timestamp to image timestamp. + sensor_to_image_matrix = np.linalg.inv(image_timestamp_ego_pose).dot(sensor_timestamp_ego_pose) + pc.rotate(sensor_to_image_matrix[:3, :3]) + pc.translate(sensor_to_image_matrix[:3, 3]) # Fourth step: transform from ego into the camera. 
cs_record = self.nusc.get('calibrated_sensor', cam['calibrated_sensor_token'])