| import copy |
| from typing import Dict, Optional |
|
|
| import numpy as np |
|
|
| import utils.calibration as calib_utils |
|
|
| |
|
|
|
|
def create_bb_points(size: np.ndarray) -> np.ndarray:
    """Create bounding box's 8 corners in the object coordinate

    Note: the object location is assumed to be at the center of the
    object (not the bottom center), so every corner sits at +/- half
    of the corresponding extent.

    Args:
        size: (3, ), length, width and height, obtained from
            carla_utils.vec2np(obj.bounding_box.extent)

    Returns:
        corners: 8 x 4 homogeneous points, in the object coordinate

    Carla use left-handed coordinate, world coordinate:
        +x -> front
        +y -> right
        +z -> up
          4 -------- 5
         /|         /|
        7 -------- 6 .
        | |        | |
        . 0 -------- 1
        |/         |/
        3 -------- 2
    e.g., if take points of 0, 1, 2, 3, it will return the box in bottom
          if take points of 0, 3, 4, 7, it will return the box on the left
    """
    half = np.asarray(size, dtype=float) / 2.0

    # Sign pattern of (x, y, z) for each of the 8 corners, in the order
    # shown in the diagram above: bottom face (0-3) then top face (4-7).
    signs = np.array(
        [
            [+1, -1, -1],
            [+1, +1, -1],
            [-1, +1, -1],
            [-1, -1, -1],
            [+1, -1, +1],
            [+1, +1, +1],
            [-1, +1, +1],
            [-1, -1, +1],
        ],
        dtype=float,
    )

    # Homogeneous coordinates: scale the signs by the half extents and
    # append a column of ones.
    return np.hstack((signs * half, np.ones((8, 1))))
|
|
|
|
def box_in_obj_to_world(box_in_object: np.ndarray, transform: np.ndarray) -> np.ndarray:
    """Map box corners from the object frame into the world frame

    Args:
        box_in_object: 8 x 4 homogeneous corners in the object coordinate
        transform: 4 x 4, object to world transformation

    Returns:
        8 x 4 homogeneous corners in the world coordinate
    """
    # (T @ X^T)^T == X @ T^T: apply the pose to every corner row at once.
    return np.matmul(box_in_object, np.transpose(transform))
|
|
|
|
| |
|
|
|
|
def get_box_in_world(
    location: np.ndarray, rotation: np.ndarray, size: np.ndarray
) -> np.ndarray:
    """Return 3D bounding box in world coordinate

    Args:
        location: object location, passed to calib_utils.create_transform
        rotation: object rotation, passed to calib_utils.create_transform
        size: (3, ), length, width and height of the box

    Returns:
        (8, 4) homogeneous box corners in the world coordinate
    """
    # Build the corners in the object frame, then pose them into the world.
    obj_to_world: np.ndarray = calib_utils.create_transform(location, rotation)
    return box_in_obj_to_world(create_bb_points(size), obj_to_world)
|
|
|
|
| |
|
|
|
|
def box_in_world_to_ego(
    box_in_world: np.ndarray, ego_transform: np.ndarray
) -> np.ndarray:
    """Convert bbox in the world coordinate to the ego coordinate

    Args:
        box_in_world: (8, 4), 8 corners in the world coordinate
        ego_transform: (4, 4), transformation from ego to the world coordinate

    Returns:
        box_in_ego: (8, 4), 8 corners in the ego coordinate
    """
    corners = copy.copy(box_in_world)

    # Invert the ego->world pose so it maps world points into the ego frame.
    world_to_ego = np.linalg.inv(ego_transform)
    return (world_to_ego @ corners.T).T
|
|
|
|
| |
|
|
|
|
def box_in_ego_to_lidar(box_in_ego: np.ndarray, ego2lidar: Optional[np.ndarray] = None) -> np.ndarray:
    """Convert bbox in the ego coordinate to the lidar coordinate

    Arguments
        box_in_ego: 8 x 4 homogeneous corners in the ego coordinate
        ego2lidar: 4 x 4 ego-to-lidar transform; derived from the
            calibration default when omitted

    Returns:
        8 x 4 homogeneous corners in the lidar coordinate
    """
    corners = copy.copy(box_in_ego)

    if ego2lidar is None:
        # Default: invert the calibrated lidar->ego extrinsics.
        ego2lidar = np.linalg.inv(calib_utils.lidar_to_ego_transform())

    return (ego2lidar @ corners.T).T
|
|
|
|
def box_in_lidar_to_ego(box_in_lidar: np.ndarray, lidar2ego: Optional[np.ndarray] = None) -> np.ndarray:
    """Convert bbox in the lidar coordinate to the ego coordinate

    Arguments
        box_in_lidar: 8 x 4 homogeneous corners in the lidar coordinate
        lidar2ego: 4 x 4 lidar-to-ego transform; when omitted, the
            calibrated default from calib_utils is used

    Returns:
        8 x 4 homogeneous corners in the ego coordinate
    """
    box_in_lidar = copy.copy(box_in_lidar)

    if lidar2ego is None:
        # Fall back to the calibrated extrinsics, mirroring
        # box_in_ego_to_lidar. (Removed a stray `assert False` that made
        # the default path unconditionally crash right after computing
        # the default transform.)
        lidar2ego = calib_utils.lidar_to_ego_transform()

    box_in_ego = np.dot(lidar2ego, np.transpose(box_in_lidar)).transpose()

    return box_in_ego
|
|
|
|
| |
|
|
|
|
def box_in_ego_to_camera(box_in_ego: np.ndarray, cam_param: Dict) -> np.ndarray:
    """Convert bbox in the ego coordinate to the camera coordinate

    Args:
        box_in_ego: (8, 4) homogeneous corners in the ego coordinate
        cam_param: camera parameters; when it contains a precomputed
            'ego2cam' (4, 4) transform that is used directly, otherwise
            the transform is derived by inverting the camera-to-ego
            extrinsics from calib_utils

    Returns:
        (8, 4) homogeneous corners in the camera coordinate
    """
    box_in_ego = copy.copy(box_in_ego)

    if 'ego2cam' in cam_param:
        # Fixed annotation typo: was `np.narray`, which does not exist.
        ego2cam: np.ndarray = cam_param['ego2cam']
    else:
        ego2cam = np.linalg.inv(
            calib_utils.camera_to_ego_transform(cam_param)
        )

    box_in_camera = np.dot(ego2cam, np.transpose(box_in_ego)).transpose()

    return box_in_camera
|
|
|
|
def box_projection_to_image(
    box_in_camera: np.ndarray,
    intrinsics: np.ndarray,
    front_dist_threshold: float = 1.5,
    left_hand_system: bool = True,
) -> Optional[np.ndarray]:
    """Project 3D box in camera coordinate to 2D box in image coordinate

    Args:
        box_in_camera: (8, 4), 3D box in the camera coordinate
        intrinsics: (4, 4) camera intrinsic matrix
        front_dist_threshold: used to filter out bad projection due to
            the box being too close to the camera
        left_hand_system: if True, remap axes from Carla's left-handed
            camera frame (x front, y right, z up) to the image frame
            (x right, y down, z front)

    Returns:
        box_2d_in_image: (8, 2), 2D box in the image coordinate, or
        None when no corner lies beyond front_dist_threshold in front
        of the camera (return annotation fixed to Optional accordingly;
        docstring typo "ReturnsL" fixed)
    """
    box_in_camera = copy.copy(box_in_camera).transpose()

    if left_hand_system:
        # Remap (x=front, y=right, z=up) -> (x=right, y=down, z=front).
        box_in_camera = np.vstack(
            (
                box_in_camera[1, :],
                -box_in_camera[2, :],
                box_in_camera[0, :],
                box_in_camera[3, :],
            )
        )

    # Perspective projection: divide pixel coordinates by depth.
    box_2d_in_image = (intrinsics @ box_in_camera).T
    box_2d_in_image[:, 0] = box_2d_in_image[:, 0] / box_2d_in_image[:, 2]
    box_2d_in_image[:, 1] = box_2d_in_image[:, 1] / box_2d_in_image[:, 2]

    # Accept the projection only if at least one corner is far enough in
    # front of the camera; otherwise treat it as a bad projection.
    if np.any(box_2d_in_image[:, 2] > front_dist_threshold):
        box_2d_in_image = np.array(box_2d_in_image)[:, :2]
        return box_2d_in_image
    else:
        return None
|
|