| from typing import Dict, List, Optional, Tuple, Union |
|
|
| import cv2 |
| import utils.transform as box_transform |
| import utils.calibration as calib_utils |
| import utils.bev as bev_vis |
| import utils.canvas_3d as vis_3d |
| import utils.color as color_vis |
| import matplotlib.pyplot as plt |
| plt.switch_backend('agg') |
| import numpy as np |
| import scipy |
| from PIL import Image |
| from pathlib import Path |
| import os |
|
|
|
|
def get_color_object(obj_id: int, color_list: List) -> np.ndarray:
    """Return a stable RGB color (0-255 ints) for an object, keyed by its ID.

    The same ``obj_id`` always maps to the same palette entry, so tracked
    objects keep a consistent color across frames.

    Args:
        obj_id: object/track identifier; wrapped with ``obj_id % len(color_list)``.
        color_list: palette of (r, g, b) float tuples with channels in [0, 1].

    Returns:
        np.ndarray of three ints in [0, 255].
    """
    # Fixed the return annotation: np.array is a factory function, not a type.
    color_float: Tuple[float, float, float] = color_list[obj_id % len(color_list)]
    # Scale each float channel to the 0-255 integer range (int() truncates,
    # matching the original element-wise conversion).
    return np.array([int(channel * 255) for channel in color_float[:3]])
|
|
|
|
def vis_box_on_lidar(
    lidar: Union[str, np.ndarray],
    save_path: str,
    bbox_det_in_ego_list: Optional[List[np.ndarray]] = None,
    bbox_gt_in_ego_list: Optional[List[np.ndarray]] = None,
    id_list: Optional[List[int]] = None,
    state_list: Optional[List[np.ndarray]] = None,
    fut_traj: Optional[List[np.ndarray]] = None,
    fut_traj_mask: Optional[List[np.ndarray]] = None,
    pre_traj: Optional[List[np.ndarray]] = None,
    pre_traj_mask: Optional[List[np.ndarray]] = None,
    ego2lidar: Optional[np.ndarray] = None,
    bev: bool = True,
    box_order_nuscenes: bool = False,
    left_hand: bool = True,
    vis_text: bool = True,
    command: Optional[int] = 2,
    command_dict: Optional[Dict[int, str]] = {0: 'TURN RIGHT', 1: 'TURN LEFT', 2: 'KEEP FORWARD'},
    bev_map: Optional[np.ndarray] = None,
) -> None:
    """Visualize detection/GT boxes and trajectories on the LiDAR point cloud
    and save the rendering to ``save_path``.

    Args:
        lidar: str -> path to the data (raw float32 dump, reshaped to N x 4,
            last column dropped), np array -> N x 3 data already loaded
        save_path: output image path; the parent directory is created if missing
        bbox_det_in_ego_list: detection boxes in the ego frame, each an array
            of 3-D corners (presumably 8 x 3 -- TODO confirm corner layout)
        bbox_gt_in_ego_list: ground-truth boxes, same layout as detections
        id_list: per-object IDs used for stable colors and text labels
        state_list: a list of array including velocity/acceleration/angular_velocity
        fut_traj: N x 12 x 4 or N, 12 x 4
        fut_traj_mask: per-waypoint validity mask aligned with fut_traj
        pre_traj: N x 8 x 2 or N, 8 x 2 (past trajectory)
        pre_traj_mask: per-waypoint validity mask aligned with pre_traj
        ego2lidar: transform forwarded to box_transform.box_in_ego_to_lidar
        bev: draw on a top-down Canvas_BEV instead of a Canvas_3D
        box_order_nuscenes: select the nuScenes corner subset for the BEV footprint
        vis_text: draw object IDs (and states) next to the boxes
        command: key into command_dict for the overlay text
        command_dict: maps command ids to display strings.
            NOTE(review): mutable default argument -- it is only read here,
            never mutated, but a None default would be safer.
        bev_map: optional map image blended under the saved visualization

    Note that carla lidar coordinate is
    +x -> left
    +y -> front
    +z -> up
    """

    # ---- load the point cloud -------------------------------------------
    if isinstance(lidar, str):
        # Raw binary dump: float32 values, 4 per point; keep only x, y, z.
        lidar = np.fromfile(lidar, dtype=np.float32)
        lidar = lidar.reshape(-1, 4)
        lidar = lidar[:, :3]
    else:
        assert isinstance(lidar, np.ndarray), "the detection is either str or dict"

    # ---- per-object colors, stable w.r.t. object ID ---------------------
    # Only built when IDs are given and we are not drawing det AND gt at
    # the same time (in that case det/gt get fixed contrasting colors).
    if id_list is not None and (
        bbox_det_in_ego_list is None or bbox_gt_in_ego_list is None
    ):

        color_list: List[Tuple[float, float, float]] = color_vis.random_colors()

        if bbox_gt_in_ego_list is None:
            num_obj = len(bbox_det_in_ego_list)
        else:
            num_obj = len(bbox_gt_in_ego_list)

        colors = np.zeros((num_obj, 3), dtype=np.uint8)
        assert len(id_list) == num_obj, "number of objects is wrong"

        for obj_index, obj_id in enumerate(id_list):
            color: np.ndarray = get_color_object(obj_id, color_list)
            colors[obj_index, :] = color
    else:
        # NOTE(review): colors stays None here, but the fut_traj/pre_traj
        # branches below index colors[obj_index] -- those paths implicitly
        # require id_list to be provided.
        colors = None

    # ---- text labels: object ID plus optional state readout -------------
    text_corner = 1
    if id_list is not None and vis_text:
        texts = [str(obj_id) for obj_id in id_list]

        if state_list is not None:
            for index in range(len(state_list)):
                states: Union[np.ndarray, List] = state_list[index]
                state_str: str = " ".join(["%.2f" % state for state in states])
                texts[index] = texts[index] + " " + state_str
    else:
        texts = None

    # ---- build the canvas and scatter the point cloud -------------------
    if bev:
        canvas = bev_vis.Canvas_BEV()
        canvas_xy, valid_mask = canvas.get_canvas_coords(lidar)
        canvas.draw_canvas_points(canvas_xy[valid_mask])
    else:
        canvas = vis_3d.Canvas_3D()
        canvas_xyz, valid_mask = canvas.get_canvas_coords(lidar)
        # Grayscale points from the last column.
        # NOTE(review): after the file-load path above, column -1 is z (the
        # intensity column was dropped) -- confirm this is intended.
        color_based_on_intensity = (
            (lidar[valid_mask, -1] * 255).astype("uint8").reshape((-1, 1))
        )
        color_based_on_intensity = np.repeat(
            color_based_on_intensity, 3, axis=1
        )
        canvas.draw_canvas_points(
            canvas_xyz[valid_mask], colors=color_based_on_intensity
        )

    # ---- per-object box height (z of corner centroid) -------------------
    # Reused later to lift 2-D trajectories to 3-D in the non-BEV case.
    if (bbox_det_in_ego_list is not None) or (bbox_gt_in_ego_list is not None):
        if bbox_det_in_ego_list is not None:
            num_obj = len(bbox_det_in_ego_list)
        else:
            num_obj = len(bbox_gt_in_ego_list)
        height_all_lidar = np.zeros((num_obj), dtype='float32')

    # ---- detection boxes -------------------------------------------------
    if bbox_det_in_ego_list is not None:
        bbox_det_in_lidar_list: List[np.ndarray] = list()
        count = 0
        box_in_ego: np.ndarray
        for box_in_ego in bbox_det_in_ego_list:

            # Transform corners from the ego frame into the LiDAR frame.
            box_in_lidar: np.ndarray = box_transform.box_in_ego_to_lidar(
                box_in_ego,
                ego2lidar=ego2lidar,
            )
            height_all_lidar[count] = np.average(box_in_lidar, axis=0)[2]

            if bev:
                if box_order_nuscenes:
                    # nuScenes corner ordering: this index subset forms the
                    # BEV footprint -- TODO confirm against the corner spec.
                    box_in_lidar = box_in_lidar[
                        [1, 2, 6, 5], :2
                    ]
                else:
                    box_in_lidar = box_in_lidar[:4, :2]
            else:
                box_in_lidar = box_in_lidar[:, :3]
            bbox_det_in_lidar_list.append(np.array(box_in_lidar))
            count += 1

        if len(bbox_det_in_lidar_list) > 0:
            bbox_det_in_lidar = np.stack(bbox_det_in_lidar_list, axis=0)
            if colors is None:
                # Fixed fallback color for detections: yellow (R=G=255, B=0).
                colors_det = np.full(
                    (len(bbox_det_in_lidar), 3), fill_value=0, dtype=np.uint8
                )
                colors_det[:, :2] = 255
            else:
                colors_det = colors
            canvas.draw_boxes(
                corners=bbox_det_in_lidar,
                colors=colors_det,
                texts=texts,
                text_corner=text_corner,
            )

    # ---- ground-truth boxes ----------------------------------------------
    if bbox_gt_in_ego_list is not None:
        bbox_gt_in_lidar_list: List[np.ndarray] = list()
        box_in_ego: np.ndarray
        count = 0
        for box_in_ego in bbox_gt_in_ego_list:

            # Same ego -> LiDAR transform as for detections.
            box_in_lidar: np.ndarray = box_transform.box_in_ego_to_lidar(
                box_in_ego,
                ego2lidar=ego2lidar,
            )
            # NOTE(review): overwrites the det heights computed above when
            # both lists are given; confirm det/gt share object ordering.
            height_all_lidar[count] = np.average(box_in_lidar, axis=0)[2]

            if bev:
                if box_order_nuscenes:
                    box_in_lidar = box_in_lidar[
                        [1, 2, 6, 5], :2
                    ]
                else:
                    box_in_lidar = box_in_lidar[:4, :2]

            else:
                box_in_lidar = box_in_lidar[:, :3]

            bbox_gt_in_lidar_list.append(np.array(box_in_lidar))
            count += 1

        if len(bbox_gt_in_lidar_list) > 0:
            bbox_gt_in_lidar = np.stack(bbox_gt_in_lidar_list, axis=0)
            if colors is None:
                # Fixed fallback color for GT: the (0, 0, 255) channel triple.
                colors_gt = np.full(
                    (len(bbox_gt_in_lidar), 3), fill_value=0, dtype=np.uint8
                )
                colors_gt[:, 2] = 255
            else:
                colors_gt = colors

            canvas.draw_boxes(
                corners=bbox_gt_in_lidar,
                colors=colors_gt,
                texts=texts,
                text_corner=text_corner,
            )

    # ---- future trajectories ---------------------------------------------
    if fut_traj is not None:
        fut_traj_obj: np.ndarray
        obj_index = 0
        for fut_traj_obj in fut_traj:

            # 2-D waypoints on a 3-D canvas: append a z column set to the
            # object's box height so the points render at the box elevation.
            # NOTE(review): height_all_lidar only exists when boxes were
            # given -- this branch assumes boxes accompany trajectories.
            if fut_traj_obj.shape[1] == 2 and not bev:
                fut_traj_obj_addh = np.concatenate([fut_traj_obj, np.zeros((fut_traj_obj.shape[0], 1))], axis=1)
                fut_traj_obj_addh[:, -1] = height_all_lidar[obj_index]
                fut_traj_obj = fut_traj_obj_addh

            canvas_xy, valid_mask = canvas.get_canvas_coords(
                fut_traj_obj
            )
            valid_mask = valid_mask.astype("bool")

            # Draw only waypoints that are both flagged valid and on-canvas.
            fut_traj_mask_obj: np.ndarray = fut_traj_mask[obj_index, :, 0].astype(
                "bool"
            )
            mask_combined = fut_traj_mask_obj & valid_mask

            canvas.draw_canvas_points(
                canvas_xy[mask_combined], colors=tuple(colors[obj_index]), radius=2
            )
            obj_index += 1

    # ---- past trajectories (same scheme as future) ------------------------
    if pre_traj is not None:
        pre_traj_obj: np.ndarray
        obj_index = 0
        for pre_traj_obj in pre_traj:

            if pre_traj_obj.shape[1] == 2 and not bev:
                pre_traj_obj_addh = np.concatenate([pre_traj_obj, np.zeros((pre_traj_obj.shape[0], 1))], axis=1)
                pre_traj_obj_addh[:, -1] = height_all_lidar[obj_index]
                pre_traj_obj = pre_traj_obj_addh

            canvas_xy, valid_mask = canvas.get_canvas_coords(
                pre_traj_obj
            )
            valid_mask = valid_mask.astype("bool")

            pre_traj_mask_obj: np.ndarray = pre_traj_mask[obj_index, :, 0].astype(
                "bool"
            )
            mask_combined = pre_traj_mask_obj & valid_mask
            canvas.draw_canvas_points(
                canvas_xy[mask_combined], colors=tuple(colors[obj_index]), radius=2
            )
            obj_index += 1

    # ---- orient the canvas for display ------------------------------------
    # Vertical flip for BEV; additional horizontal flip for left-handed frames.
    if bev:
        canvas.canvas = np.flip(canvas.canvas, axis=(0))

        if left_hand:
            canvas.canvas = np.flip(canvas.canvas, axis=(1))

    # ---- render and save with matplotlib -----------------------------------
    plt.figure(figsize=(20, 20))
    plt.axis("off")
    plt.imshow(canvas.canvas)
    plt.tight_layout()

    # Overlay the driving-command text in white near the top-left corner.
    plt.text(30, 50, command_dict[int(command)], fontsize=45, color=(1, 1, 1))

    save_dir = Path(save_path).resolve().parent
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    plt.savefig(save_path, transparent=False)
    plt.close()

    # ---- optionally blend a BEV map under the saved image -------------------
    if bev_map is not None and bev:
        traj_vis = cv2.imread(save_path)

        # Resize the map to the saved image's (width, height) and scale
        # from [0, 1] floats to uint8.
        bev_map = cv2.resize(bev_map, traj_vis.shape[1::-1])
        bev_map = (bev_map * 255).astype('uint8')

        # Promote a single-channel map to 3 channels for blending.
        if len(bev_map.shape) == 2:
            bev_map = np.expand_dims(bev_map, axis=-1)
            bev_map = np.repeat(bev_map, 3, axis=-1)

        # Apply the same flips as the canvas so the two images align.
        if bev:
            bev_map = np.flip(bev_map, axis=0)
            if left_hand:
                bev_map = np.flip(bev_map, axis=1)

        # 70/30 weighted blend of trajectory rendering over the map.
        dst = cv2.addWeighted(traj_vis, 0.7, bev_map, 0.3, 0)
        cv2.imwrite(save_path, dst)
|
|
def vis_box_on_camera(
    cameras: Dict[str, np.ndarray],
    save_path: str,
    cam_param_all: Dict[str, Dict],
    bbox_det_in_ego_list: Optional[List[np.ndarray]] = None,
    bbox_gt_in_ego_list: Optional[List[np.ndarray]] = None,
    left_hand_system: bool = True,
) -> None:
    """Visualize detection/GT boxes on the multiple cameras and save a single
    horizontally-concatenated image.

    Args:
        cameras: keys can be ["left", "front", "right", "back"]
            values are H x W x 3 (drawn on in place)
        save_path: output image path; the parent directory is created if missing
        cam_param_all: per-camera parameters keyed like ``cameras``; each entry
            supplies "height"/"width" and either "intrinsics" or whatever
            calib_utils.get_camera_intrinsics needs to build them
        bbox_det_in_ego_list: detection boxes (3-D corners) in the ego frame,
            drawn with color (255, 255, 0)
        bbox_gt_in_ego_list: ground-truth boxes, drawn with color (0, 0, 255)
        left_hand_system: forwarded to box_transform.box_projection_to_image
    """

    cam_name: str
    cam_param: Dict[str, float]
    for cam_name, cam_param in cam_param_all.items():

        # Use the precomputed intrinsics when present; otherwise derive a
        # 4-dim intrinsics matrix from the camera parameters.
        if "intrinsics" not in cam_param:
            intrinsics: np.ndarray = calib_utils.get_camera_intrinsics(cam_param, dim=4)
        else:
            intrinsics: np.ndarray = cam_param["intrinsics"]

        # ---- detection boxes ------------------------------------------------
        if bbox_det_in_ego_list is not None:
            box_in_ego: np.ndarray
            for box_in_ego in bbox_det_in_ego_list:

                # Ego frame -> this camera's frame.
                box_in_camera: np.ndarray = box_transform.box_in_ego_to_camera(
                    box_in_ego, cam_param
                )

                # Project 3-D corners to pixel coordinates; a None result
                # indicates the box does not project into this camera.
                box_2d_in_image: np.ndarray = box_transform.box_projection_to_image(
                    box_in_camera,
                    intrinsics,
                    left_hand_system=left_hand_system,
                )

                if box_2d_in_image is not None:
                    cameras[cam_name], _ = vis_box_on_single_image(
                        cameras[cam_name],
                        box_2d_in_image,
                        img_size=(cam_param["height"], cam_param["width"]),
                        color=(255, 255, 0),
                        thickness=2,
                    )

        # ---- ground-truth boxes (same pipeline, different color) ------------
        if bbox_gt_in_ego_list is not None:
            box_in_ego: np.ndarray
            for box_in_ego in bbox_gt_in_ego_list:

                box_in_camera: np.ndarray = box_transform.box_in_ego_to_camera(
                    box_in_ego, cam_param
                )

                box_2d_in_image: np.ndarray = box_transform.box_projection_to_image(
                    box_in_camera,
                    intrinsics,
                    left_hand_system=left_hand_system,
                )

                if box_2d_in_image is not None:
                    cameras[cam_name], _ = vis_box_on_single_image(
                        cameras[cam_name],
                        box_2d_in_image,
                        img_size=(cam_param["height"], cam_param["width"]),
                        color=(0, 0, 255),
                        thickness=2,
                    )

    # ---- stitch all views side by side and save -----------------------------
    # Channel order is reversed (BGR <-> RGB) before handing the array to PIL.
    image = np.concatenate(list(cameras.values()), axis=1)
    image = image[:, :, ::-1]
    image = Image.fromarray(image)
    # NOTE(review): cam_param leaks out of the loop above, so the resize uses
    # the LAST camera's width/height for every view -- confirm all cameras
    # share one resolution.
    image = image.resize(
        (cam_param["width"] * len(cameras.keys()), cam_param["height"])
    )
    save_dir = Path(save_path).resolve().parent
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    image.save(save_path)
|
|
|
|
def check_outside_image(x, y, height, width):
    """Return True if pixel (x, y) lies outside an image of the given size.

    Args:
        x: horizontal pixel coordinate, checked against ``width``.
        y: vertical pixel coordinate, checked against ``height``.
        height: image height in pixels.
        width: image width in pixels.

    Returns:
        True when (x, y) is out of bounds, False otherwise.
    """
    # Bug fix: the original fell off the end and implicitly returned None
    # for in-bounds points; make the result an explicit bool.
    if x < 0 or x >= width:
        return True
    if y < 0 or y >= height:
        return True
    return False
|
|
|
|
def vis_box_on_single_image(
    image, qs, img_size=(900, 1600), color=(255, 255, 255), thickness=4
):
    """Draw 3d bounding box in image
    qs: (8,2) array of vertices for the 3d box in following order:
        1 -------- 0
       /|         /|
      2 -------- 3 .
      | |        | |
      . 5 -------- 4
      |/         |/
      6 -------- 7

    Args:
        image: H x W x 3 image to draw on (modified in place).
        qs: (8, 2) projected corner array, or None to skip drawing.
        img_size: (height, width) used for the on-screen visibility test.
        color: line color tuple passed to cv2.line.
        thickness: line thickness in pixels.

    Returns:
        (image, drawn): ``drawn`` is False when nothing was drawn (qs is
        None or >= 6 of the 8 corners fall outside the image).
    """
    # Bug fix: the original checked `qs is not None` only AFTER indexing qs
    # eight times in the visibility loop, so the guard could never trigger.
    # Check for None first.
    if qs is None:
        return image, False

    # Skip boxes that are almost entirely off-screen (6+ corners outside).
    pts_outside = 0
    for index in range(8):
        if check_outside_image(qs[index, 0], qs[index, 1], img_size[0], img_size[1]):
            pts_outside += 1
    if pts_outside >= 6:
        return image, False

    qs = qs.astype(np.int32)
    for k in range(0, 4):
        # Edge of the face formed by corners 0-3.
        i, j = k, (k + 1) % 4
        cv2.line(
            image,
            (qs[i, 0], qs[i, 1]),
            (qs[j, 0], qs[j, 1]),
            color,
            thickness,
            cv2.LINE_AA,
        )

        # Corresponding edge of the opposite face (corners 4-7).
        i, j = k + 4, (k + 1) % 4 + 4
        cv2.line(
            image,
            (qs[i, 0], qs[i, 1]),
            (qs[j, 0], qs[j, 1]),
            color,
            thickness,
            cv2.LINE_AA,
        )

        # Vertical edge connecting the two faces.
        i, j = k, k + 4
        cv2.line(
            image,
            (qs[i, 0], qs[i, 1]),
            (qs[j, 0], qs[j, 1]),
            color,
            thickness,
            cv2.LINE_AA,
        )

    return image, True
|
|