diff --git a/README_replay.md b/README_replay.md
new file mode 100644
index 00000000..31fce187
--- /dev/null
+++ b/README_replay.md
@@ -0,0 +1,92 @@
+
+OpenCDA Replay
+=
+### Motivations:
+
+* The open-source dataset [OPV2V](https://mobility-lab.seas.ucla.edu/opv2v/) offers comprehensive simulation data, e.g. RGB images, lidar point clouds and bounding boxes
+
+* The **OpenCOOD** framework provides a dataset replay function and allows users to add sensors on vehicles and road-side units to generate images and point clouds, but offers **no bounding box generation**
+* The **OpenCDA** framework has a bounding box generation function based on ML detection libraries, but **does not have a dataset replay function**
+* This work extends OpenCDA by integrating the replay function from OpenCOOD
+
+### Remarks
+
+- Carla provides numerous [pre-made maps](https://carla.readthedocs.io/en/latest/core_map/) for public benchmarking; I chose 'Town05' for its urban environment, which better resembles my use cases. More towns can be included by adjusting line 90 in opencda_replay.py
+  ```python
+  target_towns = ['Town05']
+  ```
+
+- My project primarily focuses on the analysis of lidar placement for road-side units (RSUs), thus **only one vehicle actor** is used for simplicity.
+
+  After each simulation in Carla, every actor's recorded data is saved under a randomly assigned actor ID:
+  - Vehicle actors have *positive* IDs
+  - RSU actors have *negative* IDs (following the formulation of V2XSet)
+
+  Note that the original OPV2V dataset contains recordings of multiple (i.e. >1) vehicle actors; the one with the smallest *positive* ID is chosen as the ego vehicle, while the others are treated as background vehicles
+
+- To study RSU lidar configurations, I needed to record and replay the dataset in a completely consistent environment while adjusting only the lidar placement, i.e. replicating the same traffic conditions with the same cars and their trajectories. I added the 'base_dataset' mode to enable this.
+
+### Workflows:
+
+Prerequisite: run the Carla simulator with 'make launch' and start the simulation by pressing Play -> 'Simulate'
+
+1. Generate bird's eye view images of each scene
+   ```bash
+   python opencda_replay.py -t logreplay_bev -r ROOT_DIR -o OUTPUT_DIR_BEV
+   ```
+
+   Example: (scene: test/2021_08_23_12_58_19)
+
+   BEV
+
+
+2. Select desired intersections and log lidar locations for analysis
+
+   - Specifically, lidars in the Carla simulator require position arguments (x, y, z, pitch, yaw, roll), which need to be recorded manually
+   - *Efforts attempted*: I wrote scripts to extract traffic-light information and use it to assign lidar coordinates automatically. However, in the pre-defined Carla maps, some intersections with traffic lights may have no traffic at all (i.e. useless to record), while other intersections without traffic lights are crowded. Therefore, for Carla's provided maps, we need to inspect manually
+
+   ```bash
+   python utils/print_spectactor_coord.py
+   ```
+
+   This script (credit: [@damascenodiego/print_spectactor_coord.py](https://gist.github.com/damascenodiego/c53ae6960f1ebdcc25d41384392b6500)) prints the current spectator coordinates in real time while the user roams around the simulation world; a minimal sketch of the idea is shown below
+
+   Example:
+
+   rsu
+
+   See example results at 'OpenCDA/opencda/scenario_testing/utils/rsu.json'
+
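+   For reference, the script boils down to polling the spectator transform. A minimal sketch, assuming a default Carla server on localhost:2000 (the host/port and the 0.5 s polling interval are assumptions, not repo settings):
+
+   ```python
+   import time
+   import carla
+
+   client = carla.Client('localhost', 2000)
+   client.set_timeout(10.0)
+   world = client.get_world()
+   spectator = world.get_spectator()
+
+   while True:
+       t = spectator.get_transform()
+       # copy these values into rsu.json once the view frames the intersection
+       print('x=%.2f y=%.2f z=%.2f pitch=%.2f yaw=%.2f roll=%.2f' % (
+           t.location.x, t.location.y, t.location.z,
+           t.rotation.pitch, t.rotation.yaw, t.rotation.roll))
+       time.sleep(0.5)
+   ```
+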
+3. Replay the original OPV2V dataset to generate the base dataset
+
+   ```bash
+   python opencda_replay.py -t logreplay -b -r ROOT_DIR -o OUTPUT_DIR_BASE
+   # -b --base_dataset: boolean flag to replay the original OPV2V dataset and generate the base dataset
+   ```
+
+   Example: pitch = -20
+
+
+   base
+
+   See the automation bash script for reference:
+   > OpenCDA/automate_opencda.sh
+
+4. Replay the base dataset to generate variant datasets
+
+   Examples of a 'variant dataset' include different lidar placements with varying **(x, y, z) coordinates** and **pitch angles**
+
+   \* Currently **only the pitch angle** is allowed to vary (see line 75 in opencda_replay.py); other adjustment parameters are yet to be extended
+
+   ```bash
+   python opencda_replay.py -t logreplay -r OUTPUT_DIR_BASE -o OUTPUT_DIR_VAR
+   ```
+
+   Example: pitch = -40
+
+
+   variant
+
+   Notes: the original dataset **did not record background vehicle information** (e.g. car model and color). Therefore, we first generate the base dataset, after which this information becomes available for exact replication
diff --git a/automate_opencda.sh b/automate_opencda.sh
new file mode 100644
index 00000000..2870ea98
--- /dev/null
+++ b/automate_opencda.sh
@@ -0,0 +1,46 @@
+pitch=("0" "-10" "-20" "-30" "-40" "-50")
+replay=("train" "validate" "test")
+output=("train" "train" "test")
+lidar="livox"
+height="6"
+p=${pitch[0]} # pitch suffix of the input recordings for the BEV preview below
+#
+# replay base for bird's-eye view (BEV) preview
+#
+root="/media/hdd1/opv2v/"
+outp="/media/hdd1/opv2v/opencda_dump"
+for ((i=0; i<${#replay[@]}; i++))
+do
+  python opencda_replay.py -t logreplay_bev -r ${root}/${replay[$i]}_${lidar}_${height}_${p} -o ${outp}/${output[$i]}_bev_-90
+done
+# #
+# # replay original for base
+# #
+# root="/media/hdd1/opv2v/"
+# outp="/media/hdd1/opv2v/opencda_dump"
+# for p in ${pitch[*]}
+# do
+#   for ((i=0; i<${#replay[@]}; i++))
+#   do
+#     python opencda_replay.py -t logreplay -r ${root}/${replay[$i]}_${lidar}_${height}_${p} -o ${outp}/${output[$i]}_${lidar}_${height}_${p}
+#   done
+# done
+# #
+# # replay base for variations
+# #
+# root="/media/hdd1/opv2v/opencda_dump"
+# outp="/media/hdd1/opv2v/opencda_dump"
+# for p in ${pitch[*]}
+# do
+#   for ((i=0; i<${#replay[@]}; i++))
+#   do
+#     python opencda_replay.py -t logreplay_metric -r ${root}/${replay[$i]}_${lidar}_${height}_${p} -o ${outp}/metric_${output[$i]}_${lidar}_${height}_${p}
+#   done
+# done
+# #
+# # compute metrics for each scene in the test set
+# #
+# outp="/media/hdd1/opv2v/opencda_dump"
+# for p in ${pitch[*]}
+# do
+#   python ComputeDNUC.py --dir ${outp}/metric_test_${lidar}_${height}_${p}
+# done
\ No newline at end of file
diff --git a/docs/md_files_replay/base_-20_000069_camera0.png b/docs/md_files_replay/base_-20_000069_camera0.png
new file mode 100644
index 00000000..bf962a24
Binary files /dev/null and b/docs/md_files_replay/base_-20_000069_camera0.png differ
diff --git a/docs/md_files_replay/bev_000163_camera0.png b/docs/md_files_replay/bev_000163_camera0.png
new file mode 100644
index 00000000..794bdd98
Binary files /dev/null and b/docs/md_files_replay/bev_000163_camera0.png differ
diff --git a/docs/md_files_replay/metric.png b/docs/md_files_replay/metric.png
new file mode 100644
index 00000000..cebc7f11
Binary files /dev/null and b/docs/md_files_replay/metric.png differ
diff --git a/docs/md_files_replay/rsu.png b/docs/md_files_replay/rsu.png
new file mode 100644
index 00000000..b6b6f11d
Binary files /dev/null and b/docs/md_files_replay/rsu.png differ
diff --git a/docs/md_files_replay/var_-30_000069_camera0.png
b/docs/md_files_replay/var_-30_000069_camera0.png new file mode 100644 index 00000000..66e02a69 Binary files /dev/null and b/docs/md_files_replay/var_-30_000069_camera0.png differ diff --git a/docs/md_files_replay/var_-40_000069_camera0.png b/docs/md_files_replay/var_-40_000069_camera0.png new file mode 100644 index 00000000..fed054ac Binary files /dev/null and b/docs/md_files_replay/var_-40_000069_camera0.png differ diff --git a/opencda/core/common/data_dumper.py b/opencda/core/common/data_dumper.py index 78b1b3b9..4114b578 100644 --- a/opencda/core/common/data_dumper.py +++ b/opencda/core/common/data_dumper.py @@ -15,58 +15,19 @@ from opencda.core.common.misc import get_speed from opencda.core.sensing.perception import sensor_transformation as st from opencda.scenario_testing.utils.yaml_utils import save_yaml +import carla class DataDumper(object): - """ - Data dumper class to save data in local disk. - - Parameters - ---------- - perception_manager : opencda object - The perception manager contains rgb camera data and lidar data. - - vehicle_id : int - The carla.Vehicle id. - - save_time : str - The timestamp at the beginning of the simulation. - - Attributes - ---------- - rgb_camera : list - A list of opencda.CameraSensor that containing all rgb sensor data - of the managed vehicle. - - lidar ; opencda object - The lidar manager from perception manager. - - save_parent_folder : str - The parent folder to save all data related to a specific vehicle. - - count : int - Used to count how many steps have been executed. We dump data - every 10 steps. - - """ def __init__(self, perception_manager, - vehicle_id, - save_time): + save_path): self.rgb_camera = perception_manager.rgb_camera self.lidar = perception_manager.lidar - self.save_time = save_time - self.vehicle_id = vehicle_id - - current_path = os.path.dirname(os.path.realpath(__file__)) - self.save_parent_folder = \ - os.path.join(current_path, - '../../../data_dumping', - save_time, - str(self.vehicle_id)) + self.save_parent_folder = save_path if not os.path.exists(self.save_parent_folder): os.makedirs(self.save_parent_folder) @@ -91,44 +52,83 @@ def run_step(self, behavior_agent : opencda object Open """ - self.count += 1 + # self.count += 1 ### + # we ignore the first 60 steps if self.count < 60: return - # 10hz - if self.count % 2 != 0: - return + # if self.count % 2 != 0: + # return + # print('saving', self.count) self.save_rgb_image(self.count) - # self.save_lidar_points() self.save_yaml_file(perception_manager, localization_manager, behavior_agent, self.count) + self.save_lidar_points(self.count) def save_rgb_image(self, count): """ Save camera rgb images to disk. 
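+        Images are written as '%06d_camera%d.png' (frame count, camera index)
+        under save_parent_folder.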
""" + if not self.rgb_camera: + return + for (i, camera) in enumerate(self.rgb_camera): - frame = camera.frame image = camera.image image_name = '%06d' % count + '_' + 'camera%d' % i + '.png' cv2.imwrite(os.path.join(self.save_parent_folder, image_name), image) + + # print('saved', self.perception_manager.id, os.path.join(self.save_parent_folder, image_name))###debug + # break # to save front only + + # def reverse_transform(self, points, p, y, r): + + # tran = carla.Transform(carla.Rotation(pitch=p, yaw=y, roll=r)) + # rot = tran.get_matrix()[:3, :3] + # return (rot @ points.T).T + + # def reverse_transform(self, points, trans): + + # x, y, z = trans.location.x, trans.location.y, trans.location.z + # p, y, r = trans.rotation.pitch, trans.rotation.yaw, trans.rotation.roll + # trans = carla.Transform(carla.Location(x,y,z), carla.Rotation(0,y,r)) + # rot = np.array(trans.get_matrix()) + + # points = np.append(points, np.ones((points.shape[0], 1)), axis=1) + # points = (rot @ points.T).T + + # return points[:, :-1] - def save_lidar_points(self): + def reverse_transform(self, points, trans): + + x, y, z = trans.location.x, trans.location.y, trans.location.z + p, y, r = trans.rotation.pitch, trans.rotation.yaw, trans.rotation.roll + # trans = carla.Transform(carla.Location(x,y,z), carla.Rotation(p,y,r)) + trans = carla.Transform(carla.Location(0,0,0), carla.Rotation(p,0,0)) + rot = np.linalg.inv(trans.get_matrix()) + # rot = trans.get_matrix() + + points = np.append(points, np.ones((points.shape[0], 1)), axis=1) + points = (rot @ points.T).T + + return points[:, :-1] + + def save_lidar_points(self, count): """ Save 3D lidar points to disk. """ point_cloud = self.lidar.data - frame = self.lidar.frame + frame = count # self.lidar.frame point_xyz = point_cloud[:, :-1] + # point_xyz[:, 1] = -point_xyz[:, 1] # horizontal flip point_intensity = point_cloud[:, -1] point_intensity = np.c_[ point_intensity, @@ -136,6 +136,23 @@ def save_lidar_points(self): np.zeros_like(point_intensity) ] + # x_rot = y_rot = z_rot = 0 + roll_rot = pitch_rot = yaw_rot = 0 + if self.lidar.spawn_point.rotation.pitch != 0: + # y_rot = np.radians(-self.lidar.spawn_point.rotation.pitch) + pitch_rot = -self.lidar.spawn_point.rotation.pitch + # if self.lidar.spawn_point.rotation.yaw != 0: + # z_rot = np.radians(-self.lidar.spawn_point.rotation.yaw) + # yaw_rot = -self.lidar.spawn_point.rotation.yaw + + if roll_rot != 0 or pitch_rot != 0 or yaw_rot != 0: + # rot = o3d.geometry.get_rotation_matrix_from_axis_angle([x_rot, y_rot, z_rot]) + # point_xyz = (rot @ point_xyz.T).T + # o3d_pcd.rotate(rot) + # point_xyz = self.reverse_transform(point_xyz, pitch_rot, yaw_rot, roll_rot) + pass + # point_xyz = self.reverse_transform(point_xyz, self.lidar.spawn_point) + o3d_pcd = o3d.geometry.PointCloud() o3d_pcd.points = o3d.utility.Vector3dVector(point_xyz) o3d_pcd.colors = o3d.utility.Vector3dVector(point_intensity) @@ -146,6 +163,7 @@ def save_lidar_points(self): pcd_name), pointcloud=o3d_pcd, write_ascii=True) + # print('saved', self.perception_manager.id, os.path.join(self.save_parent_folder,pcd_name))###debug def save_yaml_file(self, perception_manager, @@ -175,9 +193,16 @@ def save_yaml_file(self, # dump obstacle vehicles first objects = perception_manager.objects vehicle_list = objects['vehicles'] + # print('\nperception_manager vehicles', vehicle_list) for veh in vehicle_list: + if perception_manager.id == veh.carla_id: + print('perception_manager.id', perception_manager.id) + print('veh.carla_id', veh.carla_id) + continue ### + 
veh_carla_id = veh.carla_id + # veh_carla_id = str(veh.attributes['role_name']) veh_pos = veh.get_transform() veh_bbx = veh.bounding_box veh_speed = get_speed(veh) @@ -204,6 +229,7 @@ def save_yaml_file(self, }}) dump_yml.update({'vehicles': vehicle_dict}) + # print('perception_manager vehicle_dict', vehicle_dict) # dump ego pose and speed, if vehicle does not exist, then it is # a rsu(road side unit). @@ -238,7 +264,7 @@ def save_yaml_file(self, lidar_transformation.rotation.roll, lidar_transformation.rotation.yaw, lidar_transformation.rotation.pitch]}) - + # print('dump_yml lidar pose', lidar_transformation);print(dump_yml['lidar_pose']) # dump camera sensor coordinates under world coordinate system for (i, camera) in enumerate(self.rgb_camera): camera_param = {} @@ -292,6 +318,7 @@ def save_yaml_file(self, yml_name) save_yaml(dump_yml, save_path) + # print('saved', self.perception_manager.id, save_path)###debug @staticmethod def matrix2list(matrix): diff --git a/opencda/core/common/data_dumper_replay.py b/opencda/core/common/data_dumper_replay.py new file mode 100644 index 00000000..aee0fdb5 --- /dev/null +++ b/opencda/core/common/data_dumper_replay.py @@ -0,0 +1,332 @@ +import os + +import cv2 +import open3d as o3d +import numpy as np + +from opencda.core.common.misc import get_speed +from opencda.core.sensing.perception import sensor_transformation as st +from opencda.scenario_testing.utils.yaml_utils import save_yaml +from opencda.core.common.data_dumper import DataDumper + +class DataDumperReplay(DataDumper): + + def __init__(self, + perception_manager, + save_path): + + self.rgb_camera = perception_manager.rgb_camera + self.lidar = perception_manager.lidar + self.semantic_lidar = perception_manager.semantic_lidar ### + + self.save_parent_folder = save_path + + if not os.path.exists(self.save_parent_folder): + os.makedirs(self.save_parent_folder) + + self.count = 0 + + def run_step(self, + perception_manager, + localization_manager, + behavior_agent): + + # we ignore the first 60 steps + if self.count < 60: + return + + # 10hz + # if self.count % 2 != 0: + # return + + if self.rgb_camera is not None: + self.save_rgb_image(self.count) + if self.lidar is not None: + self.save_yaml_file(perception_manager, + localization_manager, + behavior_agent, + self.count) + if self.lidar is not None: + self.save_lidar_points(self.count) + + def save_rgb_image(self, count): + """ + Save camera rgb images to disk. + """ + if not self.rgb_camera: + return + + for (i, camera) in enumerate(self.rgb_camera): + + image = camera.image + + image_name = '%06d' % count + '_' + 'camera%d' % i + '.png' + + cv2.imwrite(os.path.join(self.save_parent_folder, image_name), + image) + + def save_lidar_points(self, count): + """ + Save 3D lidar points to disk. 
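+        Points are written as ASCII .pcd files named '%06d.pcd', with the
+        intensity values stored in the first color channel.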
+ """ + point_cloud = self.lidar.data + frame = count # self.lidar.frame + + point_xyz = point_cloud[:, :-1] + point_intensity = point_cloud[:, -1] + point_intensity = np.c_[ + point_intensity, + np.zeros_like(point_intensity), + np.zeros_like(point_intensity) + ] + + o3d_pcd = o3d.geometry.PointCloud() + o3d_pcd.points = o3d.utility.Vector3dVector(point_xyz) + o3d_pcd.colors = o3d.utility.Vector3dVector(point_intensity) + + # write to pcd file + pcd_name = '%06d' % frame + '.pcd' + o3d.io.write_point_cloud(os.path.join(self.save_parent_folder, + pcd_name), + pointcloud=o3d_pcd, + write_ascii=True) + + def save_semantic_lidar_points(self, count): + + LABEL_COLORS = np.array([ + (255, 255, 255), # None + (70, 70, 70), # Building + (100, 40, 40), # Fences + (55, 90, 80), # Other + (220, 20, 60), # Pedestrian + (153, 153, 153), # Pole + (157, 234, 50), # RoadLines + (128, 64, 128), # Road + (244, 35, 232), # Sidewalk + (107, 142, 35), # Vegetation + (0, 0, 142), # Vehicle + (102, 102, 156), # Wall + (220, 220, 0), # TrafficSign + (70, 130, 180), # Sky + (81, 0, 81), # Ground + (150, 100, 100), # Bridge + (230, 150, 140), # RailTrack + (180, 165, 180), # GuardRail + (250, 170, 30), # TrafficLight + (110, 190, 160), # Static + (170, 120, 50), # Dynamic + (45, 60, 150), # Water + (145, 170, 100), # Terrain + ]) / 255.0 # normalize each channel [0-1] since is what Open3D uses + + point_cloud = self.semantic_lidar.points + frame = count # self.lidar.frame + + point_xyz = point_cloud + label_color = LABEL_COLORS[self.semantic_lidar.obj_tag] + + o3d_pcd = o3d.geometry.PointCloud() + o3d_pcd.points = o3d.utility.Vector3dVector(point_xyz) + o3d_pcd.colors = o3d.utility.Vector3dVector(label_color) + + # write to pcd file + pcd_name = '%06d' % frame + '.pcd' + o3d.io.write_point_cloud(os.path.join(self.save_parent_folder, + pcd_name), + pointcloud=o3d_pcd, + write_ascii=True) + + def save_yaml_file(self, + perception_manager, + localization_manager, + behavior_agent, + count): + """ + Save objects positions/spped, true ego position, + predicted ego position, sensor transformations. + + Parameters + ---------- + perception_manager : opencda object + OpenCDA perception manager. + + localization_manager : opencda object + OpenCDA localization manager. + + behavior_agent : opencda object + OpenCDA behavior agent. + """ + frame = count + + dump_yml = {} + vehicle_dict = {} + + # dump obstacle vehicles first + objects = perception_manager.objects + vehicle_list = objects['vehicles'] + + for veh in vehicle_list: + if perception_manager.id == veh.carla_id: + continue ### + + veh_carla_id = veh.carla_id + veh_pos = veh.get_transform() + veh_bbx = veh.bounding_box + veh_speed = get_speed(veh) + + assert veh_carla_id != -1, "Please turn off perception active" \ + "mode if you are dumping data" + + vehicle_dict.update({veh_carla_id: { + 'bp_id': veh.type_id, + 'color': veh.color, + "location": [veh_pos.location.x, + veh_pos.location.y, + veh_pos.location.z], + "center": [veh_bbx.location.x, + veh_bbx.location.y, + veh_bbx.location.z], + "angle": [veh_pos.rotation.roll, + veh_pos.rotation.yaw, + veh_pos.rotation.pitch], + "extent": [veh_bbx.extent.x, + veh_bbx.extent.y, + veh_bbx.extent.z], + "speed": veh_speed + }}) + dump_yml.update({'vehicles': vehicle_dict}) + # dump ego pose and speed, if vehicle does not exist, then it is + # a rsu(road side unit). 
+        predicted_ego_pos = localization_manager.get_ego_pos()
+        true_ego_pos = localization_manager.vehicle.get_transform() \
+            if hasattr(localization_manager, 'vehicle') \
+            else localization_manager.true_ego_pos
+
+        dump_yml.update({'predicted_ego_pos': [
+            predicted_ego_pos.location.x,
+            predicted_ego_pos.location.y,
+            predicted_ego_pos.location.z,
+            predicted_ego_pos.rotation.roll,
+            predicted_ego_pos.rotation.yaw,
+            predicted_ego_pos.rotation.pitch]})
+        dump_yml.update({'true_ego_pos': [
+            true_ego_pos.location.x,
+            true_ego_pos.location.y,
+            true_ego_pos.location.z,
+            true_ego_pos.rotation.roll,
+            true_ego_pos.rotation.yaw,
+            true_ego_pos.rotation.pitch]})
+        dump_yml.update({'ego_speed':
+                         float(localization_manager.get_ego_spd())})
+
+        # dump lidar sensor coordinates under the world coordinate system
+        lidar_transformation = self.lidar.sensor.get_transform()
+        dump_yml.update({'lidar_pose': [
+            lidar_transformation.location.x,
+            lidar_transformation.location.y,
+            lidar_transformation.location.z,
+            lidar_transformation.rotation.roll,
+            lidar_transformation.rotation.yaw,
+            lidar_transformation.rotation.pitch]})
+        # dump camera sensor coordinates under the world coordinate system
+        for (i, camera) in enumerate(self.rgb_camera):
+            camera_param = {}
+            camera_transformation = camera.sensor.get_transform()
+            camera_param.update({'cords': [
+                camera_transformation.location.x,
+                camera_transformation.location.y,
+                camera_transformation.location.z,
+                camera_transformation.rotation.roll,
+                camera_transformation.rotation.yaw,
+                camera_transformation.rotation.pitch
+            ]})
+
+            # dump intrinsic matrix
+            camera_intrinsic = st.get_camera_intrinsic(camera.sensor)
+            camera_intrinsic = self.matrix2list(camera_intrinsic)
+            camera_param.update({'intrinsic': camera_intrinsic})
+
+            # dump extrinsic matrix lidar2camera
+            lidar2world = \
+                st.x_to_world_transformation(self.lidar.sensor.get_transform())
+            camera2world = \
+                st.x_to_world_transformation(camera.sensor.get_transform())
+
+            world2camera = np.linalg.inv(camera2world)
+            lidar2camera = np.dot(world2camera, lidar2world)
+            lidar2camera = self.matrix2list(lidar2camera)
+            camera_param.update({'extrinsic': lidar2camera})
+            dump_yml.update({'camera%d' % i: camera_param})
+
+        dump_yml.update({'RSU': True})
+        # dump the planned trajectory if it exists.
+        if behavior_agent is not None:
+            trajectory_deque = \
+                behavior_agent.get_local_planner().get_trajectory()
+            trajectory_list = []
+
+            for i in range(len(trajectory_deque)):
+                tmp_buffer = trajectory_deque.popleft()
+                x = tmp_buffer[0].location.x
+                y = tmp_buffer[0].location.y
+                spd = tmp_buffer[1]
+
+                trajectory_list.append([x, y, spd])
+
+            dump_yml.update({'plan_trajectory': trajectory_list})
+            dump_yml.update({'RSU': False})
+
+        yml_name = '%06d' % frame + '.yaml'
+        save_path = os.path.join(self.save_parent_folder,
+                                 yml_name)
+
+        save_yaml(dump_yml, save_path)
+
+    @staticmethod
+    def matrix2list(matrix):
+        """
+        To generate a readable yaml file, we need to convert the matrix
+        to list format.
+
+        Parameters
+        ----------
+        matrix : np.ndarray
+            The extrinsic/intrinsic matrix.
+
+        Returns
+        -------
+        matrix_list : list
+            The matrix represented in list format.
+ """ + + assert len(matrix.shape) == 2 + return matrix.tolist() + +class DataDumperReplayPly(DataDumperReplay): + + def __init__(self, + perception_manager, + save_path): + + self.semantic_lidar = perception_manager.semantic_lidar ### + + self.save_parent_folder = save_path + + if not os.path.exists(self.save_parent_folder): + os.makedirs(self.save_parent_folder) + + self.count = 0 + + def save_semantic_lidar_points_ply(self, count): + + path = os.path.join(self.save_parent_folder, '%06d' % count + '.ply') + self.semantic_lidar.event.save_to_disk(path) + print('saved', path) + + def run_step(self): + + # we ignore the first 60 steps + if self.count < 60: + return + + self.save_semantic_lidar_points_ply(self.count) diff --git a/opencda/core/common/misc.py b/opencda/core/common/misc.py index 6fb0bd15..b2251d65 100644 --- a/opencda/core/common/misc.py +++ b/opencda/core/common/misc.py @@ -261,3 +261,75 @@ def get_speed_sumo(sumo2carla_ids, carla_id): return vehicle_speed return -1 + +TOWN_DICTIONARY = { + '2021_08_20_21_48_35': 'Town06', + '2021_08_18_19_48_05': 'Town06', + '2021_08_20_21_10_24': 'Town06', + '2021_08_21_09_28_12': 'Town06', + '2021_08_22_07_52_02': 'Town05', + '2021_08_22_09_08_29': 'Town05', + '2021_08_22_21_41_24': 'Town05', + '2021_08_23_12_58_19': 'Town05', + '2021_08_23_15_19_19': 'Town04', + '2021_08_23_16_06_26': 'Town04', + '2021_08_23_17_22_47': 'Town04', + '2021_08_23_21_07_10': 'Town10HD', + '2021_08_23_21_47_19': 'Town10HD', + '2021_08_24_07_45_41': 'Town10HD', + '2021_08_24_11_37_54': 'Town07', + '2021_08_24_20_09_18': 'Town04', + '2021_08_24_20_49_54': 'Town04', + '2021_08_24_21_29_28': 'Town04', + '2021_08_16_22_26_54': 'Town06', + '2021_08_18_09_02_56': 'Town06', + '2021_08_18_18_33_56': 'Town06', + '2021_08_18_21_38_28': 'Town06', + '2021_08_18_22_16_12': 'Town06', + '2021_08_18_23_23_19': 'Town06', + '2021_08_19_15_07_39': 'Town06', + '2021_08_20_16_20_46': 'Town06', + '2021_08_20_20_39_00': 'Town06', + '2021_08_20_21_00_19': 'Town06', + '2021_08_21_09_09_41': 'Town06', + '2021_08_21_15_41_04': 'Town05', + '2021_08_21_16_08_42': 'Town05', + '2021_08_21_17_00_32': 'Town05', + '2021_08_21_21_35_56': 'Town05', + '2021_08_21_22_21_37': 'Town05', + '2021_08_22_06_43_37': 'Town05', + '2021_08_22_07_24_12': 'Town05', + '2021_08_22_08_39_02': 'Town05', + '2021_08_22_09_43_53': 'Town05', + '2021_08_22_10_10_40': 'Town05', + '2021_08_22_10_46_58': 'Town06', + '2021_08_22_11_29_38': 'Town06', + '2021_08_22_22_30_58': 'Town05', + '2021_08_23_10_47_16': 'Town04', + '2021_08_23_11_06_41': 'Town05', + '2021_08_23_11_22_46': 'Town04', + '2021_08_23_12_13_48': 'Town05', + '2021_08_23_13_10_47': 'Town05', + '2021_08_23_16_42_39': 'Town04', + '2021_08_23_17_07_55': 'Town04', + '2021_08_23_19_27_57': 'Town10HD', + '2021_08_23_20_47_11': 'Town10HD', + '2021_08_23_22_31_01': 'Town10HD', + '2021_08_23_23_08_17': 'Town10HD', + '2021_08_24_09_25_42': 'Town07', + '2021_08_24_09_58_32': 'Town07', + '2021_08_24_12_19_30': 'Town07', + '2021_09_09_13_20_58': 'Town03', + '2021_09_09_19_27_35': 'Town01', + '2021_09_10_12_07_11': 'Town04', + '2021_09_09_23_21_21': 'Town03', + '2021_08_21_17_30_41': 'Town05', + '2021_08_22_13_37_16': 'Town06', + '2021_08_22_22_01_17': 'Town05', + '2021_08_23_10_51_24': 'Town05', + '2021_08_23_13_17_21': 'Town05', + '2021_08_23_19_42_07': 'Town10HD', + '2021_09_09_22_21_11': 'Town02', + '2021_09_11_00_33_16': 'Town10HD', + '2021_08_18_19_11_02': 'Town06' +} diff --git a/opencda/core/common/rsu_manager.py b/opencda/core/common/rsu_manager.py index 
94f99508..64403432 100644 --- a/opencda/core/common/rsu_manager.py +++ b/opencda/core/common/rsu_manager.py @@ -124,4 +124,4 @@ def destroy(self): Destroy the actor vehicle """ self.perception_manager.destroy() - self.localizer.destroy() + self.localizer.destroy() \ No newline at end of file diff --git a/opencda/core/common/rsu_manager_replay.py b/opencda/core/common/rsu_manager_replay.py new file mode 100644 index 00000000..72e59bd3 --- /dev/null +++ b/opencda/core/common/rsu_manager_replay.py @@ -0,0 +1,82 @@ +from opencda.core.common.data_dumper_replay import DataDumperReplay, DataDumperReplayPly +from opencda.core.common.rsu_manager import RSUManager +from opencda.core.sensing.perception.perception_manager_replay import PerceptionManagerReplay +from opencda.core.sensing.localization.rsu_localization_manager import LocalizationManager +import os + +class RSUManagerReplay(RSUManager): + + def __init__( + self, + carla_world, + config_yaml, + carla_map, + cav_world, + current_time='', + data_dumping=None, + PerceptionManager=PerceptionManagerReplay, + DataDumper=DataDumperReplay): + + self.rid = config_yaml['id'] + # The id of rsu is always a negative int + if self.rid > 0: + self.rid = -self.rid + + # read map from the world everytime is time-consuming, so we need + # explicitly extract here + self.carla_map = carla_map + + # retrieve the configure for different modules + # todo: add v2x module to rsu later + sensing_config = config_yaml['sensing'] + sensing_config['localization']['global_position'] = \ + config_yaml['spawn_position'] + sensing_config['perception']['global_position'] = \ + config_yaml['spawn_position'] + + # localization module + self.localizer = LocalizationManager(carla_world, + sensing_config['localization'], + self.carla_map) + # perception module + self.perception_manager = PerceptionManager(vehicle=None, + config_yaml=sensing_config['perception'], + cav_world=cav_world, + carla_world=carla_world, + data_dump=data_dumping, + infra_id=self.rid) + if data_dumping: + save_path = os.path.join(data_dumping, current_time, str(self.rid)) + self.data_dumper = DataDumper(self.perception_manager, save_path) + else: + self.data_dumper = None + + cav_world.update_rsu_manager(self) + +class RSUManagerReplayPly(RSUManagerReplay): + + def __init__( + self, + carla_world, + config_yaml, + carla_map, + cav_world, + current_time='', + data_dumping=None): + + super(RSUManagerReplayPly ,self).__init__(carla_world, + config_yaml, + carla_map, + cav_world, + current_time, + data_dumping, + PerceptionManager=PerceptionManagerReplay, + DataDumper=DataDumperReplayPly) + + def update_info(self): + pass + + def run_step(self): + + if self.data_dumper: + self.data_dumper.run_step() \ No newline at end of file diff --git a/opencda/core/common/vehicle_manager.py b/opencda/core/common/vehicle_manager.py index 5135f082..9fa44187 100644 --- a/opencda/core/common/vehicle_manager.py +++ b/opencda/core/common/vehicle_manager.py @@ -225,4 +225,4 @@ def destroy(self): self.perception_manager.destroy() self.localizer.destroy() self.vehicle.destroy() - self.map_manager.destroy() + self.map_manager.destroy() \ No newline at end of file diff --git a/opencda/core/common/vehicle_manager_replay.py b/opencda/core/common/vehicle_manager_replay.py new file mode 100644 index 00000000..8013f1de --- /dev/null +++ b/opencda/core/common/vehicle_manager_replay.py @@ -0,0 +1,110 @@ +import uuid +import os + +from opencda.core.sensing.localization.localization_manager \ + import LocalizationManager +from 
opencda.core.sensing.perception.perception_manager_replay \ + import PerceptionManagerReplay, PerceptionManagerReplayBEV +from opencda.core.common.data_dumper_replay \ + import DataDumperReplay +from opencda.core.map.map_manager import MapManager + +class VehicleManagerReplay(object): + + def __init__( + self, + vehicle, + config_yaml, + application, + carla_map, + cav_world, + current_time='', + data_dumping=None, + PerceptionManager=PerceptionManagerReplay, + DataDumper=DataDumperReplay): + + # an unique uuid for this vehicle + self.vid = str(uuid.uuid1()) + self.vehicle = vehicle + self.carla_map = carla_map + + # retrieve the configure for different modules + sensing_config = config_yaml['sensing'] + map_config = config_yaml['map_manager'] + + # localization module + self.localizer = LocalizationManager( + vehicle, sensing_config['localization'], carla_map) + # perception module + self.perception_manager = PerceptionManager( + vehicle, sensing_config['perception'], cav_world, + data_dumping) + + # map manager + self.map_manager = MapManager(vehicle, + carla_map, + map_config) + + # behavior agent + self.agent = None + + if data_dumping: + save_path = os.path.join(data_dumping, current_time, str(vehicle.id)) + self.data_dumper = DataDumper(self.perception_manager, save_path) + else: + self.data_dumper = None + + cav_world.update_vehicle_manager(self) + + def update_info(self): + """ + Call perception and localization module to + retrieve surrounding info an ego position. + """ + # localization + self.localizer.localize() + + ego_pos = self.localizer.get_ego_pos() + ego_spd = self.localizer.get_ego_spd() + + # object detection + objects = self.perception_manager.detect(ego_pos) + + # update the ego pose for map manager + self.map_manager.update_information(ego_pos) + + def run_step(self, target_speed=None): + """ + Execute one step of navigation. 
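+        In replay mode the vehicle pose is set externally by the scenario
+        manager, so this step only dumps sensor data.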
+ """ + + # dump data + if self.data_dumper: + self.data_dumper.run_step(self.perception_manager, + self.localizer, + self.agent) + + def destroy(self): + """ + Destroy the actor vehicle + """ + self.perception_manager.destroy() + self.localizer.destroy() + self.vehicle.destroy() + self.map_manager.destroy() + +class VehicleManagerReplayBEV(VehicleManagerReplay): + + def __init__( + self, + vehicle, + config_yaml, + application, + carla_map, + cav_world, + current_time='', + data_dumping=None): + + super(VehicleManagerReplayBEV, self).__init__(vehicle, config_yaml, application, carla_map, cav_world, + current_time,data_dumping,PerceptionManagerReplayBEV, + DataDumperReplay) \ No newline at end of file diff --git a/opencda/core/safety/safety_manager.py b/opencda/core/safety/safety_manager.py index ac3069d4..7c075420 100644 --- a/opencda/core/safety/safety_manager.py +++ b/opencda/core/safety/safety_manager.py @@ -48,4 +48,5 @@ def update_info(self, data_dict) -> dict: def destroy(self): for sensor in self.sensors: - sensor.destroy() + if hasattr(sensor, 'destroy'): + sensor.destroy() diff --git a/opencda/core/sensing/perception/perception_manager.py b/opencda/core/sensing/perception/perception_manager.py index 49a7cca8..f546a193 100644 --- a/opencda/core/sensing/perception/perception_manager.py +++ b/opencda/core/sensing/perception/perception_manager.py @@ -161,24 +161,12 @@ def __init__(self, vehicle, world, config_yaml, global_position): blueprint.set_attribute('lower_fov', str(config_yaml['lower_fov'])) blueprint.set_attribute('channels', str(config_yaml['channels'])) blueprint.set_attribute('range', str(config_yaml['range'])) - blueprint.set_attribute( - 'points_per_second', str( - config_yaml['points_per_second'])) - blueprint.set_attribute( - 'rotation_frequency', str( - config_yaml['rotation_frequency'])) - blueprint.set_attribute( - 'dropoff_general_rate', str( - config_yaml['dropoff_general_rate'])) - blueprint.set_attribute( - 'dropoff_intensity_limit', str( - config_yaml['dropoff_intensity_limit'])) - blueprint.set_attribute( - 'dropoff_zero_intensity', str( - config_yaml['dropoff_zero_intensity'])) - blueprint.set_attribute( - 'noise_stddev', str( - config_yaml['noise_stddev'])) + blueprint.set_attribute('points_per_second', str(config_yaml['points_per_second'])) + blueprint.set_attribute('rotation_frequency', str(config_yaml['rotation_frequency'])) + blueprint.set_attribute('dropoff_general_rate', str(config_yaml['dropoff_general_rate'])) + blueprint.set_attribute('dropoff_intensity_limit', str(config_yaml['dropoff_intensity_limit'])) + blueprint.set_attribute('dropoff_zero_intensity', str(config_yaml['dropoff_zero_intensity'])) + blueprint.set_attribute('noise_stddev', str(config_yaml['noise_stddev'])) # spawn sensor if global_position is None: @@ -188,8 +176,7 @@ def __init__(self, vehicle, world, config_yaml, global_position): y=global_position[1], z=global_position[2])) if vehicle is not None: - self.sensor = world.spawn_actor( - blueprint, spawn_point, attach_to=vehicle) + self.sensor = world.spawn_actor(blueprint, spawn_point, attach_to=vehicle) else: self.sensor = world.spawn_actor(blueprint, spawn_point) @@ -309,6 +296,7 @@ def _on_data_event(weak_self, event): return # shape:(n, 6) + self.event = event data = np.frombuffer(event.raw_data, dtype=np.dtype([ ('x', np.float32), ('y', np.float32), ('z', np.float32), ('CosAngle', np.float32), ('ObjIdx', np.uint32), @@ -851,4 +839,4 @@ def destroy(self): self.o3d_vis.destroy_window() if self.data_dump: - 
self.semantic_lidar.sensor.destroy() + self.semantic_lidar.sensor.destroy() \ No newline at end of file diff --git a/opencda/core/sensing/perception/perception_manager_replay.py b/opencda/core/sensing/perception/perception_manager_replay.py new file mode 100644 index 00000000..4c9f8ee0 --- /dev/null +++ b/opencda/core/sensing/perception/perception_manager_replay.py @@ -0,0 +1,448 @@ +# -*- coding: utf-8 -*- +""" +Perception module base. +""" + +# Author: Runsheng Xu +# License: TDG-Attribution-NonCommercial-NoDistrib + +import weakref +import sys +import time + +import carla +import cv2 +import numpy as np +import open3d as o3d +import math + +import opencda.core.sensing.perception.sensor_transformation as st +from opencda.core.common.misc import \ + cal_distance_angle, get_speed, get_speed_sumo +from opencda.core.sensing.perception.obstacle_vehicle import \ + ObstacleVehicle +from opencda.core.sensing.perception.static_obstacle import TrafficLight +from opencda.core.sensing.perception.o3d_lidar_libs import \ + o3d_visualizer_init, o3d_pointcloud_encode, o3d_visualizer_show, \ + o3d_camera_lidar_fusion +from opencda.core.sensing.perception.perception_manager import CameraSensor, LidarSensor, SemanticLidarSensor, PerceptionManager + +class CameraSensorReplay(CameraSensor): + + @staticmethod + def spawn_point_estimation(relative_position, global_position): + + pitch = 0 + carla_location = carla.Location(x=0, y=0, z=0) + x, y, z, yaw = relative_position + + # this is for rsu. It utilizes global position instead of relative + # position to the vehicle + if global_position is not None: + carla_location = carla.Location( + x=global_position[0], + y=global_position[1], + z=global_position[2]) + pitch = global_position[3] + yaw = global_position[4] + roll = global_position[5] + + carla_location = carla.Location(x=carla_location.x + x, + y=carla_location.y + y, + z=carla_location.z + z) + + carla_rotation = carla.Rotation(roll=0, yaw=yaw, pitch=pitch) + spawn_point = carla.Transform(carla_location, carla_rotation) + + return spawn_point + +class LidarSensorReplay(LidarSensor): + + def __init__(self, vehicle, world, config_yaml, global_position): + + blueprint = world.get_blueprint_library().find('sensor.lidar.ray_cast') + + if vehicle is not None: + world = vehicle.get_world() + blueprint.set_attribute('channels', str(config_yaml['channels'])) + blueprint.set_attribute('dropoff_general_rate', str(config_yaml['dropoff_general_rate'])) + blueprint.set_attribute('dropoff_intensity_limit', str(config_yaml['dropoff_intensity_limit'])) + blueprint.set_attribute('dropoff_zero_intensity', str(config_yaml['dropoff_zero_intensity'])) + blueprint.set_attribute('upper_fov', str(config_yaml['upper_fov'])) + blueprint.set_attribute('lower_fov', str(config_yaml['lower_fov'])) + blueprint.set_attribute('noise_stddev', str(config_yaml['noise_stddev'])) + blueprint.set_attribute('points_per_second', str(config_yaml['points_per_second'])) + blueprint.set_attribute('range', str(config_yaml['range'])) + else: + # set attribute based on the configuration + blueprint.set_attribute('lidar_type', str(config_yaml['lidar_type'])) ### + blueprint.set_attribute('name', str(config_yaml['name'])) + + blueprint.set_attribute('rotation_frequency', str(config_yaml['rotation_frequency'])) + + # spawn sensor + if global_position is None: + spawn_point = carla.Transform(carla.Location(x=-0.5, z=1.9)) + else: + if str(config_yaml['lidar_type']) == 'Risley_prism': + rotation = carla.Rotation( + pitch = global_position[3], + yaw = 
global_position[4], + roll = global_position[5] + ) + else: + rotation = carla.Rotation( + pitch=0, + yaw=0, + roll=0 + ) + + spawn_point = carla.Transform(carla.Location(x=global_position[0], + y=global_position[1], + z=global_position[2]), + rotation) + self.spawn_point = spawn_point + self.sensor = world.spawn_actor(blueprint, spawn_point, attach_to=vehicle) + + # lidar data + self.data = None + self.timestamp = None + self.frame = 0 + # open3d point cloud object + self.o3d_pointcloud = o3d.geometry.PointCloud() + self.spawn_point = spawn_point + + weak_self = weakref.ref(self) + self.sensor.listen( + lambda event: LidarSensor._on_data_event( + weak_self, event)) + +class SemanticLidarSensorReplay(SemanticLidarSensor): + + def __init__(self, vehicle, world, config_yaml, global_position): + if vehicle is not None: + world = vehicle.get_world() + + blueprint = world.get_blueprint_library().find('sensor.lidar.ray_cast_semantic') + + # set attribute based on the configuration + if 'lidar_type' not in config_yaml: + blueprint.set_attribute('lidar_type', 'default') ### + blueprint.set_attribute('range', str(config_yaml['range'])) + else: + blueprint.set_attribute('lidar_type', config_yaml['lidar_type']) ### + blueprint.set_attribute('name', config_yaml['name']) + + blueprint.set_attribute('rotation_frequency', str(config_yaml['rotation_frequency'])) + + # spawn sensor + if global_position is None: + spawn_point = carla.Transform(carla.Location(x=-0.5, z=1.9)) + else: + if 'lidar_type' in config_yaml and str(config_yaml['lidar_type']) == 'Risley_prism': + rotation = carla.Rotation( + pitch = global_position[3], + yaw = global_position[4], + roll = global_position[5] + ) + else: + rotation = carla.Rotation( + pitch=0, + yaw=0, + roll=0 + ) + + spawn_point = carla.Transform(carla.Location(x=global_position[0], + y=global_position[1], + z=global_position[2]), + rotation) + + if vehicle is not None: + self.sensor = world.spawn_actor( + blueprint, spawn_point, attach_to=vehicle) + else: + self.sensor = world.spawn_actor(blueprint, spawn_point) + + # lidar data + self.points = None + self.obj_idx = None + self.obj_tag = None + + self.timestamp = None + self.frame = 0 + # open3d point cloud object + self.o3d_pointcloud = o3d.geometry.PointCloud() + + weak_self = weakref.ref(self) + self.sensor.listen( + lambda event: self._on_data_event( + weak_self, event)) + + # @staticmethod + # def _on_data_event(weak_self, event): + # """Semantic Lidar method""" + # self = weak_self() + + # time.sleep(5) + # if not self: + # return + + # # shape:(n, 6) + # self.event = event + # data = np.frombuffer(event.raw_data, dtype=np.dtype([ + # ('x', np.float32), ('y', np.float32), ('z', np.float32), + # ('CosAngle', np.float32), ('ObjIdx', np.uint32), + # ('ObjTag', np.uint32)])) + + # # (x, y, z, intensity) + # self.points = np.array([data['x'], data['y'], data['z']]).T + # self.obj_tag = np.array(data['ObjTag']) + # self.obj_idx = np.array(data['ObjIdx']) + + # self.data = data + # self.frame = event.frame + # self.timestamp = event.timestamp + +class PerceptionManagerReplay(PerceptionManager): + + def __init__(self, vehicle, config_yaml, cav_world, + data_dump=False, carla_world=None, infra_id=None): + self.vehicle = vehicle + self.carla_world = carla_world if carla_world is not None \ + else self.vehicle.get_world() + self._map = self.carla_world.get_map() + self.id = infra_id if infra_id is not None else vehicle.id + # self.id = infra_id if infra_id is not None else int(vehicle.attributes['role_name']) + + 
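+        # read the sensing configuration; 'global_position' is typically
+        # present only for fixed (RSU-style) sensor mounts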
self.activate = config_yaml['activate'] + self.camera_visualize = config_yaml['camera']['visualize'] + self.camera_num = config_yaml['camera']['num'] + self.lidar_visualize = config_yaml['lidar']['visualize'] + self.global_position = config_yaml['global_position'] if 'global_position' in config_yaml else None + + self.cav_world = weakref.ref(cav_world)() + ml_manager = cav_world.ml_manager + + if self.activate and data_dump: + sys.exit("When you dump data, please deactivate the " + "detection function for precise label.") + + if self.activate and not ml_manager: + sys.exit( + 'If you activate the perception module, ' + 'then apply_ml must be set to true in' + 'the argument parser to load the detection DL model.') + self.ml_manager = ml_manager + + # we only spawn the camera when perception module is activated or + # camera visualization is needed + if self.activate or self.camera_visualize: + self.rgb_camera = [] + mount_position = config_yaml['camera']['positions'] + assert len(mount_position) == self.camera_num, \ + "The camera number has to be the same as the length of the" \ + "relative positions list" + + for i in range(self.camera_num): + self.rgb_camera.append( + CameraSensorReplay( + vehicle, self.carla_world, mount_position[i], + self.global_position)) + + else: + self.rgb_camera = None + + # we only spawn the LiDAR when perception module is activated or lidar + # visualization is needed + if self.activate or self.lidar_visualize: + self.lidar = LidarSensorReplay(vehicle, + self.carla_world, + config_yaml['lidar'], + self.global_position) + self.o3d_vis = o3d_visualizer_init(self.id) + else: + self.lidar = None + self.o3d_vis = None + + # if data dump is true, semantic lidar is also spawned + self.data_dump = data_dump + if data_dump: + self.semantic_lidar = SemanticLidarSensorReplay(vehicle, + self.carla_world, + # config_yaml['lidar'], + config_yaml['semanticlidar'],### + self.global_position) + + # count how many steps have been passed + self.count = 0 + # ego position + self.ego_pos = None + + # the dictionary contains all objects + self.objects = {} + # traffic light detection related + self.traffic_thresh = config_yaml['traffic_light_thresh'] \ + if 'traffic_light_thresh' in config_yaml else 50 + + def deactivate_mode(self, objects): + """ + Object detection using server information directly. + + Parameters + ---------- + objects : dict + The dictionary that contains all category of detected objects. + The key is the object category name and value is its 3d coordinates + and confidence. + + Returns + ------- + objects: dict + Updated object dictionary. + """ + world = self.carla_world + + vehicle_list = world.get_actors().filter("*vehicle*") + + # !!! remarks: filter_vehicle_out_sensor() turned off for rsu lidars + # with fixed locations otherwise recording replay simulation + # will have missing vehicles and fail exact replication + + # use semantic lidar to filter out vehicles out of the range + # if self.data_dump: + # vehicle_list = self.filter_vehicle_out_sensor(vehicle_list) + + # convert carla.Vehicle to opencda.ObstacleVehicle if lidar + # visualization is required. 
+ if self.lidar: + vehicle_list = [ + ObstacleVehicle( + None, + None, + v, + self.lidar.sensor, + self.cav_world.sumo2carla_ids) for v in vehicle_list] + else: + vehicle_list = [ + ObstacleVehicle( + None, + None, + v, + None, + self.cav_world.sumo2carla_ids) for v in vehicle_list] + + objects.update({'vehicles': vehicle_list}) + + if self.camera_visualize: + while self.rgb_camera[0].image is None: + continue + + # names = ['front', 'right', 'left', 'back'] + names = ['front', 'back'] + + for (i, rgb_camera) in enumerate(self.rgb_camera): + if i > self.camera_num - 1 or i > self.camera_visualize - 1: + break + # we only visualiz the frontal camera + rgb_image = np.array(rgb_camera.image) + # draw the ground truth bbx on the camera image + rgb_image = self.visualize_3d_bbx_front_camera(objects, + rgb_image, + i) + # resize to make it fittable to the screen + rgb_image = cv2.resize(rgb_image, (0, 0), fx=0.4, fy=0.4) + + # show image using cv2 + cv2.imshow( + '%s camera of actor %d, perception deactivated' % + (names[i], self.id), rgb_image) + cv2.waitKey(1) + + if self.lidar_visualize: + while self.lidar.data is None: + continue + o3d_pointcloud_encode(self.lidar.data, self.lidar.o3d_pointcloud) + # render the raw lidar + o3d_visualizer_show( + self.o3d_vis, + self.count, + self.lidar.o3d_pointcloud, + objects) + + # add traffic light + objects = self.retrieve_traffic_lights(objects) + self.objects = objects + + return objects + +class PerceptionManagerReplayBEV(PerceptionManagerReplay): + + def __init__(self, vehicle, config_yaml, cav_world, + data_dump=False, carla_world=None, infra_id=None): + self.vehicle = vehicle + self.carla_world = carla_world if carla_world is not None \ + else self.vehicle.get_world() + self._map = self.carla_world.get_map() + self.id = infra_id if infra_id is not None else vehicle.id + # self.id = infra_id if infra_id is not None else int(vehicle.attributes['role_name']) + + self.activate = config_yaml['activate'] + self.camera_visualize = config_yaml['camera']['visualize'] + self.camera_num = config_yaml['camera']['num'] + self.lidar_visualize = config_yaml['lidar']['visualize'] + self.global_position = config_yaml['global_position'] if 'global_position' in config_yaml else None + + self.cav_world = weakref.ref(cav_world)() + + if self.activate and data_dump: + sys.exit("When you dump data, please deactivate the " + "detection function for precise label.") + + if self.activate or self.camera_visualize: + self.rgb_camera = [] + mount_position = config_yaml['camera']['positions'] + assert len(mount_position) == self.camera_num, \ + "The camera number has to be the same as the length of the" \ + "relative positions list" + + for i in range(self.camera_num): + self.rgb_camera.append( + CameraSensorReplayBEV( + vehicle, self.carla_world, mount_position[i], + self.global_position)) + + else: + self.rgb_camera = None + + self.lidar = None + self.o3d_vis = None + + self.semantic_lidar = None + # count how many steps have been passed + self.count = 0 + # ego position + self.ego_pos = None + + # the dictionary contains all objects + self.objects = {} + # traffic light detection related + self.traffic_thresh = config_yaml['traffic_light_thresh'] \ + if 'traffic_light_thresh' in config_yaml else 50 + +class CameraSensorReplayBEV(CameraSensor): + + @staticmethod + def spawn_point_estimation(relative_position, global_position): + + # pitch = 0 + carla_location = carla.Location(x=0, y=0, z=0) + # x, y, z, pitch, yaw, roll = relative_position + + carla_location = 
carla.Location(x=carla_location.x, + y=carla_location.y, + z=carla_location.z+40) + + carla_rotation = carla.Rotation(roll=0, yaw=0, pitch=-90) + spawn_point = carla.Transform(carla_location, carla_rotation) + + return spawn_point diff --git a/opencda/mv_files.py b/opencda/mv_files.py new file mode 100644 index 00000000..cc4b3e4f --- /dev/null +++ b/opencda/mv_files.py @@ -0,0 +1,29 @@ +import os, shutil + +def main(): + + src_dir = '/media/hdd1/opv2v/opencda_dump/train/' + des_dir = '/media/hdd1/opv2v/opencda_dump/test/' + rang = [292, 315] + interval = 2 + + exts = ['_camera0.png', '.yaml', '.pcd'] + + for scene_dir in os.listdir(src_dir): + + for rsu_dir in os.listdir(os.path.join(src_dir, scene_dir)): + + des_path = os.path.join(des_dir, scene_dir, rsu_dir) + + if not os.path.exists(des_path): + os.makedirs(des_path) + + for i in range(*rang, interval): + ind = str(i).zfill(6) + + for ext in exts: + shutil.move(os.path.join(src_dir, scene_dir, rsu_dir, ind+ext), os.path.join(des_dir, scene_dir, rsu_dir, ind+ext)) + +if __name__ == '__main__': + + main() \ No newline at end of file diff --git a/opencda/scenario_testing/config_yaml/default.yaml b/opencda/scenario_testing/config_yaml/default.yaml index 48a486bf..d233afdd 100644 --- a/opencda/scenario_testing/config_yaml/default.yaml +++ b/opencda/scenario_testing/config_yaml/default.yaml @@ -25,15 +25,16 @@ world: rsu_base: sensing: perception: + # global_position: [] activate: false # when not activated, objects positions will be retrieved from server directly camera: visualize: 4 # how many camera images need to be visualized. 0 means no visualization for camera - num: 4 # how many cameras are mounted on the vehicle. Maximum 3(frontal, left and right cameras) + num: 2 # how many cameras are mounted on the vehicle. Maximum 3(frontal, left and right cameras) # relative positions (x,y,z,yaw) of the camera. len(positions) should be equal to camera num positions: - [2.5, 0, 1.0, 0] - - [0.0, 0.3, 1.8, 100] - - [0.0, -0.3, 1.8, -100] + # - [0.0, 0.3, 1.8, 100] + # - [0.0, -0.3, 1.8, -100] - [-2.0, 0.0, 1.5, 180] lidar: # lidar sensor configuration, check CARLA sensor reference for more details visualize: true diff --git a/opencda/scenario_testing/config_yaml/logreplay.yaml b/opencda/scenario_testing/config_yaml/logreplay.yaml new file mode 100644 index 00000000..5575dbeb --- /dev/null +++ b/opencda/scenario_testing/config_yaml/logreplay.yaml @@ -0,0 +1,51 @@ +world: + fixed_delta_seconds: 0.1 + weather: + sun_altitude_angle: 60 # 90 is the midday and -90 is the midnight +rsu_base: + sensing: + perception: + camera: + visualize: 1 # how many camera images need to be visualized. 0 means no visualization for camera + num: 1 # how many cameras are mounted on the vehicle. Maximum 3(frontal, left and right cameras) + positions: + - [2.5, 0, 1.0, 0] + lidar: &lidar # lidar sensor configuration, check CARLA sensor reference for more details + lidar_type: Risley_prism + name: horizon + # lidar_type: Surround + # name: pandar64 + visualize: true + rotation_frequency: 10 + semanticlidar: *lidar +# Basic parameters of the vehicles +vehicle_base: + sensing: # include perception and localization + perception: + activate: false # when not activated, objects positions will be retrieved from server directly + camera: + visualize: 1 # how many camera images need to be visualized. 0 means no visualization for camera + num: 0 # how many cameras are mounted on the vehicle. + positions: # relative positions (x,y,z,yaw) of the camera. 
len(positions) should be equal to camera num + # - [2.5, 0, 1.0, 0] + # - [0, 0.3, 1.8, 100] + # - [0, -0.3, 1.8, -100] + # - [-2.0, 0, 1.5, 180] + lidar: # lidar sensor configuration, check CARLA sensor reference for more details + visualize: true + channels: 64 + range: 120 + points_per_second: 1300000 + rotation_frequency: 10 # the simulation is 20 fps + upper_fov: 2 + lower_fov: -25 + dropoff_general_rate: 0.1 + dropoff_intensity_limit: 0.7 + dropoff_zero_intensity: 0.15 + noise_stddev: 0.02 + map_manager: + visualize: false # whether to visualize the rasteraization map + activate: false # whether activate the map manager +carla_traffic_manager: + fixed_delta_seconds: 0.1 # i.e. 10hz +rsu_loc_dir: '/media/hdd1/OpenCOOD/logreplay/scenario/rsu.json' \ No newline at end of file diff --git a/opencda/scenario_testing/config_yaml/logreplay_bev.yaml b/opencda/scenario_testing/config_yaml/logreplay_bev.yaml new file mode 100644 index 00000000..759aaa61 --- /dev/null +++ b/opencda/scenario_testing/config_yaml/logreplay_bev.yaml @@ -0,0 +1,53 @@ +world: + fixed_delta_seconds: 0.1 + weather: + sun_altitude_angle: 60 # 90 is the midday and -90 is the midnight +rsu_base: + sensing: + perception: + activate: false # when not activated, objects positions will be retrieved from server directly + camera: + visualize: 0 # how many camera images need to be visualized. 0 means no visualization for camera + num: 0 # how many cameras are mounted on the vehicle. Maximum 3(frontal, left and right cameras) + positions: + # - [2.5, 0, 1.0, 0] + lidar: # lidar sensor configuration, check CARLA sensor reference for more details + lidar_type: Risley_prism + name: horizon + # lidar_type: Surround + # name: pandar64 + visualize: false + rotation_frequency: 10 + semanticlidar: &vehlidar + visualize: false + channels: 64 + range: 120 + points_per_second: 1300000 + rotation_frequency: 10 # the simulation is 20 fps + upper_fov: 2 + lower_fov: -25 + dropoff_general_rate: 0.1 + dropoff_intensity_limit: 0.7 + dropoff_zero_intensity: 0.15 + noise_stddev: 0.02 +# Basic parameters of the vehicles +vehicle_base: + sensing: # include perception and localization + perception: + activate: false # when not activated, objects positions will be retrieved from server directly + camera: + visualize: 1 # how many camera images need to be visualized. 0 means no visualization for camera + num: 1 # how many cameras are mounted on the vehicle. + positions: # relative positions (x,y,z,yaw) of the camera. len(positions) should be equal to camera num + - [2.5, 0, 1.0, 0] + # - [0, 0.3, 1.8, 100] + # - [0, -0.3, 1.8, -100] + # - [-2.0, 0, 1.5, 180] + lidar: *vehlidar # lidar sensor configuration, check CARLA sensor reference for more details + semanticlidar: *vehlidar + map_manager: + visualize: false # whether to visualize the rasteraization map + activate: false # whether activate the map manager +carla_traffic_manager: + fixed_delta_seconds: 0.1 # i.e. 
10hz +rsu_loc_dir: '/media/hdd1/OpenCOOD/logreplay/scenario/rsu.json' \ No newline at end of file diff --git a/opencda/scenario_testing/config_yaml/logreplay_metric.yaml b/opencda/scenario_testing/config_yaml/logreplay_metric.yaml new file mode 120000 index 00000000..46783326 --- /dev/null +++ b/opencda/scenario_testing/config_yaml/logreplay_metric.yaml @@ -0,0 +1 @@ +/home/x/Projects/lidar_placement/OpenCDA/opencda/scenario_testing/config_yaml/logreplay.yaml \ No newline at end of file diff --git a/opencda/scenario_testing/logreplay.py b/opencda/scenario_testing/logreplay.py new file mode 100644 index 00000000..26776f99 --- /dev/null +++ b/opencda/scenario_testing/logreplay.py @@ -0,0 +1,213 @@ +import carla +from collections import OrderedDict +from omegaconf import OmegaConf + +import opencda.scenario_testing.utils.sim_api_replay as sim_api +from opencda.core.common.cav_world import CavWorld +from opencda.core.common.misc import TOWN_DICTIONARY +import os, time + +def clean_yaml(path): + + ''' + remove error/ warning messages + ''' + + with open(path, 'r') as r: + + yaml_r = r.readlines() + + with open(path, 'w') as w: + + del_flag = False + + for textline in yaml_r: + + if '!!python/' not in textline: + if del_flag: + try: + # cast = int(textline[4:]) + del_flag = False + except ValueError: + pass + else: + w.write(textline) + else: + del_flag = True + +def extract_vehicle_positions(dataset_dir, scene_name, timestamp, get_bg_veh_bp=False): + + cav_pos = OrderedDict() + bg_veh_pos = OrderedDict() + + bg_veh_bp = OrderedDict() if get_bg_veh_bp else None + cav_ids = os.listdir(os.path.join(dataset_dir, scene_name)) + cav_ids = sorted([c for c in cav_ids if c.isalnum() and int(c) > 0]) # exclude RSUs with -ve ids + cav0 = cav_ids[0] + + for cav_id in cav_ids: + if not os.path.isdir(os.path.join(dataset_dir, scene_name, cav_id)): + continue + + yaml_path = os.path.join(dataset_dir,scene_name,cav_id,timestamp+'.yaml') + clean_yaml(yaml_path) + cav_yaml = OmegaConf.load(yaml_path) + cav0 = cav_ids[0] # remark + + if cav_id == cav0: #in cav_ids: + cav_pos[cav_id] = cav_yaml['true_ego_pos'] + else: + bg_veh_pos[int(cav_id)] = cav_yaml['true_ego_pos'] + + for bg_veh_id, bg_veh_content in cav_yaml['vehicles'].items(): + if str(bg_veh_id) == cav0 or int(bg_veh_id) < 0: + continue + + bg_veh_pos[bg_veh_id] = bg_veh_content['location']+bg_veh_content['angle'] + + if bg_veh_bp is not None: + bg_veh_bp[bg_veh_id] = {'bp_id': bg_veh_content['bp_id'], 'color': bg_veh_content['color']} + + cav_pos = {cav0: cav_pos[cav0]} # only choose ego vehicle + + if get_bg_veh_bp: + return cav_pos, bg_veh_pos, bg_veh_bp + else: + return cav_pos, bg_veh_pos + +def structure_transform_cav(pose): + """ + Convert the pose saved in list to transform format. 
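+    OPV2V yaml files store rotation as (roll, yaw, pitch), matching the
+    order written by the data dumper.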
+ + Parameters + ---------- + pose : list + x, y, z, roll, yaw, pitch + + Returns + ------- + carla.Transform + """ + cur_pose = carla.Transform(carla.Location(x=pose[0], + y=pose[1], + z=pose[2]), + carla.Rotation(roll=pose[3], + yaw=pose[4], + pitch=pose[5])) + + return cur_pose + +def run_scenario(opt, scenario_params, ScenarioManager=sim_api.ScenarioManagerReplay): + + veh_data_dump = True + rsu_data_dump = True + + dataset_dir = scenario_params['root_dir'] + scene_name = scenario_params['current_time'] + base_dataset_flag = scenario_params['base_dataset_flag'] + cav_dir = sorted([d for d in os.listdir(os.path.join(dataset_dir, scene_name)) + if os.path.isdir(os.path.join(dataset_dir,scene_name,d))])[0] + timestamps = set(f[:6] for f in os.listdir(os.path.join(dataset_dir,scene_name,cav_dir))) + timestamps = sorted(list(timestamps)) + + # create CAV world + cav_world = CavWorld(opt.apply_ml) + + # create scenario manager + scenario_manager = ScenarioManager(scenario_params, opt.apply_ml, opt.version, + town=TOWN_DICTIONARY[scene_name], cav_world=cav_world) + + single_cav_dict = rsu_list = bg_veh_dict = None + + try: + + if opt.record: + scenario_manager.client. \ + start_recorder(f'{scene_name}.log', True) + + # set up vehicles and rsus + bg_veh_bp = None + + if base_dataset_flag: + # remark: after replaying original to generate base, veh model and color is available + cav_pos, bg_veh_pos, bg_veh_bp = extract_vehicle_positions(dataset_dir, scene_name, timestamps[0], True) + else: + # original OPV2V dataset did not record background cav details e.g. bp_id, exact replication is not possible + cav_pos, bg_veh_pos = extract_vehicle_positions(dataset_dir, scene_name, timestamps[0]) + + # initialize cav actors + single_cav_dict = scenario_manager.create_vehicle_manager(cav_pos, timestamps[0], application=['single'], data_dump=veh_data_dump) + bg_veh_dict = scenario_manager.create_traffic_carla(bg_veh_pos, timestamps[0], bg_veh_bp) + rsu_list = scenario_manager.create_rsu_manager(data_dump=rsu_data_dump) + + spectator = scenario_manager.world.get_spectator() + + # run steps + + for t in timestamps: + print(t) + + scenario_manager.tick() + anchor_cav = single_cav_dict[list(single_cav_dict.keys())[0]]['actor'] + transform = anchor_cav.vehicle.get_transform() + spectator.set_transform( + carla.Transform(transform.location + carla.Location(z=40), + carla.Rotation(pitch=-90))) + + # move vehicles and rsus, adjust data_dumper count + # remark: data_dumper normally starts from t=0 and increment by 1 each time + # however, recording data starts after t=60 and differs by 2 + # according to recording original OPV2V's settings + + cav_pos, bg_veh_pos = extract_vehicle_positions(dataset_dir, scene_name, t) + + for cav_id, cav_content in single_cav_dict.items(): + if veh_data_dump: + cav_content['actor'].data_dumper.count = int(t) + + scenario_manager.move_vehicle(cav_content, t, + structure_transform_cav(cav_pos[cav_id])) + + for bg_veh_id, bg_veh_content in bg_veh_dict.items(): + if bg_veh_id in bg_veh_pos: + scenario_manager.move_vehicle(bg_veh_content, t, + structure_transform_cav(bg_veh_pos[bg_veh_id])) + + for rsu in rsu_list: + if rsu_data_dump: + rsu.data_dumper.count = int(t) + + # run_step update traffic and dump data + + for cav_id, cav_content in single_cav_dict.items(): + single_cav = cav_content['actor'] + single_cav.update_info() + single_cav.run_step() + + for rsu in rsu_list: + rsu.update_info() + rsu.run_step() + + # break ### debug + + except KeyboardInterrupt: + pass + + finally: + + 
time.sleep(8) + + if opt.record: + scenario_manager.client.stop_recorder() + + scenario_manager.close() + + if single_cav_dict is not None: + for cav_id, cav_content in single_cav_dict.items(): + cav_content['actor'].destroy() + if rsu_list is not None: + for r in rsu_list: + r.destroy() + if bg_veh_dict is not None: + for veh_content in bg_veh_dict.values(): + veh_content['actor'].destroy() diff --git a/opencda/scenario_testing/logreplay_bev.py b/opencda/scenario_testing/logreplay_bev.py new file mode 100644 index 00000000..6e4124ea --- /dev/null +++ b/opencda/scenario_testing/logreplay_bev.py @@ -0,0 +1,112 @@ +import carla +from collections import OrderedDict +from omegaconf import OmegaConf + +import opencda.scenario_testing.utils.sim_api_replay as sim_api +from opencda.core.common.cav_world import CavWorld +from opencda.core.common.misc import TOWN_DICTIONARY +import os, time + +from opencda.scenario_testing.logreplay \ + import clean_yaml, extract_vehicle_positions, structure_transform_cav, run_scenario + +def run_scenario(opt, scenario_params, ScenarioManager=sim_api.ScenarioManagerReplayBEV): + + veh_data_dump = True + # rsu_data_dump = True + + dataset_dir = scenario_params['root_dir'] + scene_name = scenario_params['current_time'] + base_dataset_flag = scenario_params['base_dataset_flag'] + cav_dir = sorted([d for d in os.listdir(os.path.join(dataset_dir, scene_name)) + if os.path.isdir(os.path.join(dataset_dir,scene_name,d))])[0] + timestamps = set(f[:6] for f in os.listdir(os.path.join(dataset_dir,scene_name,cav_dir))) + timestamps = sorted(list(timestamps)) + + # create CAV world + cav_world = CavWorld(opt.apply_ml) + + # create scenario manager + scenario_manager = ScenarioManager(scenario_params, opt.apply_ml, opt.version, + town=TOWN_DICTIONARY[scene_name], cav_world=cav_world) + + single_cav_dict = bg_veh_dict = None + + try: + + if opt.record: + scenario_manager.client. 
\ + start_recorder(f'{scene_name}.log', True) + + bg_veh_bp = None + + if base_dataset_flag: + cav_pos, bg_veh_pos, bg_veh_bp = extract_vehicle_positions(dataset_dir, scene_name, timestamps[0], True) + else: + cav_pos, bg_veh_pos = extract_vehicle_positions(dataset_dir, scene_name, timestamps[0]) + + veh_data_dump = True + + single_cav_dict = scenario_manager.create_vehicle_manager(cav_pos, timestamps[0], application=['single'], data_dump=veh_data_dump) + + bg_veh_dict = scenario_manager.create_traffic_carla(bg_veh_pos, timestamps[0], bg_veh_bp) + + if veh_data_dump: + for cav_id, cav_content in single_cav_dict.items(): + cav_content['actor'].data_dumper.count = int(timestamps[0]) + + spectator = scenario_manager.world.get_spectator() + + # run steps + + for t in timestamps: + print(t) + + scenario_manager.tick() + anchor_cav = single_cav_dict[list(single_cav_dict.keys())[0]]['actor'] + transform = anchor_cav.vehicle.get_transform() + spectator.set_transform( + carla.Transform(transform.location + carla.Location(z=50), + carla.Rotation(pitch=-90))) + + # move vehicles and rsus, adjust data_dumper count + + cav_pos, bg_veh_pos = extract_vehicle_positions(dataset_dir, scene_name, t) + + for cav_id, cav_content in single_cav_dict.items(): + if veh_data_dump: + cav_content['actor'].data_dumper.count = int(t) + + scenario_manager.move_vehicle(cav_content, t, + structure_transform_cav(cav_pos[cav_id])) + + for bg_veh_id, bg_veh_content in bg_veh_dict.items(): + if bg_veh_id in bg_veh_pos: + scenario_manager.move_vehicle(bg_veh_content, t, + structure_transform_cav(bg_veh_pos[bg_veh_id])) + + # run_step update traffic and dump data + + for cav_id, cav_content in single_cav_dict.items(): + single_cav = cav_content['actor'] + single_cav.update_info() + single_cav.run_step() + + except KeyboardInterrupt: + pass + + finally: + + time.sleep(8) + + if opt.record: + scenario_manager.client.stop_recorder() + + scenario_manager.close() + + if single_cav_dict is not None: + for cav_id, cav_content in single_cav_dict.items(): + cav_content['actor'].destroy() + if bg_veh_dict is not None: + for veh_content in bg_veh_dict.values(): + veh_content['actor'].destroy() diff --git a/opencda/scenario_testing/logreplay_metric.py b/opencda/scenario_testing/logreplay_metric.py new file mode 100644 index 00000000..9921c16f --- /dev/null +++ b/opencda/scenario_testing/logreplay_metric.py @@ -0,0 +1,55 @@ +from opencda.scenario_testing.utils import sim_api_replay +from opencda.core.common.cav_world import CavWorld +from opencda.core.common.misc import TOWN_DICTIONARY +import os, time + +def run_scenario(opt, scenario_params): + + rsu_data_dump = True + + dataset_dir = scenario_params['root_dir'] + scene_name = scenario_params['current_time'] + cav_dir = sorted([d for d in os.listdir(os.path.join(dataset_dir,scene_name)) if os.path.isdir(os.path.join(dataset_dir,scene_name,d))])[0] + timestamps = set(f[:6] for f in os.listdir(os.path.join(dataset_dir,scene_name,cav_dir))) + timestamps = sorted(list(timestamps)) + + # create CAV world + cav_world = CavWorld(opt.apply_ml) + + # create scenario manager + scenario_manager = sim_api_replay.ScenarioManagerReplayPly(scenario_params, opt.apply_ml, opt.version, + town=TOWN_DICTIONARY[scene_name], cav_world=cav_world) + + rsu_list = scenario_manager.create_rsu_manager(data_dump=rsu_data_dump) + + try: + # run steps + + for t in timestamps[:1]: + + print(t) + + scenario_manager.tick() + + for rsu in rsu_list: + if rsu_data_dump: + rsu.data_dumper.count = int(t) + + # run_step update 
traffic and dump data + + for rsu in rsu_list: + rsu.update_info() + rsu.run_step() + + except KeyboardInterrupt: + pass + + finally: + + time.sleep(8) + + scenario_manager.close() + + if rsu_list is not None: + for r in rsu_list: + r.destroy() diff --git a/opencda/scenario_testing/utils/ComputeDNUC.py b/opencda/scenario_testing/utils/ComputeDNUC.py new file mode 100644 index 00000000..b0d27e6a --- /dev/null +++ b/opencda/scenario_testing/utils/ComputeDNUC.py @@ -0,0 +1,397 @@ +import shutil, random, math, os, json, argparse +import numpy as np +import open3d as o3d +random.seed(123) + +def is_number(s): + try: + float(s) + return True + except ValueError: + pass + try: + import unicodedata + unicodedata.numeric(s) + return True + except (TypeError, ValueError): + pass + return False + +def read_pcd(file): + # file_split = os.path.splitext(file) + pcd = [] + obj_tag = [] + with open(file, 'r') as r: + for line in r.readlines(): + l = line.split(' ') + + if not is_number(l[0]): + continue + + pcd.append(list(map(float, l[:3]))) + obj_tag.append(int(l[5].strip())) + return np.array(pcd), np.array(obj_tag) + +import torch +import torch.nn.functional as F +def project_points_by_matrix_torch(points, transformation_matrix): + # convert to homogeneous coordinates via padding 1 at the last dimension. + # (N, 4) + points_homogeneous = F.pad(points, (0, 1), mode="constant", value=1) + # (N, 4) + projected_points = torch.einsum("ik, jk->ij", points_homogeneous, + transformation_matrix) + return projected_points[:, :3] + +def pcd_coordinate_convertor(file, trans_mat): + file_split = os.path.splitext(file) + with open(file, 'r') as r: + # with open(file+'.new', 'w') as w: + with open(file_split[0]+'_conv'+file_split[1], 'w') as w: + for l in r.readlines(): + # print(l.split(' ')[0]) + if is_number(l.split(' ')[0]): + #print(l) + + # loc = np.matrix([float(l[0]), float(l[1]), float(l[2]), 1.0]) + x = float(l.split(' ')[0]) + y = float(l.split(' ')[1]) + z = float(l.split(' ')[2]) + loc = np.matrix([x, y, z, 1.0]) + + loc_target = np.dot(trans_mat, loc.T) + loc_target = loc_target.tolist() + # print(loc_target) + + l_new_list = [0, 0, 0, 0, 0, 0] + + l_new_list[0] = str(loc_target[0][0]) + l_new_list[1] = str(loc_target[1][0]) + l_new_list[2] = str(loc_target[2][0]) + l_new_list[3] = l.split(' ')[3] + l_new_list[4] = l.split(' ')[4] + l_new_list[5] = l.split(' ')[5] + l_new = ' '.join(l_new_list) + w.write(l_new) + + else: + w.write(l) + print('converted', file_split[0]+'_conv'+file_split[1]) + # if 'nan' not in l: + # w.write(l) + # shutil.move(file+'.new', file) + +def computeNUC(file, d, disk_radius, p, target_field): + ''' + file -- semantic ply file path + d -- the number of disk + disk_radius -- the radius of disk + p -- disk area percentage + ''' + point_list = semantic_filter(file, target_field) + total_point = len(point_list) # total number of point + random_d_index = random.sample(range(total_point), d) # choose d indexes from total_point randomly, d must be smaller than total_point + total_nums_in_disk = 0 + viz(np.array(point_list)) ### + + tmp_val = [] # n_i / (N * p) + for index in random_d_index: + nums_in_index = nums_in_disk(point_list, index, disk_radius) # compute the point number in index_th disk + total_nums_in_disk = total_nums_in_disk + nums_in_index + tmp_val.append(nums_in_index / (total_point *p)) + + avg = total_nums_in_disk / (d * total_point * p) + + nuc = 0 + for val in tmp_val: + nuc = nuc + math.pow(val - avg, 2) + nuc = math.sqrt(nuc / d) + + return total_point, nuc + 
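+# In formula form (as implemented in computeNUC above): with N filtered points,
+# d sampled disks, p = disk_area / field_area, and n_i = points inside disk i,
+#   avg = (sum_i n_i) / (d * N * p)
+#   NUC = sqrt( (1/d) * sum_i ( n_i / (N * p) - avg )^2 )
+# i.e. the spread of normalized per-disk densities; lower means a more uniform cloud.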
+def nums_in_disk(point_list, point_index, disk_radius): + disk_center_point = point_list[point_index] + d_x = disk_center_point[0] + d_y = disk_center_point[1] + # d_z = disk_center_point[2] + point_nums = 0 + for point in point_list: + p_x = point[0] + p_y = point[1] + # p_z = point[2] + dist = math.sqrt(math.pow(d_x - p_x, 2) + math.pow(d_y - p_y, 2)) + if dist <= disk_radius: + point_nums = point_nums + 1 + return point_nums + +def nums_in_disk_mat(point_list, point_index, disk_radius): + point_list = np.array(point_list) + disk_center_point = point_list[point_index] + + dist = np.linalg.norm(point_list[:,:2]-disk_center_point[:2], axis=1) + point_nums = (dist <= disk_radius).sum() + return point_nums + +def semantic_filter_mat(pcd, obj_tag): + target_tags = np.array([7, 8]) # ['road', 'sidewalk'] + semantic_mask = np.in1d(obj_tag, target_tags) + return pcd[semantic_mask] + +def area_filter(pcd, target_field): + lidar_maks = (target_field[0] < pcd[:,0]) & (pcd[:,0] < target_field[1]) & (target_field[2] < pcd[:,1]) & (pcd[:,1] < target_field[3]) + return pcd[lidar_maks] + +def semantic_filter(file, target_field): + target_tag_1 = 7 # 7 means Road + target_tag_2 = 8 # 8 means SideWalk + filtered_list = [] + + file_split = os.path.splitext(file) + file = file_split[0]+'_conv'+file_split[1] + + with open(file, 'r') as r: + with open(file+'.new', 'w') as w: + for l in r.readlines(): + # print(l.split(' ')[0]) + + if is_number(l.split(' ')[0]): + if int(l.split(' ')[5]) != target_tag_1 and int(l.split(' ')[5]) != target_tag_2: + continue + l_x = float(l.split(' ')[0]) + l_y = float(l.split(' ')[1]) + if (l_x < target_field[0]) or (l_x > target_field[1]) or (l_y < target_field[2]) or (l_y > target_field[3]): + continue + l_new_list = [l.split(' ')[0], + l.split(' ')[1], + l.split(' ')[2], + l.split(' ')[3], + l.split(' ')[4], + l.split(' ')[5]] + + pos_list = [float(l.split(' ')[0]), float(l.split(' ')[1]), float(l.split(' ')[2])] # save (x, y, z) + filtered_list.append(pos_list) + + l_new = ' '.join(l_new_list) + w.write(l_new) + + else: + w.write(l) + + with open(file+'.new', 'r') as rd: + with open(file + '.filtered', 'w') as wt: + for i, line in enumerate(rd.readlines()): + if i == 2: + line_new = [] + line_new.append(line.split(' ')[0]) + line_new.append(line.split(' ')[1]) + line_new.append(str(len(filtered_list))) + write_str = ' '.join(line_new) + '\n' + wt.write(write_str) + else: + wt.write(line) + os.remove(file+'.new') + new_name = file[:-4] + '_filtered.ply' + shutil.move(file + '.filtered', new_name) + print('filtered', new_name) + return filtered_list + +def computeNUC_mat(point_list, d, disk_radius, p): + ''' + file -- semantic ply file path + d -- the number of disk + disk_radius -- the radius of disk + p -- disk area percentage + ''' + # point_list = semantic_filter_mat(pcd, obj_tag, target_field) + total_point = len(point_list) # total number of point + random_d_index = random.sample(range(total_point), d) # choose d indexes from total_point randomly, d must be smaller than total_point + total_nums_in_disk = 0 + + tmp_val = [] # n_i / (N * p) + for index in random_d_index: + nums_in_index = nums_in_disk_mat(point_list, index, disk_radius) # compute the point number in index_th disk + total_nums_in_disk = total_nums_in_disk + nums_in_index + tmp_val.append(nums_in_index) + + avg = total_nums_in_disk / (d * total_point * p) + + tmp_val = np.array(tmp_val) / (total_point *p) + nuc = math.sqrt(((tmp_val - avg)**2).sum() / d) + + return total_point, nuc + +def x_to_world(pose): 
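+    """
+    Build the 4x4 homogeneous transform (rotation plus translation) that maps
+    points from the given pose frame to the world frame; pose is
+    (x, y, z, roll, yaw, pitch) with angles in degrees, as stored by OPV2V.
+    """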
+ + x, y, z, roll, yaw, pitch = pose[:] + + # used for rotation matrix + c_y = np.cos(np.radians(yaw)) + s_y = np.sin(np.radians(yaw)) + c_r = np.cos(np.radians(roll)) + s_r = np.sin(np.radians(roll)) + c_p = np.cos(np.radians(pitch)) + s_p = np.sin(np.radians(pitch)) + + matrix = np.identity(4) + # translation matrix + matrix[0, 3] = x + matrix[1, 3] = y + matrix[2, 3] = z + + # rotation matrix + matrix[0, 0] = c_p * c_y + matrix[0, 1] = c_y * s_p * s_r - s_y * c_r + matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r + matrix[1, 0] = s_y * c_p + matrix[1, 1] = s_y * s_p * s_r + c_y * c_r + matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r + matrix[2, 0] = s_p + matrix[2, 1] = -c_p * s_r + matrix[2, 2] = c_p * c_r + + return matrix +def x1_to_x2(x1, x2): + + x1_to_world = x_to_world(x1) + x2_to_world = x_to_world(x2) + world_to_x2 = np.linalg.inv(x2_to_world) + + transformation_matrix = np.dot(world_to_x2, x1_to_world) + return transformation_matrix + +BOUND = 10 +rsu_loc_dir = '/media/hdd1/OpenCOOD/logreplay/scenario/rsu.json' +with open(rsu_loc_dir, 'r') as j: + rsu_configs = json.load(j) +lidar_range = {} +def get_range(scenario_folder): + coors = rsu_configs['locations'][str(rsu_configs['scenes'][scenario_folder])] + coor_x = [c[0] for c in coors.values()] + coor_y = [c[1] for c in coors.values()] + # GT_RANGE = [-140, -40, -3, 140, 40, 1] + r = (min(coor_x)-BOUND, min(coor_y)-BOUND, -3, max(coor_x)+BOUND, max(coor_y)+BOUND, 1) + return r +def get_target_field(r): + return r[0], r[3], r[1], r[4] +def get_center(r): + return [(r[0]+r[3])/2, (r[1]+r[4])/2] + [0]*4 +def get_lidar_pose(scenario_folder, id): + if int(id) < 0: + id = str(-int(id)-1) + coors = rsu_configs['locations'][str(rsu_configs['scenes'][scenario_folder])] + return coors[id] + +colors = { + 0: [0, 0, 0], # None + 1: [70, 70, 70], # Buildings + 2: [190, 153, 153], # Fences + 3: [72, 0, 90], # Other + 4: [220, 20, 60], # Pedestrians + 5: [153, 153, 153], # Poles + 6: [157, 234, 50], # RoadLines + 7: [128, 64, 128], # Roads + 8: [244, 35, 232], # Sidewalks + 9: [107, 142, 35], # Vegetation + 10: [0, 0, 255], # Vehicles + 11: [102, 102, 156], # Walls + 12: [220, 220, 0], # TrafficSigns + 13: [70, 130, 180], # Sky + 14: [81, 0, 81], # Ground + 15: [150, 100, 100], # Bridge + 16: [230, 150, 140], # RailTrack + 17: [180, 165, 180], # All types of guard rails/crash barriers. 
+ 18: [250, 170, 30], # Traffic Light + 19: [110, 190, 160], # Static + 20: [170, 120, 50], # Dynamic + 21: [45, 60, 150], # Water + 22: [145, 170, 100] # Terrain + } +from matplotlib import pyplot as plt +def viz(pcd, path=None): + o3d_pcd = o3d.geometry.PointCloud() + o3d_pcd.points = o3d.utility.Vector3dVector(pcd[:, :3]) + + int_color = np.ones((pcd.shape[0], 3)) + int_color[:, 0] *= 0 #247 / 255 + int_color[:, 1] *= 0 #244 / 255 + int_color[:, 2] *= 1 # 237 / 255 + o3d_pcd.colors = o3d.utility.Vector3dVector(int_color) + + + # if path: + # vis = o3d.visualization.Visualizer() + # vis.create_window(visible = False) + # vis.add_geometry(o3d_pcd) + # img = vis.capture_screen_float_buffer(True) + # plt.imsave(path, img) #plt.imshow(np.asarray(img)) + + # vis = o3d.visualization.Visualizer() + # vis.create_window(visible=False) #works for me with False, on some systems needs to be true + # vis.add_geometry(o3d_pcd) # vis.update_geometry(your_mesh) + # vis.poll_events() + # vis.update_renderer() + # vis.capture_screen_image(path) + # vis.destroy_window() + + # print('saved', path) + # else: + o3d.visualization.draw_geometries([o3d_pcd]) + +def eval(scene_dir, scene_name, pitch): + print(scene_dir.split('/')[-1], scene_name) + + # scene_dir = f'/media/hdd1/opv2v/opencda_dump/test_livox_6-20_metric2/' + # scene_name = '2021_08_23_12_58_19' + root_dir = f'{scene_dir}/{scene_name}/' + ply_file = '/'+[f for f in sorted(os.listdir(root_dir+'-1')) if f.endswith('.ply')][0] + pitch = int(pitch) + + lidar_range = get_range(scene_name) + target_field = get_target_field(lidar_range) #[92, 112, 80, 110] + center = get_center(lidar_range) + + # ---------test------------ + index = 1 + d = 800 # hyper parameter + disk_radius = 0.7 + area = (target_field[1] - target_field[0]) * (target_field[3] - target_field[2]) + p = math.pi * (disk_radius**2) / area + + pcd_list = [] + + for index in range(-4, 0): + + file = root_dir + str(index) + ply_file + lidar_pose = get_lidar_pose(scene_name, str(index)) + lidar_pose[3] = pitch; lidar_pose = lidar_pose[:3] + list(reversed(lidar_pose[3:])) + trans_matrix = x1_to_x2(lidar_pose, [0]*6) + + pcd, obj_tag = read_pcd(file) + pcd = semantic_filter_mat(pcd, obj_tag) + pcd = project_points_by_matrix_torch(torch.Tensor(pcd), torch.Tensor(trans_matrix)).numpy() + pcd_list.append(pcd) + + origin_lidar = np.vstack(pcd_list) + point_list = area_filter(origin_lidar, target_field) + + try: + total_nums, NUC = computeNUC_mat(point_list, d, disk_radius, p) + print("total_nums:", total_nums, 'area', area)#, "\nNUC:", NUC) + print("density:", total_nums/area, "\nNUC:", NUC, '\n') + viz(point_list) #, root_dir+'-1/pcd.png') + except ValueError: + print('point number == 0') + +def main(): + + argparser = argparse.ArgumentParser(description=__doc__) + argparser.add_argument('--dir',type=str) + args = argparser.parse_args() + + root_dir = args.dir + pitch = root_dir.split('_')[-1] + for scene in os.listdir(root_dir): + eval(root_dir, scene, pitch) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/opencda/scenario_testing/utils/sim_api.py b/opencda/scenario_testing/utils/sim_api.py index 5150b890..cefffa6c 100644 --- a/opencda/scenario_testing/utils/sim_api.py +++ b/opencda/scenario_testing/utils/sim_api.py @@ -162,7 +162,7 @@ class ScenarioManager: CAV World that contains the information of all CAVs. carla_map : carla.map - Car;a HD Map. + Carla HD Map. 
""" @@ -680,9 +680,7 @@ def create_traffic_carla(self): if isinstance(traffic_config['vehicle_list'], list) or \ isinstance(traffic_config['vehicle_list'], ListConfig): - bg_list = self.spawn_vehicles_by_list(tm, - traffic_config, - bg_list) + bg_list = self.spawn_vehicles_by_list(tm, traffic_config, bg_list) else: bg_list = self.spawn_vehicle_by_range(tm, traffic_config, bg_list) diff --git a/opencda/scenario_testing/utils/sim_api_replay.py b/opencda/scenario_testing/utils/sim_api_replay.py new file mode 100644 index 00000000..1ab5bc4d --- /dev/null +++ b/opencda/scenario_testing/utils/sim_api_replay.py @@ -0,0 +1,329 @@ + +import random +import sys +import json +from omegaconf import OmegaConf + +import carla +import numpy as np + +from opencda.core.common.vehicle_manager_replay import VehicleManagerReplay +from opencda.core.common.rsu_manager_replay import RSUManagerReplay, RSUManagerReplayPly +from opencda.scenario_testing.utils.customized_map_api import load_customized_world, bcolors +from opencda.scenario_testing.utils.sim_api import car_blueprint_filter, multi_class_vehicle_blueprint_filter, ScenarioManager + +class ScenarioManagerReplay(ScenarioManager): + + def __init__(self, scenario_params, + apply_ml, + carla_version, + xodr_path=None, + town=None, + cav_world=None): + self.scenario_params = scenario_params + self.carla_version = carla_version + + simulation_config = scenario_params['world'] + + # set random seed if stated + if 'seed' in simulation_config: + np.random.seed(simulation_config['seed']) + random.seed(simulation_config['seed']) + + self.client = \ + carla.Client('localhost', simulation_config['client_port']) + self.client.set_timeout(10.0) + + if xodr_path: + self.world = load_customized_world(xodr_path, self.client) + elif town: + try: + self.world = self.client.load_world(town) + except RuntimeError: + print( + f"{bcolors.FAIL} %s is not found in your CARLA repo! " + f"Please download all town maps to your CARLA " + f"repo!{bcolors.ENDC}" % town) + else: + self.world = self.client.get_world() + + if not self.world: + sys.exit('World loading failed') + + self.origin_settings = self.world.get_settings() + new_settings = self.world.get_settings() + + if simulation_config['sync_mode']: + new_settings.synchronous_mode = True + new_settings.fixed_delta_seconds = \ + simulation_config['fixed_delta_seconds'] + else: + sys.exit( + 'ERROR: Current version only supports sync simulation mode') + + self.world.apply_settings(new_settings) + + # set weather + weather = self.set_weather(simulation_config['weather']) + self.world.set_weather(weather) + + # Define probabilities for each type of blueprint + self.use_multi_class_bp = scenario_params["blueprint"][ + 'use_multi_class_bp'] if 'blueprint' in scenario_params else False + if self.use_multi_class_bp: + # bbx/blueprint meta + with open(scenario_params['blueprint']['bp_meta_path']) as f: + self.bp_meta = json.load(f) + self.bp_class_sample_prob = scenario_params['blueprint'][ + 'bp_class_sample_prob'] + + # normalize probability + self.bp_class_sample_prob = { + k: v / sum(self.bp_class_sample_prob.values()) for k, v in + self.bp_class_sample_prob.items()} + + self.cav_world = cav_world + self.carla_map = self.world.get_map() + self.apply_ml = apply_ml + + def create_vehicle_manager(self, cav_positions, cur_timestamp, application, + map_helper=None, + data_dump=False): + print('Creating single CAVs.') + # By default, we use lincoln as our cav model. 
+ default_model = 'vehicle.lincoln.mkz2017' \ + if self.carla_version == '0.9.11' else 'vehicle.lincoln.mkz_2017' + + cav_vehicle_bp = \ + self.world.get_blueprint_library().find(default_model) + single_cav_dict = {} + + cav_configs = self.scenario_params['scenario']['single_cav_list'] + + for i, (cav_id, cav_pos) in enumerate(cav_positions.items()): + + cav_config = cav_configs[i] + + # in case the cav wants to join a platoon later + # it will be empty dictionary for single cav application + platoon_base = OmegaConf.create({'platoon': self.scenario_params.get('platoon_base',{})}) + cav_config = OmegaConf.merge(self.scenario_params['vehicle_base'], + platoon_base, + cav_config) + # if the spawn position is a single scalar, we need to use map + # helper to transfer to spawn transform + if 'spawn_special' not in cav_config: + spawn_transform = carla.Transform( + carla.Location( + x=cav_pos[0], + y=cav_pos[1], + z=cav_pos[2]), + carla.Rotation( + pitch=cav_pos[5], + yaw=cav_pos[4], + roll=cav_pos[3])) + else: + spawn_transform = map_helper(self.carla_version, *cav_config['spawn_special']) + + cav_vehicle_bp.set_attribute('color', '0, 0, 255') + + vehicle = self.world.try_spawn_actor(cav_vehicle_bp, spawn_transform) + while not vehicle: + spawn_transform.location.z += 0.01 + vehicle = self.world.try_spawn_actor(cav_vehicle_bp, spawn_transform) + + # create vehicle manager for each cav + data_dump_path = self.scenario_params['output_dir'] if data_dump else None + vehicle_manager = VehicleManagerReplay( + vehicle, cav_config, application, + self.carla_map, self.cav_world, + current_time=self.scenario_params['current_time'], + data_dumping=data_dump_path) + + self.world.tick() + + single_cav_dict[cav_id] = {'actor': vehicle_manager, 'time': cur_timestamp} + + return single_cav_dict + + def create_rsu_manager(self, data_dump): + """ + Create a list of RSU. + + Parameters + ---------- + data_dump : bool + Whether to dump sensor data. + + Returns + ------- + rsu_list : list + A list contains all rsu managers.. 
+ """ + print('Creating RSU.') + rsu_list = [] + + data_dump_path = self.scenario_params['output_dir'] if data_dump else None + for i, rsu_config in enumerate(self.scenario_params['scenario']['rsu_list'][:-1]): + rsu_config = OmegaConf.merge(self.scenario_params['rsu_base'], + rsu_config) + rsu_manager = RSUManagerReplay(self.world, rsu_config, + self.carla_map, + self.cav_world, + self.scenario_params['current_time'], + data_dumping=data_dump_path) + + rsu_list.append(rsu_manager) + + return rsu_list + + def spawn_vehicles_by_dict(self, traffic_config, bg_veh_pos, cur_timestamp): + + blueprint_library = self.world.get_blueprint_library() + if not self.use_multi_class_bp: + ego_vehicle_random_list = car_blueprint_filter(blueprint_library, + self.carla_version) + else: + label_list = list(self.bp_class_sample_prob.keys()) + prob = [self.bp_class_sample_prob[itm] for itm in label_list] + + # if not random select, we always choose lincoln.mkz with green color + color = '0, 255, 0' + default_model = 'vehicle.lincoln.mkz2017' \ + if self.carla_version == '0.9.11' else 'vehicle.lincoln.mkz_2017' + ego_vehicle_bp = blueprint_library.find(default_model) + + bg_dict = {} + for i, (veh_id, veh_pos) in enumerate(bg_veh_pos.items()): + spawn_transform = carla.Transform( + carla.Location( + x=veh_pos[0], + y=veh_pos[1], + z=veh_pos[2]), + carla.Rotation( + pitch=veh_pos[5], + yaw=veh_pos[4], + roll=veh_pos[3])) + + if not traffic_config['random']: + ego_vehicle_bp.set_attribute('color', color) + + else: + # sample a bp from various classes + if self.use_multi_class_bp: + label = np.random.choice(label_list, p=prob) + # Given the label (class), find all associated blueprints in CARLA + ego_vehicle_random_list = multi_class_vehicle_blueprint_filter( + label, blueprint_library, self.bp_meta) + ego_vehicle_bp = random.choice(ego_vehicle_random_list) + + if ego_vehicle_bp.has_attribute("color"): + color = random.choice( + ego_vehicle_bp.get_attribute( + 'color').recommended_values) + ego_vehicle_bp.set_attribute('color', color) + + vehicle = self.world.try_spawn_actor(ego_vehicle_bp, spawn_transform) + + while not vehicle: + spawn_transform.location.z += 0.01 + vehicle = self.world.try_spawn_actor(ego_vehicle_bp, spawn_transform) + + bg_dict[veh_id] = {'actor': vehicle, 'time': cur_timestamp} + + return bg_dict + + def spawn_vehicles_by_dict_bp(self, traffic_config, bg_veh_pos, cur_timestamp, bg_veh_bp): + + blueprint_library = self.world.get_blueprint_library() + bg_dict = {} + + for i, (veh_id, veh_pos) in enumerate(bg_veh_pos.items()): + + bp = bg_veh_bp[veh_id] + ego_vehicle_bp = blueprint_library.find(bp['bp_id']) + ego_vehicle_bp.set_attribute('color', bp['color']) + + spawn_transform = carla.Transform( + carla.Location( + x=veh_pos[0], + y=veh_pos[1], + z=veh_pos[2]), + carla.Rotation( + pitch=veh_pos[5], + yaw=veh_pos[4], + roll=veh_pos[3])) + + vehicle = self.world.try_spawn_actor(ego_vehicle_bp, spawn_transform) + + while not vehicle: + spawn_transform.location.z += 0.01 + vehicle = self.world.try_spawn_actor(ego_vehicle_bp, spawn_transform) + + bg_dict[veh_id] = {'actor': vehicle, 'time': cur_timestamp} + + return bg_dict + + def move_vehicle(self, cav_content, cur_timestamp, transform): + """ + Move the + + Parameters + ---------- + veh_id : str + Vehicle's id + + cur_timestamp : str + Current timestamp + + transform : carla.Transform + Current pose/ + """ + # this represent this vehicle is already moved in this round + if cav_content['time'] == cur_timestamp: + return + + veh = 
cav_content['actor'] + if isinstance(veh, VehicleManagerReplay): + veh = veh.vehicle + elif isinstance(veh, carla.libcarla.Vehicle): + pass + else: + raise NotImplementedError(f'{type(veh)}') + + veh.set_transform(transform) + cav_content['time'] = cur_timestamp + + def create_traffic_carla(self, bg_veh_pos, cur_timestamp, bg_veh_bp): + + print('Spawning CARLA traffic flow.') + traffic_config = self.scenario_params['carla_traffic_manager'] + + if bg_veh_bp is None: + bg_dict = self.spawn_vehicles_by_dict(traffic_config, bg_veh_pos, cur_timestamp) + else: + bg_dict = self.spawn_vehicles_by_dict_bp(traffic_config, bg_veh_pos, cur_timestamp, bg_veh_bp) + + print('CARLA traffic flow generated.') + return bg_dict + +class ScenarioManagerReplayPly(ScenarioManagerReplay): + + def create_rsu_manager(self, data_dump): + + print('Creating RSU.') + rsu_list = [] + + data_dump_path = self.scenario_params['output_dir'] if data_dump else None + for i, rsu_config in enumerate(self.scenario_params['scenario']['rsu_list'][:-1]): + rsu_config = OmegaConf.merge(self.scenario_params['rsu_base'], + rsu_config) + rsu_manager = RSUManagerReplayPly(self.world, rsu_config, + self.carla_map, + self.cav_world, + self.scenario_params['current_time'], + data_dumping=data_dump_path) + + rsu_list.append(rsu_manager) + + return rsu_list \ No newline at end of file diff --git a/opencda_replay.py b/opencda_replay.py new file mode 100644 index 00000000..fc0a2f2c --- /dev/null +++ b/opencda_replay.py @@ -0,0 +1,171 @@ +""" +enables exact replay of scenario recordings, as detailed as each vehicle's model and color, and rsu placement +""" + +import argparse +import importlib +import os, shutil +import sys +from omegaconf import OmegaConf +import json +from utils.check_missing import create_rsu_list, update_rsus, check_missing_cavs +from opencda.version import __version__ +from opencda.core.common.misc import TOWN_DICTIONARY + +def arg_parse(): + + parser = argparse.ArgumentParser(description="OpenCDA scenario runner.") + + parser.add_argument('-t', "--test_scenario", required=True, type=str, + help='Define the name of the scenario you want to test. The given name must' + 'match one of the testing scripts(e.g. single_2lanefree_carla) in ' + 'opencda/scenario_testing/ folder' + ' as well as the corresponding yaml file in opencda/scenario_testing/config_yaml.') + parser.add_argument("--record", action='store_true', + help='whether to record and save the simulation process to .log file') + parser.add_argument("--apply_ml", + action='store_true', + help='whether ml/dl framework such as sklearn/pytorch is needed in the testing. 
' + 'Set it to true only when you have installed the pytorch/sklearn package.') + parser.add_argument('-v', "--version", type=str, default='0.9.12', + help='Specify the CARLA simulator version, default' + 'is 0.9.11, 0.9.12 is also supported.') + + parser.add_argument('-r', "--root_dir", type=str, + help='base data to replay on') + parser.add_argument('-o', "--output_dir", type=str) + parser.add_argument('-b', "--base_dataset", action='store_true', + help='whether replaying on original OPV2V dataset') + + opt = parser.parse_args() + return opt + +def main(): + + opt = arg_parse() + + print("OpenCDA Version: %s" % __version__) + + default_yaml = config_yaml = os.path.join( + os.path.dirname(os.path.realpath(__file__)), + 'opencda/scenario_testing/config_yaml/default.yaml') + config_yaml = os.path.join(os.path.dirname(os.path.realpath(__file__)), + 'opencda/scenario_testing/config_yaml/%s.yaml' % opt.test_scenario) + + default_dict = OmegaConf.load(default_yaml) + config_dict = OmegaConf.load(config_yaml) + + print('root_dir', opt.root_dir) + print('output_dir', opt.output_dir) + + default_dict['root_dir'] = opt.root_dir # '/media/hdd1/opv2v/' + opt.root_dir + '/' + default_dict['output_dir'] = opt.output_dir # '/media/hdd1/opv2v/opencda_dump_test/' + opt.output_dir + '/' + default_dict['base_dataset_flag'] = opt.base_dataset + + target = check_missing_cavs(default_dict['root_dir'], default_dict['output_dir']) + + root_dir = default_dict['root_dir'] + dataset_dir = root_dir + + with open(default_dict['rsu_loc_dir'], 'r') as j: + rsu_configs = json.load(j) + + ''' + automate: + - desired adjustment config passed as arguments + - iteratively update corresponding carla sensor config parameters + ''' + pitch = int(opt.output_dir.split('_')[-1]) + update_rsus(rsu_configs, pitch) + target = [t for t in target if t in rsu_configs['scenes']] + + # future extension: allow dynamic input for lidar type + # lidar_configs = { + # 'type': 'livox', + # 'height': 6, + # 'pitch': pitch + # } + + # current simulation chooses 'Town05' map only + target_towns = ['Town05'] + scenario_folders = sorted([os.path.join(dataset_dir, x) + for x in os.listdir(dataset_dir) if + os.path.isdir(os.path.join(dataset_dir, x)) and TOWN_DICTIONARY[x] in target_towns]) + + for (i, scenario_folder) in enumerate(scenario_folders): + + scene_name = os.path.split(scenario_folder)[-1] + + if scene_name not in target: # filter scenes that belong to target_towns + print(f'{scene_name} not in target') + continue + + scenario_folder_root = scenario_folder.replace(dataset_dir, root_dir) + protocol_yml = [x for x in os.listdir(scenario_folder_root) if x.endswith('.yaml')] + scenario_folder_root_yaml = os.path.join(scenario_folder_root,protocol_yml[0]) + scene_dict = OmegaConf.load(scenario_folder_root_yaml) + + scene_dict = OmegaConf.merge(default_dict, scene_dict) + # config_dict has highest priority, hence last merge + scene_dict = OmegaConf.merge(scene_dict, config_dict) + + scenario_folder_output = os.path.join(default_dict['output_dir'], scene_name) + + if not os.path.exists(scenario_folder_output): + # 'data_protocol.yaml' is needed for each replay recording + os.makedirs(scenario_folder_output) + yaml_file = os.path.join(scenario_folder_output, 'data_protocol.yaml') + if not os.path.exists(yaml_file): + shutil.copy(scenario_folder_root_yaml, yaml_file) + else: + cav_dirs = [d for d in os.listdir(scenario_folder_output) + if os.path.isdir(os.path.join(scenario_folder_output, d)) and int(d) > 0] + + if 'metric' in opt.test_scenario: + 
# logreplay_metric only records rsu lidars, so existing vehicle recordings are unaffected
+                pass
+            else:
+                print('existing vehicle cav dirs from a previous simulation')
+                del_flag = input('delete files (y/n)').strip().lower()
+                assert del_flag in ['y', 'n']
+                if del_flag == 'y':
+                    for d in cav_dirs:
+                        print('removed', d)
+                        shutil.rmtree(os.path.join(scenario_folder_output, d))
+
+        # if scene_name not in rsu_configs['scenes']: # debug
+        #     print(f'{scene_name} not in rsu_configs[scenes]')
+        #     continue
+
+        rsu_locations = rsu_configs['locations'][str(rsu_configs['scenes'][scene_name])]
+        rsu_list = create_rsu_list(rsu_locations)
+
+        # if len(rsu_list) < 4: # debug
+        #     print(f'{scene_name} len(rsu_list)={len(rsu_list)} < 4')
+        #     continue
+
+        print('scene_name', scene_name)
+
+        scene_dict = OmegaConf.merge(scene_dict, {'current_time': scene_name,
+                                                  'scenario': {'rsu_list': rsu_list}})
+
+        # import the testing script
+        testing_scenario = importlib.import_module("opencda.scenario_testing.%s" % opt.test_scenario)
+
+        # check if the yaml file for the specific testing scenario exists
+        if not os.path.isfile(config_yaml):
+            sys.exit("opencda/scenario_testing/config_yaml/%s.yaml not found!" % opt.test_scenario)
+
+        # get the function for running the scenario from the testing script
+        scenario_runner = getattr(testing_scenario, 'run_scenario')
+
+        # run the scenario testing
+        scenario_runner(opt, scene_dict)
+
+        break ### only one scene is replayed per run; rerun to continue with the next
+
+if __name__ == '__main__':
+    try:
+        main()
+    except KeyboardInterrupt:
+        print(' - Exited by user.')
\ No newline at end of file
diff --git a/utils/ComputeDNUC.py b/utils/ComputeDNUC.py
new file mode 100644
index 00000000..ac13125a
--- /dev/null
+++ b/utils/ComputeDNUC.py
@@ -0,0 +1,479 @@
+import shutil, random, math, os, json, argparse
+import numpy as np
+
+def is_number(s):
+    try:
+        float(s)
+        return True
+    except ValueError:
+        pass
+    try:
+        import unicodedata
+        unicodedata.numeric(s)
+        return True
+    except (TypeError, ValueError):
+        pass
+    return False
+
+def read_pcd(file):
+    # file_split = os.path.splitext(file)
+    pcd = []
+    obj_tag = []
+    with open(file, 'r') as r:
+        for line in r.readlines():
+            l = line.split(' ')
+
+            if not is_number(l[0]):
+                continue
+
+            pcd.append(list(map(float, l[:3])))
+            obj_tag.append(int(l[5].strip()))
+    return np.array(pcd), np.array(obj_tag)
+
+import torch
+import torch.nn.functional as F
+
+def project_points_by_matrix_torch(points, transformation_matrix):
+    # convert to homogeneous coordinates via padding 1 at the last dimension.
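+    # note: einsum('ik, jk->ij', points_h, T) equals points_h @ T.T, i.e. the
+    # 4x4 transform applied to every (row-vector) point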
+ # (N, 4) + points_homogeneous = F.pad(points, (0, 1), mode="constant", value=1) + # (N, 4) + projected_points = torch.einsum("ik, jk->ij", points_homogeneous, + transformation_matrix) + return projected_points[:, :3] + +def pcd_coordinate_convertor(file, trans_mat): + file_split = os.path.splitext(file) + with open(file, 'r') as r: + # with open(file+'.new', 'w') as w: + with open(file_split[0]+'_conv'+file_split[1], 'w') as w: + for l in r.readlines(): + # print(l.split(' ')[0]) + if is_number(l.split(' ')[0]): + #print(l) + + # loc = np.matrix([float(l[0]), float(l[1]), float(l[2]), 1.0]) + x = float(l.split(' ')[0]) + y = float(l.split(' ')[1]) + z = float(l.split(' ')[2]) + loc = np.matrix([x, y, z, 1.0]) + + loc_target = np.dot(trans_mat, loc.T) + loc_target = loc_target.tolist() + # print(loc_target) + + l_new_list = [0, 0, 0, 0, 0, 0] + + l_new_list[0] = str(loc_target[0][0]) + l_new_list[1] = str(loc_target[1][0]) + l_new_list[2] = str(loc_target[2][0]) + l_new_list[3] = l.split(' ')[3] + l_new_list[4] = l.split(' ')[4] + l_new_list[5] = l.split(' ')[5] + l_new = ' '.join(l_new_list) + w.write(l_new) + + else: + w.write(l) + print('converted', file_split[0]+'_conv'+file_split[1]) + # if 'nan' not in l: + # w.write(l) + # shutil.move(file+'.new', file) +random.seed(123) +def computeNUC(file, d, disk_radius, p, target_field): + ''' + file -- semantic ply file path + d -- the number of disk + disk_radius -- the radius of disk + p -- disk area percentage + ''' + point_list = semantic_filter(file, target_field) + total_point = len(point_list) # total number of point + random_d_index = random.sample(range(total_point), d) # choose d indexes from total_point randomly, d must be smaller than total_point + total_nums_in_disk = 0 + viz(np.array(point_list)) ### + + tmp_val = [] # n_i / (N * p) + for index in random_d_index: + nums_in_index = nums_in_disk(point_list, index, disk_radius) # compute the point number in index_th disk + total_nums_in_disk = total_nums_in_disk + nums_in_index + tmp_val.append(nums_in_index / (total_point *p)) + + avg = total_nums_in_disk / (d * total_point * p) + + nuc = 0 + for val in tmp_val: + nuc = nuc + math.pow(val - avg, 2) + nuc = math.sqrt(nuc / d) + + return total_point, nuc + +def nums_in_disk(point_list, point_index, disk_radius): + disk_center_point = point_list[point_index] + d_x = disk_center_point[0] + d_y = disk_center_point[1] + # d_z = disk_center_point[2] + point_nums = 0 + for point in point_list: + p_x = point[0] + p_y = point[1] + # p_z = point[2] + dist = math.sqrt(math.pow(d_x - p_x, 2) + math.pow(d_y - p_y, 2)) + if dist <= disk_radius: + point_nums = point_nums + 1 + return point_nums + +def nums_in_disk_mat(point_list, point_index, disk_radius): + point_list = np.array(point_list) + disk_center_point = point_list[point_index] + + dist = np.linalg.norm(point_list[:,:2]-disk_center_point[:2], axis=1) + point_nums = (dist <= disk_radius).sum() + return point_nums + +def semantic_filter_mat(pcd, obj_tag): + target_tags = np.array([7, 8]) # ['road', 'sidewalk'] + semantic_mask = np.in1d(obj_tag, target_tags) + return pcd[semantic_mask] + +def area_filter(pcd, target_field): + lidar_maks = (target_field[0] < pcd[:,0]) & (pcd[:,0] < target_field[1]) & (target_field[2] < pcd[:,1]) & (pcd[:,1] < target_field[3]) + return pcd[lidar_maks] + +def semantic_filter(file, target_field): + target_tag_1 = 7 # 7 means Road + target_tag_2 = 8 # 8 means SideWalk + filtered_list = [] + + file_split = os.path.splitext(file) + file = 
file_split[0]+'_conv'+file_split[1] + + with open(file, 'r') as r: + with open(file+'.new', 'w') as w: + for l in r.readlines(): + # print(l.split(' ')[0]) + + if is_number(l.split(' ')[0]): + if int(l.split(' ')[5]) != target_tag_1 and int(l.split(' ')[5]) != target_tag_2: + continue + l_x = float(l.split(' ')[0]) + l_y = float(l.split(' ')[1]) + if (l_x < target_field[0]) or (l_x > target_field[1]) or (l_y < target_field[2]) or (l_y > target_field[3]): + continue + l_new_list = [l.split(' ')[0], + l.split(' ')[1], + l.split(' ')[2], + l.split(' ')[3], + l.split(' ')[4], + l.split(' ')[5]] + # l_new_list[2] = l.split(' ')[2] + # l_new_list[3] = l.split(' ')[3] + # l_new_list[4] = l.split(' ')[4] + # l_new_list[5] = l.split(' ')[5] + + pos_list = [float(l.split(' ')[0]), float(l.split(' ')[1]), float(l.split(' ')[2])] # save (x, y, z) + filtered_list.append(pos_list) + + l_new = ' '.join(l_new_list) + w.write(l_new) + + else: + w.write(l) + + with open(file+'.new', 'r') as rd: + with open(file + '.filtered', 'w') as wt: + for i, line in enumerate(rd.readlines()): + if i == 2: + line_new = [] + line_new.append(line.split(' ')[0]) + line_new.append(line.split(' ')[1]) + line_new.append(str(len(filtered_list))) + write_str = ' '.join(line_new) + '\n' + wt.write(write_str) + else: + wt.write(line) + os.remove(file+'.new') + new_name = file[:-4] + '_filtered.ply' + shutil.move(file + '.filtered', new_name) + print('filtered', new_name) + return filtered_list + +# def computeNUC_mat(pcd, obj_tag, d, disk_radius, p, target_field): +def computeNUC_mat(point_list, d, disk_radius, p): + ''' + file -- semantic ply file path + d -- the number of disk + disk_radius -- the radius of disk + p -- disk area percentage + ''' + # point_list = semantic_filter_mat(pcd, obj_tag, target_field) + total_point = len(point_list) # total number of point + random_d_index = random.sample(range(total_point), d) # choose d indexes from total_point randomly, d must be smaller than total_point + total_nums_in_disk = 0 + + tmp_val = [] # n_i / (N * p) + for index in random_d_index: + nums_in_index = nums_in_disk_mat(point_list, index, disk_radius) # compute the point number in index_th disk + total_nums_in_disk = total_nums_in_disk + nums_in_index + tmp_val.append(nums_in_index) + + avg = total_nums_in_disk / (d * total_point * p) + + tmp_val = np.array(tmp_val) / (total_point *p) + nuc = math.sqrt(((tmp_val - avg)**2).sum() / d) + + return total_point, nuc + +# from opencood.utils.transformation_utils import x1_to_x2 +def x_to_world(pose): + + x, y, z, roll, yaw, pitch = pose[:] + + # used for rotation matrix + c_y = np.cos(np.radians(yaw)) + s_y = np.sin(np.radians(yaw)) + c_r = np.cos(np.radians(roll)) + s_r = np.sin(np.radians(roll)) + c_p = np.cos(np.radians(pitch)) + s_p = np.sin(np.radians(pitch)) + + matrix = np.identity(4) + # translation matrix + matrix[0, 3] = x + matrix[1, 3] = y + matrix[2, 3] = z + + # rotation matrix + matrix[0, 0] = c_p * c_y + matrix[0, 1] = c_y * s_p * s_r - s_y * c_r + matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r + matrix[1, 0] = s_y * c_p + matrix[1, 1] = s_y * s_p * s_r + c_y * c_r + matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r + matrix[2, 0] = s_p + matrix[2, 1] = -c_p * s_r + matrix[2, 2] = c_p * c_r + + return matrix +def x1_to_x2(x1, x2): + + x1_to_world = x_to_world(x1) + x2_to_world = x_to_world(x2) + world_to_x2 = np.linalg.inv(x2_to_world) + + transformation_matrix = np.dot(world_to_x2, x1_to_world) + return transformation_matrix + +BOUND = 10 +rsu_loc_dir = 
'/media/hdd1/OpenCOOD/logreplay/scenario/rsu.json' +with open(rsu_loc_dir, 'r') as j: + rsu_configs = json.load(j) +lidar_range = {} +def get_range(scenario_folder): + # if '/' in scenario_folder: + # scenario_folder = os.path.basename(scenario_folder) + # if scenario_folder in lidar_range: + # return lidar_range[scenario_folder] + coors = rsu_configs['locations'][str(rsu_configs['scenes'][scenario_folder])] + coor_x = [c[0] for c in coors.values()] + coor_y = [c[1] for c in coors.values()] + # GT_RANGE = [-140, -40, -3, 140, 40, 1] + r = (min(coor_x)-BOUND, min(coor_y)-BOUND, -3, max(coor_x)+BOUND, max(coor_y)+BOUND, 1) + # lidar_range[scenario_folder] = r + # return lidar_range[scenario_folder] + return r +def get_target_field(r): + return r[0], r[3], r[1], r[4] +def get_center(r): + return [(r[0]+r[3])/2, (r[1]+r[4])/2] + [0]*4 +def get_lidar_pose(scenario_folder, id): + if int(id) < 0: + id = str(-int(id)-1) + coors = rsu_configs['locations'][str(rsu_configs['scenes'][scenario_folder])] + return coors[id] + +import open3d as o3d +colors = { + 0: [0, 0, 0], # None + 1: [70, 70, 70], # Buildings + 2: [190, 153, 153], # Fences + 3: [72, 0, 90], # Other + 4: [220, 20, 60], # Pedestrians + 5: [153, 153, 153], # Poles + 6: [157, 234, 50], # RoadLines + 7: [128, 64, 128], # Roads + 8: [244, 35, 232], # Sidewalks + 9: [107, 142, 35], # Vegetation + 10: [0, 0, 255], # Vehicles + 11: [102, 102, 156], # Walls + 12: [220, 220, 0], # TrafficSigns + 13: [70, 130, 180], # Sky + 14: [81, 0, 81], # Ground + 15: [150, 100, 100], # Bridge + 16: [230, 150, 140], # RailTrack + 17: [180, 165, 180], # All types of guard rails/crash barriers. + 18: [250, 170, 30], # Traffic Light + 19: [110, 190, 160], # Static + 20: [170, 120, 50], # Dynamic + 21: [45, 60, 150], # Water + 22: [145, 170, 100] # Terrain + } +from matplotlib import pyplot as plt +def viz(pcd, path=None): + o3d_pcd = o3d.geometry.PointCloud() + o3d_pcd.points = o3d.utility.Vector3dVector(pcd[:, :3]) + + int_color = np.ones((pcd.shape[0], 3)) + int_color[:, 0] *= 0 #247 / 255 + int_color[:, 1] *= 0 #244 / 255 + int_color[:, 2] *= 1 # 237 / 255 + o3d_pcd.colors = o3d.utility.Vector3dVector(int_color) + + + if path: + # vis = o3d.visualization.Visualizer() + # vis.create_window(visible = False) + # vis.add_geometry(o3d_pcd) + # img = vis.capture_screen_float_buffer(True) + # plt.imsave(path, img) #plt.imshow(np.asarray(img)) + + vis = o3d.visualization.Visualizer() + vis.create_window(visible=False) #works for me with False, on some systems needs to be true + vis.add_geometry(o3d_pcd) # vis.update_geometry(your_mesh) + vis.poll_events() + vis.update_renderer() + vis.capture_screen_image(path) + vis.destroy_window() + + print('saved', path) + else: + o3d.visualization.draw_geometries([o3d_pcd]) + +# def viz(pcd, save_path='', mode='constant'): +# """ +# Visualize the prediction, groundtruth with point cloud together. + +# Parameters +# ---------- +# pred_tensor : torch.Tensor +# (N, 8, 3) prediction. + +# gt_tensor : torch.Tensor +# (N, 8, 3) groundtruth bbx + +# pcd : torch.Tensor +# PointCloud, (N, 4). + +# show_vis : bool +# Whether to show visualization. + +# save_path : str +# Save the visualization results to given path. + +# mode : str +# Color rendering mode. 
+# """ + +# def custom_draw_geometry(pcd): +# vis = o3d.visualization.Visualizer() +# vis.create_window() + +# opt = vis.get_render_option() +# opt.background_color = np.asarray([0, 0, 0]) +# opt.point_size = 1.0 + +# vis.add_geometry(pcd) + +# vis.run() +# vis.destroy_window() + +# if len(pcd.shape) == 3: +# pcd = pcd[0] +# origin_lidar = pcd +# # if not isinstance(pcd, np.ndarray): +# # origin_lidar = common_utils.torch_tensor_to_numpy(pcd) + +# # origin_lidar_intcolor = \ +# # color_encoding(origin_lidar[:, -1] if mode == 'intensity' +# # else origin_lidar[:, 2], mode=mode) +# # left -> right hand +# origin_lidar[:, :1] = -origin_lidar[:, :1] + +# o3d_pcd = o3d.geometry.PointCloud() +# o3d_pcd.points = o3d.utility.Vector3dVector(origin_lidar[:, :3]) +# # o3d_pcd.colors = o3d.utility.Vector3dVector(origin_lidar_intcolor) + +# # if pred_tensor is None: +# # pred_tensor = torch.empty((0,8,3)) +# # oabbs_pred = bbx2oabb(pred_tensor, color=(1, 0, 0)) +# # oabbs_gt = bbx2oabb(gt_tensor, color=(0, 1, 0)) + +# visualize_elements = [o3d_pcd]# + oabbs_pred + oabbs_gt +# custom_draw_geometry(o3d_pcd)#, oabbs_pred, oabbs_gt) +# # if save_path: +# # save_o3d_visualization(visualize_elements, save_path) + +def eval(scene_dir, scene_name, pitch): + print(scene_dir.split('/')[-2], scene_name) + + # scene_dir = f'/media/hdd1/opv2v/opencda_dump/test_livox_6-20_metric2/' + # scene_name = '2021_08_23_12_58_19' + root_dir = f'{scene_dir}/{scene_name}/' + ply_file = '/'+[f for f in sorted(os.listdir(root_dir+'-1')) if f.endswith('.ply')][0] + pitch = int(pitch) + + lidar_range = get_range(scene_name) + target_field = get_target_field(lidar_range) #[92, 112, 80, 110] + center = get_center(lidar_range) + + # ---------test------------ + index = 1 + # file = './7_placement.ply' + d = 800# 6000 + disk_radius = 0.7 + area = (target_field[1] - target_field[0]) * (target_field[3] - target_field[2]) + p = math.pi * (disk_radius**2) / area + + pcd_list = [] + + for index in range(-4, 0): + + file = root_dir + str(index) + ply_file + lidar_pose = get_lidar_pose(scene_name, str(index)) + lidar_pose[3] = pitch; lidar_pose = lidar_pose[:3] + list(reversed(lidar_pose[3:])) + trans_matrix = x1_to_x2(lidar_pose, [0]*6) + + # pcd_coordinate_convertor(file, trans_matrix) + # total_nums, NUC = computeNUC(file, d, disk_radius, p, target_field) + + pcd, obj_tag = read_pcd(file) + pcd = semantic_filter_mat(pcd, obj_tag) + pcd = project_points_by_matrix_torch(torch.Tensor(pcd), torch.Tensor(trans_matrix)).numpy() + pcd_list.append(pcd) + # viz(pcd) + + origin_lidar = np.vstack(pcd_list) + point_list = area_filter(origin_lidar, target_field) + + try: + total_nums, NUC = computeNUC_mat(point_list, d, disk_radius, p) + print("total_nums:", total_nums, 'area', area)#, "\nNUC:", NUC) + print("density:", total_nums/area, "\nNUC:", NUC, '\n') + # viz(origin_lidar) + viz(point_list)#, root_dir+'-1/pcd.png') + except ValueError: + print('point number == 0') + +def main(): + + # eval('/media/hdd1/opv2v/opencda_dump_test/metric_test_livox_6_-20', '2021_08_22_21_41_24', '-20') + # return + + argparser = argparse.ArgumentParser(description=__doc__) + argparser.add_argument('--dir',type=str) + args = argparser.parse_args() + + root_dir = args.dir + pitch = root_dir.split('_')[-1] + for scene in os.listdir(root_dir): + eval(root_dir, scene, pitch) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/utils/check_missing.py b/utils/check_missing.py new file mode 100644 index 00000000..091c3c70 --- /dev/null +++ 
b/utils/check_missing.py
@@ -0,0 +1,155 @@
+import os
+import glob
+import shutil
+
+def create_rsu_list(rsu_locations):
+    """
+    Filter and format the original RSU json data.
+
+    Parameters
+    ----------
+    rsu_locations : dict { int: tuple(x,y,z,p,y,r) }
+
+    Returns
+    -------
+    Formatted RSU locations: list [ dict{...} ]
+    """
+
+    rsu_list = []
+    index = 1
+
+    for key, l in rsu_locations.items():
+
+        if key.isnumeric():
+            rsu_list.append({
+                'name': f'rsu{index}',
+                'spawn_position': l,
+                'id': -index
+                }
+            )
+            index += 1
+
+        # elif key == 'center':
+        #     rsu_list.append({
+        #         'name': f'rsu0',
+        #         'spawn_position': l,
+        #         'id': 0
+        #         }
+        #     )
+        else:
+            if index == 1:
+                raise RuntimeError(f'rsu_locations is empty: {rsu_locations}')
+
+    return rsu_list
+
+def update_rsus(rsu_configs, pitch):
+    """
+    Modify the pitch entry of each RSU location in place.
+
+    Parameters
+    ----------
+    rsu_configs : dict # i.e. rsu.json
+
+    Returns
+    -------
+    Updated RSU location dict
+    """
+
+    for i, loc in rsu_configs['locations'].items():
+        for j, coor in loc.items():
+
+            if not isinstance(coor, list):
+                continue
+
+            coor[3] = pitch
+            rsu_configs['locations'][i][j] = coor
+
+def extract_timestamps(cav_dir, file_types=['.yaml','.pcd']):
+
+    """
+    Collect the timestamps recorded in the given directory for each file type
+    and return the smallest set; if a complete episode was recorded, all
+    counts should equal that of the base data.
+
+    Parameters
+    ----------
+    cav_dir : str # ./test_set/2021_08_22_21_41_24/123
+
+    Returns
+    -------
+    list # timestamp strings
+    """
+
+    def get_timestamps(target_dir, ext):
+
+        file_list = [f for f in os.listdir(target_dir) if f.endswith(ext)]
+        timestamps = []
+
+        for file in file_list:
+            res = file.split('/')[-1]
+            timestamp = res.replace(ext, '')
+            timestamps.append(timestamp)
+
+        return timestamps
+
+    num_timestamps = []
+    for t in file_types:
+        # files = [f for f in os.listdir(cav_dir) if f.endswith(t)]
+        ts = get_timestamps(cav_dir, t)
+        num_timestamps.append(ts)
+
+    return min(num_timestamps)
+
+def check_missing_cavs(root_dir, output_dir):
+    """
+    The Carla simulator occasionally crashes and exits with incomplete
+    episodes; this function detects scenes with missing frames/timestamps.
+
+    Iterates through each scene and its cav dirs to check for incomplete
+    recordings.
+
+    Parameters
+    ----------
+    root_dir : str
+    output_dir : str
+
+    Returns
+    -------
+    list # scene names with incomplete recordings
+    """
+
+    missing_scenes = []
+
+    for scene in sorted(os.listdir(root_dir)):
+
+        output_dir_scene = os.path.join(output_dir, scene)
+
+        # case 1: scene recording had not even started yet
+        if not os.path.exists(output_dir_scene):
+            missing_scenes.append(scene)
+            continue
+
+        output_cav = [d for d in sorted(os.listdir(output_dir_scene)) if os.path.isdir(os.path.join(output_dir_scene, d))]
+        # if os.path.isdir(...)
to filter 'data_protocol.yaml' file
+        # case 2: empty scene directory with 0 cavs
+        if len(output_cav) == 0:
+            missing_scenes.append(scene)
+            continue
+
+        root_dir_scene = os.path.join(root_dir, scene)
+        root_cav = [d for d in sorted(os.listdir(root_dir_scene)) if os.path.isdir(os.path.join(root_dir_scene, d))][0]
+        ts_root = extract_timestamps(os.path.join(root_dir_scene, root_cav))
+
+        for cav in output_cav:
+            output_dir_scene_cav = os.path.join(output_dir_scene, cav)
+            if not os.path.isdir(output_dir_scene_cav): # skip data_protocol.yaml
+                continue
+
+            ts = extract_timestamps(output_dir_scene_cav)
+            # ts = extract_timestamps_ply(os.path.join(livox_dir, scene, cav)) ###
+
+            # case 3: cav dir exists but its timestamp set differs from the base recording
+            if set(ts_root) != set(ts):
+                missing_scenes.append(scene)
+                break
+
+    return missing_scenes
diff --git a/utils/open3d_pcd.py b/utils/open3d_pcd.py
new file mode 100644
index 00000000..0f54652a
--- /dev/null
+++ b/utils/open3d_pcd.py
@@ -0,0 +1,24 @@
+import open3d as o3d
+import numpy as np
+import argparse
+
+def main():
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-p', "--path", required=True, type=str)
+    parser.add_argument('-f', "--flip", action='store_true')
+    opt = parser.parse_args()
+
+    # pcd_data = o3d.data.PCDPointCloud()
+    pcd = o3d.io.read_point_cloud(opt.path)
+    # print(pcd_data.path)
+    # print('pcd', pcd)
+    if opt.flip:
+        # mirror the x axis (left- vs right-handed coordinate conventions)
+        pts = np.asarray(pcd.points)
+        pts[:, 0] = -pts[:, 0]
+        pcd.points = o3d.utility.Vector3dVector(pts)
+        # print('points', pts, pts.shape)
+    o3d.visualization.draw_geometries([pcd])
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/utils/print_spectactor_coord.py b/utils/print_spectactor_coord.py
new file mode 100644
index 00000000..e0bf850b
--- /dev/null
+++ b/utils/print_spectactor_coord.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python

+# ==============================================================================
+# -- find carla module ---------------------------------------------------------
+# ==============================================================================


+import glob
+import os
+import sys
+import time

+try:
+    sys.path.append(glob.glob('./PythonAPI/carla/dist/carla-*%d.%d-%s.egg' % (
+        sys.version_info.major,
+        sys.version_info.minor,
+        'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
+except IndexError:
+    pass

+# ==============================================================================
+# -- imports -------------------------------------------------------------------
+# ==============================================================================


+import carla

+_HOST_ = '127.0.0.1'
+_PORT_ = 2000
+_SLEEP_TIME_ = 2

+def main():

+    client = carla.Client(_HOST_, _PORT_)
+    client.set_timeout(2.0)
+#    world = client.load_world("Town05")
+    world = client.get_world()
+    origin_settings = world.get_settings()

+    try:

+        # print(help(t))
+        # print("(x,y,z) = ({},{},{})".format(t.location.x, t.location.y,t.location.z))

+        # print_coord(world)
+        while True:
+            t = world.get_spectator().get_transform()
+            coordinate_str = "(x,y,z) = ({},{},{})".format(int(t.location.x), int(t.location.y), int(t.location.z))
+            rotation_str = "(p,y,r) = ({},{},{})".format(int(t.rotation.pitch), int(t.rotation.yaw), int(t.rotation.roll))
+            print(coordinate_str, '\t', rotation_str)
+            time.sleep(_SLEEP_TIME_)

+        # set_transform(new_transform)

+    # except KeyboardInterrupt:

+    #     pass

+    finally:

+        # for actor in world.get_actors():
+        #     actor.destroy()

+        world.apply_settings(origin_settings)

+if
__name__ == '__main__': + main() + + +
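+# usage sketch (assumes a CARLA server already listening on 127.0.0.1:2000):
+#   python utils/print_spectactor_coord.py
+# then fly the spectator camera around in the simulator window; the current
+# (x,y,z) location and (p,y,r) rotation are printed every _SLEEP_TIME_ seconds.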