From a027cbcf380df07a10e7b956990f79240e951dc1 Mon Sep 17 00:00:00 2001 From: Balakumar Sundaralingam Date: Mon, 5 Aug 2024 21:52:45 -0700 Subject: [PATCH] Minor fixes and geom module documentation --- CHANGELOG.md | 12 + examples/isaac_sim/motion_gen_reacher.py | 12 +- examples/isaac_sim/realsense_mpc.py | 3 +- examples/isaac_sim/realsense_reacher.py | 3 +- pyproject.toml | 2 + .../cuda_robot_model/cuda_robot_model.py | 60 +- src/curobo/curobolib/__init__.py | 7 +- src/curobo/curobolib/kinematics.py | 2 +- src/curobo/curobolib/ls.py | 2 +- src/curobo/curobolib/opt.py | 2 +- src/curobo/curobolib/tensor_step.py | 2 +- src/curobo/curobolib/util_file.py | 24 +- src/curobo/geom/__init__.py | 8 +- src/curobo/geom/cv.py | 63 +- src/curobo/geom/sdf/sdf_grid.py | 7 +- src/curobo/geom/sdf/utils.py | 22 +- src/curobo/geom/sdf/warp_primitives.py | 5 + src/curobo/geom/sdf/warp_sdf_fns.py | 4 + .../geom/sdf/warp_sdf_fns_deprecated.py | 4 + src/curobo/geom/sdf/world.py | 552 +++++++++++++++--- src/curobo/geom/sdf/world_blox.py | 386 +++++++++--- src/curobo/geom/sdf/world_mesh.py | 475 ++++++++++----- src/curobo/geom/sdf/world_voxel.py | 261 +++++++-- src/curobo/geom/sphere_fit.py | 177 ++++-- src/curobo/geom/transform.py | 289 +++++++-- src/curobo/geom/types.py | 505 ++++++++++++---- src/curobo/rollout/cost/dist_cost.py | 8 +- src/curobo/types/camera.py | 6 +- src/curobo/util/trajectory.py | 10 +- src/curobo/util/warp.py | 3 + src/curobo/util_file.py | 220 ++++++- src/curobo/wrap/model/robot_world.py | 6 +- src/curobo/wrap/reacher/motion_gen.py | 20 +- src/curobo/wrap/reacher/mpc.py | 109 ++++ tests/curobo_robot_world_model_test.py | 3 +- tests/mpc_test.py | 64 +- tests/world_config_test.py | 70 ++- 37 files changed, 2753 insertions(+), 655 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 17b461bd..3e3cf25c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,18 @@ its affiliates is strictly prohibited. --> # Changelog +## Latest Commit + +### New Features +- Add pose cost metric to MPC to allow for partial pose reaching. +- Update obstacle poses in cpu reference with an optional flag. + +### BugFixes & Misc. +- Fixed optimize_dt not being correctly set when motion gen is called in reactive mode. +- Add documentation for geom module. +- Add descriptive api for computing kinematics. +- Fix cv2 import order in isaac sim realsense examples. 
+ ## Version 0.7.4 ### Changes in Default Behavior diff --git a/examples/isaac_sim/motion_gen_reacher.py b/examples/isaac_sim/motion_gen_reacher.py index eaaa4bd0..d2818d77 100644 --- a/examples/isaac_sim/motion_gen_reacher.py +++ b/examples/isaac_sim/motion_gen_reacher.py @@ -209,6 +209,7 @@ def main(): trim_steps = None max_attempts = 4 interpolation_dt = 0.05 + enable_finetune_trajopt = True if args.reactive: trajopt_tsteps = 40 trajopt_dt = 0.04 @@ -216,6 +217,7 @@ def main(): max_attempts = 1 trim_steps = [1, None] interpolation_dt = trajopt_dt + enable_finetune_trajopt = False motion_gen_config = MotionGenConfig.load_from_robot_config( robot_cfg, world_cfg, @@ -231,8 +233,9 @@ def main(): trim_steps=trim_steps, ) motion_gen = MotionGen(motion_gen_config) - print("warming up...") - motion_gen.warmup(enable_graph=True, warmup_js_trajopt=False, parallel_finetune=True) + if not args.reactive: + print("warming up...") + motion_gen.warmup(enable_graph=True, warmup_js_trajopt=False) print("Curobo is Ready") @@ -242,9 +245,8 @@ def main(): enable_graph=False, enable_graph_attempt=2, max_attempts=max_attempts, - enable_finetune_trajopt=True, - parallel_finetune=True, - time_dilation_factor=0.5, + enable_finetune_trajopt=enable_finetune_trajopt, + time_dilation_factor=0.5 if not args.reactive else 1.0, ) usd_help.load_stage(my_world.stage) diff --git a/examples/isaac_sim/realsense_mpc.py b/examples/isaac_sim/realsense_mpc.py index c19fd8e0..269fc2b8 100644 --- a/examples/isaac_sim/realsense_mpc.py +++ b/examples/isaac_sim/realsense_mpc.py @@ -9,6 +9,7 @@ # its affiliates is strictly prohibited. # + try: # Third Party import isaacsim @@ -16,6 +17,7 @@ pass # Third Party +import cv2 import torch a = torch.zeros(4, device="cuda:0") @@ -31,7 +33,6 @@ } ) # Third Party -import cv2 import numpy as np import torch from matplotlib import cm diff --git a/examples/isaac_sim/realsense_reacher.py b/examples/isaac_sim/realsense_reacher.py index 5e8aa45f..960a9c0f 100644 --- a/examples/isaac_sim/realsense_reacher.py +++ b/examples/isaac_sim/realsense_reacher.py @@ -15,7 +15,9 @@ except ImportError: pass + # Third Party +import cv2 import torch a = torch.zeros(4, device="cuda:0") @@ -31,7 +33,6 @@ ) # Third Party -import cv2 import numpy as np import torch from matplotlib import cm diff --git a/pyproject.toml b/pyproject.toml index f91f8f44..5e61e4ca 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -65,3 +65,5 @@ omit = [ "src/curobo/rollout/cost/manipulability_cost.py", ] +[tool.interrogate] +ignore-regex = ["forward", "backward"] \ No newline at end of file diff --git a/src/curobo/cuda_robot_model/cuda_robot_model.py b/src/curobo/cuda_robot_model/cuda_robot_model.py index ec0d20db..3f9b1bbf 100644 --- a/src/curobo/cuda_robot_model/cuda_robot_model.py +++ b/src/curobo/cuda_robot_model/cuda_robot_model.py @@ -505,6 +505,49 @@ def compute_kinematics( return self.get_state(js.position, link_name, calculate_jacobian) + def compute_kinematics_from_joint_state( + self, js: JointState, link_name: Optional[str] = None, calculate_jacobian: bool = False + ) -> CudaRobotModelState: + """Compute forward kinematics of the robot. + + Args: + js: Joint state of robot. + link_name: Name of link to return pose of. If None, returns end-effector pose. + calculate_jacobian: Calculate jacobian of the robot. Not supported. + + + Returns: + CudaRobotModelState: Kinematic state of the robot. 
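+
+        Example (a minimal sketch; assumes ``robot_model`` is an already initialized
+        :class:`CudaRobotModel` and uses a zero joint configuration as a stand-in)::
+
+            # from curobo.types.state import JointState
+            q = torch.zeros((1, robot_model.get_dof()), device=robot_model.tensor_args.device)
+            js = JointState.from_position(q, joint_names=robot_model.joint_names)
+            state = robot_model.compute_kinematics_from_joint_state(js)
+            ee_position, ee_quaternion = state.ee_position, state.ee_quaternion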
+ + """ + if js.joint_names is not None: + if js.joint_names != self.kinematics_config.joint_names: + log_error("Joint names do not match, reoder joints before forward kinematics") + + return self.get_state(js.position, link_name, calculate_jacobian) + + def compute_kinematics_from_joint_position( + self, + joint_position: torch.Tensor, + link_name: Optional[str] = None, + calculate_jacobian: bool = False, + ) -> CudaRobotModelState: + """Compute forward kinematics of the robot. + + Args: + joint_position: Joint position of robot. Assumed to only contain active joints in the + order specified in :attr:`CudaRobotModel.joint_names`. + link_name: Name of link to return pose of. If None, returns end-effector pose. + calculate_jacobian: Calculate jacobian of the robot. Not supported. + + + Returns: + CudaRobotModelState: Kinematic state of the robot. + + """ + + return self.get_state(joint_position, link_name, calculate_jacobian) + def get_robot_link_meshes(self) -> List[Mesh]: """Get meshes of all links of the robot. @@ -578,12 +621,12 @@ def get_robot_as_spheres(self, q: torch.Tensor, filter_valid: bool = True) -> Li def get_link_poses(self, q: torch.Tensor, link_names: List[str]) -> Pose: """Get Pose of links at given joint configuration q using forward kinematics. - Note that only the links specified in :var:`~link_names` are returned. + Note that only the links specified in :class:`~CudaRobotModelConfig.link_names` are returned. Args: q: Joint configuration of the robot, shape should be [batch_size, dof]. link_names: Names of links to get pose of. This should be a subset of - :var:`~link_names`. + :class:`~CudaRobotModelConfig.link_names`. Returns: Pose: Poses of links at given joint configuration. @@ -839,6 +882,19 @@ def attach_external_objects_to_robot( return True + def get_active_js(self, full_js: JointState): + """Get joint state of active joints of the robot. + + Args: + full_js: Joint state of all joints. + + Returns: + JointState: Joint state of active joints. + """ + active_jnames = self.joint_names + out_js = full_js.get_ordered_joint_state(active_jnames) + return out_js + @property def ee_link(self) -> str: """End-effector link of the robot. Changing requires reinitializing this class.""" diff --git a/src/curobo/curobolib/__init__.py b/src/curobo/curobolib/__init__.py index d9f07477..0fc865d2 100644 --- a/src/curobo/curobolib/__init__.py +++ b/src/curobo/curobolib/__init__.py @@ -8,4 +8,9 @@ # without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. # -"""CuRoboLib Module.""" +""" +cuRoboLib module contains CUDA implementations (kernels) of robotics algorithms, wrapped in +C++, and compiled with PyTorch for use in Python. + +All implementations are in ``.cu`` files in ``cpp`` sub-directory. 
+""" diff --git a/src/curobo/curobolib/kinematics.py b/src/curobo/curobolib/kinematics.py index 71c20958..1c7927a2 100644 --- a/src/curobo/curobolib/kinematics.py +++ b/src/curobo/curobolib/kinematics.py @@ -24,7 +24,7 @@ from torch.utils.cpp_extension import load # CuRobo - from curobo.curobolib.util_file import add_cpp_path + from curobo.util_file import add_cpp_path kinematics_fused_cu = load( name="kinematics_fused_cu", diff --git a/src/curobo/curobolib/ls.py b/src/curobo/curobolib/ls.py index 5a9b678e..4743ef31 100644 --- a/src/curobo/curobolib/ls.py +++ b/src/curobo/curobolib/ls.py @@ -24,7 +24,7 @@ from torch.utils.cpp_extension import load # CuRobo - from curobo.curobolib.util_file import add_cpp_path + from curobo.util_file import add_cpp_path line_search_cu = load( name="line_search_cu", diff --git a/src/curobo/curobolib/opt.py b/src/curobo/curobolib/opt.py index 0be088fe..9140c4fd 100644 --- a/src/curobo/curobolib/opt.py +++ b/src/curobo/curobolib/opt.py @@ -24,7 +24,7 @@ from torch.utils.cpp_extension import load # CuRobo - from curobo.curobolib.util_file import add_cpp_path + from curobo.util_file import add_cpp_path lbfgs_step_cu = load( name="lbfgs_step_cu", diff --git a/src/curobo/curobolib/tensor_step.py b/src/curobo/curobolib/tensor_step.py index f97624a4..fdf44c00 100644 --- a/src/curobo/curobolib/tensor_step.py +++ b/src/curobo/curobolib/tensor_step.py @@ -23,7 +23,7 @@ from torch.utils.cpp_extension import load # CuRobo - from curobo.curobolib.util_file import add_cpp_path + from curobo.util_file import add_cpp_path log_warn("tensor_step_cu not found, jit compiling...") tensor_step_cu = load( diff --git a/src/curobo/curobolib/util_file.py b/src/curobo/curobolib/util_file.py index 9187d986..2651a66d 100644 --- a/src/curobo/curobolib/util_file.py +++ b/src/curobo/curobolib/util_file.py @@ -8,27 +8,9 @@ # without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. # - +"""Deprecated, use functions from :mod:`curobo.util_file` instead.""" # Standard Library import os - -def get_cpp_path(): - path = os.path.dirname(__file__) - return os.path.join(path, "cpp") - - -def join_path(path1, path2): - if isinstance(path2, str): - return os.path.join(path1, path2) - else: - return path2 - - -def add_cpp_path(sources): - cpp_path = get_cpp_path() - new_list = [] - for s in sources: - s = join_path(cpp_path, s) - new_list.append(s) - return new_list +# CuRobo +from curobo.util_file import add_cpp_path, get_cpp_path, join_path diff --git a/src/curobo/geom/__init__.py b/src/curobo/geom/__init__.py index e5253f1f..9f83c114 100644 --- a/src/curobo/geom/__init__.py +++ b/src/curobo/geom/__init__.py @@ -10,8 +10,8 @@ # """ -This module contains code for geometric processing such as pointcloud processing, analytic signed -distance computation for the environment, and also signed distance computation between robot and the -environment. These functions can be used for robot self collision checking and also for robot -environment collision checking. +This module contains functions for geometric processing such as pointcloud processing, analytic +signed distance computation for the environment, and also signed distance computation between robot +and the environment. These functions can be used for robot self collision checking and also for +robot environment collision checking. 
""" diff --git a/src/curobo/geom/cv.py b/src/curobo/geom/cv.py index 8d0e0077..bf85f043 100644 --- a/src/curobo/geom/cv.py +++ b/src/curobo/geom/cv.py @@ -8,6 +8,7 @@ # without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. # +"""Computer Vision functions, including projection between depth and pointclouds.""" # Third Party import torch @@ -17,15 +18,18 @@ @get_torch_jit_decorator() -def project_depth_to_pointcloud(depth_image: torch.Tensor, intrinsics_matrix: torch.Tensor): - """Projects numpy depth image to point cloud. +def project_depth_to_pointcloud( + depth_image: torch.Tensor, + intrinsics_matrix: torch.Tensor, +) -> torch.Tensor: + """Projects depth image to point cloud. Args: - np_depth_image: numpy array float, shape (h, w). - intrinsics array: numpy array float, 3x3 intrinsics matrix. + depth_image: torch tensor of shape (b, h, w). + intrinsics array: torch tensor for intrinsics matrix of shape (b, 3, 3). Returns: - array of float (h, w, 3) + torch tensor of shape (b, h, w, 3) """ fx = intrinsics_matrix[0, 0] fy = intrinsics_matrix[1, 1] @@ -48,16 +52,21 @@ def project_depth_to_pointcloud(depth_image: torch.Tensor, intrinsics_matrix: to @get_torch_jit_decorator() def get_projection_rays( - height: int, width: int, intrinsics_matrix: torch.Tensor, depth_to_meter: float = 0.001 -): - """Projects numpy depth image to point cloud. + height: int, + width: int, + intrinsics_matrix: torch.Tensor, + depth_to_meter: float = 0.001, +) -> torch.Tensor: + """Get projection rays for a image size and batch of intrinsics matrices. Args: - np_depth_image: numpy array float, shape (h, w). - intrinsics array: numpy array float, 3x3 intrinsics matrix. + height: Height of the images. + width: Width of the images. + intrinsics_matrix: Batch of intrinsics matrices of shape (b, 3, 3). + depth_to_meter: Scaling factor to convert depth to meters. Returns: - array of float (h, w, 3) + torch.Tensor: Projection rays of shape (b, height * width, 3). """ fx = intrinsics_matrix[:, 0:1, 0:1] fy = intrinsics_matrix[:, 1:2, 1:2] @@ -92,15 +101,15 @@ def get_projection_rays( def project_pointcloud_to_depth( pointcloud: torch.Tensor, output_image: torch.Tensor, -): - """Projects pointcloud to depth image +) -> torch.Tensor: + """Projects pointcloud to depth image based on indices. Args: - np_depth_image: numpy array float, shape (h, w). - intrinsics array: numpy array float, 3x3 intrinsics matrix. + pointcloud: PointCloud of shape (b, h, w, 3). + output_image: Image of shape (b, h, w). Returns: - array of float (h, w) + torch.Tensor: Depth image of shape (b, h, w). """ width, height = output_image.shape @@ -112,10 +121,26 @@ def project_pointcloud_to_depth( @get_torch_jit_decorator() def project_depth_using_rays( - depth_image: torch.Tensor, rays: torch.Tensor, filter_origin: bool = False -): + depth_image: torch.Tensor, + rays: torch.Tensor, + filter_origin: bool = False, + depth_threshold: float = 0.01, +) -> torch.Tensor: + """Project depth image to pointcloud using projection rays. + + Projection rays can be calculated using :func:`~curobo.geom.cv.get_projection_rays` function. + + Args: + depth_image: Dpepth image of shape (b, h, w). + rays: Projection rays of shape (b, h * w, 3). + filter_origin: Remove points with depth less than depth_threshold. + depth_threshold: Threshold to filter points. + + Returns: + Pointcloud of shape (b, h * w, 3). 
+ """ if filter_origin: - depth_image = torch.where(depth_image < 0.01, 0, depth_image) + depth_image = torch.where(depth_image < depth_threshold, 0, depth_image) depth_image = depth_image.view(depth_image.shape[0], -1, 1).contiguous() points = depth_image * rays diff --git a/src/curobo/geom/sdf/sdf_grid.py b/src/curobo/geom/sdf/sdf_grid.py index a2fb3268..6ea7e2a6 100644 --- a/src/curobo/geom/sdf/sdf_grid.py +++ b/src/curobo/geom/sdf/sdf_grid.py @@ -8,14 +8,14 @@ # without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. # +"""Module contains deprecated code for computing Signed Distance Field and it's gradient.""" # Third Party import torch -# from curobo.util.torch_utils import get_torch_jit_decorator - # @get_torch_jit_decorator() def lookup_distance(pt, dist_matrix_flat, num_voxels): + """Lookup distance from distance matrix.""" # flatten: ind_pt = ( (pt[..., 0]) * (num_voxels[1] * num_voxels[2]) + pt[..., 1] * num_voxels[2] + pt[..., 2] @@ -26,6 +26,7 @@ def lookup_distance(pt, dist_matrix_flat, num_voxels): # @get_torch_jit_decorator() def compute_sdf_gradient(pt, dist_matrix_flat, num_voxels, dist): + """Compute gradient of SDF.""" grad_l = [] for i in range(3): # x,y,z pt_n = pt.clone() @@ -50,6 +51,8 @@ def compute_sdf_gradient(pt, dist_matrix_flat, num_voxels, dist): class SDFGrid(torch.autograd.Function): + """Sdf grid torch function.""" + @staticmethod def forward(ctx, pt, dist_matrix_flat, num_voxels): # input = x_id,y_id,z_id diff --git a/src/curobo/geom/sdf/utils.py b/src/curobo/geom/sdf/utils.py index 133c96b9..a3b89b3b 100644 --- a/src/curobo/geom/sdf/utils.py +++ b/src/curobo/geom/sdf/utils.py @@ -8,17 +8,28 @@ # without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. # +"""Module contains uilities for world collision checkers.""" # CuRobo -from curobo.geom.sdf.world import CollisionCheckerType, WorldCollisionConfig +from curobo.geom.sdf.world import ( + CollisionCheckerType, + WorldCollision, + WorldCollisionConfig, + WorldPrimitiveCollision, +) from curobo.util.logger import log_error -def create_collision_checker(config: WorldCollisionConfig): - if config.checker_type == CollisionCheckerType.PRIMITIVE: - # CuRobo - from curobo.geom.sdf.world import WorldPrimitiveCollision +def create_collision_checker(config: WorldCollisionConfig) -> WorldCollision: + """Create collision checker based on configuration. + Args: + config: Input world collision configuration. + + Returns: + Type of collision checker based on configuration. + """ + if config.checker_type == CollisionCheckerType.PRIMITIVE: return WorldPrimitiveCollision(config) elif config.checker_type == CollisionCheckerType.BLOX: # CuRobo @@ -37,4 +48,3 @@ def create_collision_checker(config: WorldCollisionConfig): return WorldVoxelCollision(config) else: log_error("Unknown Collision Checker type: " + config.checker_type, exc_info=True) - raise NotImplementedError diff --git a/src/curobo/geom/sdf/warp_primitives.py b/src/curobo/geom/sdf/warp_primitives.py index deb95692..a0c9442d 100644 --- a/src/curobo/geom/sdf/warp_primitives.py +++ b/src/curobo/geom/sdf/warp_primitives.py @@ -8,6 +8,7 @@ # without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. 
# +"""Warp-lang based world collision functions are implemented as torch autograd functions.""" # Third Party import torch @@ -28,6 +29,8 @@ class SdfMeshWarpPy(torch.autograd.Function): + """Pytorch autograd function for computing signed distance between spheres and meshes.""" + @staticmethod def forward( ctx, @@ -109,6 +112,8 @@ def backward(ctx, grad_output): class SweptSdfMeshWarpPy(torch.autograd.Function): + """Compute signed distance between trajectory of spheres and meshes.""" + @staticmethod def forward( ctx, diff --git a/src/curobo/geom/sdf/warp_sdf_fns.py b/src/curobo/geom/sdf/warp_sdf_fns.py index be8461db..081448d1 100644 --- a/src/curobo/geom/sdf/warp_sdf_fns.py +++ b/src/curobo/geom/sdf/warp_sdf_fns.py @@ -8,6 +8,7 @@ # without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. # +"""CUDA kernels implemented in warp-lang for computing signed distance to meshes.""" # Third Party import warp as wp @@ -18,6 +19,7 @@ def mesh_query_point_fn( point: wp.vec3, max_distance: float, ): + """Query point on mesh.""" collide_result = wp.mesh_query_point(idx, point, max_distance) return collide_result @@ -46,6 +48,7 @@ def get_swept_closest_pt_batch_env( env_query_idx: wp.array(dtype=wp.int32), use_batch_env: wp.uint8, ): + """Compute signed distance between a trajectory of a sphere and world meshes.""" # we launch nspheres kernels # compute gradient here and return # distance is negative outside and positive inside @@ -364,6 +367,7 @@ def get_closest_pt_batch_env( use_batch_env: wp.uint8, compute_esdf: wp.uint8, ): + """Compute signed distance between a sphere and world meshes.""" # we launch nspheres kernels # compute gradient here and return # distance is negative outside and positive inside diff --git a/src/curobo/geom/sdf/warp_sdf_fns_deprecated.py b/src/curobo/geom/sdf/warp_sdf_fns_deprecated.py index 084b2710..e38c50a1 100644 --- a/src/curobo/geom/sdf/warp_sdf_fns_deprecated.py +++ b/src/curobo/geom/sdf/warp_sdf_fns_deprecated.py @@ -8,6 +8,8 @@ # without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. # +"""Deprecated warp kernels that use API compatibile with warp-lang < 1.0.0""" + # Third Party import warp as wp @@ -36,6 +38,7 @@ def get_swept_closest_pt_batch_env( env_query_idx: wp.array(dtype=wp.int32), use_batch_env: wp.uint8, ): + """Kernel to compute swept closest point to mesh.""" # we launch nspheres kernels # compute gradient here and return # distance is negative outside and positive inside @@ -352,6 +355,7 @@ def get_closest_pt_batch_env( use_batch_env: wp.uint8, compute_esdf: wp.uint8, ): + """Kernel to compute closest point to mesh.""" # we launch nspheres kernels # compute gradient here and return # distance is negative outside and positive inside diff --git a/src/curobo/geom/sdf/world.py b/src/curobo/geom/sdf/world.py index 5d3216fa..5a43d85d 100644 --- a/src/curobo/geom/sdf/world.py +++ b/src/curobo/geom/sdf/world.py @@ -8,6 +8,9 @@ # without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. # +"""World representations for computing signed distance are implemented in this module.""" + +from __future__ import annotations # Standard Library from dataclasses import dataclass @@ -27,16 +30,39 @@ @dataclass class CollisionBuffer: + """Helper class stores all buffers required to compute collision cost and gradients.""" + + #: Buffer to store signed distance cost value for each query sphere. 
distance_buffer: torch.Tensor + + #: Buffer to store gradient of signed distance cost value for each query sphere. grad_distance_buffer: torch.Tensor + + #: Buffer to store sparsity index for each query sphere. If sphere's value is not in collsiion, + #: sparsity index is set to 0, else 1. Used to prevent rewriting 0 values in distance_buffer + #: and grad_distance_buffer. sparsity_index_buffer: torch.Tensor + + #: Shape of the distance buffer. This is used to check if the buffer needs to be recreated. shape: Optional[torch.Size] = None def __post_init__(self): + """Initialize the buffer shape if not provided.""" self.shape = self.distance_buffer.shape @classmethod - def initialize_from_shape(cls, shape: torch.Size, tensor_args: TensorDeviceType): + def initialize_from_shape( + cls, shape: torch.Size, tensor_args: TensorDeviceType + ) -> CollisionBuffer: + """Initialize the CollisionBuffer from the given shape of query spheres. + + Args: + shape: Input shape of the query spheres. The shape is (batch, horizon, n_spheres, 4). + tensor_args: Device and precision of the tensors. + + Returns: + CollisionBuffer: Initialized CollisionBuffer object. + """ batch, horizon, n_spheres, _ = shape distance_buffer = torch.zeros( (batch, horizon, n_spheres), @@ -56,6 +82,12 @@ def initialize_from_shape(cls, shape: torch.Size, tensor_args: TensorDeviceType) return CollisionBuffer(distance_buffer, grad_distance_buffer, sparsity_idx) def _update_from_shape(self, shape: torch.Size, tensor_args: TensorDeviceType): + """Update shape of buffers. + + Args: + shape: New shape of the query spheres. + tensor_args: device and precision of the tensors. + """ batch, horizon, n_spheres, _ = shape self.distance_buffer = torch.zeros( (batch, horizon, n_spheres), @@ -75,23 +107,24 @@ def _update_from_shape(self, shape: torch.Size, tensor_args: TensorDeviceType): self.shape = shape[:3] def update_buffer_shape(self, shape: torch.Size, tensor_args: TensorDeviceType): - if self.shape != shape[:3]: - # print("Recreating PRIM: ",self.shape, shape) + """Update the buffer shape if the shape of the query spheres changes. - # self = CollisionBuffer.initialize_from_shape( - # shape, - # tensor_args, - # ) + Args: + shape: New shape of the query spheres. + tensor_args: device and precision of the tensors. + """ + if self.shape != shape[:3]: self._update_from_shape(shape, tensor_args) - # print("New shape:",self.shape) - def clone(self): + def clone(self) -> CollisionBuffer: + """Clone the CollisionBuffer object.""" dist_buffer = self.distance_buffer.clone() grad_buffer = self.grad_distance_buffer.clone() sparse_buffer = self.sparsity_index_buffer.clone() return CollisionBuffer(dist_buffer, grad_buffer, sparse_buffer) - def __mul__(self, scalar: float): + def __mul__(self, scalar: float) -> CollisionBuffer: + """Multiply the CollisionBuffer by a scalar value.""" self.distance_buffer *= scalar self.grad_distance_buffer *= scalar self.sparsity_index_buffer *= int(scalar) @@ -100,18 +133,25 @@ def __mul__(self, scalar: float): @dataclass class CollisionQueryBuffer: - """Class stores all buffers required to query collisions - This class currently has three main buffers. We initialize the required - buffers based on ? - """ + """Class stores all buffers required to query collisions across world representations.""" + #: Buffer to store signed distance cost value for Cuboid world obstacles. primitive_collision_buffer: Optional[CollisionBuffer] = None + + #: Buffer to store signed distance cost value for Mesh world obstacles. 
mesh_collision_buffer: Optional[CollisionBuffer] = None + + #: Buffer to store signed distance cost value for Blox world obstacles. blox_collision_buffer: Optional[CollisionBuffer] = None + + #: Buffer to store signed distance cost value for Voxel world obstacles. voxel_collision_buffer: Optional[CollisionBuffer] = None + + #: Shape of the query spheres. This is used to check if the buffer needs to be recreated. shape: Optional[torch.Size] = None def __post_init__(self): + """Initialize the shape of the query spheres if not provided.""" if self.shape is None: if self.primitive_collision_buffer is not None: self.shape = self.primitive_collision_buffer.shape @@ -122,7 +162,8 @@ def __post_init__(self): elif self.voxel_collision_buffer is not None: self.shape = self.voxel_collision_buffer.shape - def __mul__(self, scalar: float): + def __mul__(self, scalar: float) -> CollisionQueryBuffer: + """Multiply tensors by a scalar value.""" if self.primitive_collision_buffer is not None: self.primitive_collision_buffer = self.primitive_collision_buffer * scalar if self.mesh_collision_buffer is not None: @@ -133,7 +174,8 @@ def __mul__(self, scalar: float): self.voxel_collision_buffer = self.voxel_collision_buffer * scalar return self - def clone(self): + def clone(self) -> CollisionQueryBuffer: + """Clone the CollisionQueryBuffer object.""" prim_buffer = mesh_buffer = blox_buffer = voxel_buffer = None if self.primitive_collision_buffer is not None: prim_buffer = self.primitive_collision_buffer.clone() @@ -157,7 +199,17 @@ def initialize_from_shape( shape: torch.Size, tensor_args: TensorDeviceType, collision_types: Dict[str, bool], - ): + ) -> CollisionQueryBuffer: + """Initialize the CollisionQueryBuffer from the given shape of query spheres. + + Args: + shape: Input shape of the query spheres. The shape is (batch, horizon, n_spheres, 4). + tensor_args: Device and precision of the tensors. + collision_types: Dictionary of collision types to initialize buffers for. + + Returns: + CollisionQueryBuffer: Initialized CollisionQueryBuffer object. + """ primitive_buffer = mesh_buffer = blox_buffer = voxel_buffer = None if "primitive" in collision_types and collision_types["primitive"]: primitive_buffer = CollisionBuffer.initialize_from_shape(shape, tensor_args) @@ -176,7 +228,17 @@ def create_from_shape( shape: torch.Size, tensor_args: TensorDeviceType, collision_types: Dict[str, bool], - ): + ) -> CollisionQueryBuffer: + """Create the CollisionQueryBuffer from the given shape of query spheres. + + Args: + shape: Input shape of the query spheres. The shape is (batch, horizon, n_spheres, 4). + tensor_args: Device and precision of the tensors. + collision_types: Dictionary of collision types to initialize buffers for. + + Returns: + CollisionQueryBuffer: Initialized CollisionQueryBuffer object. + """ if "primitive" in collision_types and collision_types["primitive"]: self.primitive_collision_buffer = CollisionBuffer.initialize_from_shape( shape, tensor_args @@ -195,6 +257,13 @@ def update_buffer_shape( tensor_args: TensorDeviceType, collision_types: Optional[Dict[str, bool]], ): + """Update buffer shape if it doesn't match existing shape. + + Args: + shape: New shape of the query spheres. + tensor_args: Device and precision of the tensors. + collision_types: Dictionary of collision types to update buffers for. 
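+
+        Example (a minimal sketch; assumes ``world_collision`` is an initialized collision
+        checker whose ``collision_types`` dictionary selects which buffers to allocate)::
+
+            spheres = torch.zeros((1, 1, 32, 4), device="cuda:0")
+            buffer = CollisionQueryBuffer()
+            buffer.update_buffer_shape(
+                spheres.shape, world_collision.tensor_args, world_collision.collision_types
+            )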
+ """ # update buffers: assert len(shape) == 4 # shape is: batch, horizon, n_spheres, 4 if self.shape is None: # buffers not initialized: @@ -215,7 +284,12 @@ def update_buffer_shape( def get_gradient_buffer( self, - ): + ) -> Optional[torch.Tensor]: + """Compute the gradient buffer by summing the gradient buffers of all collision types. + + Returns: + torch.Tensor: Gradient buffer for all collision types + """ prim_buffer = mesh_buffer = blox_buffer = None current_buffer = None if self.primitive_collision_buffer is not None: @@ -245,10 +319,7 @@ def get_gradient_buffer( class CollisionCheckerType(Enum): - """Type of collision checker to use. - Args: - Enum (_type_): _description_ - """ + """Type of collision checker to use.""" PRIMITIVE = "PRIMITIVE" BLOX = "BLOX" @@ -258,15 +329,37 @@ class CollisionCheckerType(Enum): @dataclass class WorldCollisionConfig: + """Configuration parameters for the WorldCollision object.""" + + #: Device and precision of the tensors. tensor_args: TensorDeviceType + + #: World obstacles to load for collision checking. world_model: Optional[Union[List[WorldConfig], WorldConfig]] = None + + #: Number of obstacles to cache for collision checking across representations. + #: Use this to create a fixed size buffer for collision checking, e.g, {'obb': 1000} will + #: create a buffer of 1000 cuboids for each environment. cache: Optional[Dict[Obstacle, int]] = None + + #: Number of environments to use for collision checking. n_envs: int = 1 + + #: Type of collision checker to use. checker_type: CollisionCheckerType = CollisionCheckerType.PRIMITIVE + + #: Maximum distance to compute collision checking cost outside the object. This value is + #: added in addition to a query sphere radius and collision activation distance. A smaller + #: value will speedup collision checking but can result in slower convergence with swept + #: sphere collision checking. max_distance: Union[torch.Tensor, float] = 0.1 + + #: Maximum distance outside an obstacle to use when computing euclidean signed distance field + #: (ESDF) from different world representations. max_esdf_distance: Union[torch.Tensor, float] = 100.0 def __post_init__(self): + """Post initialization method to set default values.""" if self.world_model is not None and isinstance(self.world_model, list): self.n_envs = len(self.world_model) if isinstance(self.max_distance, float): @@ -279,7 +372,17 @@ def load_from_dict( world_coll_checker_dict: Dict, world_model_dict: Union[WorldConfig, Dict, List[WorldConfig]] = None, tensor_args: TensorDeviceType = TensorDeviceType(), - ): + ) -> WorldCollisionConfig: + """Load the WorldCollisionConfig from a dictionary. + + Args: + world_coll_checker_dict: Dictionary containing the configuration parameters. + world_model_dict: Dictionary containing obstacles. + tensor_args: Device and precision of the tensors. + + Returns: + WorldCollisionConfig: Initialized WorldCollisionConfig object. + """ world_cfg = world_model_dict if world_model_dict is not None: if isinstance(world_model_dict, list) and isinstance(world_model_dict[0], dict): @@ -295,7 +398,14 @@ def load_from_dict( class WorldCollision(WorldCollisionConfig): + """Base class for computing signed distance between query spheres and world obstacles.""" + def __init__(self, config: Optional[WorldCollisionConfig] = None): + """Initialize the WorldCollision object. + + Args: + config: Configuration parameters for the WorldCollision object. 
+ """ if config is not None: WorldCollisionConfig.__init__(self, **vars(config)) self.collision_types = {} # Use this dictionary to store collision types @@ -303,8 +413,28 @@ def __init__(self, config: Optional[WorldCollisionConfig] = None): self._cache_voxelization_collision_buffer = None def load_collision_model(self, world_model: WorldConfig): + """Load the world obstacles for collision checking.""" raise NotImplementedError + def update_obstacle_pose_in_world_model(self, name: str, pose: Pose, env_idx: int = 0): + """Update the pose of an obstacle in the world model. + + Args: + name: Name of the obstacle to update. + pose: Pose to update the obstacle. + env_idx: Environment index to update the obstacle. + """ + if self.world_model is None: + return + + if isinstance(self.world_model, list): + world = self.world_model[env_idx] + else: + world = self.world_model + obstacle = world.get_obstacle(name) + if obstacle is not None: + obstacle.pose = pose.to_list() + def get_sphere_distance( self, query_sphere, @@ -316,11 +446,7 @@ def get_sphere_distance( sum_collisions: bool = True, compute_esdf: bool = False, ): - """ - Computes the signed distance via analytic function - Args: - tensor_sphere: b, n, 4 - """ + """Compute the signed distance between query spheres and world obstacles.""" raise NotImplementedError def get_sphere_collision( @@ -332,12 +458,7 @@ def get_sphere_collision( env_query_idx: Optional[torch.Tensor] = None, return_loss: bool = False, ): - """ - Computes the signed distance via analytic function - Args: - tensor_sphere: b, n, 4 - we assume we don't need gradient for this function. If you need gradient, use get_sphere_distance - """ + """Compute binary collision between query spheres and world obstacles.""" raise NotImplementedError @@ -354,6 +475,7 @@ def get_swept_sphere_distance( return_loss: bool = False, sum_collisions: bool = True, ): + """Compute the signed distance between trajectory of spheres and world obstacles.""" raise NotImplementedError def get_swept_sphere_collision( @@ -368,17 +490,7 @@ def get_swept_sphere_collision( env_query_idx: Optional[torch.Tensor] = None, return_loss: bool = False, ): - raise NotImplementedError - - def get_sphere_trace( - self, - query_sphere, - collision_query_buffer: CollisionQueryBuffer, - weight: torch.Tensor, - sweep_steps: int, - env_query_idx: Optional[torch.Tensor] = None, - return_loss: bool = False, - ): + """Compute binary collision between trajectory of spheres and world obstacles.""" raise NotImplementedError def get_voxels_in_bounding_box( @@ -386,14 +498,29 @@ def get_voxels_in_bounding_box( cuboid: Cuboid = Cuboid(name="test", pose=[0, 0, 0, 1, 0, 0, 0], dims=[1, 1, 1]), voxel_size: float = 0.02, ) -> Union[List[Cuboid], torch.Tensor]: + """Get occupied voxels in a grid bounded by the given cuboid. + + Args: + cuboid: Bounding box to get the occupied voxels. + voxel_size: Size of the voxel grid. + + Returns: + Tensor with the occupied voxels in the bounding box. + """ new_grid = self.get_occupancy_in_bounding_box(cuboid, voxel_size) occupied = new_grid.get_occupied_voxels(0.0) return occupied def clear_voxelization_cache(self): + """Clear cache that contains voxelization locations.""" self._cache_voxelization = None def update_cache_voxelization(self, new_grid: VoxelGrid): + """Update locaiton of voxels based on new grid parameters. Only for debugging. + + Args: + new_grid: New grid to use for getting voxelized occupancy of current world obstacles. 
+ """ if ( self._cache_voxelization is None or self._cache_voxelization.voxel_size != new_grid.voxel_size @@ -418,6 +545,16 @@ def get_occupancy_in_bounding_box( cuboid: Cuboid = Cuboid(name="test", pose=[0, 0, 0, 1, 0, 0, 0], dims=[1, 1, 1]), voxel_size: float = 0.02, ) -> VoxelGrid: + """Get the occupancy of voxels in a grid bounded by the given cuboid. + + Args: + cuboid: Cuboid to get the occupancy of voxels. Provide pose and dimenstions to + create occupancy information. + voxel_size: Size in meters to use as the voxel size. + + Returns: + Grid with the occupancy of voxels in the bounding box. + """ new_grid = VoxelGrid( name=cuboid.name, dims=cuboid.dims, pose=cuboid.pose, voxel_size=voxel_size ) @@ -448,6 +585,20 @@ def get_esdf_in_bounding_box( voxel_size: float = 0.02, dtype=torch.float32, ) -> VoxelGrid: + """Get the Euclidean signed distance in a grid bounded by the given cuboid. + + Distance is positive inside obstacles and negative outside obstacles. + + Args: + cuboid: Bounding cuboid to query signed distance. + voxel_size: Size of the voxels in meters. + dtype: Data type of the feature tensor. Use :var:`torch.bfloat16` or + :var:`torch.float8_e4m3fn` for reduced memory usage. + + Returns: + Voxels with the Euclidean signed distance in the bounding box. + """ + new_grid = VoxelGrid( name=cuboid.name, dims=cuboid.dims, @@ -483,9 +634,22 @@ def get_mesh_in_bounding_box( cuboid: Cuboid = Cuboid(name="test", pose=[0, 0, 0, 1, 0, 0, 0], dims=[1, 1, 1]), voxel_size: float = 0.02, ) -> Mesh: + """Get a mesh representation of the world obstacles based on occupancy in a bounding box. + + This uses marching cubes to create a mesh representation of the world obstacles. Use this + to debug world representations. + + Args: + cuboid: Bounding box to get the mesh representation. + voxel_size: Size of the voxels in meters. + + Returns: + Mesh representation of the world obstacles in the bounding box. + """ voxels = self.get_voxels_in_bounding_box(cuboid, voxel_size) # voxels = voxels.cpu().numpy() - # cuboids = [Cuboid(name="c_"+str(x), pose=[voxels[x,0],voxels[x,1],voxels[x,2], 1,0,0,0], dims=[voxel_size, voxel_size, voxel_size]) for x in range(voxels.shape[0])] + # cuboids = [Cuboid(name="c_"+str(x), pose=[voxels[x,0],voxels[x,1],voxels[x,2], 1,0,0,0], + # dims=[voxel_size, voxel_size, voxel_size]) for x in range(voxels.shape[0])] # mesh = WorldConfig(cuboid=cuboids).get_mesh_world(True).mesh[0] mesh = Mesh.from_pointcloud( voxels[:, :3].detach().cpu().numpy(), @@ -493,11 +657,27 @@ def get_mesh_in_bounding_box( ) return mesh - def get_obstacle_names(self, env_idx: int = 0): + def get_obstacle_names(self, env_idx: int = 0) -> List[str]: + """Get the names of the obstacles in the world. + + Args: + env_idx: Environment index to get the obstacle names. + + Returns: + Obstacle names in the world. + """ return [] def check_obstacle_exists(self, name: str, env_idx: int = 0) -> bool: + """Check if an obstacle exists in the world by name. + Args: + name: Name of the obstacle to check. + env_idx: Environment index to check the obstacle. + + Returns: + True if the obstacle exists in the world, else False. + """ obstacle_names = self.get_obstacle_names(env_idx) if name in obstacle_names: @@ -507,14 +687,14 @@ def check_obstacle_exists(self, name: str, env_idx: int = 0) -> bool: class WorldPrimitiveCollision(WorldCollision): - """World Oriented Bounding Box representation object - - We represent the world with oriented bounding boxes. 
For speed, we assume there is a - maximum number of obbs that can be instantiated. This number is read from the WorldCollisionConfig. - If no cache is setup, we use the number from the first call of load_collision_model. - """ + """World collision checking with oriented bounding boxes (cuboids) for obstacles.""" def __init__(self, config: WorldCollisionConfig): + """Initialize the WorldPrimitiveCollision object. + + Args: + config: Configuration parameters for the WorldPrimitiveCollision object. + """ super().__init__(config) self._world_cubes = None self._cube_tensor_list = None @@ -529,17 +709,36 @@ def __init__(self, config: WorldCollisionConfig): self.load_collision_model(self.world_model) def _init_cache(self): + """Initialize obstacles cache to allow for dynamic addition of obstacles.""" if self.cache is not None and "obb" in self.cache and self.cache["obb"] not in [None, 0]: self._create_obb_cache(self.cache["obb"]) def load_collision_model( self, world_config: WorldConfig, env_idx=0, fix_cache_reference: bool = False ): + """Load world obstacles into collision checker. + + Args: + world_config: Obstacles to load into the collision checker. + env_idx: Environment index to load the obstacles. + fix_cache_reference: If True, throws error if number of obstacles is greater than + cache. If False, creates a larger cache. Note that when using collision checker + inside a recorded cuda graph, recreating the cache will break the graph as the + reference pointer to the cache will change. + """ self._load_collision_model_in_cache( world_config, env_idx, fix_cache_reference=fix_cache_reference ) - def get_obstacle_names(self, env_idx: int = 0): + def get_obstacle_names(self, env_idx: int = 0) -> List[str]: + """Get the names of the obstacles in the world. + + Args: + env_idx: Environment index to get the obstacle names. + + Returns: + Obstacle names in the world. + """ base_obstacles = super().get_obstacle_names(env_idx) return self._env_obbs_names[env_idx] + base_obstacles @@ -615,6 +814,13 @@ def load_batch_collision_model(self, world_config_list: List[WorldConfig]): def _load_collision_model_in_cache( self, world_config: WorldConfig, env_idx: int = 0, fix_cache_reference: bool = False ): + """Load world obstacles into collision checker cache. This only loads cuboids. + + Args: + world_config: World obstacles to load into the collision checker. + env_idx: Environment index to load the obstacles. + fix_cache_reference: If True, does not allow to load more obstacles than cache size. + """ cube_objs = world_config.cuboid max_obb = len(cube_objs) self.world_model = world_config @@ -646,7 +852,12 @@ def _load_collision_model_in_cache( self._env_obbs_names[env_idx][:max_obb] = names_batch self.collision_types["primitive"] = True - def _create_obb_cache(self, obb_cache): + def _create_obb_cache(self, obb_cache: int): + """Create cache for cuboid (oriented bounding box) obstacles. + + Args: + obb_cache: Number of cuboids to cache for collision checking. + """ box_dims = ( torch.zeros( (self.n_envs, obb_cache, 4), @@ -678,12 +889,18 @@ def add_obb_from_raw( env_idx: int, w_obj_pose: Optional[Pose] = None, obj_w_pose: Optional[Pose] = None, - ): - """ + ) -> int: + """Add cuboid obstacle to world. + Args: - dims: lenght, width, height - position: x,y,z - rotation: matrix (3x3) + name: Name of the obstacle. Must be unique. + dims: Dimensions of the cuboid obstacle [length, width, height]. + env_idx: Environment index to add the obstacle to. + w_obj_pose: Pose of the obstacle in world frame. 
+ obj_w_pose: Inverse pose of the obstacle in world frame. + + Returns: + Index of the obstacle in the world. """ assert w_obj_pose is not None or obj_w_pose is not None if name in self._env_obbs_names[env_idx]: @@ -705,7 +922,16 @@ def add_obb( self, cuboid: Cuboid, env_idx: int = 0, - ): + ) -> int: + """Add cuboid obstacle to world. + + Args: + cuboid: Cuboid to add. + env_idx: Environment index to add the obstacle to. + + Returns: + Index of the obstacle in the world. + """ return self.add_obb_from_raw( cuboid.name, self.tensor_args.to_device(cuboid.dims), @@ -720,12 +946,13 @@ def update_obb_dims( env_obj_idx: Optional[torch.Tensor] = None, env_idx: int = 0, ): - """Update obstacle dimensions + """Update dimensinots of an existing cuboid obstacle. Args: - obj_dims (torch.Tensor): [dim.x,dim.y, dim.z], give as [b,3] - obj_idx (torch.Tensor or int): - + obj_dims: [dim.x,dim.y, dim.z]. + name: Name of the obstacle to update. + env_obj_idx: Index of the obstacle to update. Not required if name is provided. + env_idx: Environment index to update the obstacle. """ if env_obj_idx is not None: self._cube_tensor_list[0][env_obj_idx, :3] = obj_dims @@ -741,6 +968,13 @@ def enable_obstacle( enable: bool = True, env_idx: int = 0, ): + """Enable/Disable object in collision checking functions. + + Args: + name: Name of the obstacle to enable. + enable: True to enable, False to disable. + env_idx: Index of the environment to enable the obstacle in. + """ return self.enable_obb(enable, name, None, env_idx) def enable_obb( @@ -750,12 +984,13 @@ def enable_obb( env_obj_idx: Optional[torch.Tensor] = None, env_idx: int = 0, ): - """Update obstacle dimensions + """Enable/Disable cuboid in collision checking functions. Args: - obj_dims (torch.Tensor): [dim.x,dim.y, dim.z], give as [b,3] - obj_idx (torch.Tensor or int): - + enable: True to enable, False to disable. + name: Name of the obstacle to enable. + env_obj_idx: Index of the obstacle to enable. Not required if name is provided. + env_idx: Index of the environment to enable the obstacle in. """ if env_obj_idx is not None: self._cube_tensor_list[2][env_obj_idx] = int(enable) # enable == 1 @@ -770,11 +1005,28 @@ def update_obstacle_pose( name: str, w_obj_pose: Pose, env_idx: int = 0, + update_cpu_reference: bool = False, ): + """Update pose of an existing obstacle. + + Args: + name: Name of obstacle. + w_obj_pose: Pose of the obstacle in world frame. + env_idx: Index of the environment to update the obstacle in. + update_cpu_reference: If True, updates the CPU reference with the new pose. This is + useful for debugging and visualization. Only supported for env_idx=0. + """ if self._env_obbs_names is not None and name in self._env_obbs_names[env_idx]: - self.update_obb_pose(name=name, w_obj_pose=w_obj_pose, env_idx=env_idx) + self.update_obb_pose( + name=name, + w_obj_pose=w_obj_pose, + env_idx=env_idx, + ) else: - log_error("obstacle not found in OBB world model: " + name) + log_warn("obstacle not found in OBB world model: " + name) + + if update_cpu_reference: + self.update_obstacle_pose_in_world_model(name, w_obj_pose, env_idx) def update_obb_pose( self, @@ -784,11 +1036,17 @@ def update_obb_pose( env_obj_idx: Optional[torch.Tensor] = None, env_idx: int = 0, ): - """Update pose of a specific objects. - This also updates the signed distance grid to account for the updated object pose. + """Update pose of an existing cuboid obstacle. + Args: - obj_w_pose: Pose - obj_idx: + w_obj_pose: Pose of the obstacle in world frame. 
+ obj_w_pose: Inverse pose of the obstacle in world frame. Not required if w_obj_pose is + provided. + name: Name of the obstacle to update. + env_obj_idx: Index of the obstacle to update. Not required if name is provided. + env_idx: Index of the environment to update the obstacle. + update_cpu_reference: If True, updates the CPU reference with the new pose. This is + useful for debugging and visualization. Only supported for env_idx=0. """ obj_w_pose = self._get_obstacle_poses(w_obj_pose, obj_w_pose) if env_obj_idx is not None: @@ -803,12 +1061,21 @@ def _get_obstacle_poses( w_obj_pose: Optional[Pose] = None, obj_w_pose: Optional[Pose] = None, ): + """Get pose of world from obstacle frame of reference. + + Args: + w_obj_pose: Pose of the obstacle in world frame. + obj_w_pose: Pose of world in obstacle frame of reference. + + Returns: + Pose of world in obstacle frame of reference. + """ if w_obj_pose is not None: w_inv_pose = w_obj_pose.inverse() elif obj_w_pose is not None: w_inv_pose = obj_w_pose else: - raise ValueError("Object pose is not given") + log_error("Object pose is not given") return w_inv_pose def get_obb_idx( @@ -816,13 +1083,22 @@ def get_obb_idx( name: str, env_idx: int = 0, ) -> int: + """Get index of the cuboid obstacle in the world. + + Args: + name: Name of the obstacle to get the index. + env_idx: Environment index to get the obstacle index. + + Returns: + Index of the obstacle in the world. + """ if name not in self._env_obbs_names[env_idx]: log_error("Obstacle with name: " + name + " not found in current world", exc_info=True) return self._env_obbs_names[env_idx].index(name) def get_sphere_distance( self, - query_sphere, + query_sphere: torch.Tensor, collision_query_buffer: CollisionQueryBuffer, weight: torch.Tensor, activation_distance: torch.Tensor, @@ -830,9 +1106,33 @@ def get_sphere_distance( return_loss=False, sum_collisions: bool = True, compute_esdf: bool = False, - ): + ) -> torch.Tensor: + """Compute the signed distance between query spheres and world obstacles. + + This distance can be used as a collision cost for optimization. + + Args: + query_sphere: Input tensor with query spheres [batch, horizon, number of spheres, 4]. + With [x, y, z, radius] as the last column for each sphere. + collision_query_buffer: Buffer to store collision query results. + weight: Weight of the collision cost. + activation_distance: Distance outside the object to start computing the cost. + env_query_idx: Environment index for each batch of query spheres. + return_loss: If the returned tensor will be scaled or changed before calling backward, + set this to True. If the returned tensor will be used directly through addition, + set this to False. + sum_collisions: Sum the collision cost across all obstacles. This variable is currently + not passed to the underlying CUDA kernel as setting this to False caused poor + performance. + compute_esdf: Compute Euclidean signed distance instead of collision cost. When True, + the returned tensor will be the signed distance with positive values inside an + obstacle and negative values outside obstacles. + + Returns: + Signed distance between query spheres and world obstacles. 
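+
+        Example (a minimal sketch; assumes ``world_collision`` is an initialized
+        :class:`WorldPrimitiveCollision` with obstacles already loaded)::
+
+            tensor_args = world_collision.tensor_args
+            spheres = torch.zeros((1, 1, 2, 4), device=tensor_args.device)
+            spheres[..., 3] = 0.05  # sphere radii in meters
+            buffer = CollisionQueryBuffer.initialize_from_shape(
+                spheres.shape, tensor_args, world_collision.collision_types
+            )
+            weight = tensor_args.to_device([1.0])
+            activation_distance = tensor_args.to_device([0.01])
+            d = world_collision.get_sphere_distance(spheres, buffer, weight, activation_distance)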
+ """ if "primitive" not in self.collision_types or not self.collision_types["primitive"]: - raise ValueError("Primitive Collision has no obstacles") + log_error("Primitive Collision has no obstacles") b, h, n, _ = query_sphere.shape # This can be read from collision query buffer use_batch_env = True @@ -870,18 +1170,32 @@ def get_sphere_distance( def get_sphere_collision( self, - query_sphere, + query_sphere: torch.Tensor, collision_query_buffer: CollisionQueryBuffer, weight: torch.Tensor, activation_distance: torch.Tensor, env_query_idx: Optional[torch.Tensor] = None, return_loss=False, **kwargs, - ): + ) -> torch.Tensor: + """Compute binary collision between query spheres and world obstacles. + + Args: + query_sphere: Input tensor with query spheres [batch, horizon, number of spheres, 4]. + With [x, y, z, radius] as the last column for each sphere. + collision_query_buffer: Collision query buffer to store the results. + weight: Weight to scale the collision cost. + activation_distance: Distance outside the object to start computing the cost. + env_query_idx: Environment index for each batch of query spheres. + return_loss: True is not supported for binary classification. Set to False. + + Returns: + Tensor with binary collision results. + """ if "primitive" not in self.collision_types or not self.collision_types["primitive"]: - raise ValueError("Primitive Collision has no obstacles") + log_error("Primitive Collision has no obstacles") if return_loss: - raise ValueError("cannot return loss for classification, use get_sphere_distance") + log_error("cannot return loss for classification, use get_sphere_distance") b, h, n, _ = query_sphere.shape use_batch_env = True if env_query_idx is None: @@ -915,7 +1229,7 @@ def get_sphere_collision( def get_swept_sphere_distance( self, - query_sphere, + query_sphere: torch.Tensor, collision_query_buffer: CollisionQueryBuffer, weight: torch.Tensor, activation_distance: torch.Tensor, @@ -925,14 +1239,38 @@ def get_swept_sphere_distance( env_query_idx: Optional[torch.Tensor] = None, return_loss=False, sum_collisions: bool = True, - ): - """ - Computes the signed distance via analytic function + ) -> torch.Tensor: + """Compute the signed distance between trajectory of spheres and world obstacles. + Args: - tensor_sphere: b, n, 4 + query_sphere: Input tensor with query spheres [batch, horizon, number of spheres, 4]. + With [x, y, z, radius] as the last column for each sphere. + collision_query_buffer: Collision query buffer to store the results. + weight: Collision cost weight. + activation_distance: Distance outside the object to start computing the cost. A smooth + scaling is applied to the cost starting from this distance. See + :ref:`research_page` for more details. + speed_dt: Length of time (seconds) to use when calculating the speed of the sphere + using finite difference. + sweep_steps: Number of steps to sweep the sphere along the trajectory. More steps will + allow for catching small obstacles, taking more time to compute. + enable_speed_metric: True will scale the collision cost by the speed of the sphere. + This has the effect of slowing down the robot when near obstacles. This also has + shown to improve convergence from poor initialization. + env_query_idx: Environment index for each batch of query spheres. + return_loss: If the returned tensor will be scaled or changed before calling backward, + set this to True. If the returned tensor will be used directly through addition, + set this to False. 
+ sum_collisions: Sum the collision cost across all obstacles. This variable is currently + not passed to the underlying CUDA kernel as setting this to False caused poor + performance. + + Returns: + Collision cost between trajectory of spheres and world obstacles. """ + if "primitive" not in self.collision_types or not self.collision_types["primitive"]: - raise ValueError("Primitive Collision has no obstacles") + log_error("Primitive Collision has no obstacles") b, h, n, _ = query_sphere.shape use_batch_env = True @@ -971,7 +1309,7 @@ def get_swept_sphere_distance( def get_swept_sphere_collision( self, - query_sphere, + query_sphere: torch.Tensor, collision_query_buffer: CollisionQueryBuffer, weight: torch.Tensor, activation_distance: torch.Tensor, @@ -980,16 +1318,35 @@ def get_swept_sphere_collision( enable_speed_metric=False, env_query_idx: Optional[torch.Tensor] = None, return_loss=False, - ): - """ - Computes the signed distance via analytic function + ) -> torch.Tensor: + """Get binary collision between trajectory of spheres and world obstacles. + Args: - tensor_sphere: b, n, 4 + query_sphere: Input tensor with query spheres [batch, horizon, number of spheres, 4]. + With [x, y, z, radius] as the last column for each sphere. + collision_query_buffer: Collision query buffer to store the results. + weight: Collision cost weight. + activation_distance: Distance outside the object to start computing the cost. A smooth + scaling is applied to the cost starting from this distance. See + :ref:`research_page` for more details. + speed_dt: Length of time (seconds) to use when calculating the speed of the sphere + using finite difference. This is not used. + sweep_steps: Number of steps to sweep the sphere along the trajectory. More steps will + allow for catching small obstacles, taking more time to compute. + enable_speed_metric: True will scale the collision cost by the speed of the sphere. + This has the effect of slowing down the robot when near obstacles. This also has + shown to improve convergence from poor initialization. This is not used. + env_query_idx: Environment index for each batch of query spheres. + return_loss: This is not supported for binary classification. Set to False. + + Returns: + Collision value between trajectory of spheres and world obstacles. """ + if "primitive" not in self.collision_types or not self.collision_types["primitive"]: - raise ValueError("Primitive Collision has no obstacles") + log_error("Primitive Collision has no obstacles") if return_loss: - raise ValueError("cannot return loss for classify, use get_swept_sphere_distance") + log_error("cannot return loss for classify, use get_swept_sphere_distance") b, h, n, _ = query_sphere.shape use_batch_env = True @@ -1025,6 +1382,7 @@ def get_swept_sphere_collision( return dist def clear_cache(self): + """Delete all cuboid obstacles from the world.""" if self._cube_tensor_list is not None: self._cube_tensor_list[2][:] = 0 self._env_n_obbs[:] = 0 diff --git a/src/curobo/geom/sdf/world_blox.py b/src/curobo/geom/sdf/world_blox.py index 978b70cb..d737faa3 100644 --- a/src/curobo/geom/sdf/world_blox.py +++ b/src/curobo/geom/sdf/world_blox.py @@ -8,7 +8,7 @@ # without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. # - +"""World represented by ESDF layers of nvblox.""" # Standard Library from typing import List, Optional @@ -36,19 +36,17 @@ class WorldBloxCollision(WorldVoxelCollision): """World Collision Representaiton using Nvidia's nvblox library. 
- This class depends on pytorch wrapper for nvblox. - Additionally, this representation does not support batched environments as we only store - one world via nvblox. + This class depends on pytorch wrapper for nvblox. Additionally, this representation does not + support batched environments as we only store one world via nvblox. There are two ways to use nvblox, one is by loading maps from disk and the other is by - creating maps online. In both these instances, we might load more than one map and need to check - collisions against all maps. - - To facilitate online map creation and updation, we build apis in this - class that provide. + creating maps online. In both these instances, we might load more than one map and need to + check collisions against all maps.To facilitate online map creation and updation, we build apis + in this class to process depth images. """ def __init__(self, config: WorldCollisionConfig): + """Initialize with a world collision configuration.""" self._pose_offset = None self._blox_mapper = None self._blox_tensor_list = None @@ -56,6 +54,15 @@ def __init__(self, config: WorldCollisionConfig): super().__init__(config) def load_collision_model(self, world_model: WorldConfig, fix_cache_reference: bool = False): + """Load collision model from world obstacles. Only 1 environment is supported. + + Args: + world_model: Obstacles in world to load. + fix_cache_reference: If True, throws error if number of obstacles is greater than + cache. If False, creates a larger cache. Note that when using collision checker + inside a recorded cuda graph, recreating the cache will break the graph as the + reference pointer to the cache will change. + """ # load nvblox mesh if len(world_model.blox) > 0: # check if there is a mapper instance: @@ -109,70 +116,20 @@ def load_collision_model(self, world_model: WorldConfig, fix_cache_reference: bo self._blox_names = names self.collision_types["blox"] = True - return super().load_collision_model(world_model, fix_cache_reference=fix_cache_reference) + super().load_collision_model(world_model, fix_cache_reference=fix_cache_reference) def clear_cache(self): + """Clear obstacle cache, clears nvblox maps and other obstacles.""" self._blox_mapper.clear() self._blox_mapper.update_hashmaps() super().clear_cache() def clear_blox_layer(self, layer_name: str): + """Clear a specific blox layer.""" index = self._blox_names.index(layer_name) self._blox_mapper.clear(index) self._blox_mapper.update_hashmaps() - def _get_blox_sdf( - self, - query_spheres, - collision_query_buffer: CollisionQueryBuffer, - weight, - activation_distance, - return_loss: bool = False, - compute_esdf: bool = False, - ): - d = self._blox_mapper.query_sphere_sdf_cost( - query_spheres, - collision_query_buffer.blox_collision_buffer.distance_buffer, - collision_query_buffer.blox_collision_buffer.grad_distance_buffer, - collision_query_buffer.blox_collision_buffer.sparsity_index_buffer, - weight, - activation_distance, - self.max_esdf_distance, - self._blox_tensor_list[0], - self._blox_tensor_list[1], - return_loss, - compute_esdf, - ) - return d - - def _get_blox_swept_sdf( - self, - query_spheres, - collision_query_buffer, - weight, - activation_distance, - speed_dt, - sweep_steps, - enable_speed_metric, - return_loss=False, - ): - d = self._blox_mapper.query_sphere_trajectory_sdf_cost( - query_spheres, - collision_query_buffer.blox_collision_buffer.distance_buffer, - collision_query_buffer.blox_collision_buffer.grad_distance_buffer, - 
collision_query_buffer.blox_collision_buffer.sparsity_index_buffer, - weight, - activation_distance, - speed_dt, - self._blox_tensor_list[0], - self._blox_tensor_list[1], - sweep_steps, - enable_speed_metric, - return_loss, - use_experimental=False, - ) - return d - def get_sphere_distance( self, query_sphere: torch.Tensor, @@ -183,7 +140,31 @@ def get_sphere_distance( return_loss: bool = False, sum_collisions: bool = True, compute_esdf: bool = False, - ): + ) -> torch.Tensor: + """Compute the signed distance between query spheres and world obstacles. + + This distance can be used as a collision cost for optimization. + + Args: + query_sphere: Input tensor with query spheres [batch, horizon, number of spheres, 4]. + With [x, y, z, radius] as the last column for each sphere. + collision_query_buffer: Buffer to store collision query results. + weight: Weight of the collision cost. + activation_distance: Distance outside the object to start computing the cost. + env_query_idx: Environment index for each batch of query spheres. + return_loss: If the returned tensor will be scaled or changed before calling backward, + set this to True. If the returned tensor will be used directly through addition, + set this to False. + sum_collisions: Sum the collision cost across all obstacles. This variable is currently + not passed to the underlying CUDA kernel as setting this to False caused poor + performance. + compute_esdf: Compute Euclidean signed distance instead of collision cost. When True, + the returned tensor will be the signed distance with positive values inside an + obstacle and negative values outside obstacles. + + Returns: + Signed distance between query spheres and world obstacles. + """ if "blox" not in self.collision_types or not self.collision_types["blox"]: return super().get_sphere_distance( query_sphere, @@ -234,7 +215,21 @@ def get_sphere_collision( env_query_idx=None, return_loss: bool = False, **kwargs, - ): + ) -> torch.Tensor: + """Compute binary collision between query spheres and world obstacles. + + Args: + query_sphere: Input tensor with query spheres [batch, horizon, number of spheres, 4]. + With [x, y, z, radius] as the last column for each sphere. + collision_query_buffer: Collision query buffer to store the results. + weight: Weight to scale the collision cost. + activation_distance: Distance outside the object to start computing the cost. + env_query_idx: Environment index for each batch of query spheres. + return_loss: True is not supported for binary classification. Set to False. + + Returns: + Tensor with binary collision results. + """ if "blox" not in self.collision_types or not self.collision_types["blox"]: return super().get_sphere_collision( query_sphere, @@ -280,7 +275,35 @@ def get_swept_sphere_distance( env_query_idx: Optional[torch.Tensor] = None, return_loss: bool = False, sum_collisions: bool = True, - ): + ) -> torch.Tensor: + """Compute the signed distance between trajectory of spheres and world obstacles. + + Args: + query_sphere: Input tensor with query spheres [batch, horizon, number of spheres, 4]. + With [x, y, z, radius] as the last column for each sphere. + collision_query_buffer: Collision query buffer to store the results. + weight: Collision cost weight. + activation_distance: Distance outside the object to start computing the cost. A smooth + scaling is applied to the cost starting from this distance. See + :ref:`research_page` for more details. 
+ speed_dt: Length of time (seconds) to use when calculating the speed of the sphere + using finite difference. + sweep_steps: Number of steps to sweep the sphere along the trajectory. More steps will + allow for catching small obstacles, taking more time to compute. + enable_speed_metric: True will scale the collision cost by the speed of the sphere. + This has the effect of slowing down the robot when near obstacles. This also has + shown to improve convergence from poor initialization. + env_query_idx: Environment index for each batch of query spheres. + return_loss: If the returned tensor will be scaled or changed before calling backward, + set this to True. If the returned tensor will be used directly through addition, + set this to False. + sum_collisions: Sum the collision cost across all obstacles. This variable is currently + not passed to the underlying CUDA kernel as setting this to False caused poor + performance. + + Returns: + Collision cost between trajectory of spheres and world obstacles. + """ if "blox" not in self.collision_types or not self.collision_types["blox"]: return super().get_swept_sphere_distance( query_sphere, @@ -337,7 +360,30 @@ def get_swept_sphere_collision( enable_speed_metric=False, env_query_idx: Optional[torch.Tensor] = None, return_loss: bool = False, - ): + ) -> torch.Tensor: + """Get binary collision between trajectory of spheres and world obstacles. + + Args: + query_sphere: Input tensor with query spheres [batch, horizon, number of spheres, 4]. + With [x, y, z, radius] as the last column for each sphere. + collision_query_buffer: Collision query buffer to store the results. + weight: Collision cost weight. + activation_distance: Distance outside the object to start computing the cost. A smooth + scaling is applied to the cost starting from this distance. See + :ref:`research_page` for more details. + speed_dt: Length of time (seconds) to use when calculating the speed of the sphere + using finite difference. This is not used. + sweep_steps: Number of steps to sweep the sphere along the trajectory. More steps will + allow for catching small obstacles, taking more time to compute. + enable_speed_metric: True will scale the collision cost by the speed of the sphere. + This has the effect of slowing down the robot when near obstacles. This also has + shown to improve convergence from poor initialization. This is not used. + env_query_idx: Environment index for each batch of query spheres. + return_loss: This is not supported for binary classification. Set to False. + + Returns: + Collision value between trajectory of spheres and world obstacles. + """ if "blox" not in self.collision_types or not self.collision_types["blox"]: return super().get_swept_sphere_collision( query_sphere, @@ -385,12 +431,25 @@ def enable_obstacle( enable: bool = True, env_idx: int = 0, ): + """Enable/Disable object in collision checking functions. + + Args: + name: Name of the obstacle to enable. + enable: True to enable, False to disable. + env_idx: Index of the environment to enable the obstacle in. + """ if self._blox_names is not None and name in self._blox_names: self.enable_blox(enable, name) else: super().enable_obstacle(name, enable, env_idx) def enable_blox(self, enable: bool = True, name: Optional[str] = None): + """Enable/Disable nvblox layer for collision checking. + + Args: + enable: True to enable, False to disable. + name: Name of the nvblox layer to enable. 
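+
+        Illustrative example of toggling a layer (assumes ``world_coll`` is an already
+        initialized WorldBloxCollision and "world" is the name of a loaded nvblox layer)::
+
+            world_coll.enable_blox(enable=False, name="world")  # skip this layer in queries
+            world_coll.enable_blox(enable=True, name="world")   # check it again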
+ """ index = self._blox_names.index(name) self._blox_tensor_list[1][index] = int(enable) @@ -400,6 +459,13 @@ def update_blox_pose( obj_w_pose: Optional[Pose] = None, name: Optional[str] = None, ): + """Update pose of a nvblox layer. + + Args: + w_obj_pose: Pose of layer in world frame. + obj_w_pose: Inverse pose of layer. If w_obj_pose is provided, this is not required. + name: Name of the nvblox layer to update. + """ obj_w_pose = self._get_obstacle_poses(w_obj_pose, obj_w_pose) index = self._blox_names.index(name) self._blox_tensor_list[0][index][:7] = obj_w_pose.get_pose_vector() @@ -409,6 +475,12 @@ def clear_bounding_box( cuboid: Cuboid, layer_name: Optional[str] = None, ): + """Clear occupied regions of a nvblox layer. Not implemented. + + Args: + cuboid: Bounding box to clear. + layer_name: Name of nvblox layer. + """ log_error("Not implemented") def get_bounding_spheres( @@ -421,10 +493,34 @@ def get_bounding_spheres( voxelize_method: str = "ray", pre_transform_pose: Optional[Pose] = None, clear_region: bool = False, + clear_region_layer: Optional[str] = None, ) -> List[Sphere]: + """Get spheres within a bounding box. + + Args: + bounding_box: Bounding box to find occupied region. + obstacle_name: Name to use for created occupied region. Not useful, use any random + name. + n_spheres: Number of spheres to use for approximating the occupied region. + surface_sphere_radius: Radius to use for surface spheres. + fit_type: Sphere fit algorithm to use. See :ref:`attach_object_note` for more + details. The default method :attr:`SphereFitType.VOXEL_VOLUME_SAMPLE_SURFACE` + voxelizes the volume of the objects and adds spheres representing the voxels, then + samples points on the surface of the object, adds :attr:`surface_sphere_radius` to + these points. This should be used for most cases. + voxelize_method: Method to use for voxelization, passed to + :py:func:`trimesh.voxel.creation.voxelize`. + + pre_transform_pose: Optional pose to transform the bounding box before finding spheres. + clear_region: Clear region in nvblox layer. Not supported. + clear_region_layer: Layer of nvblox to clear region. + + Returns: + Spheres approximating occupied region. + """ mesh = self.get_mesh_in_bounding_box(bounding_box, obstacle_name, clear_region=clear_region) if clear_region: - self.clear_bounding_box(bounding_box, obstacle_name) + self.clear_bounding_box(bounding_box, clear_region_layer) spheres = mesh.get_bounding_spheres( n_spheres=n_spheres, surface_sphere_radius=surface_sphere_radius, @@ -437,6 +533,12 @@ def get_bounding_spheres( @profiler.record_function("world_blox/add_camera_frame") def add_camera_frame(self, camera_observation: CameraObservation, layer_name: str): + """Add a camera image to nvblox layer. + + Args: + camera_observation: New image to add to nvblox layer. + layer_name: Name of nvblox layer. + """ index = self._blox_names.index(layer_name) pose_mat = camera_observation.pose.get_matrix().view(4, 4) if camera_observation.rgb_image is not None: @@ -460,16 +562,29 @@ def add_camera_frame(self, camera_observation: CameraObservation, layer_name: st ) def process_camera_frames(self, layer_name: Optional[str] = None, process_aux: bool = False): + """Integrate ESDF data from camera frames into nvblox layer. + + Args: + layer_name: Name of nvblox layer. If None, all layers are processed. + process_aux: Process color frames, useful for visualization. 
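+
+        Illustrative example of a typical perception update loop (assumes ``world_coll`` is an
+        initialized WorldBloxCollision, ``cam_obs`` is a CameraObservation holding the latest
+        depth image and camera pose, and "world" is a loaded nvblox layer)::
+
+            world_coll.add_camera_frame(cam_obs, "world")  # queue the newest frame
+            world_coll.process_camera_frames("world")      # integrate it into the ESDF
+            world_coll.update_blox_hashes()                # refresh hashmaps before querying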
+ """ self.update_blox_esdf(layer_name) if process_aux: self.update_blox_mesh(layer_name) @profiler.record_function("world_blox/update_hashes") def update_blox_hashes(self): + """Update hashmaps for nvblox layers. Required after processing camera frames.""" self._blox_mapper.update_hashmaps() @profiler.record_function("world_blox/update_esdf") def update_blox_esdf(self, layer_name: Optional[str] = None): + """Integrate ESDF data from camera frames into nvblox layer. + + Args: + layer_name: Name of nvblox layer. If None, all layers are processed. + """ + index = -1 if layer_name is not None: index = self._blox_names.index(layer_name) @@ -477,6 +592,11 @@ def update_blox_esdf(self, layer_name: Optional[str] = None): @profiler.record_function("world_blox/update_mesh") def update_blox_mesh(self, layer_name: Optional[str] = None): + """Update mesh data for nvblox layer. Requires RGB data. + + Args: + layer_name: Name of nvblox layer. If None, all layers are processed. + """ index = -1 if layer_name is not None: index = self._blox_names.index(layer_name) @@ -484,6 +604,17 @@ def update_blox_mesh(self, layer_name: Optional[str] = None): @profiler.record_function("world_blox/get_mesh") def get_mesh_from_blox_layer(self, layer_name: str, mode: str = "nvblox") -> Mesh: + """Get Mesh from nvblox layer. + + Args: + layer_name: Name of nvblox layer. + mode: If mode is "nvblox", mesh is generated using nvblox's internal mesh construction + method. This relies on RGB data from camera frames. If mode is "voxel", mesh is + generated using occupancy. + + Returns: + Mesh object. + """ index = self._blox_names.index(layer_name) if mode == "nvblox": mesh_data = self._blox_mapper.get_mesh(index) @@ -504,14 +635,133 @@ def get_mesh_from_blox_layer(self, layer_name: str, mode: str = "nvblox") -> Mes return mesh def save_layer(self, layer_name: str, file_name: str) -> bool: + """Save nvblox layer to disk. + + Args: + layer_name: Name of nvblox layer. + file_name: File path to save layer. + + Returns: + True if successful, False otherwise. + """ index = self._blox_names.index(layer_name) status = self._blox_mapper.save_map(file_name, index) return status def decay_layer(self, layer_name: str): + """Decay nvblox layer. This will remove any stale voxels in the layer. + + Args: + layer_name: Name of nvblox layer to decay. + """ index = self._blox_names.index(layer_name) self._blox_mapper.decay_occupancy(mapper_id=index) - def get_obstacle_names(self, env_idx: int = 0): + def get_obstacle_names(self, env_idx: int = 0) -> List[str]: + """Get names of all obstacles in the environment. + + Args: + env_idx: Environment index to get obstacles from. + + Returns: + List of obstacle names. + """ base_obstacles = super().get_obstacle_names(env_idx) return self._blox_names + base_obstacles + + def _get_blox_sdf( + self, + query_spheres: torch.Tensor, + collision_query_buffer: CollisionQueryBuffer, + weight: torch.Tensor, + activation_distance: torch.Tensor, + return_loss: bool = False, + compute_esdf: bool = False, + ) -> torch.Tensor: + """Compute the signed distance between query spheres and world obstacles. + + This distance can be used as a collision cost for optimization. + + Args: + query_sphere: Input tensor with query spheres [batch, horizon, number of spheres, 4]. + With [x, y, z, radius] as the last column for each sphere. + collision_query_buffer: Buffer to store collision query results. + weight: Weight of the collision cost. + activation_distance: Distance outside the object to start computing the cost. 
+ env_query_idx: Environment index for each batch of query spheres. + return_loss: If the returned tensor will be scaled or changed before calling backward, + set this to True. If the returned tensor will be used directly through addition, + set this to False. + compute_esdf: Compute Euclidean signed distance instead of collision cost. When True, + the returned tensor will be the signed distance with positive values inside an + obstacle and negative values outside obstacles. + + Returns: + Signed distance between query spheres and world obstacles. + """ + d = self._blox_mapper.query_sphere_sdf_cost( + query_spheres, + collision_query_buffer.blox_collision_buffer.distance_buffer, + collision_query_buffer.blox_collision_buffer.grad_distance_buffer, + collision_query_buffer.blox_collision_buffer.sparsity_index_buffer, + weight, + activation_distance, + self.max_esdf_distance, + self._blox_tensor_list[0], + self._blox_tensor_list[1], + return_loss, + compute_esdf, + ) + return d + + def _get_blox_swept_sdf( + self, + query_spheres: torch.Tensor, + collision_query_buffer: CollisionQueryBuffer, + weight: torch.Tensor, + activation_distance: torch.Tensor, + speed_dt: torch.Tensor, + sweep_steps: int, + enable_speed_metric: bool, + return_loss: bool = False, + ) -> torch.Tensor: + """Compute the signed distance between trajectory of spheres and world obstacles. + + Args: + query_sphere: Input tensor with query spheres [batch, horizon, number of spheres, 4]. + With [x, y, z, radius] as the last column for each sphere. + collision_query_buffer: Collision query buffer to store the results. + weight: Collision cost weight. + activation_distance: Distance outside the object to start computing the cost. A smooth + scaling is applied to the cost starting from this distance. See + :ref:`research_page` for more details. + speed_dt: Length of time (seconds) to use when calculating the speed of the sphere + using finite difference. + sweep_steps: Number of steps to sweep the sphere along the trajectory. More steps will + allow for catching small obstacles, taking more time to compute. + enable_speed_metric: True will scale the collision cost by the speed of the sphere. + This has the effect of slowing down the robot when near obstacles. This also has + shown to improve convergence from poor initialization. + return_loss: If the returned tensor will be scaled or changed before calling backward, + set this to True. If the returned tensor will be used directly through addition, + set this to False. + + Returns: + Collision cost between trajectory of spheres and world obstacles. + """ + d = self._blox_mapper.query_sphere_trajectory_sdf_cost( + query_spheres, + collision_query_buffer.blox_collision_buffer.distance_buffer, + collision_query_buffer.blox_collision_buffer.grad_distance_buffer, + collision_query_buffer.blox_collision_buffer.sparsity_index_buffer, + weight, + activation_distance, + speed_dt, + self._blox_tensor_list[0], + self._blox_tensor_list[1], + sweep_steps, + enable_speed_metric, + return_loss, + use_experimental=False, + ) + return d diff --git a/src/curobo/geom/sdf/world_mesh.py b/src/curobo/geom/sdf/world_mesh.py index e343e304..4cb9e204 100644 --- a/src/curobo/geom/sdf/world_mesh.py +++ b/src/curobo/geom/sdf/world_mesh.py @@ -8,6 +8,7 @@ # without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. 
# +"""World represented as Meshes can be used with this module for collision checking.""" # Standard Library from dataclasses import dataclass @@ -33,22 +34,29 @@ @dataclass(frozen=True) class WarpMeshData: + """Data helper to use with warp for representing a mesh""" + + #: Name of the mesh. name: str + + #: Mesh ID, created by warp once mesh is loaded to device. m_id: int + + #: Vertices of the mesh. vertices: wp.array + + #: Faces of the mesh. faces: wp.array + + #: Warp mesh instance. mesh: wp.Mesh class WorldMeshCollision(WorldPrimitiveCollision): - """World Mesh Collision using Nvidia's warp library - - This currently requires passing int64 array from torch to warp which is only - available when compiled from source. - """ + """World Mesh Collision using Nvidia's warp library.""" def __init__(self, config: WorldCollisionConfig): - # WorldCollision.(self) + """Initialize World Mesh Collision with given configuration.""" init_warp() self.tensor_args = config.tensor_args @@ -62,6 +70,7 @@ def __init__(self, config: WorldCollisionConfig): super().__init__(config) def _init_cache(self): + """Initialize cache for storing meshes.""" if ( self.cache is not None and "mesh" in self.cache @@ -78,6 +87,17 @@ def load_collision_model( load_obb_obs: bool = True, fix_cache_reference: bool = False, ): + """Load world obstacles into collision checker. + + Args: + world_model: Obstacles to load. + env_idx: Environment index to load obstacles into. + load_obb_obs: Load OBB (cuboid) obstacles. + fix_cache_reference: If True, throws error if number of obstacles is greater than + cache. If False, creates a larger cache. Note that when using collision checker + inside a recorded cuda graph, recreating the cache will break the graph as the + reference pointer to the cache will change. + """ max_nmesh = len(world_model.mesh) if max_nmesh > 0: if self._mesh_tensor_list is None or self._mesh_tensor_list[0].shape[1] < max_nmesh: @@ -103,6 +123,11 @@ def load_collision_model( self.world_model = world_model def load_batch_collision_model(self, world_config_list: List[WorldConfig]): + """Load multiple world obstacles into collision checker across environments. + + Args: + world_config_list: List of obstacles to load. + """ max_nmesh = max([len(x.mesh) for x in world_config_list]) if self._mesh_tensor_list is None or self._mesh_tensor_list[0].shape[1] < max_nmesh: log_warn("Creating new Mesh cache: " + str(max_nmesh)) @@ -112,7 +137,15 @@ def load_batch_collision_model(self, world_config_list: List[WorldConfig]): self.load_collision_model(world_model, env_idx=env_idx, load_obb_obs=False) super().load_batch_collision_model(world_config_list) - def _load_mesh_to_warp(self, mesh: Mesh): + def _load_mesh_to_warp(self, mesh: Mesh) -> WarpMeshData: + """Load cuRobo mesh into warp. + + Args: + mesh: mesh instance to load. + + Returns: + loaded mesh data. + """ verts, faces = mesh.get_mesh_data() v = wp.array(verts, dtype=wp.vec3, device=self._wp_device) f = wp.array(np.ravel(faces), dtype=int, device=self._wp_device) @@ -120,7 +153,14 @@ def _load_mesh_to_warp(self, mesh: Mesh): return WarpMeshData(mesh.name, new_mesh.id, v, f, new_mesh) def _load_mesh_into_cache(self, mesh: Mesh) -> WarpMeshData: - # + """Load cuRobo mesh into cache. + + Args: + mesh: mesh instance to load. + + Returns: + loaded mesh data. 
+ """ if mesh.name not in self._wp_mesh_cache: # load mesh into cache: self._wp_mesh_cache[mesh.name] = self._load_mesh_to_warp(mesh) @@ -129,7 +169,15 @@ def _load_mesh_into_cache(self, mesh: Mesh) -> WarpMeshData: log_warn("Object already in warp cache, using existing instance for: " + mesh.name) return self._wp_mesh_cache[mesh.name] - def _load_batch_mesh_to_warp(self, mesh_list: List[Mesh]): + def _load_batch_mesh_to_warp(self, mesh_list: List[Mesh]) -> List: + """Load multiple meshes into warp. + + Args: + mesh_list: List of meshes to load. + + Returns: + List of mesh names, mesh ids, and inverse poses. + """ # First load all verts and faces: name_list = [] pose_list = [] @@ -145,6 +193,12 @@ def _load_batch_mesh_to_warp(self, mesh_list: List[Mesh]): return name_list, id_list, inv_pose_buffer.get_pose_vector() def add_mesh(self, new_mesh: Mesh, env_idx: int = 0): + """Add a mesh to the world. + + Args: + new_mesh: Mesh to add. + env_idx: Environment index to add mesh to. + """ if self._env_n_mesh[env_idx] >= self._mesh_tensor_list[0].shape[1]: log_error( "Cannot add new mesh as we are at mesh cache limit, increase cache limit in WorldMeshCollision" @@ -169,11 +223,32 @@ def get_mesh_idx( name: str, env_idx: int = 0, ) -> int: + """Get index of mesh with given name. + + Args: + name: Name of the mesh. + env_idx: Environment index to search in. + + Returns: + Mesh index. + """ if name not in self._env_mesh_names[env_idx]: log_error("Obstacle with name: " + name + " not found in current world", exc_info=True) return self._env_mesh_names[env_idx].index(name) - def create_collision_cache(self, mesh_cache=None, obb_cache=None, n_envs=None): + def create_collision_cache( + self, + mesh_cache: Optional[int] = None, + obb_cache: Optional[int] = None, + n_envs: Optional[int] = None, + ): + """Create cache of obstacles. + + Args: + mesh_cache: Number of mesh obstacles to cache. + obb_cache: Number of OBB (cuboid) obstacles to cache. + n_envs: Number of environments to cache. + """ if n_envs is not None: self.n_envs = n_envs if mesh_cache is not None: @@ -181,7 +256,12 @@ def create_collision_cache(self, mesh_cache=None, obb_cache=None, n_envs=None): if obb_cache is not None: self._create_obb_cache(obb_cache) - def _create_mesh_cache(self, mesh_cache): + def _create_mesh_cache(self, mesh_cache: int): + """Create cache for mesh obstacles. + + Args: + mesh_cache: Number of mesh obstacles to cache. + """ # create cache to store meshes, mesh poses and inverse poses self._env_n_mesh = torch.zeros( @@ -199,9 +279,7 @@ def _create_mesh_cache(self, mesh_cache): obs_ids = torch.zeros( (self.n_envs, mesh_cache), device=self.tensor_args.device, dtype=torch.int64 ) - # v_empty = [[None for _ in range(mesh_cache)] for _ in range(self.n_envs)] - # @f_empty = [[None for _ in range(mesh_cache)] for _ in range(self.n_envs)] - # wp_m_empty = [[None for _ in range(mesh_cache)] for _ in range(self.n_envs)] + # warp requires uint64 for mesh indices, supports conversion from int64 to uint64 self._mesh_tensor_list = [ obs_ids, @@ -221,6 +299,15 @@ def update_mesh_pose( env_obj_idx: Optional[torch.Tensor] = None, env_idx: int = 0, ): + """Update pose of mesh. + + Args: + w_obj_pose: Pose of the mesh in world frame. + obj_w_pose: Inverse pose of the mesh. + name: Name of mesh to update. + env_obj_idx: Index of mesh in environment. If name is given, this is ignored. + env_idx: Environment index to update mesh in. 
+        """
         w_inv_pose = self._get_obstacle_poses(w_obj_pose, obj_w_pose)
 
         if name is not None:
@@ -229,53 +316,7 @@ def update_mesh_pose(
         elif env_obj_idx is not None:
             self._mesh_tensor_list[1][env_idx, env_obj_idx, :7] = w_inv_pose.get_pose_vector()
         else:
-            raise ValueError("name or env_obj_idx needs to be given to update mesh pose")
-
-    def update_all_mesh_pose(
-        self,
-        w_obj_pose: Optional[Pose] = None,
-        obj_w_pose: Optional[Pose] = None,
-        name: Optional[List[str]] = None,
-        env_obj_idx: Optional[torch.Tensor] = None,
-        env_idx: int = 0,
-    ):
-        """Update poses for a list of meshes in the same environment
-
-        Args:
-            w_obj_pose (Optional[Pose], optional): _description_. Defaults to None.
-            obj_w_pose (Optional[Pose], optional): _description_. Defaults to None.
-            name (Optional[List[str]], optional): _description_. Defaults to None.
-            env_obj_idx (Optional[torch.Tensor], optional): _description_. Defaults to None.
-            env_idx (int, optional): _description_. Defaults to 0.
-        """
-        w_inv_pose = self._get_obstacle_poses(w_obj_pose, obj_w_pose)
-        raise NotImplementedError
-
-    def update_mesh_pose_env(
-        self,
-        w_obj_pose: Optional[Pose] = None,
-        obj_w_pose: Optional[Pose] = None,
-        name: Optional[str] = None,
-        env_obj_idx: Optional[torch.Tensor] = None,
-        env_idx: List[int] = [0],
-    ):
-        """Update pose of a single object in a list of environments
-
-        Args:
-            w_obj_pose (Optional[Pose], optional): _description_. Defaults to None.
-            obj_w_pose (Optional[Pose], optional): _description_. Defaults to None.
-            name (Optional[List[str]], optional): _description_. Defaults to None.
-            env_obj_idx (Optional[torch.Tensor], optional): _description_. Defaults to None.
-            env_idx (List[int], optional): _description_. Defaults to [0].
-        """
-        w_inv_pose = self._get_obstacle_poses(w_obj_pose, obj_w_pose)
-        # collect index of mesh across environments:
-        # index_tensor = torch.zeros((1, len(env_idx)), dtype=torch.long, device=self.tensor_args.device)
-
-        # for i, e in enumerate[env_idx]:
-        #    index_tensor[0,i] = self.get_mesh_idx(name, e)
-        raise NotImplementedError
-        # self._mesh_tensor_list[1][env_idx, obj_idx]
+            log_error("name or env_obj_idx needs to be given to update mesh pose")
 
     def update_mesh_from_warp(
         self,
@@ -286,6 +327,16 @@ def update_mesh_from_warp(
         env_idx: int = 0,
         name: Optional[str] = None,
     ):
+        """Update mesh information in world cache.
+
+        Args:
+            warp_mesh_idx: New warp mesh index.
+            w_obj_pose: Pose of the mesh in world frame.
+            obj_w_pose: Inverse pose of the mesh. Not required if w_obj_pose is given.
+            obj_idx: Index of mesh in environment. If name is given, this is ignored.
+            env_idx: Environment index to update mesh in.
+            name: Name of mesh to update.
+        """
         if name is not None:
             obj_idx = self.get_mesh_idx(name, env_idx)
 
@@ -305,13 +356,28 @@ def update_obstacle_pose(
         name: str,
         w_obj_pose: Pose,
         env_idx: int = 0,
+        update_cpu_reference: bool = False,
     ):
+        """Update pose of obstacle.
+
+        Args:
+            name: Name of the obstacle.
+            w_obj_pose: Pose of obstacle in world frame.
+            env_idx: Environment index to update obstacle in.
+            update_cpu_reference: If True, updates the CPU reference with the new pose. This is
+                useful for debugging and visualization. Only supported for env_idx=0.
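+
+        Illustrative example (assumes ``world_coll`` is an initialized WorldMeshCollision and
+        "table" is an obstacle already loaded in environment 0)::
+
+            from curobo.types.math import Pose
+
+            # position x, y, z followed by quaternion w, x, y, z:
+            new_pose = Pose.from_list([0.5, 0.0, 0.2, 1.0, 0.0, 0.0, 0.0])
+            world_coll.update_obstacle_pose("table", new_pose, update_cpu_reference=True)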
+ """ if self._env_mesh_names is not None and name in self._env_mesh_names[env_idx]: self.update_mesh_pose(name=name, w_obj_pose=w_obj_pose, env_idx=env_idx) - elif self._env_obbs_names is not None and name in self._env_obbs_names[env_idx]: - self.update_obb_pose(name=name, w_obj_pose=w_obj_pose, env_idx=env_idx) + if update_cpu_reference: + self.update_obstacle_pose_in_world_model(name, w_obj_pose, env_idx) else: - log_error("obstacle not found in OBB world model: " + name) + super().update_obstacle_pose( + name=name, + w_obj_pose=w_obj_pose, + env_idx=env_idx, + update_cpu_reference=update_cpu_reference, + ) def enable_obstacle( self, @@ -319,6 +385,13 @@ def enable_obstacle( enable: bool = True, env_idx: int = 0, ): + """Enable/Disable object in collision checking functions. + + Args: + name: Name of the obstacle to enable. + enable: True to enable, False to disable. + env_idx: Index of the environment to enable the obstacle in. + """ if self._env_mesh_names is not None and name in self._env_mesh_names[env_idx]: self.enable_mesh(enable, name, None, env_idx) elif self._env_obbs_names is not None and name in self._env_obbs_names[env_idx]: @@ -327,7 +400,15 @@ def enable_obstacle( log_error("Obstacle not found in world model: " + name) self.world_model.objects - def get_obstacle_names(self, env_idx: int = 0): + def get_obstacle_names(self, env_idx: int = 0) -> List[str]: + """Get names of all obstacles in the environment. + + Args: + env_idx: Environment index to get obstacles from. + + Returns: + List of obstacle names. + """ base_obstacles = super().get_obstacle_names(env_idx) return self._env_mesh_names[env_idx] + base_obstacles @@ -338,12 +419,13 @@ def enable_mesh( env_mesh_idx: Optional[torch.Tensor] = None, env_idx: int = 0, ): - """Update obstacle dimensions + """Enable/Disable mesh in collision checking functions. Args: - obj_dims (torch.Tensor): [dim.x,dim.y, dim.z], give as [b,3] - obj_idx (torch.Tensor or int): - + enable: True to enable, False to disable. + name: Name of the mesh to enable. + env_mesh_idx: Index of the mesh in environment. If name is given, this is ignored. + env_idx: Environment index to enable the mesh in. 
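+
+        Illustrative example (assumes ``world_coll`` is an initialized WorldMeshCollision
+        containing a mesh named "object_mesh" in environment 0)::
+
+            world_coll.enable_mesh(enable=False, name="object_mesh")  # ignore this mesh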
""" if env_mesh_idx is not None: self._mesh_tensor_list[2][env_mesh_idx] = int(enable) # enable == 1 @@ -352,66 +434,6 @@ def enable_mesh( obs_idx = self.get_mesh_idx(name, env_idx) self._mesh_tensor_list[2][env_idx, obs_idx] = int(enable) - def _get_sdf( - self, - query_spheres, - collision_query_buffer: CollisionQueryBuffer, - weight: torch.Tensor, - activation_distance: torch.Tensor, - env_query_idx=None, - return_loss=False, - compute_esdf=False, - ): - d = SdfMeshWarpPy.apply( - query_spheres, - collision_query_buffer.mesh_collision_buffer.distance_buffer, - collision_query_buffer.mesh_collision_buffer.grad_distance_buffer, - collision_query_buffer.mesh_collision_buffer.sparsity_index_buffer, - weight, - activation_distance, - self._mesh_tensor_list[0], - self._mesh_tensor_list[1], - self._mesh_tensor_list[2], - self._env_n_mesh, - self.max_distance, - env_query_idx, - return_loss, - compute_esdf, - ) - return d - - def _get_swept_sdf( - self, - query_spheres, - collision_query_buffer: CollisionQueryBuffer, - weight: torch.Tensor, - activation_distance: torch.Tensor, - speed_dt: torch.Tensor, - sweep_steps: int, - enable_speed_metric=False, - env_query_idx=None, - return_loss: bool = False, - ): - d = SweptSdfMeshWarpPy.apply( - query_spheres, - collision_query_buffer.mesh_collision_buffer.distance_buffer, - collision_query_buffer.mesh_collision_buffer.grad_distance_buffer, - collision_query_buffer.mesh_collision_buffer.sparsity_index_buffer, - weight, - activation_distance, - speed_dt, - self._mesh_tensor_list[0], - self._mesh_tensor_list[1], - self._mesh_tensor_list[2], - self._env_n_mesh, - self.max_distance, - sweep_steps, - enable_speed_metric, - env_query_idx, - return_loss, - ) - return d - def get_sphere_distance( self, query_sphere: torch.Tensor, @@ -423,7 +445,30 @@ def get_sphere_distance( sum_collisions: bool = True, compute_esdf: bool = False, ): - # TODO: if no mesh object exist, call primitive + """Compute the signed distance between query spheres and world obstacles. + + This distance can be used as a collision cost for optimization. + + Args: + query_sphere: Input tensor with query spheres [batch, horizon, number of spheres, 4]. + With [x, y, z, radius] as the last column for each sphere. + collision_query_buffer: Buffer to store collision query results. + weight: Weight of the collision cost. + activation_distance: Distance outside the object to start computing the cost. + env_query_idx: Environment index for each batch of query spheres. + return_loss: If the returned tensor will be scaled or changed before calling backward, + set this to True. If the returned tensor will be used directly through addition, + set this to False. + sum_collisions: Sum the collision cost across all obstacles. This variable is currently + not passed to the underlying CUDA kernel as setting this to False caused poor + performance. + compute_esdf: Compute Euclidean signed distance instead of collision cost. When True, + the returned tensor will be the signed distance with positive values inside an + obstacle and negative values outside obstacles. + + Returns: + Signed distance between query spheres and world obstacles. + """ if "mesh" not in self.collision_types or not self.collision_types["mesh"]: return super().get_sphere_distance( query_sphere, @@ -474,6 +519,20 @@ def get_sphere_collision( return_loss=False, **kwargs, ): + """Compute binary collision between query spheres and world obstacles. 
+ + Args: + query_sphere: Input tensor with query spheres [batch, horizon, number of spheres, 4]. + With [x, y, z, radius] as the last column for each sphere. + collision_query_buffer: Collision query buffer to store the results. + weight: Weight to scale the collision cost. + activation_distance: Distance outside the object to start computing the cost. + env_query_idx: Environment index for each batch of query spheres. + return_loss: True is not supported for binary classification. Set to False. + + Returns: + Tensor with binary collision results. + """ if "mesh" not in self.collision_types or not self.collision_types["mesh"]: return super().get_sphere_collision( query_sphere, @@ -521,6 +580,34 @@ def get_swept_sphere_distance( return_loss: bool = False, sum_collisions: bool = True, ): + """Compute the signed distance between trajectory of spheres and world obstacles. + + Args: + query_sphere: Input tensor with query spheres [batch, horizon, number of spheres, 4]. + With [x, y, z, radius] as the last column for each sphere. + collision_query_buffer: Collision query buffer to store the results. + weight: Collision cost weight. + activation_distance: Distance outside the object to start computing the cost. A smooth + scaling is applied to the cost starting from this distance. See + :ref:`research_page` for more details. + speed_dt: Length of time (seconds) to use when calculating the speed of the sphere + using finite difference. + sweep_steps: Number of steps to sweep the sphere along the trajectory. More steps will + allow for catching small obstacles, taking more time to compute. + enable_speed_metric: True will scale the collision cost by the speed of the sphere. + This has the effect of slowing down the robot when near obstacles. This also has + shown to improve convergence from poor initialization. + env_query_idx: Environment index for each batch of query spheres. + return_loss: If the returned tensor will be scaled or changed before calling backward, + set this to True. If the returned tensor will be used directly through addition, + set this to False. + sum_collisions: Sum the collision cost across all obstacles. This variable is currently + not passed to the underlying CUDA kernel as setting this to False caused poor + performance. + + Returns: + Collision cost between trajectory of spheres and world obstacles. + """ # log_warn("Swept: Mesh + Primitive Collision Checking is experimental") if "mesh" not in self.collision_types or not self.collision_types["mesh"]: return super().get_swept_sphere_distance( @@ -578,6 +665,29 @@ def get_swept_sphere_collision( env_query_idx: Optional[torch.Tensor] = None, return_loss: bool = False, ): + """Get binary collision between trajectory of spheres and world obstacles. + + Args: + query_sphere: Input tensor with query spheres [batch, horizon, number of spheres, 4]. + With [x, y, z, radius] as the last column for each sphere. + collision_query_buffer: Collision query buffer to store the results. + weight: Collision cost weight. + activation_distance: Distance outside the object to start computing the cost. A smooth + scaling is applied to the cost starting from this distance. See + :ref:`research_page` for more details. + speed_dt: Length of time (seconds) to use when calculating the speed of the sphere + using finite difference. This is not used. + sweep_steps: Number of steps to sweep the sphere along the trajectory. More steps will + allow for catching small obstacles, taking more time to compute. 
+ enable_speed_metric: True will scale the collision cost by the speed of the sphere. + This has the effect of slowing down the robot when near obstacles. This also has + shown to improve convergence from poor initialization. This is not used. + env_query_idx: Environment index for each batch of query spheres. + return_loss: This is not supported for binary classification. Set to False. + + Returns: + Collision value between trajectory of spheres and world obstacles. + """ if "mesh" not in self.collision_types or not self.collision_types["mesh"]: return super().get_swept_sphere_collision( query_sphere, @@ -620,6 +730,7 @@ def get_swept_sphere_collision( return d_val def clear_cache(self): + """Delete all cuboid and mesh obstacles from the world.""" self._wp_mesh_cache = {} if self._mesh_tensor_list is not None: self._mesh_tensor_list[2][:] = 0 @@ -631,3 +742,107 @@ def clear_cache(self): ] super().clear_cache() + + def _get_sdf( + self, + query_spheres: torch.Tensor, + collision_query_buffer: CollisionQueryBuffer, + weight: torch.Tensor, + activation_distance: torch.Tensor, + env_query_idx: Optional[torch.Tensor] = None, + return_loss: bool = False, + compute_esdf: bool = False, + ) -> torch.Tensor: + """Compute signed distance for mesh obstacles using warp kernel. + + Args: + query_spheres: Input tensor with query spheres [batch, horizon, number of spheres, 4]. + With [x, y, z, radius] as the last column for each sphere. + collision_query_buffer: Collision query buffer to store the results. + weight: Weight to scale the collision cost. + activation_distance: Distance outside the object to start computing the cost. + env_query_idx: Environment index for each batch of query spheres. + return_loss: If the returned tensor will be scaled or changed before calling backward, + set this to True. If the returned tensor will be used directly through addition, + set this to False. + compute_esdf: Compute Euclidean signed distance instead of collision cost. When True, + the returned tensor will be the signed distance with positive values inside an + obstacle and negative values outside obstacles. + + Returns: + Collision cost between query spheres and world obstacles. + """ + d = SdfMeshWarpPy.apply( + query_spheres, + collision_query_buffer.mesh_collision_buffer.distance_buffer, + collision_query_buffer.mesh_collision_buffer.grad_distance_buffer, + collision_query_buffer.mesh_collision_buffer.sparsity_index_buffer, + weight, + activation_distance, + self._mesh_tensor_list[0], + self._mesh_tensor_list[1], + self._mesh_tensor_list[2], + self._env_n_mesh, + self.max_distance, + env_query_idx, + return_loss, + compute_esdf, + ) + return d + + def _get_swept_sdf( + self, + query_spheres: torch.Tensor, + collision_query_buffer: CollisionQueryBuffer, + weight: torch.Tensor, + activation_distance: torch.Tensor, + speed_dt: torch.Tensor, + sweep_steps: int, + enable_speed_metric=False, + env_query_idx=None, + return_loss: bool = False, + ) -> torch.Tensor: + """Compute signed distance for mesh obstacles using warp kernel. + + Args: + query_sphere: Input tensor with query spheres [batch, horizon, number of spheres, 4]. + With [x, y, z, radius] as the last column for each sphere. + collision_query_buffer: Collision query buffer to store the results. + weight: Collision cost weight. + activation_distance: Distance outside the object to start computing the cost. A smooth + scaling is applied to the cost starting from this distance. See + :ref:`research_page` for more details. 
+ speed_dt: Length of time (seconds) to use when calculating the speed of the sphere + using finite difference. + sweep_steps: Number of steps to sweep the sphere along the trajectory. More steps will + allow for catching small obstacles, taking more time to compute. + enable_speed_metric: True will scale the collision cost by the speed of the sphere. + This has the effect of slowing down the robot when near obstacles. This also has + shown to improve convergence from poor initialization. + env_query_idx: Environment index for each batch of query spheres. + return_loss: If the returned tensor will be scaled or changed before calling backward, + set this to True. If the returned tensor will be used directly through addition, + set this to False. + + Returns: + Collision cost between trajectory of spheres and world obstacles. + """ + d = SweptSdfMeshWarpPy.apply( + query_spheres, + collision_query_buffer.mesh_collision_buffer.distance_buffer, + collision_query_buffer.mesh_collision_buffer.grad_distance_buffer, + collision_query_buffer.mesh_collision_buffer.sparsity_index_buffer, + weight, + activation_distance, + speed_dt, + self._mesh_tensor_list[0], + self._mesh_tensor_list[1], + self._mesh_tensor_list[2], + self._env_n_mesh, + self.max_distance, + sweep_steps, + enable_speed_metric, + env_query_idx, + return_loss, + ) + return d diff --git a/src/curobo/geom/sdf/world_voxel.py b/src/curobo/geom/sdf/world_voxel.py index 973d0eb3..11d55545 100644 --- a/src/curobo/geom/sdf/world_voxel.py +++ b/src/curobo/geom/sdf/world_voxel.py @@ -8,6 +8,8 @@ # without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. # +"""World represented by euclidean signed distance grids.""" + # Standard Library import math from typing import Any, Dict, List, Optional @@ -29,6 +31,7 @@ class WorldVoxelCollision(WorldMeshCollision): """Voxel grid representation of World, with each voxel containing Euclidean Signed Distance.""" def __init__(self, config: WorldCollisionConfig): + """Initialize with a world collision configuration.""" self._env_n_voxels = None self._voxel_tensor_list = None self._env_voxel_names = None @@ -36,6 +39,7 @@ def __init__(self, config: WorldCollisionConfig): super().__init__(config) def _init_cache(self): + """Initialize the cache for the world.""" if ( self.cache is not None and "voxel" in self.cache @@ -45,6 +49,14 @@ def _init_cache(self): return super()._init_cache() def _create_voxel_cache(self, voxel_cache: Dict[str, Any]): + """Create a cache for voxel grid representation of the world. + + Args: + voxel_cache: Parameters for the voxel grid representation. The dictionary should + contain the following keys: layers, dims, voxel_size, feature_dtype. Current + implementation assumes that all voxel grids have the same number of voxels. Though + different layers can have different resolutions, this is not yet thoroughly tested. + """ n_layers = voxel_cache["layers"] dims = voxel_cache["dims"] voxel_size = voxel_cache["voxel_size"] @@ -93,24 +105,35 @@ def _create_voxel_cache(self, voxel_cache: Dict[str, Any]): def load_collision_model( self, world_model: WorldConfig, env_idx=0, fix_cache_reference: bool = False ): + """Load collision representation from world obstacles. + + Args: + world_model: Obstacles in world to load. + env_idx: Environment index to load obstacles into. + fix_cache_reference: If True, throws error if number of obstacles is greater than + cache. If False, creates a larger cache. 
Note that when using collision checker + inside a recorded cuda graph, recreating the cache will break the graph as the + reference pointer to the cache will change. + """ self._load_voxel_collision_model_in_cache( world_model, env_idx, fix_cache_reference=fix_cache_reference ) - return super().load_collision_model( + super().load_collision_model( world_model, env_idx=env_idx, fix_cache_reference=fix_cache_reference ) def _load_voxel_collision_model_in_cache( self, world_config: WorldConfig, env_idx: int = 0, fix_cache_reference: bool = False ): - """TODO: - - _extended_summary_ + """Load voxel grid representation of the world into the cache. Args: - world_config: _description_ - env_idx: _description_ - fix_cache_reference: _description_ + world_config: Obstacles in world to load. + env_idx: Environment index to load obstacles into. + fix_cache_reference: If True, throws error if number of obstacles is greater than + cache. If False, creates a larger cache. Note that when using collision checker + inside a recorded cuda graph, recreating the cache will break the graph as the + reference pointer to the cache will change. """ voxel_objs = world_config.voxel max_obs = len(voxel_objs) @@ -153,7 +176,17 @@ def _load_voxel_collision_model_in_cache( def _batch_tensor_voxel( self, pose: List[List[float]], dims: List[float], voxel_size: List[float] - ): + ) -> List[torch.Tensor]: + """Create a list of tensors that represent the voxel parameters. + + Args: + pose: Pose of voxel grids. + dims: Dimensions of voxel grids. + voxel_size: Resolution of voxel grids. + + Returns: + List of tensors representing the voxel parameters. + """ w_T_b = Pose.from_batch_list(pose, tensor_args=self.tensor_args) b_T_w = w_T_b.inverse() dims_t = torch.as_tensor( @@ -170,13 +203,8 @@ def _batch_tensor_voxel( def load_batch_collision_model(self, world_config_list: List[WorldConfig]): """Load voxel grid for batched environments - _extended_summary_ - Args: - world_config_list: _description_ - - Returns: - _description_ + world_config_list: List of world obstacles for each environment. """ log_error("Not Implemented") # First find largest number of cuboid: @@ -244,7 +272,7 @@ def load_batch_collision_model(self, world_config_list: List[WorldConfig]): ) self.collision_types["voxel"] = True - return super().load_batch_collision_model(world_config_list) + super().load_batch_collision_model(world_config_list) def enable_obstacle( self, @@ -252,12 +280,27 @@ def enable_obstacle( enable: bool = True, env_idx: int = 0, ): + """Enable/Disable object in collision checking functions. + + Args: + name: Name of the obstacle to enable. + enable: True to enable, False to disable. + env_idx: Index of the environment to enable the obstacle in. + """ if self._env_voxel_names is not None and name in self._env_voxel_names[env_idx]: self.enable_voxel(enable, name, None, env_idx) else: return super().enable_obstacle(name, enable, env_idx) - def get_obstacle_names(self, env_idx: int = 0): + def get_obstacle_names(self, env_idx: int = 0) -> List[str]: + """Get names of all obstacles in the environment. + + Args: + env_idx: Environment index to get obstacles from. + + Returns: + List of obstacle names. + """ base_obstacles = super().get_obstacle_names(env_idx) return self._env_voxel_names[env_idx] + base_obstacles @@ -268,12 +311,13 @@ def enable_voxel( env_obj_idx: Optional[torch.Tensor] = None, env_idx: int = 0, ): - """Update obstacle dimensions + """Enable/Disable voxel grid in collision checking functions. 
         Args:
-            obj_dims (torch.Tensor): [dim.x,dim.y, dim.z], give as [b,3]
-            obj_idx (torch.Tensor or int):
-
+            enable: True to enable, False to disable.
+            name: Name of voxel grid to enable.
+            env_obj_idx: Index of voxel grid. If name is provided, this is ignored.
+            env_idx: Environment index to enable the voxel grid in.
         """
         if env_obj_idx is not None:
             self._voxel_tensor_list[2][env_obj_idx] = int(enable)  # enable == 1
@@ -288,13 +332,31 @@ def update_obstacle_pose(
         name: str,
         w_obj_pose: Pose,
         env_idx: int = 0,
+        update_cpu_reference: bool = False,
     ):
+        """Update pose of obstacle.
+
+        Args:
+            name: Name of the obstacle.
+            w_obj_pose: Pose of obstacle in world frame.
+            env_idx: Environment index to update obstacle in.
+            update_cpu_reference: If True, updates the CPU reference with the new pose. This is
+                useful for debugging and visualization. Only supported for env_idx=0.
+        """
         if self._env_voxel_names is not None and name in self._env_voxel_names[env_idx]:
             self.update_voxel_pose(name=name, w_obj_pose=w_obj_pose, env_idx=env_idx)
+            if update_cpu_reference:
+                self.update_obstacle_pose_in_world_model(name, w_obj_pose, env_idx)
         else:
-            log_error("obstacle not found in OBB world model: " + name)
+            super().update_obstacle_pose(name, w_obj_pose, env_idx, update_cpu_reference)
 
     def update_voxel_data(self, new_voxel: VoxelGrid, env_idx: int = 0):
+        """Update parameters of a voxel grid. This also updates signed distance values.
+
+        Args:
+            new_voxel: New voxel grid parameters and feature values.
+            env_idx: Environment index to update voxel grid in.
+        """
         obs_idx = self.get_voxel_idx(new_voxel.name, env_idx)
         self._voxel_tensor_list[3][env_idx, obs_idx, :, :] = new_voxel.feature_tensor.view(
            new_voxel.feature_tensor.shape[0], -1
@@ -315,12 +377,15 @@ def update_voxel_features(
         env_obj_idx: Optional[torch.Tensor] = None,
         env_idx: int = 0,
     ):
-        """Update pose of a specific objects.
-        This also updates the signed distance grid to account for the updated object pose.
+        """Update signed distance values in a voxel grid.
+
         Args:
-            obj_w_pose: Pose
-            obj_idx:
+            features: New signed distance values.
+            name: Name of voxel grid obstacle.
+            env_obj_idx: Index of voxel grid. If name is provided, this is ignored.
+            env_idx: Environment index to update voxel grid in.
         """
+
         if env_obj_idx is not None:
             self._voxel_tensor_list[3][env_obj_idx, :] = features.to(
                 dtype=self._voxel_tensor_list[3].dtype
@@ -339,11 +404,14 @@ def update_voxel_pose(
         env_obj_idx: Optional[torch.Tensor] = None,
         env_idx: int = 0,
     ):
-        """Update pose of a specific objects.
-        This also updates the signed distance grid to account for the updated object pose.
+        """Update pose of voxel grid.
+
         Args:
-            obj_w_pose: Pose
-            obj_idx:
+            w_obj_pose: Pose of voxel grid in world frame.
+            obj_w_pose: Inverse pose of voxel grid. If provided, w_obj_pose is ignored.
+            name: Name of the voxel grid.
+            env_obj_idx: Index of voxel grid. If name is provided, this is ignored.
+            env_idx: Environment index to update voxel grid in.
         """
         obj_w_pose = self._get_obstacle_poses(w_obj_pose, obj_w_pose)
         if env_obj_idx is not None:
@@ -357,6 +425,15 @@ def get_voxel_idx(
         name: str,
         env_idx: int = 0,
     ) -> int:
+        """Get index of voxel grid in the environment.
+
+        Args:
+            name: Name of the voxel grid.
+            env_idx: Environment index to get voxel grid from.
+
+        Returns:
+            Index of voxel grid.
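+
+        Illustrative example (assumes ``world_coll`` is an initialized WorldVoxelCollision
+        with a voxel grid named "esdf_grid" loaded in environment 0)::
+
+            idx = world_coll.get_voxel_idx("esdf_grid")      # position in the voxel cache
+            shape = world_coll.get_voxel_grid_shape(0, idx)  # feature tensor shape of that grid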
+ """ if name not in self._env_voxel_names[env_idx]: log_error("Obstacle with name: " + name + " not found in current world", exc_info=True) return self._env_voxel_names[env_idx].index(name) @@ -365,7 +442,16 @@ def get_voxel_grid( self, name: str, env_idx: int = 0, - ): + ) -> VoxelGrid: + """Get voxel grid from world obstacles. + + Args: + name: Name of voxel grid. + env_idx: Environment index to get voxel grid from. + + Returns: + Voxel grid object. + """ obs_idx = self.get_voxel_idx(name, env_idx) voxel_params = np.round( self._voxel_tensor_list[0][env_idx, obs_idx, :].cpu().numpy().astype(np.float64), 6 @@ -394,7 +480,31 @@ def get_sphere_distance( return_loss=False, sum_collisions: bool = True, compute_esdf: bool = False, - ): + ) -> torch.Tensor: + """Compute the signed distance between query spheres and world obstacles. + + This distance can be used as a collision cost for optimization. + + Args: + query_sphere: Input tensor with query spheres [batch, horizon, number of spheres, 4]. + With [x, y, z, radius] as the last column for each sphere. + collision_query_buffer: Buffer to store collision query results. + weight: Weight of the collision cost. + activation_distance: Distance outside the object to start computing the cost. + env_query_idx: Environment index for each batch of query spheres. + return_loss: If the returned tensor will be scaled or changed before calling backward, + set this to True. If the returned tensor will be used directly through addition, + set this to False. + sum_collisions: Sum the collision cost across all obstacles. This variable is currently + not passed to the underlying CUDA kernel as setting this to False caused poor + performance. + compute_esdf: Compute Euclidean signed distance instead of collision cost. When True, + the returned tensor will be the signed distance with positive values inside an + obstacle and negative values outside obstacles. + + Returns: + Signed distance between query spheres and world obstacles. + """ if "voxel" not in self.collision_types or not self.collision_types["voxel"]: return super().get_sphere_distance( query_sphere, @@ -469,7 +579,21 @@ def get_sphere_collision( env_query_idx: Optional[torch.Tensor] = None, return_loss=False, **kwargs, - ): + ) -> torch.Tensor: + """Compute binary collision between query spheres and world obstacles. + + Args: + query_sphere: Input tensor with query spheres [batch, horizon, number of spheres, 4]. + With [x, y, z, radius] as the last column for each sphere. + collision_query_buffer: Collision query buffer to store the results. + weight: Weight to scale the collision cost. + activation_distance: Distance outside the object to start computing the cost. + env_query_idx: Environment index for each batch of query spheres. + return_loss: True is not supported for binary classification. Set to False. + + Returns: + Tensor with binary collision results. 
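+
+        Illustrative example (assumes ``world_coll`` is an initialized WorldVoxelCollision,
+        ``tensor_args`` is its TensorDeviceType, and that the query buffer helper
+        CollisionQueryBuffer.initialize_from_shape is available as used elsewhere in this
+        package; treat the exact buffer construction as an assumption)::
+
+            import torch
+
+            x_sph = torch.zeros((1, 1, 1, 4), device=tensor_args.device, dtype=tensor_args.dtype)
+            x_sph[..., 3] = 0.05  # one query sphere of 5 cm radius at the origin
+            buffer = CollisionQueryBuffer.initialize_from_shape(
+                x_sph.shape, tensor_args, world_coll.collision_types
+            )
+            weight = tensor_args.to_device([1.0])
+            act_distance = tensor_args.to_device([0.0])
+            d = world_coll.get_sphere_collision(x_sph, buffer, weight, act_distance)
+            in_collision = d > 0.0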
+ """ if "voxel" not in self.collision_types or not self.collision_types["voxel"]: return super().get_sphere_collision( query_sphere, @@ -481,7 +605,7 @@ def get_sphere_collision( ) if return_loss: - raise ValueError("cannot return loss for classification, use get_sphere_distance") + log_error("cannot return loss for classification, use get_sphere_distance") b, h, n, _ = query_sphere.shape use_batch_env = True env_query_idx_voxel = env_query_idx @@ -541,11 +665,34 @@ def get_swept_sphere_distance( env_query_idx: Optional[torch.Tensor] = None, return_loss=False, sum_collisions: bool = True, - ): - """ - Computes the signed distance via analytic function + ) -> torch.Tensor: + """Compute the signed distance between trajectory of spheres and world obstacles. + Args: - tensor_sphere: b, n, 4 + query_sphere: Input tensor with query spheres [batch, horizon, number of spheres, 4]. + With [x, y, z, radius] as the last column for each sphere. + collision_query_buffer: Collision query buffer to store the results. + weight: Collision cost weight. + activation_distance: Distance outside the object to start computing the cost. A smooth + scaling is applied to the cost starting from this distance. See + :ref:`research_page` for more details. + speed_dt: Length of time (seconds) to use when calculating the speed of the sphere + using finite difference. + sweep_steps: Number of steps to sweep the sphere along the trajectory. More steps will + allow for catching small obstacles, taking more time to compute. + enable_speed_metric: True will scale the collision cost by the speed of the sphere. + This has the effect of slowing down the robot when near obstacles. This also has + shown to improve convergence from poor initialization. + env_query_idx: Environment index for each batch of query spheres. + return_loss: If the returned tensor will be scaled or changed before calling backward, + set this to True. If the returned tensor will be used directly through addition, + set this to False. + sum_collisions: Sum the collision cost across all obstacles. This variable is currently + not passed to the underlying CUDA kernel as setting this to False caused poor + performance. + + Returns: + Collision cost between trajectory of spheres and world obstacles. """ if "voxel" not in self.collision_types or not self.collision_types["voxel"]: return super().get_swept_sphere_distance( @@ -625,11 +772,29 @@ def get_swept_sphere_collision( enable_speed_metric=False, env_query_idx: Optional[torch.Tensor] = None, return_loss=False, - ): - """ - Computes the signed distance via analytic function + ) -> torch.Tensor: + """Get binary collision between trajectory of spheres and world obstacles. + Args: - tensor_sphere: b, n, 4 + query_sphere: Input tensor with query spheres [batch, horizon, number of spheres, 4]. + With [x, y, z, radius] as the last column for each sphere. + collision_query_buffer: Collision query buffer to store the results. + weight: Collision cost weight. + activation_distance: Distance outside the object to start computing the cost. A smooth + scaling is applied to the cost starting from this distance. See + :ref:`research_page` for more details. + speed_dt: Length of time (seconds) to use when calculating the speed of the sphere + using finite difference. This is not used. + sweep_steps: Number of steps to sweep the sphere along the trajectory. More steps will + allow for catching small obstacles, taking more time to compute. + enable_speed_metric: True will scale the collision cost by the speed of the sphere. 
+ This has the effect of slowing down the robot when near obstacles. This also has + shown to improve convergence from poor initialization. This is not used. + env_query_idx: Environment index for each batch of query spheres. + return_loss: This is not supported for binary classification. Set to False. + + Returns: + Collision value between trajectory of spheres and world obstacles. """ if "voxel" not in self.collision_types or not self.collision_types["voxel"]: return super().get_swept_sphere_collision( @@ -644,7 +809,7 @@ def get_swept_sphere_collision( return_loss=return_loss, ) if return_loss: - raise ValueError("cannot return loss for classify, use get_swept_sphere_distance") + log_error("cannot return loss for classify, use get_swept_sphere_distance") b, h, n, _ = query_sphere.shape use_batch_env = True @@ -698,6 +863,7 @@ def get_swept_sphere_collision( return d_val def clear_cache(self): + """Clear obstacles in world cache.""" if self._voxel_tensor_list is not None: self._voxel_tensor_list[2][:] = 0 if self._voxel_tensor_list[3].dtype in [torch.float32, torch.float16, torch.bfloat16]: @@ -710,5 +876,14 @@ def clear_cache(self): self._env_n_voxels[:] = 0 super().clear_cache() - def get_voxel_grid_shape(self, env_idx: int = 0, obs_idx: int = 0): + def get_voxel_grid_shape(self, env_idx: int = 0, obs_idx: int = 0) -> torch.Size: + """Get dimensions of the voxel grid. + + Args: + env_idx: Environment index. + obs_idx: Obstacle index. + + Returns: + Shape of the voxel grid. + """ return self._voxel_tensor_list[3][env_idx, obs_idx].shape diff --git a/src/curobo/geom/sphere_fit.py b/src/curobo/geom/sphere_fit.py index 2542aa0b..f83055f7 100644 --- a/src/curobo/geom/sphere_fit.py +++ b/src/curobo/geom/sphere_fit.py @@ -8,12 +8,14 @@ # without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. # +"""Approximate mesh geometry with spheres.""" # Standard Library from enum import Enum -from typing import List, Tuple +from typing import List, Tuple, Union # Third Party +import numpy import numpy as np import torch import trimesh @@ -42,13 +44,37 @@ class SphereFitType(Enum): VOXEL_VOLUME_SAMPLE_SURFACE = "voxel_volume_sample_surface" -def sample_even_fit_mesh(mesh: trimesh.Trimesh, n_spheres: int, sphere_radius: float): +def sample_even_fit_mesh( + mesh: trimesh.Trimesh, + n_spheres: int, + sphere_radius: float, +) -> Tuple[numpy.array, List[float]]: + """Sample even points on the surface of the mesh and return them with the given radius. + + Args: + mesh: Mesh to sample points from. + n_spheres: Number of spheres to sample. + sphere_radius: Sphere radius. + + Returns: + Tuple of points and radius. + """ + n_pts = trimesh.sample.sample_surface_even(mesh, n_spheres)[0] n_radius = [sphere_radius for _ in range(len(n_pts))] return n_pts, n_radius -def get_voxel_pitch(mesh: trimesh.Trimesh, n_cubes: int): +def get_voxel_pitch(mesh: trimesh.Trimesh, n_cubes: int) -> float: + """Get the pitch of the voxel grid based on the mesh and number of cubes. + + Args: + mesh: Mesh to get the pitch from. + n_cubes: Number of voxels to fit. + + Returns: + float: Pitch of the voxel grid. + """ d = mesh.extents cube_volume = d[0] * d[1] * d[2] v = mesh.volume @@ -68,7 +94,18 @@ def voxel_fit_surface_mesh( n_spheres: int, sphere_radius: float, voxelize_method: str = "ray", -): +) -> Tuple[numpy.array, List[float]]: + """Get voxel grid from mesh and fit spheres to the surface. + + Args: + mesh: Input mesh. + n_spheres: Number of spheres to fit. 
+ sphere_radius: Radius of the spheres. + voxelize_method: TriMesh Voxelization method. Defaults to "ray". + + Returns: + Tuple of sphere positions and sphere radius. + """ pts, rad = get_voxelgrid_from_mesh(mesh, n_spheres, voxelize_method) if pts is None: return pts, rad @@ -82,7 +119,7 @@ def voxel_fit_surface_mesh( dist = pr.signed_distance(pts) # calculate distance to boundary: - dist = np.abs(dist - rad) + dist = numpy.abs(dist - rad) # get the first n points closest to boundary: _, idx = torch.topk(torch.as_tensor(dist), k=n_spheres, largest=False) @@ -91,15 +128,28 @@ def voxel_fit_surface_mesh( return n_pts, n_radius -def get_voxelgrid_from_mesh(mesh: trimesh.Trimesh, n_spheres: int, voxelize_method: str = "ray"): - """Get voxel grid from mesh using :py:func:`trimesh.voxel.creation.voxelize`.""" +def get_voxelgrid_from_mesh( + mesh: trimesh.Trimesh, n_spheres: int, voxelize_method: str = "ray" +) -> Tuple[Union[numpy.array, None], Union[numpy.array, None]]: + """Get voxel grid from mesh using :py:func:`trimesh.voxel.creation.voxelize`. + + Args: + mesh: Input mesh. + n_spheres: Number of voxels to fit. + voxelize_method: Voxelize method. Defaults to "ray". + + Returns: + Tuple of occupied voxels and side of voxels (length of cube). Returns [None, None] if + voxelization fails. + + """ pitch = get_voxel_pitch(mesh, n_spheres) radius = pitch / 2.0 try: voxel = voxelize(mesh, pitch, voxelize_method) voxel = voxel.fill("base") pts = voxel.points - rad = np.ravel([radius for _ in range(len(pts))]) + rad = numpy.ravel([radius for _ in range(len(pts))]) except: log_warn("voxelization failed") pts = rad = None @@ -111,10 +161,25 @@ def voxel_fit_mesh( n_spheres: int, surface_sphere_radius: float, voxelize_method: str = "ray", -): +) -> Tuple[numpy.array, List[float]]: + """Voxelize mesh, fit spheres to volume and near surface. Return the fitted spheres. + + Args: + mesh: Input mesh. + n_spheres: Number of spheres to fit. + surface_sphere_radius: Radius of the spheres on the surface. This radius will be added + to points on the surface of the mesh, causing the spheres to inflate the mesh volume + by this amount. + voxelize_method: Voxelization method to use, select from + :py:func:`trimesh.voxel.creation.voxelize`. + + Returns: + Tuple of sphere positions and their radius. 
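A small, self-contained sketch of the voxelization helper documented above: voxelize a trimesh primitive and inspect the candidate sphere centers. As noted in the docstring, the helper returns (None, None) when trimesh voxelization fails, so callers should check for that.

import trimesh

from curobo.geom.sphere_fit import get_voxelgrid_from_mesh

box = trimesh.creation.box(extents=[0.1, 0.2, 0.3])
pts, rad = get_voxelgrid_from_mesh(box, n_spheres=100, voxelize_method="ray")
if pts is None:
    print("voxelization failed for this mesh")
else:
    # pts: occupied voxel centers, rad: half the voxel pitch for each center
    print(pts.shape, rad[0])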
+ """ pts, rad = get_voxelgrid_from_mesh(mesh, n_spheres, voxelize_method) if pts is None: return pts, rad + # compute signed distance: pr = trimesh.proximity.ProximityQuery(mesh) dist = pr.signed_distance(pts) @@ -132,9 +197,9 @@ def voxel_fit_mesh( inside_idx = dist >= 0.0 inside_pts = pts[inside_idx] if len(inside_pts) < n_spheres: - new_pts = np.zeros((n_spheres, 3)) + new_pts = numpy.zeros((n_spheres, 3)) new_pts[: len(inside_pts)] = inside_pts - new_radius = np.zeros(n_spheres) + new_radius = numpy.zeros(n_spheres) new_radius[: len(inside_pts)] = rad[inside_idx] new_pts[len(inside_pts) :] = surface_pts[: n_spheres - len(inside_pts)] @@ -148,34 +213,22 @@ def voxel_fit_mesh( return n_pts, n_radius -def voxel_fit_volume_sample_surface_mesh( +def voxel_fit_volume_inside_mesh( mesh: trimesh.Trimesh, n_spheres: int, - surface_sphere_radius: float, voxelize_method: str = "ray", -): - pts, rad = voxel_fit_volume_inside_mesh(mesh, 0.75 * n_spheres, voxelize_method) - if pts is None: - return pts, rad - # compute surface points: - if len(pts) >= n_spheres: - return pts, rad - - sample_count = n_spheres - (len(pts)) - - surface_sample_pts, sample_radius = sample_even_fit_mesh( - mesh, sample_count, surface_sphere_radius - ) - pts = np.concatenate([pts, surface_sample_pts]) - rad = np.concatenate([rad, sample_radius]) - return pts, rad +) -> Tuple[numpy.ndarray, numpy.array]: + """Voxelize mesh, fit spheres to volume. Return the fitted spheres. + Args: + mesh: Input mesh. + n_spheres: Number of spheres to fit. + voxelize_method: Voxelization method to use, select from + :py:func:`trimesh.voxel.creation.voxelize`. -def voxel_fit_volume_inside_mesh( - mesh: trimesh.Trimesh, - n_spheres: int, - voxelize_method: str = "ray", -): + Returns: + Tuple of sphere positions and their radius. + """ pts, rad = get_voxelgrid_from_mesh(mesh, 2 * n_spheres, voxelize_method) if pts is None: return pts, rad @@ -192,25 +245,63 @@ def voxel_fit_volume_inside_mesh( return n_pts, n_radius +def voxel_fit_volume_sample_surface_mesh( + mesh: trimesh.Trimesh, + n_spheres: int, + surface_sphere_radius: float, + voxelize_method: str = "ray", +) -> Tuple[numpy.ndarray, numpy.array]: + """Voxelize mesh, fit spheres to volume, and sample surface for points. + + Args: + mesh: Input mesh. + n_spheres: Number of spheres to fit. + surface_sphere_radius: Radius of the spheres on the surface. This radius will be added + to points on the surface of the mesh, causing the spheres to inflate the mesh volume + by this amount. + voxelize_method: Voxelization method to use, select from + :py:func:`trimesh.voxel.creation.voxelize`. + Returns: + Tuple of sphere positions and their radius. + """ + pts, rad = voxel_fit_volume_inside_mesh(mesh, 0.75 * n_spheres, voxelize_method) + if pts is None: + return pts, rad + # compute surface points: + if len(pts) >= n_spheres: + return pts, rad + + sample_count = n_spheres - (len(pts)) + + surface_sample_pts, sample_radius = sample_even_fit_mesh( + mesh, sample_count, surface_sphere_radius + ) + pts = numpy.concatenate([pts, surface_sample_pts]) + rad = numpy.concatenate([rad, sample_radius]) + return pts, rad + + def fit_spheres_to_mesh( mesh: trimesh.Trimesh, n_spheres: int, surface_sphere_radius: float = 0.01, fit_type: SphereFitType = SphereFitType.VOXEL_VOLUME_SAMPLE_SURFACE, voxelize_method: str = "ray", -) -> Tuple[np.ndarray, List[float]]: +) -> Tuple[numpy.ndarray, numpy.array]: """Approximate a mesh with spheres. See :ref:`attach_object_note` for more details. 
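A usage sketch for fit_spheres_to_mesh (the sphere names below are arbitrary): approximate a trimesh primitive with a fixed sphere budget and wrap the result into cuRobo Sphere obstacles. Note that fewer spheres than requested may be returned.

import trimesh

from curobo.geom.sphere_fit import SphereFitType, fit_spheres_to_mesh
from curobo.geom.types import Sphere

cylinder = trimesh.creation.cylinder(radius=0.05, height=0.3)
pts, n_radius = fit_spheres_to_mesh(
    cylinder,
    n_spheres=32,
    surface_sphere_radius=0.005,
    fit_type=SphereFitType.VOXEL_VOLUME_SAMPLE_SURFACE,
)

# wrap the fitted spheres as obstacles (identity orientation):
spheres = [
    Sphere(
        name="approx_sph_" + str(i),
        pose=pts[i].tolist() + [1, 0, 0, 0],
        radius=float(n_radius[i]),
    )
    for i in range(len(pts))
]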
- Args: - mesh: Input trimesh - n_spheres: _description_ - surface_sphere_radius: _description_. Defaults to 0.01. - fit_type: _description_. Defaults to SphereFitType.VOXEL_VOLUME_SAMPLE_SURFACE. - voxelize_method: _description_. Defaults to "ray". + mesh: Input mesh. + n_spheres: Number of spheres to fit. + surface_sphere_radius: Radius of the spheres on the surface. This radius will be added + to points on the surface of the mesh, causing the spheres to inflate the mesh volume + by this amount. + fit_type: Sphere fit type, select from :py:class:`~SphereFitType`. + voxelize_method: Voxelization method to use, select from + :py:func:`trimesh.voxel.creation.voxelize`. Returns: - _description_ + Tuple of spehre positions and their radius. """ n_pts = n_radius = None if fit_type == SphereFitType.SAMPLE_SURFACE: @@ -236,5 +327,5 @@ def fit_spheres_to_mesh( dist = torch.linalg.norm(samples - torch.mean(samples, dim=-1).unsqueeze(1), dim=-1) _, knn_i = dist.topk(n_spheres, largest=True) n_pts = samples[knn_i].cpu().numpy() - n_radius = np.ravel(n_radius)[knn_i.cpu().flatten().tolist()].tolist() + n_radius = numpy.ravel(n_radius)[knn_i.cpu().flatten().tolist()].tolist() return n_pts, n_radius diff --git a/src/curobo/geom/transform.py b/src/curobo/geom/transform.py index fd956713..dacc2d2f 100644 --- a/src/curobo/geom/transform.py +++ b/src/curobo/geom/transform.py @@ -8,8 +8,12 @@ # without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. # +""" +Implements differentiable point and pose transformations leveraging Warp kernels. +Most of these implementations are available through :class:`~curobo.types.math.Pose`. +""" # Standard Library -from typing import Optional +from typing import Optional, Tuple # Third Party import torch @@ -23,8 +27,35 @@ def transform_points( - position, quaternion, points, out_points=None, out_gp=None, out_gq=None, out_gpt=None -): + position: torch.Tensor, + quaternion: torch.Tensor, + points: torch.Tensor, + out_points: Optional[torch.Tensor] = None, + out_gp: Optional[torch.Tensor] = None, + out_gq: Optional[torch.Tensor] = None, + out_gpt: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Transforms the given points using the provided position and quaternion. + + Args: + position: The position tensor representing the translation of the transformation. + quaternion: The quaternion tensor representing the rotation of the transformation. + Quaternion format is [w, x, y, z]. + points: The points to be transformed. + out_points: If provided, the transformed points will be stored in this tensor. If not + provided, a new tensor will be created. + out_gp: If provided, the gradient of the transformed points with respect to the position + will be stored in this tensor. If not provided, a new tensor will be created. + out_gq: If provided, the gradient of the transformed points with respect to the quaternion + will be stored in this tensor. If not provided, a new tensor will be created. + out_gpt: If provided, the gradient of the transformed points with respect to the original + points will be stored in this tensor. If not provided, a new tensor will be created. + + Returns: + torch.Tensor: The transformed points. 
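As the module docstring notes, these transforms are usually reached through the Pose type rather than called directly. A minimal sketch (a CUDA device is assumed, since the underlying kernels are written in Warp):

import torch

from curobo.types.base import TensorDeviceType
from curobo.types.math import Pose

tensor_args = TensorDeviceType()
# pose as [x, y, z, qw, qx, qy, qz]:
world_T_cam = Pose.from_list([0.5, 0.0, 0.3, 1.0, 0.0, 0.0, 0.0], tensor_args)

points_cam = torch.rand((100, 3), device=tensor_args.device, dtype=tensor_args.dtype)
# transform_points() is backed by the kernels in this module:
points_world = world_T_cam.transform_points(points_cam).view(-1, 3)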
+ """ + if out_points is None: out_points = torch.zeros((points.shape[0], 3), device=points.device, dtype=points.dtype) if out_gp is None: @@ -40,8 +71,35 @@ def transform_points( def batch_transform_points( - position, quaternion, points, out_points=None, out_gp=None, out_gq=None, out_gpt=None -): + position: torch.Tensor, + quaternion: torch.Tensor, + points: torch.Tensor, + out_points: Optional[torch.Tensor] = None, + out_gp: Optional[torch.Tensor] = None, + out_gq: Optional[torch.Tensor] = None, + out_gpt: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Transforms the given points using the provided batch of position and quaternion. + + Args: + position: The position tensor representing the translation of the transformation. Shape + should be (batch_size, 3). + quaternion: The quaternion tensor representing the rotation of the transformation. + Quaternion format is [w, x, y, z]. Shape should be (batch_size, 4). + points: The points to be transformed. Shape should be (batch_size, num_points, 3). + out_points: If provided, the transformed points will be stored in this tensor. If not + provided, a new tensor will be created. + out_gp: If provided, the gradient of the transformed points with respect to the position + will be stored in this tensor. If not provided, a new tensor will be created. + out_gq: If provided, the gradient of the transformed points with respect to the quaternion + will be stored in this tensor. If not provided, a new tensor will be created. + out_gpt: If provided, the gradient of the transformed points with respect to the original + points will be stored in this tensor. If not provided, a new tensor will be created. + + Returns: + torch.Tensor: The transformed points with shape (batch_size, num_points, 3). + """ if out_points is None: out_points = torch.zeros( (points.shape[0], points.shape[1], 3), device=points.device, dtype=points.dtype @@ -61,33 +119,69 @@ def batch_transform_points( @get_torch_jit_decorator() -def get_inv_transform(w_rot_c, w_trans_c): - # type: (Tensor, Tensor) -> Tuple[Tensor, Tensor] +def get_inv_transform( + w_rot_c: torch.Tensor, w_trans_c: torch.Tensor +) -> Tuple[torch.Tensor, torch.Tensor]: + """Get the inverse of the given transformation. + + Args: + w_rot_c: Rotation matrix in world frame. + w_trans_c: Translation vector in world frame. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: The inverse rotation matrix and translation vector. + """ c_rot_w = w_rot_c.transpose(-1, -2) c_trans_w = -1.0 * (c_rot_w @ w_trans_c.unsqueeze(-1)).squeeze(-1) return c_rot_w, c_trans_w @get_torch_jit_decorator() -def transform_point_inverse(point, rot, trans): - # type: (Tensor, Tensor, Tensor) -> Tensor +def transform_point_inverse( + point: torch.Tensor, rot: torch.Tensor, trans: torch.Tensor +) -> torch.Tensor: + """Transforms the given point using the inverse of the provided transformation. + Args: + point: Input point to be transformed. + rot: Rotation matrix. + trans: Translation vector. + + Returns: + torch.Tensor: The transformed point. + """ # new_point = (rot @ (point).unsqueeze(-1)).squeeze(-1) + trans n_rot, n_trans = get_inv_transform(rot, trans) new_point = (point @ n_rot.transpose(-1, -2)) + n_trans return new_point -def matrix_to_quaternion(matrix, out_quat=None, adj_matrix=None): +def matrix_to_quaternion( + matrix: torch.Tensor, + out_quat: Optional[torch.Tensor] = None, + adj_matrix: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """Converts the given rotation matrix to quaternion. 
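A round-trip sketch for the two conversion helpers documented here. Quaternions use the [w, x, y, z] convention and the tensors are expected on the GPU; since the sign of a quaternion is not unique, the check below compares up to sign.

import torch

from curobo.geom.transform import matrix_to_quaternion, quaternion_to_matrix
from curobo.types.base import TensorDeviceType

tensor_args = TensorDeviceType()
# identity rotation and a 90 degree rotation about z:
quat = tensor_args.to_device([[1.0, 0.0, 0.0, 0.0], [0.7071068, 0.0, 0.0, 0.7071068]])

rot_mat = quaternion_to_matrix(quat)       # shape [2, 3, 3]
quat_back = matrix_to_quaternion(rot_mat)  # shape [2, 4]

# q and -q encode the same rotation, so compare magnitudes only:
assert torch.allclose(quat.abs(), quat_back.abs(), atol=1e-5)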
+ + Args: + matrix: Rotation matrices as tensor of shape (..., 3, 3). + out_quat: Output tensor to store the quaternions. If not provided, a new tensor will be + created. + adj_matrix: Gradient tensor, if not provided, a new tensor will be created. + + Returns: + torch.Tensor: Quaternions with real part first, as tensor of shape (..., 4) [qw, qx,qy,qz]. + """ matrix = matrix.view(-1, 3, 3) out_quat = MatrixToQuaternion.apply(matrix, out_quat, adj_matrix) # out_quat = cuda_matrix_to_quaternion(matrix) return out_quat -def cuda_matrix_to_quaternion(matrix): - """ - Convert rotations given as rotation matrices to quaternions. +def cuda_matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor: + """Convert rotations given as rotation matrices to quaternions. + + This is not differentiable. Use :func:`~matrix_to_quaternion` for differentiable conversion. Args: matrix: Rotation matrices as tensor of shape (..., 3, 3). @@ -108,19 +202,32 @@ def cuda_matrix_to_quaternion(matrix): return out_quat -def quaternion_to_matrix(quaternions, out_mat=None, adj_quaternion=None): +def quaternion_to_matrix( + quaternions: torch.Tensor, + out_mat: Optional[torch.Tensor] = None, + adj_quaternion: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """Convert quaternion to rotation matrix. + + Args: + quaternions: Input quaternions with real part first, as tensor of shape (..., 4). + out_mat: Output rotation matrices as tensor of shape (..., 3, 3). If not provided, a new + tensor will be created. + adj_quaternion: Gradient tensor, if not provided, a new tensor will be created. + + Returns: + torch.Tensor: Rotation matrices as tensor of shape (..., 3, 3). + """ # return torch_quaternion_to_matrix(quaternions) out_mat = QuatToMatrix.apply(quaternions, out_mat, adj_quaternion) return out_mat -def torch_quaternion_to_matrix(quaternions): - """ - Convert rotations given as quaternions to rotation matrices. +def torch_quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor: + """Convert rotations given as quaternions to rotation matrices. Args: - quaternions: quaternions with real part first, - as tensor of shape (..., 4). + quaternions: quaternions with real part first, as tensor of shape (..., 4). Returns: Rotation matrices as tensor of shape (..., 3, 3). @@ -149,7 +256,20 @@ def torch_quaternion_to_matrix(quaternions): def pose_to_matrix( position: torch.Tensor, quaternion: torch.Tensor, out_matrix: Optional[torch.Tensor] = None -): +) -> torch.Tensor: + """Converts the given pose to a transformation matrix. + + Args: + position: The position tensor representing the translation of the transformation. + quaternion: The quaternion tensor representing the rotation of the transformation. + Quaternion format is [w, x, y, z]. + out_matrix: If provided, the transformation matrix will be stored in this tensor. If not + provided, a new tensor will be created. + + Returns: + torch.Tensor: The transformation matrix. 
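A short sketch of pose_to_matrix with batched inputs (shapes [b, 3] and [b, 4]), building 4x4 homogeneous transforms:

from curobo.geom.transform import pose_to_matrix
from curobo.types.base import TensorDeviceType

tensor_args = TensorDeviceType()
position = tensor_args.to_device([[0.1, 0.0, 0.5], [0.0, 0.2, 0.3]])
quaternion = tensor_args.to_device([[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]])

transform = pose_to_matrix(position, quaternion)  # shape [2, 4, 4]
# rotation block: transform[:, :3, :3], translation column: transform[:, :3, 3]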
+ """ + if out_matrix is None: if len(position.shape) == 2: out_matrix = torch.zeros( @@ -168,17 +288,44 @@ def pose_to_matrix( def pose_multiply( - position, - quaternion, - position2, - quaternion2, - out_position=None, - out_quaternion=None, - adj_pos=None, - adj_quat=None, - adj_pos2=None, - adj_quat2=None, -): + position: torch.Tensor, + quaternion: torch.Tensor, + position2: torch.Tensor, + quaternion2: torch.Tensor, + out_position: Optional[torch.Tensor] = None, + out_quaternion: Optional[torch.Tensor] = None, + adj_pos: Optional[torch.Tensor] = None, + adj_quat: Optional[torch.Tensor] = None, + adj_pos2: Optional[torch.Tensor] = None, + adj_quat2: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """Multiplies two poses. + + The input poses can either be of shape (3,) or (batch_size, 3). + + Args: + position: The position tensor representing the translation of the first transformation. + quaternion: The quaternion tensor representing the rotation of the first transformation. + The quaternion format is [w, x, y, z]. + position2: The position tensor representing the translation of the second transformation. + quaternion2: The quaternion tensor representing the rotation of the second transformation. + out_position: If provided, the position tensor of the multiplied pose will be stored in + this tensor. If not provided, a new tensor will be created. + out_quaternion: If provided, the quaternion tensor of the multiplied pose will be stored in + this tensor. If not provided, a new tensor will be created. + adj_pos: Gradient tensor for the position of the first pose. If not provided, a new tensor + will be created. + adj_quat: Gradient tensor for the quaternion of the first pose. If not provided, a new + tensor will be created. + adj_pos2: Gradient tensor for the position of the second pose. If not provided, a new + tensor will be created. + adj_quat2: Gradient tensor for the quaternion of the second pose. If not provided, a new + tensor will be created. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: The position and quaternion tensors of the multiplied + pose. + """ if position.shape == position2.shape: out_position, out_quaternion = BatchTransformPose.apply( position, @@ -212,13 +359,31 @@ def pose_multiply( def pose_inverse( - position, - quaternion, - out_position=None, - out_quaternion=None, - adj_pos=None, - adj_quat=None, -): + position: torch.Tensor, + quaternion: torch.Tensor, + out_position: Optional[torch.Tensor] = None, + out_quaternion: Optional[torch.Tensor] = None, + adj_pos: Optional[torch.Tensor] = None, + adj_quat: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """Get the inverse of the given pose. + + Args: + position: The position tensor representing the translation of the transformation. + quaternion: The quaternion tensor representing the rotation of the transformation. + out_position: If provided, the position tensor of the inverse pose will be stored in this + tensor. If not provided, a new tensor will be created. + out_quaternion: If provided, the quaternion tensor of the inverse pose will be stored in + this tensor. If not provided, a new tensor will be created. + adj_pos: Gradient tensor for the position of the pose. If not provided, a new tensor will + be created. + adj_quat: Gradient tensor for the quaternion of the pose. If not provided, a new tensor + will be created. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: The position and quaternion tensors of the inverse pose. 
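These pose operations are exposed at a higher level through the Pose type (see the module docstring). A short composition and inversion sketch with illustrative frame names:

from curobo.types.base import TensorDeviceType
from curobo.types.math import Pose

tensor_args = TensorDeviceType()
world_T_robot = Pose.from_list([0.0, 0.0, 0.1, 1.0, 0.0, 0.0, 0.0], tensor_args)
robot_T_obj = Pose.from_list([0.4, 0.1, 0.2, 0.0, 0.0, 0.0, 1.0], tensor_args)

world_T_obj = world_T_robot.multiply(robot_T_obj)  # compose transforms
obj_T_world = world_T_obj.inverse()                # invert the result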
+ """ + out_position, out_quaternion = PoseInverse.apply( position, quaternion, @@ -237,7 +402,17 @@ def compute_pose_inverse( quat: wp.array(dtype=wp.vec4), out_position: wp.array(dtype=wp.vec3), out_quat: wp.array(dtype=wp.vec4), -): # b pose_1 and b pose_2, compute pose_1 * pose_2 +): + """Compute inverse of pose. This is a warp kernel. + + Args: + position: Input position. + quat: Input quaternion. + out_position: Output position. + out_quat: Output quaternion. + """ + + # b pose_1 and b pose_2, compute pose_1 * pose_2 b_idx = wp.tid() # read data: @@ -267,6 +442,7 @@ def compute_matrix_to_quat( in_mat: wp.array(dtype=wp.mat33), out_quat: wp.array(dtype=wp.vec4), ): + """A warp kernel to convert rotation matrix to quaternion.""" # b pose_1 and b pose_2, compute pose_1 * pose_2 b_idx = wp.tid() # read data: @@ -294,7 +470,9 @@ def compute_transform_point( n_pts: wp.int32, n_poses: wp.int32, out_pt: wp.array(dtype=wp.vec3), -): # given n,3 points and b poses, get b,n,3 transformed points +): + """A warp kernel to transform the given points using the provided position and quaternion.""" + # given n,3 points and b poses, get b,n,3 transformed points # we tile as tid = wp.tid() b_idx = tid / (n_pts) @@ -325,7 +503,10 @@ def compute_batch_transform_point( n_pts: wp.int32, n_poses: wp.int32, out_pt: wp.array(dtype=wp.vec3), -): # given n,3 points and b poses, get b,n,3 transformed points +): + """A warp kernel to transform batch of points by batch of poses.""" + + # given n,3 points and b poses, get b,n,3 transformed points # we tile as tid = wp.tid() b_idx = tid / (n_pts) @@ -356,7 +537,9 @@ def compute_batch_pose_multipy( quat2: wp.array(dtype=wp.vec4), out_position: wp.array(dtype=wp.vec3), out_quat: wp.array(dtype=wp.vec4), -): # b pose_1 and b pose_2, compute pose_1 * pose_2 +): + """A warp kernel multiplying two batch of poses.""" + # b pose_1 and b pose_2, compute pose_1 * pose_2 b_idx = wp.tid() # read data: @@ -394,6 +577,7 @@ def compute_quat_to_matrix( quat: wp.array(dtype=wp.vec4), out_mat: wp.array(dtype=wp.mat33), ): + """A warp kernel to convert quaternion to rotation matrix.""" # b pose_1 and b pose_2, compute pose_1 * pose_2 b_idx = wp.tid() # read data: @@ -417,7 +601,9 @@ def compute_pose_multipy( quat2: wp.array(dtype=wp.vec4), out_position: wp.array(dtype=wp.vec3), out_quat: wp.array(dtype=wp.vec4), -): # b pose_1 and b pose_2, compute pose_1 * pose_2 +): + """A warp kernel to multiply a batch of poses (position2) by a pose.""" + # b pose_1 and b pose_2, compute pose_1 * pose_2 b_idx = wp.tid() # read data: @@ -451,6 +637,8 @@ def compute_pose_multipy( class TransformPoint(torch.autograd.Function): + """A differentiable function to transform batch of points by a pose.""" + @staticmethod def forward( ctx, @@ -549,6 +737,8 @@ def backward(ctx, grad_output): class BatchTransformPoint(torch.autograd.Function): + """A differentiable function to transform batch of points by a batch of poses.""" + @staticmethod def forward( ctx, @@ -596,7 +786,6 @@ def backward(ctx, grad_output): adj_points, ) = ctx.saved_tensors init_warp() - # print(adj_quaternion.shape) wp_adj_out_points = wp.from_torch(grad_output.view(-1, 3).contiguous(), dtype=wp.vec3) adj_position = 0.0 * adj_position @@ -645,6 +834,8 @@ def backward(ctx, grad_output): class BatchTransformPose(torch.autograd.Function): + """A differentiable function to transform batch of poses by a pose.""" + @staticmethod def forward( ctx, @@ -790,6 +981,8 @@ def backward(ctx, grad_out_position, grad_out_quaternion): class 
TransformPose(torch.autograd.Function): + """A differentiable function to transform a batch of poses by another batch of poses.""" + @staticmethod def forward( ctx, @@ -934,6 +1127,8 @@ def backward(ctx, grad_out_position, grad_out_quaternion): class PoseInverse(torch.autograd.Function): + """A differentiable function to get the inverse of a pose (also supports batch).""" + @staticmethod def forward( ctx, @@ -1048,6 +1243,8 @@ def backward(ctx, grad_out_position, grad_out_quaternion): class QuatToMatrix(torch.autograd.Function): + """A differentiable function for converting quaternions to rotation matrices.""" + @staticmethod def forward( ctx, @@ -1097,7 +1294,7 @@ def backward(ctx, grad_out_mat): wp_adj_out_mat = wp.from_torch(grad_out_mat.view(-1, 3, 3).contiguous(), dtype=wp.mat33) - adj_quaternion = 0.0 * adj_quaternion + adj_quaternion[:] = 0.0 * adj_quaternion wp_adj_quat = wp.from_torch(adj_quaternion.view(-1, 4), dtype=wp.vec4) @@ -1131,6 +1328,8 @@ def backward(ctx, grad_out_mat): class MatrixToQuaternion(torch.autograd.Function): + """A differentiable function for converting rotation matrices to quaternions.""" + @staticmethod def forward( ctx, diff --git a/src/curobo/geom/types.py b/src/curobo/geom/types.py index 8b4ac4e4..0679fffc 100644 --- a/src/curobo/geom/types.py +++ b/src/curobo/geom/types.py @@ -15,12 +15,13 @@ # Standard Library import math from dataclasses import dataclass, field -from typing import Any, Dict, List, Optional, Sequence, Union +from typing import Any, Dict, List, Optional, Sequence, Tuple, Union # Third Party import numpy as np import torch import trimesh +import trimesh.scene # CuRobo from curobo.geom.sphere_fit import SphereFitType, fit_spheres_to_mesh @@ -83,6 +84,12 @@ def get_trimesh_mesh(self, process: bool = True, process_color: bool = True) -> raise NotImplementedError def save_as_mesh(self, file_path: str, transform_with_pose: bool = False): + """Save obstacle as a mesh file. + + Args: + file_path: Path to save mesh file. + transform_with_pose: Transform obstacle with pose before saving. + """ mesh_scene = self.get_trimesh_mesh() if transform_with_pose: mesh_scene.apply_transform(self.get_transform_matrix()) @@ -246,10 +253,19 @@ class Cuboid(Obstacle): dims: List[float] = field(default_factory=lambda: [0.0, 0.0, 0.0]) def __post_init__(self): + """Post initialization checks if pose was set.""" if self.pose is None: log_error("Cuboid Obstacle requires Pose") - def get_trimesh_mesh(self, process: bool = True, process_color: bool = True): + def get_trimesh_mesh(self, process: bool = True, process_color: bool = True) -> trimesh.Trimesh: + """Create a trimesh instance from the obstacle representation. + Args: + process: Flag is not used. + process_color: Flag is not used. + + Returns: + trimesh.Trimesh: Instance of obstacle as a trimesh. + """ m = trimesh.creation.box(extents=self.dims) if self.color is not None: color_visual = trimesh.visual.color.ColorVisuals( @@ -261,11 +277,27 @@ def get_trimesh_mesh(self, process: bool = True, process_color: bool = True): @dataclass class Capsule(Obstacle): + """Represent obstacle as a capsule.""" + + #: Radius of capsule in meters. radius: float = 0.0 + + #: Base of capsule in meters [x, y, z]. base: List[float] = field(default_factory=lambda: [0.0, 0.0, 0.0]) + + #: Tip of capsule in meters [x, y, z]. 
tip: List[float] = field(default_factory=lambda: [0.0, 0.0, 0.0]) - def get_trimesh_mesh(self, process: bool = True, process_color: bool = True): + def get_trimesh_mesh(self, process: bool = True, process_color: bool = True) -> trimesh.Trimesh: + """Create a trimesh instance from the obstacle representation. + + Args: + process: Flag is not used. + process_color: Flag is not used. + + Returns: + trimesh.Trimesh: Instance of obstacle as a trimesh. + """ height = self.tip[2] - self.base[2] if ( height < 0 @@ -288,10 +320,24 @@ def get_trimesh_mesh(self, process: bool = True, process_color: bool = True): @dataclass class Cylinder(Obstacle): + """Obstacle represented as a cylinder.""" + + #: Radius of cylinder in meters. radius: float = 0.0 + + #: Height of cylinder in meters. height: float = 0.0 - def get_trimesh_mesh(self, process: bool = True, process_color: bool = True): + def get_trimesh_mesh(self, process: bool = True, process_color: bool = True) -> trimesh.Trimesh: + """Create a trimesh instance from the obstacle representation. + + Args: + process: Flag is not used. + process_color: Flag is not used. + + Returns: + trimesh.Trimesh: Instance of obstacle as a trimesh. + """ m = trimesh.creation.cylinder(radius=self.radius, height=self.height) if self.color is not None: color_visual = trimesh.visual.color.ColorVisuals( @@ -303,19 +349,32 @@ def get_trimesh_mesh(self, process: bool = True, process_color: bool = True): @dataclass class Sphere(Obstacle): + """Obstacle represented as a sphere.""" + + #: Radius of sphere in meters. radius: float = 0.0 - #: position is deprecated, use pose instead + #: Position is deprecated, use pose instead position: Optional[List[float]] = None def __post_init__(self): + """Post initialization checks if position was set, logs warning to use pose instead.""" if self.position is not None: self.pose = self.position + [1, 0, 0, 0] log_warn("Sphere.position is deprecated, use Sphere.pose instead") if self.pose is not None: self.position = self.pose[:3] - def get_trimesh_mesh(self, process: bool = True, process_color: bool = True): + def get_trimesh_mesh(self, process: bool = True, process_color: bool = True) -> trimesh.Trimesh: + """Create a trimesh instance from the obstacle representation. + + Args: + process: Flag is not used. + process_color: Flag is not used. + + Returns: + trimesh.Trimesh: Instance of obstacle as a trimesh. + """ m = trimesh.creation.icosphere(radius=self.radius) if self.color is not None: color_visual = trimesh.visual.color.ColorVisuals( @@ -366,18 +425,35 @@ def get_cuboid(self) -> Cuboid: @dataclass class Mesh(Obstacle): + """Obstacle represented as mesh.""" + + #: Path to mesh file. file_path: Optional[str] = None - file_string: Optional[str] = ( - None # storing full mesh as a string, loading from this is not implemented yet. - ) - urdf_path: Optional[str] = None # useful for visualization in isaac gym. + + #: Full mesh as a string, loading from this is not implemented yet. + file_string: Optional[str] = None + + #: Path to urdf file, does not support loading from this yet. + urdf_path: Optional[str] = None + + #: Vertices of mesh. vertices: Optional[List[List[float]]] = None + + #: Faces of mesh. faces: Optional[List[int]] = None + + #: Vertex colors of mesh. vertex_colors: Optional[List[List[float]]] = None + + #: Vertex normals of mesh. vertex_normals: Optional[List[List[float]]] = None + + #: Face colors of mesh. 
face_colors: Optional[List[List[float]]] = None def __post_init__(self): + """Post initialization adds absolute path to file_path and scales vertices.""" + if self.file_path is not None: self.file_path = join_path(get_assets_path(), self.file_path) if self.urdf_path is not None: @@ -386,7 +462,17 @@ def __post_init__(self): self.vertices = np.ravel(self.scale) * self.vertices self.scale = None - def get_trimesh_mesh(self, process: bool = True, process_color: bool = True): + def get_trimesh_mesh(self, process: bool = True, process_color: bool = True) -> trimesh.Trimesh: + """Create a trimesh instance from the obstacle representation. + + Args: + process: process flag passed to :class:`trimesh.load`. + process_color: if True, load mesh visual from texture. + + Returns: + trimesh.Trimesh: Instance of obstacle as a trimesh. + """ + # load mesh from filepath or verts and faces: if self.file_path is not None: m = trimesh.load(self.file_path, process=process, force="mesh") @@ -412,6 +498,8 @@ def get_trimesh_mesh(self, process: bool = True, process_color: bool = True): return m def update_material(self): + """Load material into vertex_colors and face_colors.""" + if ( self.color is None and self.vertex_colors is None @@ -431,7 +519,15 @@ def update_material(self): else: self.vertex_colors = [m.visual.vertex_colors for x in range(len(m.vertices))] - def get_mesh_data(self, process: bool = True): + def get_mesh_data(self, process: bool = True) -> Tuple[List[List[float]], List[int]]: + """Get vertices and faces of mesh. + + Args: + process: process flag passed to :class:`trimesh.load`. + + Returns: + Tuple[List[List[float]], List[int]]: vertices and faces of mesh. + """ verts = faces = None if self.file_path is not None: m = self.get_trimesh_mesh(process=process) @@ -453,6 +549,19 @@ def from_pointcloud( pose: List[float] = [0, 0, 0, 1, 0, 0, 0], filter_close_points: float = 0.0, ): + """Create a mesh from a pointcloud using marching cubes. + + Args: + pointcloud: Input pointcloud of shape [n_pts, 3]. + pitch: Pitch of marching cubes. + name: Name to asiign to created mesh. + pose: Pose to assign to created mesh. + filter_close_points: filter points that are closer than this threshold. Threshold + is in meters and should be positive. + + Returns: + Mesh: Mesh created from pointcloud. + """ if filter_close_points > 0.0: # remove points that are closer than given threshold dist = np.linalg.norm(pointcloud, axis=-1) @@ -465,16 +574,31 @@ def from_pointcloud( @dataclass class BloxMap(Obstacle): + """Obstacle represented as a nvblox ESDF layer.""" + + #: Path to nvblox map file. map_path: Optional[str] = None + + #: Scale of the map. scale: List[float] = field(default_factory=lambda: [1.0, 1.0, 1.0]) + + #: Voxel size of the map. voxel_size: float = 0.02 - #: integrator type to use in nvblox. Options: ["tsdf", "occupancy"] + + #: Integrator type to use in nvblox. Options: ["tsdf", "occupancy"] integrator_type: str = "tsdf" + + #: File path to mesh file if available, useful for rendering. mesh_file_path: Optional[str] = None + + #: Instance of nvblox mapper. Useful for passing a pre-initialized mapper. mapper_instance: Any = None + + #: Mesh representation of the map. Useful for rendering. Not used in collision checking. 
mesh: Optional[Mesh] = None def __post_init__(self): + """Post initialization adds absolute path to map_path, mesh_file_path, and loads mesh.""" if self.map_path is not None: self.map_path = join_path(get_assets_path(), self.map_path) if self.mesh_file_path is not None: @@ -482,9 +606,20 @@ def __post_init__(self): name=self.name + "_mesh", file_path=self.mesh_file_path, pose=self.pose ) - def get_trimesh_mesh(self, process: bool = True, process_color: bool = True): + def get_trimesh_mesh( + self, process: bool = True, process_color: bool = True + ) -> Union[trimesh.Trimesh, None]: + """Get trimesh mesh representation of the map. Only available if mesh_file_path is set. + + Args: + process: Process flag passed to :class:`trimesh.load`. + process_color: Load mesh visual from texture. + + Returns: + Union[trimesh.Trimesh, None]: Trimesh mesh representation of the map. + """ if self.mesh is not None: - return self.mesh.get_trimesh_mesh(process) + return self.mesh.get_trimesh_mesh(process, process_color=process_color) else: log_warn("no mesh found for obstacle: " + self.name) return None @@ -492,15 +627,31 @@ def get_trimesh_mesh(self, process: bool = True, process_color: bool = True): @dataclass class PointCloud(Obstacle): + """Obstacle represented as a pointcloud.""" + + #: Points of pointcloud. points: Union[torch.Tensor, np.ndarray, List[List[float]]] = None + + #: Features of pointcloud. points_features: Union[torch.Tensor, np.ndarray, List[List[float]], None] = None def __post_init__(self): + """Post initialization scales points if scale is set.""" if self.scale is not None and self.points is not None: self.points = np.ravel(self.scale) * self.points self.scale = None - def get_trimesh_mesh(self, process: bool = True, process_color: bool = True): + def get_trimesh_mesh(self, process: bool = True, process_color: bool = True) -> trimesh.Trimesh: + """Create a trimesh instance from the obstacle representation. + + Args: + process: Not used. + process_color: Not used. + + Returns: + trimesh.Trimesh: Instance of obstacle as a trimesh. + """ + points = self.points if isinstance(points, torch.Tensor): points = points.view(-1, 3).cpu().numpy() @@ -510,7 +661,15 @@ def get_trimesh_mesh(self, process: bool = True, process_color: bool = True): mesh = Mesh.from_pointcloud(points, pose=self.pose) return mesh.get_trimesh_mesh() - def get_mesh_data(self, process: bool = True): + def get_mesh_data(self, process: bool = True) -> Tuple[List[List[float]], List[int]]: + """Get mesh data from pointcloud. + + Args: + process: process flag passed to :class:`trimesh.load`. + + Returns: + verts, faces: vertices and faces of mesh. + """ verts = faces = None m = self.get_trimesh_mesh(process=process) verts = m.vertices.view(np.ndarray) @@ -519,75 +678,50 @@ def get_mesh_data(self, process: bool = True): @staticmethod def from_camera_observation( - camera_obs: CameraObservation, name: str = "pc_obstacle", pose: Optional[List[float]] = None - ): - return PointCloud(name=name, pose=pose, points=camera_obs.get_pointcloud()) - - def get_bounding_spheres( - self, - n_spheres: int = 1, - surface_sphere_radius: float = 0.002, - fit_type: SphereFitType = SphereFitType.VOXEL_VOLUME_SAMPLE_SURFACE, - voxelize_method: str = "ray", - pre_transform_pose: Optional[Pose] = None, - tensor_args: TensorDeviceType = TensorDeviceType(), - ) -> List[Sphere]: - """Compute n spheres that fits in the volume of the object. 
+ camera_obs: CameraObservation, + name: str = "pc_obstacle", + pose: Optional[List[float]] = None, + ) -> PointCloud: + """Create a pointcloud from a camera observation. Args: - n: number of spheres + camera_obs: Input camera observation. + name: Name to assign to created pointcloud. + pose: Pose to assign to created pointcloud. + Returns: - spheres + PointCloud: Pointcloud created from camera observation. """ - # sample points in pointcloud: - - # mesh = self.get_trimesh_mesh() - # pts, n_radius = fit_spheres_to_mesh( - # mesh, n_spheres, surface_sphere_radius, fit_type, voxelize_method=voxelize_method - # ) - - obj_pose = Pose.from_list(self.pose, tensor_args) - # transform object: - - # transform points: - if pre_transform_pose is not None: - obj_pose = pre_transform_pose.multiply(obj_pose) # convert object pose to another frame - - if pts is None or len(pts) == 0: - log_warn("spheres could not be fit!, adding one sphere at origin") - pts = np.zeros((1, 3)) - pts[0, :] = mesh.centroid - n_radius = [0.02] - obj_pose = Pose.from_list([0, 0, 0, 1, 0, 0, 0], tensor_args) - - points_cuda = tensor_args.to_device(pts) - pts = obj_pose.transform_points(points_cuda).cpu().view(-1, 3).numpy() - - new_spheres = [ - Sphere( - name="sph_" + str(i), - pose=[pts[i, 0], pts[i, 1], pts[i, 2], 1, 0, 0, 0], - radius=n_radius[i], - ) - for i in range(pts.shape[0]) - ] - - return new_spheres + return PointCloud(name=name, pose=pose, points=camera_obs.get_pointcloud()) @dataclass class VoxelGrid(Obstacle): + """VoxelGrid representation of an obstacle. Requires voxel to contain ESDF.""" + + #: Dimensions of voxel grid in meters [x_length, y_length, z_length]. dims: List[float] = field(default_factory=lambda: [0.0, 0.0, 0.0]) + + #: Voxel size in meters. voxel_size: float = 0.02 # meters + + #: Feature tensor of voxel grid, typically ESDF. feature_tensor: Optional[torch.Tensor] = None + + #: XYZR tensor of voxel grid. xyzr_tensor: Optional[torch.Tensor] = None + + #: Data type of feature tensor. feature_dtype: torch.dtype = torch.float32 def __post_init__(self): + """Post initialization checks.""" if self.feature_tensor is not None: self.feature_dtype = self.feature_tensor.dtype - def get_grid_shape(self): + def get_grid_shape(self) -> Tuple[List[int], List[float], List[float]]: + """Get shape of voxel grid.""" + bounds = self.dims low = [-bounds[0] / 2, -bounds[1] / 2, -bounds[2] / 2] high = [bounds[0] / 2, bounds[1] / 2, bounds[2] / 2] @@ -599,7 +733,17 @@ def get_grid_shape(self): def create_xyzr_tensor( self, transform_to_origin: bool = False, tensor_args: TensorDeviceType = TensorDeviceType() - ): + ) -> torch.Tensor: + """Create XYZR tensor of voxel grid. + + Args: + transform_to_origin: Transform points to origin. + tensor_args: Device and floating point precision to use for tensors. + + Returns: + xyzr_tensor: XYZR tensor of voxel grid with r referring to voxel size. + """ + trange, low, high = self.get_grid_shape() x = torch.linspace(low[0], high[0], trange[0], device=tensor_args.device) @@ -618,7 +762,15 @@ def create_xyzr_tensor( return xyzr - def get_occupied_voxels(self, feature_threshold: Optional[float] = None): + def get_occupied_voxels(self, feature_threshold: Optional[float] = None) -> torch.Tensor: + """Get occupied voxels from voxel grid. + + Args: + feature_threshold: esdf value threshold to consider as occupied. + + Returns: + occupied voxels as a tensor of shape [occupied_voxels]. 
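A sketch of building a VoxelGrid obstacle by hand: create the xyzr grid, fill a feature tensor, and read back the occupied voxels. The spherical "ESDF" below is synthetic and only meant to show the expected tensor shapes; voxels with feature values above the threshold (default -0.5 * voxel_size) are reported as occupied.

import torch

from curobo.geom.types import VoxelGrid
from curobo.types.base import TensorDeviceType

tensor_args = TensorDeviceType()
grid = VoxelGrid(
    name="my_voxel_grid",
    pose=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
    dims=[0.4, 0.4, 0.4],
    voxel_size=0.02,
)

xyzr = grid.create_xyzr_tensor(tensor_args=tensor_args)  # [n_voxels, 4]
grid.xyzr_tensor = xyzr

# mark a 0.1 m sphere around the origin as occupied (positive values inside):
grid.feature_tensor = (0.1 - torch.linalg.norm(xyzr[:, :3], dim=-1)).to(
    dtype=tensor_args.dtype
)

occupied = grid.get_occupied_voxels()  # xyzr rows of occupied voxels
print(grid.get_grid_shape()[0], occupied.shape)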
+ """ if feature_threshold is None: feature_threshold = -0.5 * self.voxel_size if self.xyzr_tensor is None or self.feature_tensor is None: @@ -628,12 +780,15 @@ def get_occupied_voxels(self, feature_threshold: Optional[float] = None): occupied = xyzr[self.feature_tensor > feature_threshold] return occupied - def clone(self): + def clone(self) -> VoxelGrid: + """Clone data of voxel grid.""" return VoxelGrid( name=self.name, pose=self.pose.copy(), dims=self.dims.copy(), - feature_tensor=self.feature_tensor.clone() if self.feature_tensor is not None else None, + feature_tensor=( + self.feature_tensor.clone() if self.feature_tensor is not None else None + ), xyzr_tensor=self.xyzr_tensor.clone() if self.xyzr_tensor is not None else None, feature_dtype=self.feature_dtype, voxel_size=self.voxel_size, @@ -662,12 +817,14 @@ class WorldConfig(Sequence): #: BloxMap obstacle. blox: Optional[List[BloxMap]] = None + #: List of ESDF voxel grid obstacles. voxel: Optional[List[VoxelGrid]] = None #: List of all obstacles in world. objects: Optional[List[Obstacle]] = None def __post_init__(self): + """Post initialization checks, also creates a list of all obstacles.""" # create objects list: if self.sphere is None: self.sphere = [] @@ -694,13 +851,16 @@ def __post_init__(self): + self.voxel ) - def __len__(self): + def __len__(self) -> int: + """Get number of obstacles.""" return len(self.objects) - def __getitem__(self, idx): + def __getitem__(self, idx: int) -> Obstacle: + """Get obstacle at index.""" return self.objects[idx] - def clone(self): + def clone(self) -> WorldConfig: + """Clone world configuration.""" return WorldConfig( cuboid=self.cuboid.copy() if self.cuboid is not None else None, sphere=self.sphere.copy() if self.sphere is not None else None, @@ -713,6 +873,14 @@ def clone(self): @staticmethod def from_dict(data_dict: Dict[str, Any]) -> WorldConfig: + """Load world configuration from dictionary. + + Args: + data_dict: World configuration dictionary. + + Returns: + Instance of WorldConfig. + """ cuboid = None sphere = None capsule = None @@ -748,7 +916,8 @@ def from_dict(data_dict: Dict[str, Any]) -> WorldConfig: # load world config as obbs: convert all types to obbs @staticmethod - def create_obb_world(current_world: WorldConfig): + def create_obb_world(current_world: WorldConfig) -> WorldConfig: + """Approximate all obstcales to oriented bounding boxes.""" sphere_obb = [] capsule_obb = [] cylinder_obb = [] @@ -778,7 +947,16 @@ def create_obb_world(current_world: WorldConfig): ) @staticmethod - def create_mesh_world(current_world: WorldConfig, process: bool = False): + def create_mesh_world(current_world: WorldConfig, process: bool = False) -> WorldConfig: + """Convert all obstacles to meshes. Does not convert :class:`VoxelGrid`, :class:`BloxMap`. + + Args: + current_world: Current world configuration. + process: process flag passed to :class:`trimesh.load`. + + Returns: + WorldConfig: World configuration with all obstacles converted to meshes. + """ sphere_obb = [] capsule_obb = [] cuboid_obb = [] @@ -812,7 +990,19 @@ def create_mesh_world(current_world: WorldConfig, process: bool = False): ) @staticmethod - def create_collision_support_world(current_world: WorldConfig, process: bool = True): + def create_collision_support_world( + current_world: WorldConfig, process: bool = True + ) -> WorldConfig: + """Converts all obstacles to only supported collision types. + + Args: + current_world: Current world configuration. + process: process flag passed to :class:`trimesh.load`. 
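A sketch of the conversion helpers documented in this class, using one of the world configuration files shipped with cuRobo ("collision_table.yml" is assumed to be present in the packaged world configs):

from curobo.geom.types import WorldConfig
from curobo.util_file import get_world_configs_path, join_path, load_yaml

world_cfg = WorldConfig.from_dict(
    load_yaml(join_path(get_world_configs_path(), "collision_table.yml"))
)

obb_world = world_cfg.get_obb_world()                    # cuboid approximation
mesh_world = world_cfg.get_mesh_world()                  # everything as meshes
collision_world = world_cfg.get_collision_check_world()  # supported types only

# optional: export the converted world for visual inspection
mesh_world.save_world_as_mesh("world_debug.obj")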
+ + Returns: + WorldConfig: World configuration with all obstacles converted to supported collision + types. + """ sphere_obb = [] capsule_obb = [] cuboid_obb = [] @@ -841,7 +1031,18 @@ def create_collision_support_world(current_world: WorldConfig, process: bool = T ) @staticmethod - def get_scene_graph(current_world: WorldConfig, process_color: bool = True): + def get_scene_graph( + current_world: WorldConfig, process_color: bool = True + ) -> trimesh.scene.scene.Scene: + """Get trimesh scene graph of world. + + Args: + current_world: Current world configuration. + process_color: Load color of meshes. + + Returns: + trimesh.scene.scene.Scene: Scene graph of world. + """ m_world = WorldConfig.create_mesh_world(current_world) mesh_scene = trimesh.scene.scene.Scene(base_frame="world_origin") mesh_list = m_world @@ -858,7 +1059,17 @@ def get_scene_graph(current_world: WorldConfig, process_color: bool = True): @staticmethod def create_merged_mesh_world( current_world: WorldConfig, process: bool = True, process_color: bool = True - ): + ) -> WorldConfig: + """Merge all obstacles in the world to a single mesh. + + Args: + current_world: Current world configuration. + process: process flag passed to :class:`trimesh.load`. + process_color: Load color of meshes. + + Returns: + WorldConfig: World configuration with a single merged mesh as obstacle. + """ mesh_scene = WorldConfig.get_scene_graph(current_world, process_color=process_color) mesh_scene = mesh_scene.dump(concatenate=True) new_mesh = Mesh( @@ -869,21 +1080,31 @@ def create_merged_mesh_world( ) return WorldConfig(mesh=[new_mesh]) - def get_obb_world(self): + def get_obb_world(self) -> WorldConfig: + """Get world with all obstacles as oriented bounding boxes.""" return WorldConfig.create_obb_world(self) - def get_mesh_world(self, merge_meshes: bool = False, process: bool = False): + def get_mesh_world(self, merge_meshes: bool = False, process: bool = False) -> WorldConfig: + """Get world with all obstacles as meshes.""" if merge_meshes: return WorldConfig.create_merged_mesh_world(self, process=process) else: return WorldConfig.create_mesh_world(self, process=process) - def get_collision_check_world(self, mesh_process: bool = False): + def get_collision_check_world(self, mesh_process: bool = False) -> WorldConfig: + """Get world with all obstacles converted to supported collision types.""" return WorldConfig.create_collision_support_world(self, process=mesh_process) def save_world_as_mesh( self, file_path: str, save_as_scene_graph=False, process_color: bool = True ): + """Save world as a mesh file. + + Args: + file_path: Path to save mesh file. + save_as_scene_graph: Save as scene graph. + process_color: Load color of meshes. + """ mesh_scene = WorldConfig.get_scene_graph(self, process_color=process_color) if save_as_scene_graph: mesh_scene = mesh_scene.dump(concatenate=True) @@ -891,15 +1112,16 @@ def save_world_as_mesh( mesh_scene.export(file_path) def get_cache_dict(self) -> Dict[str, int]: - """Computes the number of obstacles in each type - - Returns: - _description_ - """ + """Computes the number of obstacles in each type.""" cache = {"obb": len(self.cuboid), "mesh": len(self.mesh)} return cache def add_obstacle(self, obstacle: Obstacle): + """Add obstacle to world. + + Args: + obstacle: Obstacle to add to world. 
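A sketch of building a world programmatically and using the obstacle helpers described here (names and dimensions are illustrative):

from curobo.geom.types import Cuboid, Sphere, WorldConfig

world = WorldConfig(
    cuboid=[
        Cuboid(name="table", pose=[0.5, 0.0, -0.05, 1, 0, 0, 0], dims=[1.0, 1.0, 0.1])
    ]
)

# add another obstacle after construction:
world.add_obstacle(Sphere(name="ball", pose=[0.3, 0.2, 0.1, 1, 0, 0, 0], radius=0.05))

table = world.get_obstacle("table")  # returns None if the name is unknown
world.remove_obstacle("ball")
print(len(world), [obs.name for obs in world])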
+ """ if isinstance(obstacle, Mesh): self.mesh.append(obstacle) elif isinstance(obstacle, Cuboid): @@ -920,12 +1142,9 @@ def randomize_color(self, r=[0, 1], g=[0, 1], b=[0, 1]): """Randomize color of objects within the given range Args: - r: _description_. Defaults to [0,1]. - g: _description_. Defaults to [0,1]. - b: _description_. Defaults to [0,1]. - - Returns: - _description_ + r: range of red color. + g: range of green color. + b: range of blue color. """ n = len(self.objects) r_l = np.random.uniform(r[0], r[1], n) @@ -935,32 +1154,72 @@ def randomize_color(self, r=[0, 1], g=[0, 1], b=[0, 1]): i_val.color = [r_l[i], g_l[i], b_l[i], 1.0] def add_color(self, rgba=[0.0, 0.0, 0.0, 1.0]): + """Update color of obstacles. + + Args: + rgba: red, green, blue, alpha values. + """ for i, i_val in enumerate(self.objects): i_val.color = rgba def add_material(self, material=Material()): + """Add material to all obstacles. + + Args: + material: material to add to obstacles. + """ for i, i_val in enumerate(self.objects): i_val.material = material def get_obstacle(self, name: str) -> Union[None, Obstacle]: + """Get obstacle by name. + + Args: + name: Name of obstacle. + + Returns: + Obstacle with given name. If not found, returns None. + """ for i in self.objects: if i.name == name: return i return None def remove_obstacle(self, name: str): + """Remove obstacle by name. + + Args: + name: Name of obstacle to remove. + """ for i in range(len(self.objects)): if self.objects[i].name == name: del self.objects[i] return - def remove_absolute_paths(self) -> WorldConfig: + def remove_absolute_paths(self): + """Remove absolute paths from file paths in obstacles. May not work on Windows.""" for obj in self.objects: if obj.name.startswith("/"): obj.name = obj.name[1:] -def tensor_sphere(pt, radius, tensor=None, tensor_args=TensorDeviceType()): +def tensor_sphere( + pt: Union[List[float], np.array, torch.Tensor], + radius: float, + tensor: Optional[torch.Tensor] = None, + tensor_args: TensorDeviceType = TensorDeviceType(), +) -> torch.Tensor: + """Tensor representation of a sphere. + + Args: + pt: Input point. + radius: Radius of sphere. + tensor: Tensor to update. If None, creates a new tensor. + tensor_args: Device and floating point precision to use for tensors. + + Returns: + tensor: Tensor representation of sphere. + """ if tensor is None: tensor = torch.empty(4, device=tensor_args.device, dtype=tensor_args.dtype) tensor[:3] = torch.as_tensor(pt, device=tensor_args.device, dtype=tensor_args.dtype) @@ -968,7 +1227,26 @@ def tensor_sphere(pt, radius, tensor=None, tensor_args=TensorDeviceType()): return tensor -def tensor_capsule(base, tip, radius, tensor=None, tensor_args=TensorDeviceType()): +def tensor_capsule( + base: Union[List[float], torch.Tensor, np.array], + tip: Union[List[float], torch.Tensor, np.array], + radius: float, + tensor: Optional[torch.Tensor] = None, + tensor_args: TensorDeviceType = TensorDeviceType(), +) -> torch.Tensor: + """Tensor representation of a capsule. + + Args: + base: Base of capsule. + tip: Tip of capsule. + radius: radius of capsule. + tensor: Tensor to update. If None, creates a new tensor. + tensor_args: Device and floating point precision to use for tensors. + + Returns: + torch.Tensor: Tensor representation of capsule. 
+ """ + if tensor is None: tensor = torch.empty(7, device=tensor_args.device, dtype=tensor_args.dtype) tensor[:3] = torch.as_tensor(base, device=tensor_args.device, dtype=tensor_args.dtype) @@ -977,16 +1255,19 @@ def tensor_capsule(base, tip, radius, tensor=None, tensor_args=TensorDeviceType( return tensor -def tensor_cube(pose, dims, tensor_args=TensorDeviceType()): - """ +def tensor_cube( + pose: List[float], dims: List[float], tensor_args: TensorDeviceType = TensorDeviceType() +) -> List[torch.Tensor, torch.Tensor]: + """Tensor representation of a cube. Args: - pose (_type_): x,y,z, qw,qx,qy,qz - dims (_type_): _description_ - tensor_args (_type_, optional): _description_. Defaults to TensorDeviceType(). + pose: x,y,z, qw, qx, qy, qz. + dims: length, width, height in meters. Frame is at the center of the cube. + tensor_args: Device and floating point precision to use for tensors. Returns: - _type_: _description_ + List[torch.Tensor, torch.Tensor]: Tensor representation of cube, first tensor is + dimensions and second tensor is inverse of pose. """ w_T_b = Pose.from_list(pose, tensor_args=tensor_args) b_T_w = w_T_b.inverse() @@ -997,16 +1278,20 @@ def tensor_cube(pose, dims, tensor_args=TensorDeviceType()): return cube -def batch_tensor_cube(pose, dims, tensor_args=TensorDeviceType()): - """ - +def batch_tensor_cube( + pose: List[List[float]], + dims: List[List[float]], + tensor_args: TensorDeviceType = TensorDeviceType(), +) -> List[torch.Tensor]: + """Tensor representation of a batch of cubes Args: - pose (_type_): x,y,z, qw,qx,qy,qz - dims (_type_): _description_ - tensor_args (_type_, optional): _description_. Defaults to TensorDeviceType(). + pose : Poses of the cubes in x,y,z, qw,qx,qy,qz. + dims : Dimensions of the cubes. Frame is at the center of the cube. + tensor_args: Device and floating point precision to use for tensors. Returns: - _type_: _description_ + List[torch.Tensor]: Tensor representation of cubes, first tensor is dimensions and + second tensor is inverse of poses. 
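A short sketch of the low-level tensor helpers above, which produce the flattened forms used elsewhere in cuRobo (e.g., for primitive collision checking):

from curobo.geom.types import tensor_capsule, tensor_cube, tensor_sphere
from curobo.types.base import TensorDeviceType

tensor_args = TensorDeviceType()

sph = tensor_sphere([0.1, 0.0, 0.3], radius=0.05, tensor_args=tensor_args)  # [x, y, z, r]
cap = tensor_capsule(
    base=[0.0, 0.0, 0.0], tip=[0.0, 0.0, 0.3], radius=0.04, tensor_args=tensor_args
)
dims, inv_pose = tensor_cube(
    pose=[0.5, 0.0, 0.0, 1, 0, 0, 0], dims=[0.2, 0.2, 0.2], tensor_args=tensor_args
)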
""" w_T_b = Pose.from_batch_list(pose, tensor_args=tensor_args) b_T_w = w_T_b.inverse() diff --git a/src/curobo/rollout/cost/dist_cost.py b/src/curobo/rollout/cost/dist_cost.py index 983a96a7..ce1b9bb3 100644 --- a/src/curobo/rollout/cost/dist_cost.py +++ b/src/curobo/rollout/cost/dist_cost.py @@ -408,7 +408,9 @@ def forward(self, disp_vec, RETURN_GOAL_DIST=False): if self.terminal and self.run_weight is not None: if self._run_weight_vec is None or self._run_weight_vec.shape[1] != cost.shape[1]: self._run_weight_vec = torch.ones( - (1, cost.shape[1]), device=self.tensor_args.device, dtype=self.tensor_args.dtype + (1, cost.shape[1]), + device=self.tensor_args.device, + dtype=self.tensor_args.dtype, ) self._run_weight_vec[:, :-1] *= self.run_weight if RETURN_GOAL_DIST: @@ -430,7 +432,9 @@ def forward_target(self, goal_vec, current_vec, RETURN_GOAL_DIST=False): if self.terminal and self.run_weight is not None: if self._run_weight_vec is None or self._run_weight_vec.shape[1] != cost.shape[1]: self._run_weight_vec = torch.ones( - (1, cost.shape[1]), device=self.tensor_args.device, dtype=self.tensor_args.dtype + (1, cost.shape[1]), + device=self.tensor_args.device, + dtype=self.tensor_args.dtype, ) self._run_weight_vec[:, :-1] *= self.run_weight cost = self._run_weight_vec * dist diff --git a/src/curobo/types/camera.py b/src/curobo/types/camera.py index 05c5ea26..c2387229 100644 --- a/src/curobo/types/camera.py +++ b/src/curobo/types/camera.py @@ -112,13 +112,17 @@ def to(self, device: torch.device): return self - def get_pointcloud(self): + def get_pointcloud(self, project_to_pose: bool = False): if self.projection_rays is None: self.update_projection_rays() depth_image = self.depth_image if len(self.depth_image.shape) == 2: depth_image = self.depth_image.unsqueeze(0) point_cloud = project_depth_using_rays(depth_image, self.projection_rays) + + if project_to_pose and self.pose is not None: + point_cloud = self.pose.batch_transform(point_cloud) + return point_cloud def get_image_from_pointcloud(self, pointcloud, output_image: Optional[torch.Tensor] = None): diff --git a/src/curobo/util/trajectory.py b/src/curobo/util/trajectory.py index 8c8d479e..2fe61aa7 100644 --- a/src/curobo/util/trajectory.py +++ b/src/curobo/util/trajectory.py @@ -143,6 +143,13 @@ def get_batch_interpolated_trajectory( # given the dt required to run trajectory at maximum velocity, # we find the number of timesteps required: if optimize_dt: + if max_vel is None: + log_error("Max velocity not provided") + if max_acc is None: + log_error("Max acceleration not provided") + if max_jerk is None: + log_error("Max jerk not provided") + if max_vel is not None and max_acc is not None and max_jerk is not None: traj_vel = raw_traj.velocity traj_acc = raw_traj.acceleration traj_jerk = raw_traj.jerk @@ -168,7 +175,8 @@ def get_batch_interpolated_trajectory( ) else: traj_steps, steps_max = calculate_traj_steps(raw_dt, interpolation_dt, horizon) - opt_dt = raw_dt + opt_dt = torch.zeros(b, device=tensor_args.device) + opt_dt[:] = raw_dt # traj_steps contains the tsteps for each trajectory if steps_max <= 0: log_error("Steps max is less than 0") diff --git a/src/curobo/util/warp.py b/src/curobo/util/warp.py index 2167ea46..cd95ff5e 100644 --- a/src/curobo/util/warp.py +++ b/src/curobo/util/warp.py @@ -26,7 +26,10 @@ def init_warp(quiet=True, tensor_args: TensorDeviceType = TensorDeviceType()): # wp.config.print_launches = True # wp.config.verbose = True # wp.config.mode = "debug" + # wp.config.verify_cuda = True # wp.config.enable_backward 
= True + # wp.config.verify_autograd_array_access = True + # wp.config.cache_kernels = False wp.init() # wp.force_load(wp.device_from_torch(tensor_args.device)) diff --git a/src/curobo/util_file.py b/src/curobo/util_file.py index dfda7a34..d366f31b 100644 --- a/src/curobo/util_file.py +++ b/src/curobo/util_file.py @@ -8,11 +8,12 @@ # without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. # +"""Contains helper functions for interacting with file systems.""" # Standard Library import os import shutil import sys -from typing import Dict, List, Union +from typing import Any, Dict, List, Union # Third Party import yaml @@ -23,41 +24,70 @@ # get paths -def get_module_path(): +def get_module_path() -> str: + """Get absolute path of cuRobo library.""" path = os.path.dirname(__file__) return path -def get_root_path(): +def get_root_path() -> str: + """Get absolute path of cuRobo library.""" path = os.path.dirname(get_module_path()) return path -def get_content_path(): +def get_content_path() -> str: + """Get path to content directory in cuRobo. + + Content directory contains configuration parameters for different tasks, some robot + parameters for using in examples, and some world assets. Use + :class:`~curobo.util.file_path.ContentPath` when running cuRobo with assets from a different + location. + + Returns: + str: path to content directory. + """ root_path = get_module_path() path = os.path.join(root_path, "content") return path -def get_configs_path(): +def get_configs_path() -> str: + """Get path to configuration parameters for different tasks(e.g., IK, TrajOpt, MPC) in cuRobo. + + Returns: + str: path to configuration directory. + """ content_path = get_content_path() path = os.path.join(content_path, "configs") return path -def get_assets_path(): +def get_assets_path() -> str: + """Get path to assets (robot urdf, meshes, world meshes) directory in cuRobo.""" + content_path = get_content_path() path = os.path.join(content_path, "assets") return path def get_weights_path(): + """Get path to neural network weights directory in cuRobo. Currently not used in cuRobo.""" content_path = get_content_path() path = os.path.join(content_path, "weights") return path -def join_path(path1, path2): +def join_path(path1: str, path2: str) -> str: + """Join two paths, considering OS specific path separators. + + Args: + path1: Path prefix. + path2: Path suffix. If path2 is an absolute path, path1 is ignored. + + Returns: + str: Joined path. + """ if path1[-1] == os.sep: log_warn("path1 has trailing slash, removing it") if isinstance(path2, str): @@ -66,7 +96,15 @@ def join_path(path1, path2): return path2 -def load_yaml(file_path): +def load_yaml(file_path: Union[str, Dict]) -> Dict: + """Load yaml file and return as dictionary. If file_path is a dictionary, return as is. + + Args: + file_path: File path to yaml file or dictionary. + + Returns: + Dict: Dictionary containing yaml file content. + """ if isinstance(file_path, str): with open(file_path) as file_p: yaml_params = yaml.load(file_p, Loader=Loader) @@ -75,47 +113,109 @@ def load_yaml(file_path): return yaml_params -def write_yaml(data: Dict, file_path): +def write_yaml(data: Dict, file_path: str): + """Write dictionary to yaml file. + + Args: + data: Dictionary to write to yaml file. + file_path: Path to write the yaml file. + """ with open(file_path, "w") as file: yaml.dump(data, file) -def get_robot_path(): +def get_robot_path() -> str: + """Get path to robot directory in cuRobo. 
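A sketch combining the path and yaml helpers in this module: load a packaged robot configuration, apply an illustrative edit (the nested robot_cfg/kinematics keys are assumed from the standard robot configuration layout), and write the result out.

from curobo.util_file import get_robot_configs_path, join_path, load_yaml, write_yaml

robot_file = join_path(get_robot_configs_path(), "franka.yml")
robot_cfg = load_yaml(robot_file)  # also accepts an already-loaded dict

# illustrative override of one kinematics entry (assumed key layout):
robot_cfg["robot_cfg"]["kinematics"]["ee_link"] = "panda_hand"

write_yaml(robot_cfg, "franka_modified.yml")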
+ + Deprecated: Use :func:`~curobo.util_file.get_robot_configs_path` instead. + Robot directory contains robot configuration files in yaml format. See + :ref:`tut_robot_configuration` for how to create a robot configuration file. + + Returns: + str: path to robot directory. + """ config_path = get_configs_path() path = os.path.join(config_path, "robot") return path -def get_task_configs_path(): +def get_task_configs_path() -> str: + """Get path to task configuration directory in cuRobo. + + Task directory contains configuration parameters for different tasks (e.g., IK, TrajOpt, MPC). + + Returns: + str: path to task configuration directory. + """ config_path = get_configs_path() path = os.path.join(config_path, "task") return path -def get_robot_configs_path(): +def get_robot_configs_path() -> str: + """Get path to robot configuration directory in cuRobo. + + Robot configuration directory contains robot configuration files in yaml format. See + :ref:`tut_robot_configuration` for how to create a robot configuration file. + + Returns: + str: path to robot configuration directory. + """ config_path = get_configs_path() path = os.path.join(config_path, "robot") return path -def get_world_configs_path(): +def get_world_configs_path() -> str: + """Get path to world configuration directory in cuRobo. + + World configuration directory contains world configuration files in yaml format. World + information includes obstacles represented with respect to the robot base frame. + + Returns: + str: path to world configuration directory. + """ config_path = get_configs_path() path = os.path.join(config_path, "world") return path -def get_debug_path(): +def get_debug_path() -> str: + """Get path to debug directory in cuRobo. + + Debug directory can be used to store logs and debug information. + + Returns: + str: path to debug directory. + """ + asset_path = get_assets_path() path = join_path(asset_path, "debug") return path def get_cpp_path(): + """Get path to cpp directory in cuRobo. + + Directory contains CUDA implementations (kernels) of robotics algorithms, which are wrapped + in C++ and compiled with PyTorch to enable usage in Python. + + Returns: + str: path to cpp directory. + """ path = os.path.dirname(__file__) return os.path.join(path, "curobolib/cpp") -def add_cpp_path(sources): +def add_cpp_path(sources: List[str]) -> List[str]: + """Add cpp path to list of source files. + + Args: + sources: List of source files. + + Returns: + List[str]: List of source files with cpp path added. + """ cpp_path = get_cpp_path() new_list = [] for s in sources: @@ -124,8 +224,16 @@ def add_cpp_path(sources): return new_list -def copy_file_to_path(source_file, destination_path): - # +def copy_file_to_path(source_file: str, destination_path: str) -> str: + """Copy file from source to destination. + + Args: + source_file: Path of source file. + destination_path: Path of destination directory. + + Returns: + str: Destination path of copied file. + """ isExist = os.path.exists(destination_path) if not isExist: os.makedirs(destination_path) @@ -137,19 +245,47 @@ def copy_file_to_path(source_file, destination_path): return new_path -def get_filename(file_path, remove_extension: bool = False): +def get_filename(file_path: str, remove_extension: bool = False) -> str: + """Get file name from file path, removing extension if required. + + Args: + file_path: Path of file. + remove_extension: If True, remove file extension. + + Returns: + str: File name. 
+ """ + _, file_name = os.path.split(file_path) if remove_extension: file_name = os.path.splitext(file_name)[0] return file_name -def get_path_of_dir(file_path): +def get_path_of_dir(file_path: str) -> str: + """Get path of directory containing the file. + + Args: + file_path: Path of file. + + Returns: + str: Path of directory containing the file. + """ dir_path, _ = os.path.split(file_path) return dir_path -def get_files_from_dir(dir_path, extension: List[str], contains: str): +def get_files_from_dir(dir_path, extension: List[str], contains: str) -> List[str]: + """Get list of files from directory with specified extension and containing a string. + + Args: + dir_path: Path of directory. + extension: List of file extensions to filter. + contains: String to filter file names. + + Returns: + List[str]: List of file names. Does not include path. + """ file_names = [ fn for fn in os.listdir(dir_path) @@ -159,7 +295,15 @@ def get_files_from_dir(dir_path, extension: List[str], contains: str): return file_names -def file_exists(path): +def file_exists(path: str) -> bool: + """Check if file exists. + + Args: + path: Path of file. + + Returns: + bool: True if file exists, False otherwise. + """ if path is None: return False isExist = os.path.exists(path) @@ -167,11 +311,7 @@ def file_exists(path): def get_motion_gen_robot_list() -> List[str]: - """returns list of robots available in curobo for motion generation - - Returns: - _description_ - """ + """Get list of robot configuration examples in cuRobo for motion generation.""" robot_list = [ "franka.yml", "ur5e.yml", @@ -187,10 +327,12 @@ def get_motion_gen_robot_list() -> List[str]: def get_robot_list() -> List[str]: + """Get list of robots example configurations in cuRobo.""" return get_motion_gen_robot_list() def get_multi_arm_robot_list() -> List[str]: + """Get list of multi-arm robot configuration examples in cuRobo.""" robot_list = [ "dual_ur10e.yml", "tri_ur10e.yml", @@ -199,23 +341,43 @@ def get_multi_arm_robot_list() -> List[str]: return robot_list -def merge_dict_a_into_b(a, b): +def merge_dict_a_into_b(a: Dict[str, Any], b: Dict[str, Any]) -> Dict[str, Any]: + """Merge dictionary values in "a" into dictionary "b". Overwrite values in "b" if key exists. + + Args: + a: New dictionary to merge. + b: Base dictionary to merge into. + + Returns: + Merged dictionary. + """ for k, v in a.items(): if isinstance(v, dict): merge_dict_a_into_b(v, b[k]) else: b[k] = v + return b -def is_platform_windows(): +def is_platform_windows() -> bool: + """Check if platform is Windows.""" return sys.platform == "win32" -def is_platform_linux(): +def is_platform_linux() -> bool: + """Check if platform is Linux.""" return sys.platform == "linux" def is_file_xrdf(file_path: str) -> bool: + """Check if file is an `XRDF `_ file. + + Args: + file_path: Path of file. + + Returns: + bool: True if file is xrdf, False otherwise. 
+ """ if file_path.endswith(".xrdf") or file_path.endswith(".XRDF"): return True return False diff --git a/src/curobo/wrap/model/robot_world.py b/src/curobo/wrap/model/robot_world.py index 6e0afe20..bfe50794 100644 --- a/src/curobo/wrap/model/robot_world.py +++ b/src/curobo/wrap/model/robot_world.py @@ -35,11 +35,7 @@ from curobo.types.state import JointState from curobo.util.logger import log_error from curobo.util.sample_lib import HaltonGenerator -from curobo.util.torch_utils import ( - get_torch_compile_options, - get_torch_jit_decorator, - is_torch_compile_available, -) +from curobo.util.torch_utils import get_torch_jit_decorator from curobo.util.warp import init_warp from curobo.util_file import get_robot_configs_path, get_world_configs_path, join_path, load_yaml diff --git a/src/curobo/wrap/reacher/motion_gen.py b/src/curobo/wrap/reacher/motion_gen.py index dae67a95..be15672f 100644 --- a/src/curobo/wrap/reacher/motion_gen.py +++ b/src/curobo/wrap/reacher/motion_gen.py @@ -1792,7 +1792,7 @@ def update_world(self, world: WorldConfig): world: New world configuration for collision checking. """ self.world_coll_checker.load_collision_model(world, fix_cache_reference=self.use_cuda_graph) - self.graph_planner.reset_graph() + self.graph_planner.reset_buffer() def clear_world_cache(self): """Remove all collision objects from collision cache.""" @@ -2214,7 +2214,6 @@ def update_pose_cost_metric( for rollout in rollouts if isinstance(rollout, ArmReacher) ] - torch.cuda.synchronize(device=self.tensor_args.device) return True def get_all_rollout_instances(self) -> List[RolloutBase]: @@ -3471,21 +3470,22 @@ def _plan_from_solve_state( opt_dt = traj_result.optimized_dt if plan_config.parallel_finetune: - opt_dt = torch.min(opt_dt[traj_result.success]) seed_override = solve_state.num_trajopt_seeds * self.noisy_trajopt_seeds + if self.optimize_dt: + opt_dt = torch.min(opt_dt[traj_result.success]) finetune_time = 0 newton_iters = None for k in range(plan_config.finetune_attempts): - - scaled_dt = torch.clamp( - opt_dt - * plan_config.finetune_dt_scale - * (plan_config.finetune_dt_decay ** (k)), - self.trajopt_solver.minimum_trajectory_dt, - ) if self.optimize_dt: + + scaled_dt = torch.clamp( + opt_dt + * plan_config.finetune_dt_scale + * (plan_config.finetune_dt_decay ** (k)), + self.trajopt_solver.minimum_trajectory_dt, + ) self.finetune_trajopt_solver.update_solver_dt(scaled_dt.item()) traj_result = self._solve_trajopt_from_solve_state( diff --git a/src/curobo/wrap/reacher/mpc.py b/src/curobo/wrap/reacher/mpc.py index 74002dea..fba5ea2e 100644 --- a/src/curobo/wrap/reacher/mpc.py +++ b/src/curobo/wrap/reacher/mpc.py @@ -51,8 +51,11 @@ from curobo.opt.particle.parallel_es import ParallelES, ParallelESConfig from curobo.opt.particle.parallel_mppi import ParallelMPPI, ParallelMPPIConfig from curobo.rollout.arm_reacher import ArmReacher, ArmReacherConfig +from curobo.rollout.cost.pose_cost import PoseCostMetric +from curobo.rollout.dynamics_model.kinematic_model import KinematicModelState from curobo.rollout.rollout_base import Goal from curobo.types.base import TensorDeviceType +from curobo.types.math import Pose from curobo.types.robot import JointState, RobotConfig from curobo.util.logger import log_error, log_info, log_warn from curobo.util_file import ( @@ -113,6 +116,7 @@ def load_from_robot_config( use_mppi: bool = True, particle_file: str = "particle_mpc.yml", override_particle_file: str = None, + project_pose_to_goal_frame: bool = True, ): """Create an MPC solver configuration from robot and 
world configuration. @@ -160,6 +164,9 @@ def load_from_robot_config( particle_file: Particle based MPC config file. override_particle_file: Optional config file for overriding the parameters in the particle based MPC config file. + project_pose_to_goal_frame: Project pose to goal frame when calculating distance + between reached and goal pose. Use this to constrain motion to specific axes + either in the global frame or the goal frame. Returns: MpcSolverConfig: Configuration for the MPC solver. @@ -184,6 +191,9 @@ def load_from_robot_config( if n_collision_envs is not None: base_cfg["world_collision_checker_cfg"]["n_envs"] = n_collision_envs + base_cfg["cost"]["pose_cfg"]["project_distance"] = project_pose_to_goal_frame + base_cfg["convergence"]["pose_cfg"]["project_distance"] = project_pose_to_goal_frame + config_data["cost"]["pose_cfg"]["project_distance"] = project_pose_to_goal_frame if collision_activation_distance is not None: config_data["cost"]["primitive_collision_cfg"][ "activation_distance" @@ -217,6 +227,7 @@ def load_from_robot_config( config_data["model"] = grad_config_data["model"] if use_cuda_graph is not None: grad_config_data["lbfgs"]["use_cuda_graph"] = use_cuda_graph + grad_config_data["cost"]["pose_cfg"]["project_distance"] = project_pose_to_goal_frame cfg = ArmReacherConfig.from_dict( robot_cfg, @@ -604,6 +615,99 @@ def get_visual_rollouts(self): """Get rollouts for debugging.""" return self.solver.optimizers[0].get_rollouts() + def update_pose_cost_metric( + self, + metric: PoseCostMetric, + start_state: Optional[JointState] = None, + goal_pose: Optional[Pose] = None, + check_validity: bool = True, + ) -> bool: + """Update the pose cost metric. + + Only supports for the main end-effector. Does not support for multiple links that are + specified with `link_poses` in planning methods. + + Args: + metric: Type and parameters for pose constraint to add. + start_state: Start joint state for the constraint. + goal_pose: Goal pose for the constraint. + + Returns: + bool: True if the constraint can be added, False otherwise. 
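A construction sketch for the new project_pose_to_goal_frame option. franka.yml and collision_table.yml are example files assumed to be available under the packaged config paths, and the robot yaml is assumed to store its parameters under a "robot_cfg" key:

from curobo.util_file import get_robot_configs_path, get_world_configs_path, join_path, load_yaml
from curobo.wrap.reacher.mpc import MpcSolver, MpcSolverConfig

robot_cfg = load_yaml(join_path(get_robot_configs_path(), "franka.yml"))["robot_cfg"]
world_cfg = load_yaml(join_path(get_world_configs_path(), "collision_table.yml"))

mpc_config = MpcSolverConfig.load_from_robot_config(
    robot_cfg,
    world_cfg,
    use_cuda_graph=True,
    project_pose_to_goal_frame=True,  # measure pose error in the goal frame
)
mpc = MpcSolver(mpc_config)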
+ """ + if check_validity: + # check if constraint is valid: + if metric.hold_partial_pose and metric.offset_tstep_fraction < 0.0: + if start_state is None: + log_error("Need start state to hold partial pose") + if goal_pose is None: + log_error("Need goal pose to hold partial pose") + start_pose = self.compute_kinematics(start_state).ee_pose.clone() + if self.project_pose_to_goal_frame: + # project start pose to goal frame: + projected_pose = goal_pose.compute_local_pose(start_pose) + if torch.count_nonzero(metric.hold_vec_weight[:3] > 0.0) > 0: + # angular distance should be zero: + distance = projected_pose.angular_distance( + Pose.from_list([0, 0, 0, 1, 0, 0, 0], tensor_args=self.tensor_args) + ) + if torch.max(distance) > 0.05: + log_warn( + "Partial orientation between start and goal is not equal" + + str(distance) + ) + return False + + # check linear distance: + if ( + torch.count_nonzero( + torch.abs( + projected_pose.position[..., metric.hold_vec_weight[3:] > 0.0] + ) + > 0.005 + ) + > 0 + ): + log_warn("Partial position between start and goal is not equal.") + return False + else: + # project start pose to goal frame: + projected_position = goal_pose.position - start_pose.position + # check linear distance: + if ( + torch.count_nonzero( + torch.abs(projected_position[..., metric.hold_vec_weight[3:] > 0.0]) + > 0.005 + ) + > 0 + ): + log_warn("Partial position between start and goal is not equal.") + return False + + rollout_list = [] + for opt in self.solver.optimizers: + rollout_list.append(opt.rollout_fn) + rollout_list += [self.solver.safety_rollout, self.rollout_fn] + + [ + rollout.update_pose_cost_metric(metric) + for rollout in rollout_list + if isinstance(rollout, ArmReacher) + ] + return True + + def compute_kinematics(self, state: JointState) -> KinematicModelState: + """Compute kinematics for a given joint state. + + Args: + state: Joint state of the robot. Only :attr:`JointState.position` is used. + + Returns: + KinematicModelState: Kinematic state of the robot. + """ + out = self.rollout_fn.compute_kinematics(state) + return out + @property def joint_names(self): """Get the ordered joint names of the robot.""" @@ -624,6 +728,11 @@ def world_collision(self) -> WorldCollision: """Get the world collision checker.""" return self.world_coll_checker + @property + def project_pose_to_goal_frame(self) -> bool: + """Check if the pose cost metric is projected to goal frame.""" + return self.rollout_fn.goal_cost.project_distance + def _step_once( self, current_state: JointState, diff --git a/tests/curobo_robot_world_model_test.py b/tests/curobo_robot_world_model_test.py index 4991e7db..28517a33 100644 --- a/tests/curobo_robot_world_model_test.py +++ b/tests/curobo_robot_world_model_test.py @@ -135,8 +135,9 @@ def test_cu_robot_batch_world_collision(): assert d_world.shape[0] == b assert torch.sum(d_world) == 0.0 + def test_cu_robot_get_link_transform(): model = load_robot_world() world_T_panda_hand = model.kinematics.get_link_transform("panda_hand") # It seems the panda hand is initialized at the origin. 
- assert torch.sum(world_T_panda_hand.position) == 0.0 \ No newline at end of file + assert torch.sum(world_T_panda_hand.position) == 0.0 diff --git a/tests/mpc_test.py b/tests/mpc_test.py index 0867dcf6..86af2e85 100644 --- a/tests/mpc_test.py +++ b/tests/mpc_test.py @@ -21,7 +21,7 @@ from curobo.types.math import Pose from curobo.types.robot import JointState, RobotConfig from curobo.util_file import get_robot_configs_path, get_world_configs_path, join_path, load_yaml -from curobo.wrap.reacher.mpc import MpcSolver, MpcSolverConfig +from curobo.wrap.reacher.mpc import MpcSolver, MpcSolverConfig, PoseCostMetric @pytest.fixture(scope="module") @@ -233,3 +233,65 @@ def test_mpc_batch_env(mpc_batch_env): if tstep > 1000: break assert converged + + +@pytest.mark.parametrize( + "mpc_str, expected", + [ + ("mpc_single_env", True), + # ("mpc_single_env_lbfgs", True), unstable + ], +) +def test_mpc_single_pose_metric(mpc_str, expected, request): + mpc_val = request.getfixturevalue(mpc_str) + mpc = mpc_val[0] + retract_cfg = mpc_val[1] + start_state = retract_cfg + state = mpc.rollout_fn.compute_kinematics(JointState.from_position(retract_cfg)) + + state = mpc.compute_kinematics(JointState.from_position(retract_cfg.view(1, -1))) + + goal_pose = state.ee_pose.clone() + + start_state = JointState.from_position( + retract_cfg.view(1, -1) + 0.3, joint_names=mpc.joint_names + ) + + start_pose = mpc.compute_kinematics(start_state).ee_pose.clone() + goal_pose.position = start_pose.position.clone() + goal_pose.quaternion = start_pose.quaternion.clone() + + if mpc.project_pose_to_goal_frame: + offset_pose = Pose.from_list([0, 0.1, 0, 1, 0, 0, 0]) + goal_pose = goal_pose.multiply(offset_pose) + else: + goal_pose.position[0, 1] += 0.2 + + goal = Goal( + current_state=JointState.from_position(retract_cfg + 0.5), + goal_state=JointState.from_position(retract_cfg), + goal_pose=goal_pose, + ) + goal_buffer = mpc.setup_solve_single(goal, 1) + + converged = False + tstep = 0 + mpc.update_goal(goal_buffer) + current_state = start_state.clone() + + pose_cost_metric = PoseCostMetric( + hold_partial_pose=True, + hold_vec_weight=mpc.tensor_args.to_device([1, 1, 1, 1, 0, 1]), + ) + mpc.update_pose_cost_metric(pose_cost_metric, start_state, goal_pose) + while not converged: + result = mpc.step(current_state, max_attempts=1) + torch.cuda.synchronize() + current_state.copy_(result.action) + tstep += 1 + if result.metrics.pose_error.item() < 0.05: + converged = True + break + if tstep > 200: + break + assert converged == expected diff --git a/tests/world_config_test.py b/tests/world_config_test.py index 4bf6216b..bbab847a 100644 --- a/tests/world_config_test.py +++ b/tests/world_config_test.py @@ -34,9 +34,6 @@ def test_world_modify(): color=[0.8, 0.0, 0.0, 1.0], ) - # describe a mesh obstacle - # import a mesh file: - mesh_file = join_path(get_assets_path(), "scene/nvblox/srl_ur10_bins.obj") obstacle_2 = Mesh( @@ -170,3 +167,70 @@ def test_batch_collision(): ) assert d[0] == 0.2 and d[1] == 0.0 + + +def test_world_modify_cpu(): + tensor_args = TensorDeviceType() + obstacle_1 = Cuboid( + name="cube_1", + pose=[0.0, 0.0, 0.0, 0.043, -0.471, 0.284, 0.834], + dims=[0.2, 1.0, 0.2], + color=[0.8, 0.0, 0.0, 1.0], + ) + + mesh_file = join_path(get_assets_path(), "scene/nvblox/srl_ur10_bins.obj") + + obstacle_2 = Mesh( + name="mesh_1", + pose=[0.0, 2, 0.5, 0.043, -0.471, 0.284, 0.834], + file_path=mesh_file, + scale=[0.5, 0.5, 0.5], + ) + + obstacle_3 = Capsule( + name="capsule", + radius=0.2, + base=[0, 0, 0], + tip=[0, 0, 0.5], + 
pose=[0.0, 5, 0.0, 0.043, -0.471, 0.284, 0.834], + # pose=[0.0, 5, 0.0, 1,0,0,0], + color=[0, 1.0, 0, 1.0], + ) + + obstacle_4 = Cylinder( + name="cylinder_1", + radius=0.2, + height=0.5, + pose=[0.0, 6, 0.0, 0.043, -0.471, 0.284, 0.834], + color=[0, 1.0, 0, 1.0], + ) + + obstacle_5 = Sphere( + name="sphere_1", + radius=0.2, + pose=[0.0, 7, 0.0, 0.043, -0.471, 0.284, 0.834], + color=[0, 1.0, 0, 1.0], + ) + + world_model = WorldConfig( + mesh=[obstacle_2], + cuboid=[obstacle_1], + capsule=[obstacle_3], + cylinder=[obstacle_4], + sphere=[obstacle_5], + ) + world_model.randomize_color(r=[0.2, 0.7], g=[0.4, 0.8], b=[0.0, 0.4]) + + collision_support_world = WorldConfig.create_collision_support_world(world_model) + + world_collision_config = WorldCollisionConfig(tensor_args, world_model=collision_support_world) + world_ccheck = WorldMeshCollision(world_collision_config) + + world_ccheck.enable_obstacle("sphere_1", False) + + w_pose = Pose.from_list([0, 0, 1, 1, 0, 0, 0], tensor_args) + world_ccheck.update_obstacle_pose( + name="cylinder_1", w_obj_pose=w_pose, update_cpu_reference=True + ) + + assert world_ccheck.world_model.get_obstacle("cylinder_1").pose[2] == 1
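A short follow-on sketch: update_cpu_reference is the optional flag that keeps the CPU-side WorldConfig in sync with the pose pushed to the collision checker, so repeated updates stay observable through world_model. world_ccheck and tensor_args are reused from the test above:

w_pose_new = Pose.from_list([0, 0, 2, 1, 0, 0, 0], tensor_args)
world_ccheck.update_obstacle_pose(
    name="cylinder_1", w_obj_pose=w_pose_new, update_cpu_reference=True
)
assert world_ccheck.world_model.get_obstacle("cylinder_1").pose[2] == 2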