Update pytorch interface #65

Merged (7 commits) on Jan 26, 2024
14 changes: 9 additions & 5 deletions src/adam/parametric/pytorch/computations_parametric.py
@@ -23,7 +23,9 @@ def __init__(
joints_name_list: list,
links_name_list: list,
root_link: str = "root_link",
gravity: np.array = torch.FloatTensor([0, 0, -9.80665, 0, 0, 0]),
gravity: np.array = torch.tensor(
[0, 0, -9.80665, 0, 0, 0], dtype=torch.float64
),
) -> None:
"""
Args:
@@ -119,7 +121,7 @@ def forward_kinematics(
self,
frame,
base_transform: torch.Tensor,
s: torch.Tensor,
joint_positions: torch.Tensor,
length_multiplier: torch.Tensor,
densities: torch.Tensor,
) -> torch.Tensor:
@@ -128,7 +130,7 @@ def forward_kinematics(
Args:
frame (str): The frame to which the fk will be computed
base_transform (torch.tensor): The homogenous transform from base to world frame
s (torch.tensor): The joints position
joint_positions (torch.tensor): The joints position
length_multiplier (torch.tensor): The length multiplier of the parametrized links
densities (torch.tensor): The densities of the parametrized links

@@ -149,7 +151,9 @@ def forward_kinematics(
self.NDoF = self.rbdalgos.NDoF
return (
self.rbdalgos.forward_kinematics(
frame, torch.FloatTensor(base_transform), torch.FloatTensor(s)
frame,
base_transform,
joint_positions,
)
).array

@@ -414,7 +418,7 @@ def gravity_term(
base_positions,
torch.zeros(6).reshape(6, 1),
torch.zeros(self.NDoF),
torch.FloatTensor(self.g),
self.g,
).array.squeeze()

def get_total_mass(
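For context on the gravity default above: `torch.FloatTensor` always produces a float32 tensor, while `torch.tensor(..., dtype=torch.float64)` keeps the vector in double precision. A minimal sketch of that difference (plain PyTorch, not part of this diff):

```python
import torch

# torch.FloatTensor ignores the global default dtype and always yields float32.
old_g = torch.FloatTensor([0, 0, -9.80665, 0, 0, 0])
print(old_g.dtype)  # torch.float32

# torch.tensor with an explicit dtype keeps the full float64 precision.
new_g = torch.tensor([0, 0, -9.80665, 0, 0, 0], dtype=torch.float64)
print(new_g.dtype)  # torch.float64
```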
10 changes: 6 additions & 4 deletions src/adam/pytorch/computations.py
@@ -21,7 +21,9 @@ def __init__(
urdfstring: str,
joints_name_list: list = None,
root_link: str = "root_link",
gravity: np.array = torch.FloatTensor([0, 0, -9.80665, 0, 0, 0]),
gravity: np.array = torch.tensor(
[0, 0, -9.80665, 0, 0, 0], dtype=torch.float64
),
) -> None:
"""
Args:
@@ -92,8 +94,8 @@ def forward_kinematics(
return (
self.rbdalgos.forward_kinematics(
frame,
torch.FloatTensor(base_transform),
torch.FloatTensor(joint_position),
base_transform,
joint_position,
)
).array

@@ -240,7 +242,7 @@ def gravity_term(
joint_positions,
torch.zeros(6).reshape(6, 1),
torch.zeros(self.NDoF),
torch.FloatTensor(self.g),
self.g,
).array.squeeze()

def get_total_mass(self) -> float:
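The same reasoning applies here: with the forced `torch.FloatTensor(...)` casts removed, factory calls such as `torch.zeros` follow whatever default dtype the caller has set. A small illustration (standard PyTorch behavior, shown for reference only):

```python
import torch

torch.set_default_dtype(torch.float64)

# Factory functions follow the global default once no explicit cast is forced.
print(torch.zeros(6).reshape(6, 1).dtype)  # torch.float64

# A trailing .float() would silently downcast back to float32.
print(torch.zeros(6).float().dtype)        # torch.float32
```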
47 changes: 29 additions & 18 deletions src/adam/pytorch/torch_like.py
@@ -7,9 +7,9 @@

import numpy.typing as ntp
import torch
import numpy as np

from adam.core.spatial_math import ArrayLike, ArrayLikeFactory, SpatialMath
from adam.numpy import NumpyLike


@dataclass
@@ -18,12 +18,17 @@ class TorchLike(ArrayLike):

array: torch.Tensor

def __post_init__(self):
"""Converts array to double precision"""
if self.array.dtype != torch.float64:
self.array = self.array.double()

def __setitem__(self, idx, value: Union["TorchLike", ntp.ArrayLike]) -> "TorchLike":
"""Overrides set item operator"""
if type(self) is type(value):
self.array[idx] = value.array.reshape(self.array[idx].shape)
else:
self.array[idx] = torch.tensor(value)
self.array[idx] = torch.tensor(value) if isinstance(value, float) else value

def __getitem__(self, idx):
"""Overrides get item operator"""
@@ -42,24 +47,29 @@ def T(self) -> "TorchLike":
Returns:
TorchLike: transpose of array
"""
if len(self.array.shape) != 1:
return TorchLike(self.array.mT)
# check if self.array is a 0-D tensor

if len(self.array.shape) == 0:
return TorchLike(self.array)
x = self.array
return TorchLike(x.permute(*torch.arange(x.ndim - 1, -1, -1)))

def __matmul__(self, other: Union["TorchLike", ntp.ArrayLike]) -> "TorchLike":
"""Overrides @ operator"""

if type(self) is type(other):
return TorchLike(self.array @ other.array.float())
return TorchLike(self.array @ other.array)
if isinstance(other, torch.Tensor):
return TorchLike(self.array @ other)
else:
return TorchLike(self.array @ torch.tensor(other).float())
return TorchLike(self.array @ torch.tensor(other))

def __rmatmul__(self, other: Union["TorchLike", ntp.ArrayLike]) -> "TorchLike":
"""Overrides @ operator"""
if type(self) is type(other):
return TorchLike(other.array @ self.array)
else:
return TorchLike(torch.tensor(other).float() @ self.array)
return TorchLike(torch.tensor(other) @ self.array)

def __mul__(self, other: Union["TorchLike", ntp.ArrayLike]) -> "TorchLike":
"""Overrides * operator"""
@@ -120,7 +130,7 @@ def zeros(*x: int) -> "TorchLike":
Returns:
TorchLike: zero matrix of dimension *x
"""
return TorchLike(torch.zeros(x).float())
return TorchLike(torch.zeros(x))

@staticmethod
def eye(x: int) -> "TorchLike":
@@ -131,15 +141,15 @@ def eye(x: int) -> "TorchLike":
Returns:
TorchLike: identity matrix of dimension x
"""
return TorchLike(torch.eye(x).float())
return TorchLike(torch.eye(x))

@staticmethod
def array(x: ntp.ArrayLike) -> "TorchLike":
"""
Returns:
TorchLike: vector wrapping x
"""
return TorchLike(torch.FloatTensor(x))
return TorchLike(torch.tensor(x))


class SpatialMath(SpatialMath):
@@ -155,7 +165,8 @@ def sin(x: ntp.ArrayLike) -> "TorchLike":
Returns:
TorchLike: sin value of x
"""
x = torch.tensor(x)
if isinstance(x, float):
x = torch.tensor(x)
return TorchLike(torch.sin(x))

@staticmethod
@@ -167,7 +178,9 @@ def cos(x: ntp.ArrayLike) -> "TorchLike":
Returns:
TorchLike: cos value of x
"""
x = torch.tensor(x)
# transform to torch tensor, if not already
if isinstance(x, float):
x = torch.tensor(x)
return TorchLike(torch.cos(x))

@staticmethod
@@ -193,13 +206,11 @@ def skew(x: Union["TorchLike", ntp.ArrayLike]) -> "TorchLike":
"""
if not isinstance(x, TorchLike):
return TorchLike(
torch.FloatTensor(
[[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]]
)
torch.tensor([[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]])
)
x = x.array
return TorchLike(
torch.FloatTensor([[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]])
torch.tensor([[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]])
)

@staticmethod
@@ -211,7 +222,7 @@ def vertcat(*x: ntp.ArrayLike) -> "TorchLike":
if isinstance(x[0], TorchLike):
v = torch.vstack([x[i].array for i in range(len(x))])
else:
v = torch.FloatTensor(x)
v = torch.tensor(x)
return TorchLike(v)

@staticmethod
@@ -223,5 +234,5 @@ def horzcat(*x: ntp.ArrayLike) -> "TorchLike":
if isinstance(x[0], TorchLike):
v = torch.hstack([x[i].array for i in range(len(x))])
else:
v = torch.FloatTensor(x)
v = torch.tensor(x)
return TorchLike(v)
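The new `__post_init__` promotes whatever tensor is wrapped to double precision at construction time. A self-contained sketch of that pattern, using a hypothetical `TensorWrapper` that mirrors the `TorchLike` dataclass above:

```python
import torch
from dataclasses import dataclass


@dataclass
class TensorWrapper:
    # Hypothetical stand-in for TorchLike, shown only to illustrate the pattern.
    array: torch.Tensor

    def __post_init__(self):
        # Promote to double precision on construction, as TorchLike now does.
        if self.array.dtype != torch.float64:
            self.array = self.array.double()


w = TensorWrapper(torch.ones(3, dtype=torch.float32))
print(w.array.dtype)  # torch.float64
```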
42 changes: 28 additions & 14 deletions tests/body_fixed/test_pytorch_body_fixed.py
@@ -13,6 +13,7 @@
from adam import Representations
from adam.geometry import utils
from adam.pytorch import KinDynComputations
from adam.pytorch.torch_like import SpatialMath

np.random.seed(42)
torch.set_default_dtype(torch.float64)
@@ -71,17 +72,18 @@ def H_from_Pos_RPY_idyn(xyz, rpy):
kinDyn.setFrameVelocityRepresentation(idyntree.BODY_FIXED_REPRESENTATION)
n_dofs = len(joints_name_list)

# base pose quantities
xyz = (np.random.rand(3) - 0.5) * 5
rpy = (np.random.rand(3) - 0.5) * 5
base_vel = (np.random.rand(6) - 0.5) * 5
xyz = (torch.rand(3) - 0.5) * 5
rpy = (torch.rand(3) - 0.5) * 5
base_vel = (torch.rand(6) - 0.5) * 5
# joints quantitites
joints_val = (np.random.rand(n_dofs) - 0.5) * 5
joints_dot_val = (np.random.rand(n_dofs) - 0.5) * 5
joints_val = (torch.rand(n_dofs) - 0.5) * 5
joints_dot_val = (torch.rand(n_dofs) - 0.5) * 5

g = np.array([0, 0, -9.80665])
H_b = utils.H_from_Pos_RPY(xyz, rpy)
kinDyn.setRobotState(H_b, joints_val, base_vel, joints_dot_val, g)
g = torch.tensor([0, 0, -9.80665])
H_b = SpatialMath().H_from_Pos_RPY(xyz, rpy).array
kinDyn.setRobotState(
H_b.numpy(), joints_val.numpy(), base_vel.numpy(), joints_dot_val.numpy(), g.numpy()
)


def test_mass_matrix():
Expand Down Expand Up @@ -165,8 +167,14 @@ def test_bias_force():


def test_coriolis_term():
g0 = np.zeros(3)
kinDyn.setRobotState(H_b, joints_val, base_vel, joints_dot_val, g0)
g0 = torch.zeros(3)
kinDyn.setRobotState(
H_b.numpy(),
joints_val.numpy(),
base_vel.numpy(),
joints_dot_val.numpy(),
g0.numpy(),
)
C_iDyn = idyntree.FreeFloatingGeneralizedTorques(kinDyn.model())
assert kinDyn.generalizedBiasForces(C_iDyn)
C_iDyn_np = np.concatenate(
@@ -181,9 +189,15 @@ def test_gravity_term():
kinDyn2.loadRobotModel(robot_iDyn.model())
kinDyn2.setFloatingBase(root_link)
kinDyn2.setFrameVelocityRepresentation(idyntree.BODY_FIXED_REPRESENTATION)
base_vel0 = np.zeros(6)
joints_dot_val0 = np.zeros(n_dofs)
kinDyn2.setRobotState(H_b, joints_val, base_vel0, joints_dot_val0, g)
base_vel0 = torch.zeros(6)
joints_dot_val0 = torch.zeros(n_dofs)
kinDyn2.setRobotState(
H_b.numpy(),
joints_val.numpy(),
base_vel0.numpy(),
joints_dot_val0.numpy(),
g.numpy(),
)
G_iDyn = idyntree.FreeFloatingGeneralizedTorques(kinDyn2.model())
assert kinDyn2.generalizedBiasForces(G_iDyn)
G_iDyn_np = np.concatenate(
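The tests now build their state with torch and hand NumPy views to iDynTree via `.numpy()`. For a CPU tensor that does not require grad, `.numpy()` returns a zero-copy view whose dtype matches the tensor's dtype. A quick check (plain PyTorch, outside the PR):

```python
import torch

torch.set_default_dtype(torch.float64)

t = (torch.rand(3) - 0.5) * 5
a = t.numpy()           # zero-copy NumPy view of the CPU tensor
print(a.dtype)          # float64, matching the tensor's dtype

a[0] = 1.0              # writes through to the same storage
print(t[0].item())      # 1.0
```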
42 changes: 28 additions & 14 deletions tests/mixed/test_pytorch_mixed.py
@@ -13,6 +13,7 @@
from adam import Representations
from adam.geometry import utils
from adam.pytorch import KinDynComputations
from adam.pytorch.torch_like import SpatialMath

np.random.seed(42)
torch.set_default_dtype(torch.float64)
@@ -71,17 +72,18 @@ def H_from_Pos_RPY_idyn(xyz, rpy):
kinDyn.setFrameVelocityRepresentation(idyntree.MIXED_REPRESENTATION)
n_dofs = len(joints_name_list)

# base pose quantities
xyz = (np.random.rand(3) - 0.5) * 5
rpy = (np.random.rand(3) - 0.5) * 5
base_vel = (np.random.rand(6) - 0.5) * 5
xyz = (torch.rand(3) - 0.5) * 5
rpy = (torch.rand(3) - 0.5) * 5
base_vel = (torch.rand(6) - 0.5) * 5
# joints quantitites
joints_val = (np.random.rand(n_dofs) - 0.5) * 5
joints_dot_val = (np.random.rand(n_dofs) - 0.5) * 5
joints_val = (torch.rand(n_dofs) - 0.5) * 5
joints_dot_val = (torch.rand(n_dofs) - 0.5) * 5

g = np.array([0, 0, -9.80665])
H_b = utils.H_from_Pos_RPY(xyz, rpy)
kinDyn.setRobotState(H_b, joints_val, base_vel, joints_dot_val, g)
g = torch.tensor([0, 0, -9.80665])
H_b = SpatialMath().H_from_Pos_RPY(xyz, rpy).array
kinDyn.setRobotState(
H_b.numpy(), joints_val.numpy(), base_vel.numpy(), joints_dot_val.numpy(), g.numpy()
)


def test_mass_matrix():
Expand Down Expand Up @@ -165,8 +167,14 @@ def test_bias_force():


def test_coriolis_term():
g0 = np.zeros(3)
kinDyn.setRobotState(H_b, joints_val, base_vel, joints_dot_val, g0)
g0 = torch.zeros(3)
kinDyn.setRobotState(
H_b.numpy(),
joints_val.numpy(),
base_vel.numpy(),
joints_dot_val.numpy(),
g0.numpy(),
)
C_iDyn = idyntree.FreeFloatingGeneralizedTorques(kinDyn.model())
assert kinDyn.generalizedBiasForces(C_iDyn)
C_iDyn_np = np.concatenate(
@@ -181,9 +189,15 @@ def test_gravity_term():
kinDyn2.loadRobotModel(robot_iDyn.model())
kinDyn2.setFloatingBase(root_link)
kinDyn2.setFrameVelocityRepresentation(idyntree.MIXED_REPRESENTATION)
base_vel0 = np.zeros(6)
joints_dot_val0 = np.zeros(n_dofs)
kinDyn2.setRobotState(H_b, joints_val, base_vel0, joints_dot_val0, g)
base_vel0 = torch.zeros(6)
joints_dot_val0 = torch.zeros(n_dofs)
kinDyn2.setRobotState(
H_b.numpy(),
joints_val.numpy(),
base_vel0.numpy(),
joints_dot_val0.numpy(),
g.numpy(),
)
G_iDyn = idyntree.FreeFloatingGeneralizedTorques(kinDyn2.model())
assert kinDyn2.generalizedBiasForces(G_iDyn)
G_iDyn_np = np.concatenate(
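Same setup as the body-fixed test above. One detail worth keeping in mind is that the random quantities are now drawn with `torch.rand`, whose dtype and seeding are governed by torch rather than NumPy. A minimal sketch (illustrative values only, not taken from the test):

```python
import torch

torch.set_default_dtype(torch.float64)
torch.manual_seed(42)   # torch's own RNG seed; np.random.seed does not affect torch.rand

n_dofs = 23             # hypothetical DoF count, for illustration only
joints_val = (torch.rand(n_dofs) - 0.5) * 5
print(joints_val.dtype)          # torch.float64
print(joints_val.numpy().dtype)  # float64, i.e. what iDynTree receives
```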