diff --git a/common/vision/lasr_vision_clip/src/lasr_vision_clip/clip_utils.py b/common/vision/lasr_vision_clip/src/lasr_vision_clip/clip_utils.py
index fc92fe7cd..2642267d8 100644
--- a/common/vision/lasr_vision_clip/src/lasr_vision_clip/clip_utils.py
+++ b/common/vision/lasr_vision_clip/src/lasr_vision_clip/clip_utils.py
@@ -37,6 +37,7 @@ def run_clip(
Returns:
List[float]: the cosine similarity scores between the image and label embeddings.
"""
+
txt = model.encode(labels)
img = model.encode(img)
with torch.no_grad():
diff --git a/common/vision/lasr_vision_deepface/examples/check_known_people_test b/common/vision/lasr_vision_deepface/examples/check_known_people_test
new file mode 100755
index 000000000..ff0c97673
--- /dev/null
+++ b/common/vision/lasr_vision_deepface/examples/check_known_people_test
@@ -0,0 +1,12 @@
+#!/usr/bin/env python3
+
+import rospy
+from lasr_vision_msgs.srv import CheckKnownPeople
+
+if __name__ == "__main__":
+
+ # get the known people from the service
+ rospy.wait_for_service("/check_known_people")
+ check_known_people = rospy.ServiceProxy("/check_known_people", CheckKnownPeople)
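+    # rospy builds the CheckKnownPeopleRequest from the positional task_name argument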
+ response = check_known_people("receptionist")
+ print(response.known_people)
diff --git a/common/vision/lasr_vision_deepface/nodes/check_known_people_service b/common/vision/lasr_vision_deepface/nodes/check_known_people_service
new file mode 100755
index 000000000..5003544f2
--- /dev/null
+++ b/common/vision/lasr_vision_deepface/nodes/check_known_people_service
@@ -0,0 +1,26 @@
+#!/usr/bin/env python3
+
+import rospy
+import os
+import rospkg
+from lasr_vision_msgs.srv import CheckKnownPeopleRequest, CheckKnownPeopleResponse, CheckKnownPeople
+
+DATASET_ROOT = os.path.join(
+ rospkg.RosPack().get_path("lasr_vision_deepface"), "datasets"
+)
+
+rospy.init_node("check_known_people_service")
+
+def check_known_people(request: CheckKnownPeopleRequest) -> CheckKnownPeopleResponse:
+ dataset_path = os.path.join(DATASET_ROOT, request.task_name)
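+    # each subdirectory of the task's dataset is one known person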
+ known_people_names = [
+ f
+ for f in os.listdir(dataset_path)
+ if os.path.isdir(os.path.join(dataset_path, f))
+ ]
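+    # also expose the list on the parameter server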
+ rospy.set_param("/known_people", known_people_names)
+ return CheckKnownPeopleResponse(known_people_names)
+
+rospy.Service("/check_known_people", CheckKnownPeople, check_known_people)
+rospy.loginfo("Check known people service started")
+rospy.spin()
\ No newline at end of file
diff --git a/common/vision/lasr_vision_deepface/nodes/service b/common/vision/lasr_vision_deepface/nodes/service
index 424bcaf1c..0bffa407e 100644
--- a/common/vision/lasr_vision_deepface/nodes/service
+++ b/common/vision/lasr_vision_deepface/nodes/service
@@ -16,7 +16,6 @@ from lasr_vision_msgs.srv import (
DetectFacesResponse,
)
-
rospy.init_node("recognise_service")
# Determine variables
diff --git a/common/vision/lasr_vision_deepface/src/lasr_vision_deepface/deepface.py b/common/vision/lasr_vision_deepface/src/lasr_vision_deepface/deepface.py
index fe822e94b..a563aff7d 100644
--- a/common/vision/lasr_vision_deepface/src/lasr_vision_deepface/deepface.py
+++ b/common/vision/lasr_vision_deepface/src/lasr_vision_deepface/deepface.py
@@ -24,7 +24,6 @@
rospkg.RosPack().get_path("lasr_vision_deepface"), "datasets"
)
-
Mat = np.ndarray
@@ -100,8 +99,8 @@ def create_dataset(
face_cropped_cv_im = _extract_face(cv_im)
if face_cropped_cv_im is None:
continue
- cv2.imwrite(os.path.join(dataset_path, f"{name}_{i+1}.png"), face_cropped_cv_im) # type: ignore
- rospy.loginfo(f"Took picture {i+1}")
+ cv2.imwrite(os.path.join(dataset_path, f"{name}_{i + 1}.png"), face_cropped_cv_im) # type: ignore
+ rospy.loginfo(f"Took picture {i + 1}")
images.append(face_cropped_cv_im)
if debug_publisher is not None:
debug_publisher.publish(
@@ -132,6 +131,7 @@ def recognise(
# Run inference
rospy.loginfo("Running inference")
+
try:
result = DeepFace.find(
cv_im,
@@ -197,7 +197,6 @@ def detect_faces(
request: DetectFacesRequest,
debug_publisher: Union[rospy.Publisher, None],
) -> DetectFacesResponse:
-
cv_im = cv2_img.msg_to_cv2_img(request.image_raw)
response = DetectFacesResponse()
diff --git a/common/vision/lasr_vision_feature_extraction/requirements.in b/common/vision/lasr_vision_feature_extraction/requirements.in
index 421d8a65e..e69de29bb 100644
--- a/common/vision/lasr_vision_feature_extraction/requirements.in
+++ b/common/vision/lasr_vision_feature_extraction/requirements.in
@@ -1 +0,0 @@
-torchvision==0.16.0
diff --git a/common/vision/lasr_vision_feature_extraction/requirements.txt b/common/vision/lasr_vision_feature_extraction/requirements.txt
index a765cae91..8b1378917 100644
--- a/common/vision/lasr_vision_feature_extraction/requirements.txt
+++ b/common/vision/lasr_vision_feature_extraction/requirements.txt
@@ -1,30 +1 @@
-certifi==2023.11.17 # via requests
-charset-normalizer==3.3.2 # via requests
-filelock==3.13.1 # via torch, triton
-fsspec==2023.10.0 # via torch
-idna==3.4 # via requests
-jinja2==3.1.2 # via torch
-markupsafe==2.1.3 # via jinja2
-mpmath==1.3.0 # via sympy
-networkx==3.2.1 # via torch
-numpy==1.26.2 # via torchvision
-nvidia-cublas-cu12==12.1.3.1 # via nvidia-cudnn-cu12, nvidia-cusolver-cu12, torch
-nvidia-cuda-cupti-cu12==12.1.105 # via torch
-nvidia-cuda-nvrtc-cu12==12.1.105 # via torch
-nvidia-cuda-runtime-cu12==12.1.105 # via torch
-nvidia-cudnn-cu12==8.9.2.26 # via torch
-nvidia-cufft-cu12==11.0.2.54 # via torch
-nvidia-curand-cu12==10.3.2.106 # via torch
-nvidia-cusolver-cu12==11.4.5.107 # via torch
-nvidia-cusparse-cu12==12.1.0.106 # via nvidia-cusolver-cu12, torch
-nvidia-nccl-cu12==2.18.1 # via torch
-nvidia-nvjitlink-cu12==12.3.101 # via nvidia-cusolver-cu12, nvidia-cusparse-cu12
-nvidia-nvtx-cu12==12.1.105 # via torch
-pillow==10.1.0 # via torchvision
-requests==2.31.0 # via torchvision
-sympy==1.12 # via torch
-torch==2.1.0 # via torchvision
-torchvision==0.16.0 # via -r requirements.in
-triton==2.1.0 # via torch
-typing-extensions==4.8.0 # via torch
-urllib3==2.1.0 # via requests
+
diff --git a/common/vision/lasr_vision_msgs/CMakeLists.txt b/common/vision/lasr_vision_msgs/CMakeLists.txt
index 911af4f06..3cde22083 100644
--- a/common/vision/lasr_vision_msgs/CMakeLists.txt
+++ b/common/vision/lasr_vision_msgs/CMakeLists.txt
@@ -65,6 +65,7 @@ add_service_files(
PointingDirection.srv
TorchFaceFeatureDetectionDescription.srv
DetectFaces.srv
+ CheckKnownPeople.srv
)
# Generate actions in the 'action' folder
diff --git a/common/vision/lasr_vision_msgs/srv/CheckKnownPeople.srv b/common/vision/lasr_vision_msgs/srv/CheckKnownPeople.srv
new file mode 100644
index 000000000..6e7061476
--- /dev/null
+++ b/common/vision/lasr_vision_msgs/srv/CheckKnownPeople.srv
@@ -0,0 +1,5 @@
+# Task name - receptionist, carry my luggage, etc
+string task_name
+---
+# Known people names
+string[] known_people
diff --git a/common/vision/lasr_vision_msgs/srv/LearnFace.srv b/common/vision/lasr_vision_msgs/srv/LearnFace.srv
index 5376e53d5..9410424fa 100644
--- a/common/vision/lasr_vision_msgs/srv/LearnFace.srv
+++ b/common/vision/lasr_vision_msgs/srv/LearnFace.srv
@@ -1,9 +1,9 @@
-# Name to associate
-string name
-
# Dataset to add face to
string dataset
+# Name to associate
+string name
+
# Number of images to take
int32 n_images
diff --git a/skills/scripts/test_learn_face.py b/skills/scripts/test_learn_face.py
new file mode 100755
index 000000000..e85672d29
--- /dev/null
+++ b/skills/scripts/test_learn_face.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+import rospy
+import smach
+from lasr_skills import LearnFace
+
+if __name__ == "__main__":
+ rospy.init_node("learn_face")
+    # TODO: use segmentation instead for dataset creation
+
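+    # smoke test: learn "nicole" into the "receptionist" dataset from 10 images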
+ s = smach.StateMachine(outcomes=["succeeded", "failed"])
+ with s:
+ smach.StateMachine.add(
+ "LEARN_FACE",
+ LearnFace(dataset="receptionist", name="nicole", n_images=10),
+ transitions={
+ "succeeded": "succeeded",
+ "failed": "failed",
+ },
+ )
+
+ s.execute()
diff --git a/skills/scripts/test_look_at_person.py b/skills/scripts/test_look_at_person.py
new file mode 100755
index 000000000..7b0bc7bf7
--- /dev/null
+++ b/skills/scripts/test_look_at_person.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python3
+
+import rospy
+import smach
+import smach_ros
+from lasr_skills.vision import GetPointCloud
+from lasr_skills import LookAtPerson
+
+
+if __name__ == "__main__":
+ rospy.init_node("look_at_person")
+ sm = smach.StateMachine(outcomes=["succeeded", "failed"])
+ with sm:
+ smach.StateMachine.add(
+ "GET_POINTCLOUD",
+ GetPointCloud("/xtion/depth_registered/points"),
+ transitions={"succeeded": "succeeded"},
+ remapping={"pcl_msg": "pcl_msg"},
+ )
+
+ sis = smach_ros.IntrospectionServer("pointcloud_server", sm, "/LOOK_AT_PERSON")
+ sis.start()
+ sm.execute()
+ pcl = sm.userdata.pcl_msg
+ sis.stop()
+
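+    # second stage: reuse the captured cloud with deepface filtering disabled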
+ sm = LookAtPerson(filter=False)
+ sm.userdata.pcl_msg = pcl
+ sm.userdata.deepface_detection = []
+ outcome = sm.execute()
+ print(outcome)
diff --git a/skills/src/lasr_skills/__init__.py b/skills/src/lasr_skills/__init__.py
index 329549c0c..86ce90ccb 100755
--- a/skills/src/lasr_skills/__init__.py
+++ b/skills/src/lasr_skills/__init__.py
@@ -20,4 +20,6 @@
from .detect_faces import DetectFaces
from .recognise import Recognise
from .detect_gesture import DetectGesture
+from .learn_face import LearnFace
+from .look_at_person import LookAtPerson
from .find_gesture_person import FindGesturePerson
diff --git a/skills/src/lasr_skills/find_named_person.py b/skills/src/lasr_skills/find_named_person.py
index e4a2773fb..0631bbc57 100755
--- a/skills/src/lasr_skills/find_named_person.py
+++ b/skills/src/lasr_skills/find_named_person.py
@@ -6,7 +6,6 @@
from geometry_msgs.msg import Pose, PoseWithCovarianceStamped, Point, Quaternion
-
from typing import List, Union
diff --git a/skills/src/lasr_skills/learn_face.py b/skills/src/lasr_skills/learn_face.py
new file mode 100755
index 000000000..064efc67e
--- /dev/null
+++ b/skills/src/lasr_skills/learn_face.py
@@ -0,0 +1,26 @@
+import rospy
+import smach
+from lasr_vision_msgs.srv import LearnFace as LearnFaceSrv
+
+
+class LearnFace(smach.State):
+ def __init__(
+ self,
+ dataset: str,
+ name: str,
+ n_images: int,
+ ):
+ smach.State.__init__(self, outcomes=["succeeded", "failed"])
+
+ self._dataset = dataset
+ self._name = name
+ self._n_images = n_images
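+        # /learn_face is assumed to be provided by the lasr_vision_deepface service node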
+ self._learn_face = rospy.ServiceProxy("/learn_face", LearnFaceSrv)
+
+ def execute(self, userdata):
+ try:
+            self._learn_face(self._dataset, self._name, self._n_images)
+ return "succeeded"
+ except rospy.ServiceException as e:
+ rospy.logwarn(f"Unable to learn face. ({str(e)})")
+ return "failed"
diff --git a/skills/src/lasr_skills/look_at_person.py b/skills/src/lasr_skills/look_at_person.py
index 1fc477518..11275023e 100755
--- a/skills/src/lasr_skills/look_at_person.py
+++ b/skills/src/lasr_skills/look_at_person.py
@@ -1,7 +1,7 @@
import smach_ros
from geometry_msgs.msg import PointStamped
import smach
-from vision import GetPointCloud
+from .vision import GetPointCloud
from lasr_vision_msgs.srv import BodyPixDetection, BodyPixDetectionRequest
from lasr_vision_msgs.msg import BodyPixMaskRequest
from lasr_skills import LookToPoint, DetectFaces
@@ -35,11 +35,17 @@
class LookAtPerson(smach.StateMachine):
class CheckEyes(smach.State):
- def __init__(self, debug=True):
+ def __init__(self, debug=True, filter=False):
smach.State.__init__(
self,
outcomes=["succeeded", "failed", "no_detection"],
- input_keys=["bbox_eyes", "pcl_msg", "masks", "detections"],
+ input_keys=[
+ "bbox_eyes",
+ "pcl_msg",
+ "masks",
+ "detections",
+ "deepface_detection",
+ ],
output_keys=["pointstamped"],
)
self.DEBUG = debug
@@ -48,6 +54,7 @@ def __init__(self, debug=True):
self.look_at_pub = actionlib.SimpleActionClient(
"/head_controller/point_head_action", PointHeadAction
)
+ self._filter = filter
def execute(self, userdata):
rospy.sleep(1)
@@ -58,6 +65,16 @@ def execute(self, userdata):
):
return "no_detection"
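+            # when filtering, keep only the eye bbox matching the first deepface detection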
+ if self._filter:
+ if userdata.deepface_detection:
+ deepface = userdata.deepface_detection[0]
+ for bbox in userdata.bbox_eyes:
+ if bbox["bbox"] == deepface:
+ userdata.bbox_eyes = [bbox]
+ break
+ else:
+ return "failed"
+
for det in userdata.bbox_eyes:
left_eye = det["left_eye"]
right_eye = det["right_eye"]
@@ -92,11 +109,25 @@ def execute(self, userdata):
userdata.pointstamped = look_at
self.look_at_pub.wait_for_server()
+                # guard against NaN coordinates (NaN != NaN); fall back to the origin
+                if any(i != i for i in (look_at.point.x, look_at.point.y, look_at.point.z)):
+                    look_at.point.x = 0.0
+                    look_at.point.y = 0.0
+                    look_at.point.z = 0.0
+
goal = PointHeadGoal()
goal.pointing_frame = "head_2_link"
goal.pointing_axis = Point(1.0, 0.0, 0.0)
goal.max_velocity = 1.0
goal.target = look_at
+ rospy.loginfo(
+ f"LOOKING AT POINT {look_at.point.x}, {look_at.point.y}, {look_at.point.z}"
+ )
self.look_at_pub.send_goal(goal)
return "succeeded"
@@ -140,12 +171,14 @@ def match_poses_and_detections(ud):
return "succeeded"
- def __init__(self):
- super(LookAtPerson, self).__init__(
- outcomes=["succeeded", "failed"],
- input_keys=[],
- output_keys=["masks", "poses", "pointstamped"],
+ def __init__(self, filter=False):
+ smach.StateMachine.__init__(
+ self,
+ outcomes=["succeeded", "failed", "no_detection"],
+ input_keys=["pcl_msg", "deepface_detection"],
+ output_keys=[],
)
+
self.DEBUG = rospy.get_param("/debug", True)
IS_SIMULATION = (
"/pal_startup_control/start" not in rosservice.get_service_list()
@@ -166,19 +199,11 @@ def __init__(self):
request=StartupStopRequest("head_manager"),
),
transitions={
- "succeeded": "GET_IMAGE",
+ "succeeded": "SEGMENT_PERSON",
"aborted": "failed",
"preempted": "failed",
},
)
- smach.StateMachine.add(
- "GET_IMAGE",
- GetPointCloud("/xtion/depth_registered/points"),
- transitions={
- "succeeded": "SEGMENT_PERSON",
- },
- remapping={"pcl_msg": "pcl_msg"},
- )
eyes = BodyPixMaskRequest()
eyes.parts = ["left_eye", "right_eye"]
@@ -225,16 +250,17 @@ def __init__(self):
)
smach.StateMachine.add(
"CHECK_EYES",
- self.CheckEyes(self.DEBUG),
+ self.CheckEyes(self.DEBUG, filter=filter),
transitions={
- "succeeded": "LOOK_TO_POINT",
+ "succeeded": "LOOP",
"failed": "failed",
- "no_detection": "succeeded",
+ "no_detection": "no_detection",
},
remapping={
"pcl_msg": "pcl_msg",
"bbox_eyes": "bbox_eyes",
"pointstamped": "pointstamped",
+ "deepface_detection": "deepface_detection",
},
)
@@ -258,25 +284,6 @@ def __init__(self):
),
transitions={
"succeeded": "CHECK_EYES",
- "finish": "ENABLE_HEAD_MANAGER",
+ "finish": "succeeded",
},
)
- if not IS_SIMULATION:
- if PUBLIC_CONTAINER:
- rospy.logwarn(
- "You are using a public container. The head manager will not be start following navigation."
- )
- else:
- smach.StateMachine.add(
- "ENABLE_HEAD_MANAGER",
- smach_ros.ServiceState(
- "/pal_startup_control/start",
- StartupStart,
- request=StartupStartRequest("head_manager", ""),
- ),
- transitions={
- "succeeded": "succeeded",
- "preempted": "failed",
- "aborted": "failed",
- },
- )
diff --git a/skills/src/lasr_skills/look_to_point.py b/skills/src/lasr_skills/look_to_point.py
index 92fe2e3e1..3d5e41816 100755
--- a/skills/src/lasr_skills/look_to_point.py
+++ b/skills/src/lasr_skills/look_to_point.py
@@ -1,19 +1,28 @@
-import smach_ros
-from control_msgs.msg import PointHeadGoal, PointHeadAction
-from geometry_msgs.msg import Point, PointStamped
-from std_msgs.msg import Header
+import smach
+import rospy
+import actionlib
+from control_msgs.msg import PointHeadAction, PointHeadGoal
+from geometry_msgs.msg import Point
-class LookToPoint(smach_ros.SimpleActionState):
+class LookToPoint(smach.State):
def __init__(self):
- super(LookToPoint, self).__init__(
- "/head_controller/point_head_action",
- PointHeadAction,
- goal_cb=lambda ud, _: PointHeadGoal(
- pointing_frame="head_2_link",
- pointing_axis=Point(1.0, 0.0, 0.0),
- max_velocity=1.0,
- target=ud.pointstamped,
- ),
+ smach.State.__init__(
+ self,
+ outcomes=["succeeded", "preempted", "aborted"],
input_keys=["pointstamped"],
)
+ self.look_at_pub = actionlib.SimpleActionClient(
+ "/head_controller/point_head_action", PointHeadAction
+ )
+
+ def execute(self, userdata):
+ goal = PointHeadGoal()
+ goal.pointing_frame = "head_2_link"
+ goal.pointing_axis = Point(1.0, 0.0, 0.0)
+ goal.max_velocity = 1.0
+ goal.target = userdata.pointstamped
+ self.look_at_pub.send_goal(goal)
+ self.look_at_pub.wait_for_result()
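+        # allow the head to settle; the action result itself is not checked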
+ rospy.sleep(3.0)
+ return "succeeded"
diff --git a/tasks/receptionist/CMakeLists.txt b/tasks/receptionist/CMakeLists.txt
index 85d832a1b..4a53b4d0e 100644
--- a/tasks/receptionist/CMakeLists.txt
+++ b/tasks/receptionist/CMakeLists.txt
@@ -163,6 +163,7 @@ include_directories(
## in contrast to setup.py, you can choose the destination
catkin_install_python(PROGRAMS
scripts/main.py
+ scripts/test_find_and_look_at.py
DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
)
diff --git a/tasks/receptionist/launch/setup.launch b/tasks/receptionist/launch/setup.launch
index 894406256..145b552f7 100644
--- a/tasks/receptionist/launch/setup.launch
+++ b/tasks/receptionist/launch/setup.launch
@@ -4,11 +4,11 @@
[launch XML lost in extraction: this hunk replaced two lines]
@@ -17,6 +17,8 @@
[launch XML lost in extraction: this hunk replaced one line and added two]
diff --git a/tasks/receptionist/scripts/main.py b/tasks/receptionist/scripts/main.py
index 5e9cd28e6..c6d6d2652 100755
--- a/tasks/receptionist/scripts/main.py
+++ b/tasks/receptionist/scripts/main.py
@@ -31,8 +31,10 @@
seat_pose,
seat_area,
{
- "name": "John",
+ "name": "nicole",
"drink": "beer",
+ "dataset": "receptionist",
+ "confidence": 0.5,
},
)
outcome = receptionist.execute()
diff --git a/tasks/receptionist/scripts/test_find_and_look_at.py b/tasks/receptionist/scripts/test_find_and_look_at.py
new file mode 100755
index 000000000..d6e2a8c35
--- /dev/null
+++ b/tasks/receptionist/scripts/test_find_and_look_at.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python3
+
+import rospy
+import smach
+from receptionist.states import FindAndLookAt
+
+if __name__ == "__main__":
+ rospy.init_node("test_find_and_look_at")
+
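+    # candidate look directions: [head_1_joint, head_2_joint] offsets in radians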
+ sm = FindAndLookAt(
+ "nicole",
+ [
+ [0.0, 0.0],
+ [-1.0, 0.0],
+ [1.0, 0.0],
+ ],
+ )
+ sm.userdata.guest_name = "nicole"
+ sm.userdata.dataset = "receptionist"
+ sm.userdata.confidence = 0.5
+
+ outcome = sm.execute()
+
+ rospy.loginfo(f"Outcome: {outcome}")
diff --git a/tasks/receptionist/src/receptionist/state_machine.py b/tasks/receptionist/src/receptionist/state_machine.py
index 8ab24d0d2..2dacd7461 100755
--- a/tasks/receptionist/src/receptionist/state_machine.py
+++ b/tasks/receptionist/src/receptionist/state_machine.py
@@ -8,6 +8,7 @@
GetGuestAttributes,
Introduce,
SeatGuest,
+ FindAndLookAt,
)
@@ -23,7 +24,11 @@ def __init__(
smach.StateMachine.__init__(self, outcomes=["succeeded", "failed"])
with self:
- self.userdata.guest_data = {"host": host_data, "guest1": {}, "guest2": {}}
+ self.userdata.guest_data = {
+ "host": host_data,
+ "guest1": {"name": ""},
+ "guest2": {"name": ""},
+ }
smach.StateMachine.add(
"GO_TO_WAIT_LOCATION_GUEST_1",
@@ -104,11 +109,26 @@ def __init__(
"SAY_WAIT_GUEST_1",
Say(text="Please wait here on my left"),
transitions={
- "succeeded": "INTRODUCE_GUEST_1_TO_HOST",
+ "succeeded": "FIND_AND_LOOK_AT",
"preempted": "failed",
"aborted": "failed",
},
)
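+            # look for the host before making the introduction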
+ smach.StateMachine.add(
+ "FIND_AND_LOOK_AT",
+ FindAndLookAt(
+ "host",
+ [
+ [0.0, 0.0],
+ [-1.0, 0.0],
+ [1.0, 0.0],
+ ],
+ ),
+ transitions={
+ "succeeded": "INTRODUCE_GUEST_1_TO_HOST",
+ "failed": "failed",
+ },
+ )
smach.StateMachine.add(
"INTRODUCE_GUEST_1_TO_HOST",
@@ -229,6 +249,21 @@ def __init__(
smach.StateMachine.add(
"INTRODUCE_GUEST_2_TO_EVERYONE",
Introduce(guest_to_introduce="guest2", everyone=True),
+ transitions={
+ "succeeded": "FIND_AND_LOOK_AT_2",
+ "failed": "failed",
+ },
+ )
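+            # face the host again before introducing them to guest 2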
+ smach.StateMachine.add(
+ "FIND_AND_LOOK_AT_2",
+ FindAndLookAt(
+ "host",
+ [
+ [0.0, 0.0],
+ [-1.0, 0.0],
+ [1.0, 0.0],
+ ],
+ ),
transitions={
"succeeded": "INTRODUCE_HOST_TO_GUEST_2",
"failed": "failed",
@@ -238,6 +273,21 @@ def __init__(
smach.StateMachine.add(
"INTRODUCE_HOST_TO_GUEST_2",
Introduce(guest_to_introduce="host", guest_to_introduce_to="guest2"),
+ transitions={
+ "succeeded": "FIND_AND_LOOK_AT_3",
+ "failed": "failed",
+ },
+ )
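+            # face guest 1 before introducing them to guest 2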
+ smach.StateMachine.add(
+ "FIND_AND_LOOK_AT_3",
+ FindAndLookAt(
+ "guest1",
+ [
+ [0.0, 0.0],
+ [-1.0, 0.0],
+ [1.0, 0.0],
+ ],
+ ),
transitions={
"succeeded": "INTRODUCE_GUEST_1_TO_GUEST_2",
"failed": "failed",
diff --git a/tasks/receptionist/src/receptionist/states/__init__.py b/tasks/receptionist/src/receptionist/states/__init__.py
index 3a25c381c..41c25b53d 100644
--- a/tasks/receptionist/src/receptionist/states/__init__.py
+++ b/tasks/receptionist/src/receptionist/states/__init__.py
@@ -2,3 +2,4 @@
from .get_attributes import GetGuestAttributes
from .introduce import Introduce
from .seat_guest import SeatGuest
+from .find_and_look_at import FindAndLookAt
diff --git a/tasks/receptionist/src/receptionist/states/find_and_look_at.py b/tasks/receptionist/src/receptionist/states/find_and_look_at.py
new file mode 100755
index 000000000..2734b7f7c
--- /dev/null
+++ b/tasks/receptionist/src/receptionist/states/find_and_look_at.py
@@ -0,0 +1,269 @@
+"""
+State machine for the receptionist task. It finds a person by given name and then looks at them.
+"""
+
+import rospy
+import smach_ros
+import smach
+from lasr_skills import LookAtPerson, LookToPoint
+from typing import List, Union
+from geometry_msgs.msg import Point, PointStamped
+from lasr_vision_msgs.srv import Recognise, RecogniseRequest
+from lasr_skills.vision import GetPointCloud
+from cv2_pcl import pcl_to_img_msg
+import actionlib
+from control_msgs.msg import FollowJointTrajectoryAction, FollowJointTrajectoryGoal
+from trajectory_msgs.msg import JointTrajectoryPoint
+from smach import CBState
+import rosservice
+
+PUBLIC_CONTAINER = False
+try:
+ from pal_startup_msgs.srv import (
+ StartupStart,
+ StartupStop,
+ StartupStartRequest,
+ StartupStopRequest,
+ )
+except ModuleNotFoundError:
+ PUBLIC_CONTAINER = True
+
+
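+# command the head through a one-second trajectory; _point is a [head_1_joint, head_2_joint] position in radians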
+def send_head_goal(_point, look_at_pub):
+ goal = FollowJointTrajectoryGoal()
+ goal.trajectory.joint_names = ["head_1_joint", "head_2_joint"]
+ point = JointTrajectoryPoint()
+ point.positions = _point
+ point.time_from_start = rospy.Duration(1)
+ goal.trajectory.points.append(point)
+ look_at_pub.send_goal(goal)
+
+
+class FindAndLookAt(smach.StateMachine):
+ class GetLookPoint(smach.State):
+ def __init__(self, look_positions: List[List[float]]):
+ smach.State.__init__(
+ self,
+ outcomes=["succeeded", "failed"],
+ input_keys=[],
+ output_keys=["look_positions"],
+ )
+ self.look_positions = look_positions
+
+ def execute(self, userdata):
+ userdata.look_positions = self.look_positions
+ return "succeeded"
+
+ class GetGuestName(smach.State):
+ def __init__(self, guest_name_in: str):
+ super().__init__(
+ outcomes=["succeeded"],
+ input_keys=["guest_data"],
+ output_keys=["guest_name"],
+ )
+ self._guest_name_in = guest_name_in
+
+ def execute(self, userdata) -> str:
+ guest_name = userdata.guest_data[self._guest_name_in]["name"]
+ userdata.guest_name = guest_name
+ return "succeeded"
+
+ class GetPoint(smach.State):
+ def __init__(self):
+ smach.State.__init__(
+ self,
+ outcomes=["succeeded", "failed"],
+ input_keys=["point_index", "look_positions"],
+ output_keys=["pointstamped"],
+ )
+ self.look_at_pub = actionlib.SimpleActionClient(
+ "/head_controller/follow_joint_trajectory", FollowJointTrajectoryAction
+ )
+
+ def execute(self, userdata):
+            rospy.sleep(3.0)  # let any previous head motion settle
+            _point = userdata.look_positions[userdata.point_index]
+            rospy.loginfo(f"Looking at {_point}")
+ userdata.pointstamped = PointStamped(
+ point=Point(x=_point[0], y=_point[1], z=1.0)
+ )
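+            # z=1.0 m is an assumed rough head height for the published look target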
+ send_head_goal(_point, self.look_at_pub)
+ rospy.sleep(3.0)
+
+ return "succeeded"
+
+ def check_name(self, ud):
+ rospy.logwarn(
+ f"Checking name {ud.guest_name} in detections {ud.deepface_detection}"
+ )
+ if len(ud.deepface_detection) == 0:
+ return "no_detection"
+ for detection in ud.deepface_detection:
+ if detection.name == ud.guest_name and detection.confidence > ud.confidence:
+ return "succeeded"
+ return "failed"
+
+ def __init__(
+ self,
+ guest_name_in: str,
+ look_positions: Union[List[List[float]], None] = None,
+ ):
+ smach.StateMachine.__init__(
+ self,
+ outcomes=["succeeded", "failed"],
+ input_keys=[
+ "dataset",
+ "confidence",
+ "guest_data",
+ ],
+ output_keys=[],
+ )
+
+        if look_positions is None:
+            look_positions = [
+                [0.0, 0.0],
+                [-1.0, 0.0],
+                [1.0, 0.0],
+            ]
+
+        all_look_positions: List[List[float]] = look_positions
+ IS_SIMULATION = (
+ "/pal_startup_control/start" not in rosservice.get_service_list()
+ )
+
+ with self:
+ smach.StateMachine.add(
+ "GET_LOOK_POINT",
+ self.GetLookPoint(all_look_positions),
+ transitions={"succeeded": "LOOK_ITERATOR", "failed": "failed"},
+ )
+ look_iterator = smach.Iterator(
+ outcomes=["succeeded", "failed"],
+ it=lambda: range(len(all_look_positions)),
+ it_label="point_index",
+ input_keys=["look_positions", "dataset", "confidence", "guest_data"],
+ output_keys=[],
+ exhausted_outcome="failed",
+ )
+ with look_iterator:
+ container_sm = smach.StateMachine(
+ outcomes=["succeeded", "failed", "continue"],
+ input_keys=[
+ "point_index",
+ "look_positions",
+ "dataset",
+ "confidence",
+ "guest_data",
+ ],
+ output_keys=[],
+ )
+ with container_sm:
+ if not IS_SIMULATION:
+ if PUBLIC_CONTAINER:
+ rospy.logwarn(
+ "You are using a public container. The head manager will not be stopped during navigation."
+ )
+ else:
+ smach.StateMachine.add(
+ "DISABLE_HEAD_MANAGER",
+ smach_ros.ServiceState(
+ "/pal_startup_control/stop",
+ StartupStop,
+ request=StartupStopRequest("head_manager"),
+ ),
+ transitions={
+ "succeeded": "GET_GUEST_NAME",
+ "aborted": "failed",
+ "preempted": "failed",
+ },
+ )
+ smach.StateMachine.add(
+ "GET_GUEST_NAME",
+ self.GetGuestName(guest_name_in=guest_name_in),
+ transitions={"succeeded": "GET_POINT"},
+ )
+ smach.StateMachine.add(
+ "GET_POINT",
+ self.GetPoint(),
+ transitions={"succeeded": "GET_IMAGE", "failed": "failed"},
+ remapping={"pointstamped": "pointstamped"},
+ )
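+                    # NOTE: no transition currently targets LOOK_TO_POINT; GET_POINT moves the head itself and goes straight to GET_IMAGE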
+ smach.StateMachine.add(
+ "LOOK_TO_POINT",
+ LookToPoint(),
+ transitions={
+ "succeeded": "GET_IMAGE",
+ "aborted": "failed",
+ "preempted": "failed",
+ },
+ )
+ smach.StateMachine.add(
+ "GET_IMAGE",
+ GetPointCloud("/xtion/depth_registered/points"),
+ transitions={
+ "succeeded": "RECOGNISE",
+ },
+ remapping={"pcl_msg": "pcl_msg"},
+ )
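+                    # /recognise runs face recognition on an image built from the point cloud; its detections are remapped to deepface_detection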
+ smach.StateMachine.add(
+ "RECOGNISE",
+ smach_ros.ServiceState(
+ "/recognise",
+ Recognise,
+ input_keys=["pcl_msg", "dataset", "confidence"],
+ request_cb=lambda ud, _: RecogniseRequest(
+ image_raw=pcl_to_img_msg(ud.pcl_msg),
+ dataset=ud.dataset,
+ confidence=ud.confidence,
+ ),
+ response_slots=["detections"],
+ output_keys=["detections"],
+ ),
+ transitions={
+ "succeeded": "CHECK_NAME",
+ "aborted": "failed",
+ "preempted": "failed",
+ },
+ remapping={
+ "pcl_msg": "pcl_msg",
+ "detections": "deepface_detection",
+ },
+ )
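+                    # "failed" retries recognition from the same head position; "no_detection" advances the iterator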
+ smach.StateMachine.add(
+ "CHECK_NAME",
+ CBState(
+ self.check_name,
+ outcomes=["succeeded", "failed", "no_detection"],
+ input_keys=[
+ "deepface_detection",
+ "guest_name",
+ "confidence",
+ ],
+ ),
+ transitions={
+ "succeeded": "LOOK_AT_PERSON",
+ "failed": "GET_IMAGE",
+ "no_detection": "continue",
+ },
+ )
+ smach.StateMachine.add(
+ "LOOK_AT_PERSON",
+ LookAtPerson(filter=True),
+ transitions={
+ "succeeded": "succeeded",
+ "no_detection": "continue",
+ "failed": "failed",
+ },
+ )
+ look_iterator.set_contained_state(
+ "CONTAINER_STATE", container_sm, loop_outcomes=["continue"]
+ )
+ smach.StateMachine.add(
+ "LOOK_ITERATOR",
+ look_iterator,
+ transitions={"succeeded": "succeeded", "failed": "failed"},
+ )
diff --git a/tasks/receptionist/src/receptionist/states/get_attributes.py b/tasks/receptionist/src/receptionist/states/get_attributes.py
index 2f0dfd3bb..d3390d373 100644
--- a/tasks/receptionist/src/receptionist/states/get_attributes.py
+++ b/tasks/receptionist/src/receptionist/states/get_attributes.py
@@ -5,7 +5,6 @@
class GetGuestAttributes(smach.StateMachine):
-
class HandleGuestAttributes(smach.State):
def __init__(self, guest_id: str):
smach.State.__init__(
diff --git a/tasks/receptionist/src/receptionist/states/get_name_and_drink.py b/tasks/receptionist/src/receptionist/states/get_name_and_drink.py
index e2da4fede..a6203793f 100644
--- a/tasks/receptionist/src/receptionist/states/get_name_and_drink.py
+++ b/tasks/receptionist/src/receptionist/states/get_name_and_drink.py
@@ -13,13 +13,13 @@ class ParseNameAndDrink(smach.State):
def __init__(
self,
guest_id: str,
- param_key: str = "receptionist/priors",
+ param_key: str = "/receptionist/priors",
):
"""Parses the transcription of the guests' name and favourite drink.
Args:
param_key (str, optional): Name of the parameter that contains the list of
- possible . Defaults to "receptionist/priors".
+            possible names and drinks. Defaults to "/receptionist/priors".
"""
smach.State.__init__(
self,
@@ -48,8 +48,6 @@ def execute(self, userdata: UserData) -> str:
outcome = "succeeded"
name_found = False
drink_found = False
- print(userdata)
- print(type(userdata.guest_transcription))
transcription = userdata.guest_transcription.lower()
transcription = userdata["guest_transcription"].lower()
diff --git a/tasks/receptionist/src/receptionist/states/introduce.py b/tasks/receptionist/src/receptionist/states/introduce.py
index 77dcc96d9..3a5ab5a4d 100644
--- a/tasks/receptionist/src/receptionist/states/introduce.py
+++ b/tasks/receptionist/src/receptionist/states/introduce.py
@@ -24,7 +24,6 @@ def stringify_guest_data(guest_data: Dict[str, Any], guest_id: str) -> str:
"""
relevant_guest_data = guest_data[guest_id]
-
guest_str = ""
if "attributes" in relevant_guest_data.keys():
diff --git a/tasks/receptionist/src/receptionist/states/seat_guest.py b/tasks/receptionist/src/receptionist/states/seat_guest.py
index 11da964fe..1957cfe37 100755
--- a/tasks/receptionist/src/receptionist/states/seat_guest.py
+++ b/tasks/receptionist/src/receptionist/states/seat_guest.py
@@ -6,6 +6,7 @@
import numpy as np
from lasr_skills import PlayMotion, Detect3DInArea, LookToPoint, Say
+from geometry_msgs.msg import Point, PointStamped
class SeatGuest(smach.StateMachine):
@@ -49,7 +50,7 @@ def execute(self, userdata) -> str:
break
if seat_is_empty:
- userdata.seat_position = seat.point
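+                    # LookToPoint's userdata key is now pointstamped, so wrap the bare point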
+ userdata.seat_position = PointStamped(point=seat.point)
print(seat.point)
return "succeeded"
@@ -120,7 +121,7 @@ def __init__(
"aborted": "failed",
"preempted": "failed",
},
- remapping={"point": "seat_position"},
+ remapping={"pointstamped": "seat_position"},
)
smach.StateMachine.add(
"SAY_SIT",