diff --git a/common/vision/lasr_vision_cropped_detection/src/lasr_vision_cropped_detection/cropped_detection.py b/common/vision/lasr_vision_cropped_detection/src/lasr_vision_cropped_detection/cropped_detection.py
index 2aca0696..c6252324 100644
--- a/common/vision/lasr_vision_cropped_detection/src/lasr_vision_cropped_detection/cropped_detection.py
+++ b/common/vision/lasr_vision_cropped_detection/src/lasr_vision_cropped_detection/cropped_detection.py
@@ -1,28 +1,23 @@
#!/usr/bin/env python3
from typing import List, Tuple
-import numpy as np
+
import cv2
+import numpy as np
import rospy
-from shapely.validation import explain_validity
-from sensor_msgs.msg import Image, PointCloud2
-from geometry_msgs.msg import Point, PoseWithCovarianceStamped, Polygon
-from shapely.geometry.polygon import Polygon as ShapelyPolygon
-from shapely.geometry.point import Point as ShapelyPoint
-
-from lasr_vision_msgs.msg import (
- Detection,
- Detection3D,
- CDRequest,
- CDResponse,
-)
+from cv2_img import cv2_img_to_msg, msg_to_cv2_img
+from cv2_pcl import pcl_to_cv2
+from geometry_msgs.msg import Point, Polygon, PoseWithCovarianceStamped
+from lasr_vision_msgs.msg import CDRequest, CDResponse, Detection, Detection3D
from lasr_vision_msgs.srv import (
- YoloDetection,
- YoloDetection3D,
CroppedDetectionRequest,
CroppedDetectionResponse,
+ YoloDetection,
+ YoloDetection3D,
)
-from cv2_img import cv2_img_to_msg, msg_to_cv2_img
-from cv2_pcl import pcl_to_cv2
+from sensor_msgs.msg import Image, PointCloud2
+from shapely.geometry.point import Point as ShapelyPoint
+from shapely.geometry.polygon import Polygon as ShapelyPolygon
+from shapely.validation import explain_validity
def _2d_bbox_crop(
@@ -460,7 +455,7 @@ def process_detection_requests(
depth_image_topic: str = "/xtion/depth_registered/points",
yolo_2d_service_name: str = "/yolov8/detect",
yolo_3d_service_name: str = "/yolov8/detect3d",
- robot_pose_topic: str = "/amcl_pose",
+ robot_pose_topic: str = "/robot_pose",
debug_topic: str = "/lasr_vision/cropped_detection/debug",
) -> CroppedDetectionResponse:
"""Processes a list of detection requests.
diff --git a/skills/src/lasr_skills/__init__.py b/skills/src/lasr_skills/__init__.py
index e4523441..f637eb6f 100755
--- a/skills/src/lasr_skills/__init__.py
+++ b/skills/src/lasr_skills/__init__.py
@@ -30,4 +30,4 @@
from .find_person_and_tell import FindPersonAndTell
from .count_people import CountPeople
from .json_qa import JsonQuestionAnswer
-
+from .rotate import Rotate
diff --git a/skills/src/lasr_skills/go_to_location.py b/skills/src/lasr_skills/go_to_location.py
index 30728a4f..d9d2ae5c 100755
--- a/skills/src/lasr_skills/go_to_location.py
+++ b/skills/src/lasr_skills/go_to_location.py
@@ -1,27 +1,24 @@
-import smach_ros
-import smach
+import os
+from typing import Union
+
+import rosparam
+import rospkg
import rospy
import rosservice
+import smach
+import smach_ros
+from geometry_msgs.msg import Point, Pose, PoseStamped, Quaternion
+from lasr_skills import PlayMotion
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
-from geometry_msgs.msg import Pose, PoseStamped, Point, Quaternion
from std_msgs.msg import Header
-from typing import Union
-
-from lasr_skills import PlayMotion
-
-import rospkg
-import rosparam
-
-import os
-
PUBLIC_CONTAINER = False
try:
from pal_startup_msgs.srv import (
StartupStart,
- StartupStop,
StartupStartRequest,
+ StartupStop,
StartupStopRequest,
)
except ModuleNotFoundError:
@@ -33,6 +30,7 @@ def __init__(
self,
location: Union[Pose, None] = None,
location_param: Union[str, None] = None,
+ safe_navigation: bool = True,
):
if location is not None or location_param is not None:
super(GoToLocation, self).__init__(outcomes=["succeeded", "failed"])
@@ -41,100 +39,159 @@ def __init__(
outcomes=["succeeded", "failed"], input_keys=["location"]
)
- r = rospkg.RosPack()
- els = rosparam.load_file(
- os.path.join(r.get_path("lasr_skills"), "config", "motions.yaml")
- )
- for param, ns in els:
- rosparam.upload_params(ns, param)
+ if safe_navigation:
+ r = rospkg.RosPack()
+ els = rosparam.load_file(
+ os.path.join(r.get_path("lasr_skills"), "config", "motions.yaml")
+ )
+ for param, ns in els:
+ rosparam.upload_params(ns, param)
IS_SIMULATION = (
"/pal_startup_control/start" not in rosservice.get_service_list()
)
with self:
- smach.StateMachine.add(
- "LOWER_BASE",
- PlayMotion("pre_navigation"),
- transitions={
- "succeeded": (
- "ENABLE_HEAD_MANAGER" if not IS_SIMULATION else "GO_TO_LOCATION"
- ),
- "aborted": "failed",
- "preempted": "failed",
- },
- )
-
- if not IS_SIMULATION:
- if PUBLIC_CONTAINER:
- rospy.logwarn(
- "You are using a public container. The head manager will not be stopped during navigation."
- )
- else:
- smach.StateMachine.add(
- "ENABLE_HEAD_MANAGER",
- smach_ros.ServiceState(
- "/pal_startup_control/start",
- StartupStart,
- request=StartupStartRequest("head_manager", ""),
- ),
- transitions={
- "succeeded": "GO_TO_LOCATION",
- "preempted": "failed",
- "aborted": "failed",
- },
- )
-
- if location is not None:
+ if safe_navigation:
smach.StateMachine.add(
- "GO_TO_LOCATION",
- smach_ros.SimpleActionState(
- "move_base",
- MoveBaseAction,
- goal=MoveBaseGoal(
- target_pose=PoseStamped(
- pose=location, header=Header(frame_id="map")
- )
- ),
- ),
+ "LOWER_BASE",
+ PlayMotion("pre_navigation"),
transitions={
"succeeded": (
- "DISABLE_HEAD_MANAGER"
+ "ENABLE_HEAD_MANAGER"
if not IS_SIMULATION
- else "RAISE_BASE"
+ else "GO_TO_LOCATION"
),
"aborted": "failed",
"preempted": "failed",
},
)
- elif location_param is not None:
- smach.StateMachine.add(
- "GO_TO_LOCATION",
- smach_ros.SimpleActionState(
- "move_base",
- MoveBaseAction,
- goal=MoveBaseGoal(
- target_pose=PoseStamped(
- pose=Pose(
- position=Point(
- **rospy.get_param(f"{location_param}/position")
- ),
- orientation=Quaternion(
- **rospy.get_param(
- f"{location_param}/orientation"
- )
+
+ if not IS_SIMULATION:
+ if PUBLIC_CONTAINER:
+ rospy.logwarn(
+ "You are using a public container. The head manager will not be stopped during navigation."
+ )
+ else:
+ smach.StateMachine.add(
+ "ENABLE_HEAD_MANAGER",
+ smach_ros.ServiceState(
+ "/pal_startup_control/start",
+ StartupStart,
+ request=StartupStartRequest("head_manager", ""),
+ ),
+ transitions={
+ "succeeded": "GO_TO_LOCATION",
+ "preempted": "failed",
+ "aborted": "failed",
+ },
+ )
+
+ if location is not None:
+ smach.StateMachine.add(
+ "GO_TO_LOCATION",
+ smach_ros.SimpleActionState(
+ "move_base",
+ MoveBaseAction,
+ goal=MoveBaseGoal(
+ target_pose=PoseStamped(
+ pose=location, header=Header(frame_id="map")
+ )
+ ),
+ ),
+ transitions={
+ "succeeded": (
+ "DISABLE_HEAD_MANAGER"
+ if not IS_SIMULATION
+ else "RAISE_BASE"
+ ),
+ "aborted": "failed",
+ "preempted": "failed",
+ },
+ )
+ elif location_param is not None:
+ smach.StateMachine.add(
+ "GO_TO_LOCATION",
+ smach_ros.SimpleActionState(
+ "move_base",
+ MoveBaseAction,
+ goal=MoveBaseGoal(
+ target_pose=PoseStamped(
+ pose=Pose(
+ position=Point(
+ **rospy.get_param(
+ f"{location_param}/position"
+ )
+ ),
+ orientation=Quaternion(
+ **rospy.get_param(
+ f"{location_param}/orientation"
+ )
+ ),
),
- ),
- header=Header(frame_id="map"),
- )
+ header=Header(frame_id="map"),
+ )
+ ),
),
- ),
- transitions={
- "succeeded": (
- "DISABLE_HEAD_MANAGER"
- if not IS_SIMULATION
- else "RAISE_BASE"
+ transitions={
+ "succeeded": (
+ "DISABLE_HEAD_MANAGER"
+ if not IS_SIMULATION
+ else "RAISE_BASE"
+ ),
+ "aborted": "failed",
+ "preempted": "failed",
+ },
+ )
+ else:
+ smach.StateMachine.add(
+ "GO_TO_LOCATION",
+ smach_ros.SimpleActionState(
+ "move_base",
+ MoveBaseAction,
+ goal_cb=lambda ud, _: MoveBaseGoal(
+ target_pose=PoseStamped(
+ pose=ud.location, header=Header(frame_id="map")
+ )
+ ),
+ input_keys=["location"],
),
+ transitions={
+ "succeeded": (
+ "DISABLE_HEAD_MANAGER"
+ if not IS_SIMULATION
+ else "RAISE_BASE"
+ ),
+ "aborted": "succeeded",
+ "preempted": "failed",
+ },
+ )
+
+ if not IS_SIMULATION:
+ if PUBLIC_CONTAINER:
+ rospy.logwarn(
+ "You are using a public container. The head manager will not be start following navigation."
+ )
+ else:
+ smach.StateMachine.add(
+ "DISABLE_HEAD_MANAGER",
+ smach_ros.ServiceState(
+ "/pal_startup_control/stop",
+ StartupStop,
+ request=StartupStopRequest("head_manager"),
+ ),
+ transitions={
+ "succeeded": "RAISE_BASE",
+ "aborted": "failed",
+ "preempted": "failed",
+ },
+ )
+
+ smach.StateMachine.add(
+ "RAISE_BASE",
+ PlayMotion("post_navigation"),
+ transitions={
+ "succeeded": "succeeded",
"aborted": "failed",
"preempted": "failed",
},
@@ -153,42 +210,8 @@ def __init__(
input_keys=["location"],
),
transitions={
- "succeeded": (
- "DISABLE_HEAD_MANAGER"
- if not IS_SIMULATION
- else "RAISE_BASE"
- ),
- "aborted": "succeeded",
+ "succeeded": "succeeded",
+ "aborted": "failed",
"preempted": "failed",
},
)
-
- if not IS_SIMULATION:
- if PUBLIC_CONTAINER:
- rospy.logwarn(
- "You are using a public container. The head manager will not be start following navigation."
- )
- else:
- smach.StateMachine.add(
- "DISABLE_HEAD_MANAGER",
- smach_ros.ServiceState(
- "/pal_startup_control/stop",
- StartupStop,
- request=StartupStopRequest("head_manager"),
- ),
- transitions={
- "succeeded": "RAISE_BASE",
- "aborted": "failed",
- "preempted": "failed",
- },
- )
-
- smach.StateMachine.add(
- "RAISE_BASE",
- PlayMotion("post_navigation"),
- transitions={
- "succeeded": "succeeded",
- "aborted": "failed",
- "preempted": "failed",
- },
- )
diff --git a/skills/src/lasr_skills/rotate.py b/skills/src/lasr_skills/rotate.py
new file mode 100644
index 00000000..0b57bebf
--- /dev/null
+++ b/skills/src/lasr_skills/rotate.py
@@ -0,0 +1,69 @@
+from typing import Optional
+
+import numpy as np
+import rospy
+import smach
+from geometry_msgs.msg import Pose, PoseStamped, PoseWithCovarianceStamped, Quaternion
+from lasr_skills import GoToLocation
+from scipy.spatial.transform import Rotation as R
+
+
+class Rotate(smach.StateMachine):
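+    """Rotate the robot in place by a yaw angle in degrees, taken from the
+    constructor if given, otherwise from the 'angle' userdata key. The rotation
+    is executed by sending a move_base goal at the current position via
+    GoToLocation with safe_navigation disabled."""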
+
+ class GetRotatedPose(smach.State):
+ def __init__(self, angle: Optional[float] = None):
+ super().__init__(
+ outcomes=["succeeded"],
+ input_keys=["angle"] if angle is None else [],
+ output_keys=["target_pose"],
+ )
+ self.angle = angle
+
+ def execute(self, userdata):
+ robot_pose_with_covariance = rospy.wait_for_message(
+ "/robot_pose", PoseWithCovarianceStamped
+ )
+ robot_pose = PoseStamped(
+ pose=robot_pose_with_covariance.pose.pose,
+ header=robot_pose_with_covariance.header,
+ )
+ current_orientation = np.array(
+ [
+ robot_pose.pose.orientation.x,
+ robot_pose.pose.orientation.y,
+ robot_pose.pose.orientation.z,
+ robot_pose.pose.orientation.w,
+ ]
+ )
+
+ rot_matrix = R.from_quat(current_orientation)
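+        # Compose the current orientation with a yaw rotation of the requested angle (degrees).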
+ new_rot_matrix = rot_matrix * R.from_euler(
+ "z", userdata.angle if self.angle is None else self.angle, degrees=True
+ )
+ new_pose = Pose(
+ position=robot_pose.pose.position,
+ orientation=Quaternion(*new_rot_matrix.as_quat()),
+ )
+
+ userdata.target_pose = new_pose
+
+ return "succeeded"
+
+ def __init__(self, angle: Optional[float] = None):
+ super().__init__(
+ outcomes=["succeeded", "failed"],
+ input_keys=["angle"] if angle is None else [],
+ )
+
+ with self:
+ smach.StateMachine.add(
+ "GET_ROTATED_POSE",
+ self.GetRotatedPose(angle),
+ transitions={"succeeded": "ROTATE"},
+ )
+ smach.StateMachine.add(
+ "ROTATE",
+ GoToLocation(safe_navigation=False),
+ transitions={"succeeded": "succeeded", "failed": "failed"},
+ remapping={"location": "target_pose"},
+ )
diff --git a/tasks/restaurant/CMakeLists.txt b/tasks/restaurant/CMakeLists.txt
index f4c0bb9f..86ab10df 100644
--- a/tasks/restaurant/CMakeLists.txt
+++ b/tasks/restaurant/CMakeLists.txt
@@ -11,6 +11,7 @@ find_package(catkin REQUIRED COMPONENTS
lasr_skills
rospy
std_msgs
+ catkin_virtualenv
)
## System dependencies are found with CMake's conventions
@@ -21,6 +22,10 @@ find_package(catkin REQUIRED COMPONENTS
## modules and global scripts declared therein get installed
## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
catkin_python_setup()
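+# Generate a Python 3.8 virtualenv, locking pip dependencies from requirements.in (catkin_virtualenv).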
+catkin_generate_virtualenv(
+ INPUT_REQUIREMENTS requirements.in
+ PYTHON_INTERPRETER python3.8
+)
################################################
## Declare ROS messages, services and actions ##
diff --git a/tasks/restaurant/config/lab.yaml b/tasks/restaurant/config/lab.yaml
new file mode 100644
index 00000000..a67a0cb9
--- /dev/null
+++ b/tasks/restaurant/config/lab.yaml
@@ -0,0 +1,8 @@
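+# Head motions played at each survey position, and yaw increments (degrees) between positions.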
+survey_motions:
+ - look_centre
+ - look_left
+ - look_right
+survey_angle_increments:
+ - -90
+ - -90
+ - -90
\ No newline at end of file
diff --git a/tasks/restaurant/config/motions.yaml b/tasks/restaurant/config/motions.yaml
new file mode 100644
index 00000000..e69de29b
diff --git a/tasks/restaurant/launch/core.launch b/tasks/restaurant/launch/core.launch
new file mode 100644
index 00000000..eaa1cf59
--- /dev/null
+++ b/tasks/restaurant/launch/core.launch
diff --git a/tasks/restaurant/package.xml b/tasks/restaurant/package.xml
index a43015e8..419bc791 100644
--- a/tasks/restaurant/package.xml
+++ b/tasks/restaurant/package.xml
@@ -55,11 +55,11 @@
   <exec_depend>rospy</exec_depend>
   <exec_depend>lasr_skills</exec_depend>
   <exec_depend>rospy</exec_depend>
+  <build_depend>catkin_virtualenv</build_depend>
 
-  <export>
-  </export>
+  <export>
+    <pip_requirements>requirements.txt</pip_requirements>
+  </export>
diff --git a/tasks/restaurant/requirements.in b/tasks/restaurant/requirements.in
new file mode 100644
index 00000000..d4e6f378
--- /dev/null
+++ b/tasks/restaurant/requirements.in
@@ -0,0 +1 @@
+jellyfish
\ No newline at end of file
diff --git a/tasks/restaurant/requirements.txt b/tasks/restaurant/requirements.txt
new file mode 100644
index 00000000..d7ba4554
--- /dev/null
+++ b/tasks/restaurant/requirements.txt
@@ -0,0 +1 @@
+jellyfish==1.1.0 # via -r requirements.in
diff --git a/tasks/restaurant/scripts/main.py b/tasks/restaurant/scripts/main.py
index a57f9298..6235e4c9 100644
--- a/tasks/restaurant/scripts/main.py
+++ b/tasks/restaurant/scripts/main.py
@@ -6,6 +6,6 @@
if __name__ == "__main__":
rospy.init_node("restaurant")
- bar_pose_map = rospy.wait_for_message("/amcl_pose", PoseWithCovarianceStamped)
+ bar_pose_map = rospy.wait_for_message("/robot_pose", PoseWithCovarianceStamped)
restaurant = Restaurant(bar_pose_map.pose.pose)
restaurant.execute()
diff --git a/tasks/restaurant/src/restaurant/speech/__init__.py b/tasks/restaurant/src/restaurant/speech/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tasks/restaurant/src/restaurant/speech/speech_handlers.py b/tasks/restaurant/src/restaurant/speech/speech_handlers.py
new file mode 100644
index 00000000..896963eb
--- /dev/null
+++ b/tasks/restaurant/src/restaurant/speech/speech_handlers.py
@@ -0,0 +1,405 @@
+import string
+from typing import List
+
+import jellyfish as jf
+
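+# Maximum Damerau-Levenshtein distance for spelling-based word recovery.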
+SPELLING_THRESHOLD = 1
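+# Maximum Levenshtein distance between soundex codes for sound-based word recovery.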
+PRONOUNCIATION_THRESHOLD = 0
+
+excluded_words = [
+ "i",
+ "would",
+ "like",
+ "and",
+ "please",
+ "um" "can",
+ "have" "may",
+ "could",
+ "get",
+]
+
+
+drinks_list = ["cola", "ice tea", "water", "milk", "big coke", "fanta", "dubbelfris"]
+
+drinks_split_list = [
+ "cola",
+ "ice",
+ "tea",
+ "water",
+ "milk",
+ "big",
+ "coke",
+ "fanta",
+ "dubbelfris",
+]
+
+food_list = [
+ "cornflakes",
+ "pea soup",
+ "curry",
+ "pancake mix",
+ "hagelslag",
+ "sausages",
+ "mayonaise",
+]
+
+food_split_list = [
+ "cornflakes",
+ "pea",
+ "soup",
+ "curry",
+ "pancake",
+ "mix",
+ "hagelslag",
+ "sausages",
+ "mayonaise",
+]
+
+fruits_list = [
+ "pear",
+ "plum",
+ "peach",
+ "lemon",
+ "orange",
+ "strawberry",
+ "banana",
+ "apple",
+]
+snacks_list = ["stroopwafel", "candy", "liquorice", "crisps", "pringles", "tictac"]
+
+items_list = drinks_list + food_list + fruits_list + snacks_list
+
+items_split_list = drinks_split_list + food_split_list + fruits_list + snacks_list
+
+double_word_items_split_list = [
+ "ice",
+ "tea",
+ "big",
+ "coke",
+ "pea",
+ "soup",
+ "pancake",
+ "mix",
+]
+
+double_word_items_full_list = ["ice tea", "big coke", "pea soup", "pancake mix"]
+
+double_word_items_direct_dict = {
+ "ice": "tea",
+ "tea": "ice",
+ "big": "coke",
+ "coke": "big",
+ "pea": "soup",
+ "soup": "pea",
+ "pancake": "mix",
+ "mix": "pancake",
+}
+
+double_word_items_full_dict = {
+ "ice": "ice tea",
+ "tea": "ice tea",
+ "big": "big coke",
+ "coke": "big coke",
+ "pea": "pea soup",
+ "soup": "pea soup",
+ "pancake": "pancake mix",
+ "mix": "pancake mix",
+}
+
+
+# Enter consecutive numbers in order if more are required
+numbers_list = ["a", "one", "two", "three", "four", "five"]
+numbers_map = {
+ word: (1 if word in ["a", "one"] else i)
+ for i, word in enumerate(numbers_list, start=0)
+}
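+# numbers_map == {"a": 1, "one": 1, "two": 2, "three": 3, "four": 4, "five": 5}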
+
+everything_split_list = items_split_list + numbers_list
+
+
+def get_split_sentence_list(guest_transcription):
+ """Split sentence into a list of strings.
+
+ Args:
+ guest_transcription (str): Transcription as a pure string.
+ Returns:
+ List[str]: Transcription split up as a list of strings.
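+
+    Example (illustrative): "I would like a cola and two sausages, please"
+    -> ["a", "cola", "two", "sausages"].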
+ """
+ filtered_sentence = guest_transcription.lower().translate(
+ str.maketrans("", "", string.punctuation)
+ )
+ sentence_list = [
+ word for word in filtered_sentence.split() if word not in excluded_words
+ ]
+ return sentence_list
+
+
+def get_num_and_items(sentence_list):
+ """Extract the ordered items and their corresponding number required.
+
+ Args:
+ sentence_list (List[str]): Transcription split up as a list of strings.
+
+ Returns:
+        List[Tuple[int, str]]: Pairs of (count, item) extracted from the sentence.
+        On failure, a single (-1, error_message) pair is returned instead.
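+
+        Example (illustrative): ["a", "cola", "two", "sausages"]
+        -> [(1, "cola"), (2, "sausages")].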
+ """
+ number_items = []
+ int_number_items = []
+ for i in range(len(sentence_list)):
+ if sentence_list[i] in numbers_list:
+ number_items.append(sentence_list[i])
+ int_number_items.append(numbers_map[sentence_list[i]])
+
+ matched_items = []
+ for i in range(len(sentence_list)):
+ for item in items_list:
+ item_words = item.split()
+ if sentence_list[i : i + len(item_words)] == item_words:
+ matched_items.append(item)
+ break
+
+ if len(int_number_items) > len(matched_items):
+ return [(-1, "more number")]
+ elif len(int_number_items) < len(matched_items):
+ return [(-1, "more items")]
+ elif len(int_number_items) == 0 and len(matched_items) == 0:
+ return [(-1, "unknown")]
+ else:
+ num_and_items = list(zip(int_number_items, matched_items))
+ return num_and_items
+
+
+def recover_sentence(sentence_list, error_message, last_resort):
+ """Recover words and phrases in the sentence. If there's a mismatch between the number of number words and item
+ words, recover until they are of the same number. If last resort mode is True, then disregard the mismatch in
+ numbers and perform all recovery behaviours.
+
+ Args:
+ sentence_list (List[str]): Transcription split up as a list of strings.
+ error_message (str): Identifies whether more number or item words were identified.
+ last_resort (bool): Whether all recovery behaviour needs to be performed.
+
+ Returns:
+ List[str]: Recovered sentence list.
+ """
+ # Recover items
+ if not last_resort and error_message == "more number":
+ recovered_sentence = handle_similar_spelt(
+ sentence_list, items_split_list, SPELLING_THRESHOLD
+ )
+ # print(f"Recovered items (spelt): {recovered_sentence}")
+ recovered_sentence = handle_similar_sound(
+ recovered_sentence, items_split_list, PRONOUNCIATION_THRESHOLD
+ )
+ # print(f"Recovered items (sound): {recovered_sentence}")
+ recovered_sentence = infer_second_item(recovered_sentence)
+ # print(f"Recovered items (inferred second word): {recovered_sentence}")
+ return recovered_sentence
+ # Recover numbers
+ elif not last_resort and error_message == "more items":
+ recovered_sentence = handle_similar_spelt(
+ sentence_list, numbers_list, SPELLING_THRESHOLD
+ )
+ # print(f"Recovered number (spelt): {recovered_sentence}")
+ recovered_sentence = handle_similar_sound(
+ recovered_sentence, numbers_list, PRONOUNCIATION_THRESHOLD
+ )
+ # print(f"Recovered number (sound): {recovered_sentence}")
+ return recovered_sentence
+ else:
+ recovered_sentence = handle_similar_spelt(
+ sentence_list, numbers_list, SPELLING_THRESHOLD
+ )
+ # print(f"Recovered number (spelt): {recovered_sentence}")
+ recovered_sentence = handle_similar_sound(
+ recovered_sentence, numbers_list, PRONOUNCIATION_THRESHOLD
+ )
+ # print(f"Recovered number (sound): {recovered_sentence}")
+
+ recovered_sentence = handle_similar_spelt(
+ sentence_list, items_split_list, SPELLING_THRESHOLD
+ )
+ # print(f"Recovered items (spelt): {recovered_sentence}")
+ recovered_sentence = handle_similar_sound(
+ recovered_sentence, items_split_list, PRONOUNCIATION_THRESHOLD
+ )
+ # print(f"Recovered items (sound): {recovered_sentence}")
+ recovered_sentence = infer_second_item(recovered_sentence)
+ # print(f"Recovered items (inferred second word): {recovered_sentence}")
+
+ return recovered_sentence
+
+
+def handle_similar_spelt(
+ sentence_list: List[str],
+ available_words: List[str],
+ distance_threshold: int,
+) -> List[str]:
+ """Recover any word by spelling that has a similarity lower than the specified threshold
+ when comparing each word in the sentence list to the list of available words. Replace the old word
+ in the sentence with the newly recovered word.
+
+ Args:
+ sentence_list (List[str]): Transcription split up as a list of strings.
+ available_words (List[str]): List of available words to compare to (numbers or items).
+        distance_threshold (int): Maximum spelling distance for a word to be recovered.
+
+ Returns:
+ List[str]: Recovered sentence in terms of spelling.
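+
+    Example (illustrative): "watter" -> "water" (Damerau-Levenshtein distance 1).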
+ """
+    for input_word_index, input_word in enumerate(sentence_list):
+        for available_word in available_words:
+            distance = get_damerau_levenshtein_distance(input_word, available_word)
+            if input_word != available_word and input_word not in everything_split_list:
+                if distance <= distance_threshold:
+                    print(f"Spelling recovery: {input_word} -> {available_word}")
+                    sentence_list[input_word_index] = available_word
+    return sentence_list
+
+
+def handle_similar_sound(
+ sentence_list: List[str],
+ available_words: List[str],
+ distance_threshold: int,
+) -> List[str]:
+ """Recover any word by pronounciation that has a similarity lower than the specified threshold
+ when comparing each word in the sentence list to the list of available words.
+
+ Args:
+ sentence_list (List[str]): Transcription split up as a list of strings.
+ available_words (List[str]): List of available words to compare to (numbers or items).
+        distance_threshold (int): Maximum pronunciation distance for a word to be recovered.
+
+ Returns:
+        List[str]: Recovered sentence in terms of pronunciation.
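+
+    Example (illustrative): "melk" -> "milk" (identical soundex codes, distance 0).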
+ """
+    for input_word_index, input_word in enumerate(sentence_list):
+        for available_word in available_words:
+            distance = get_levenshtein_soundex_distance(input_word, available_word)
+            if input_word != available_word and input_word not in everything_split_list:
+                if distance <= distance_threshold:
+                    print(f"Sound recovery: {input_word} -> {available_word}")
+                    sentence_list[input_word_index] = available_word
+    return sentence_list
+
+
+def infer_second_item(sentence_list: List[str]) -> List[str]:
+ """Infer the second word of any two-worded item phrase in the sentence - if
+ a word is contained in any of the two-worded item phrases it is recovered. If the word in the next/previous index
+ is the expected second word of a two-worded item, then ignore. If a given phrase has already been recovered, ignore.
+
+ Args:
+ sentence_list (List[str]): Transcription split up as a list of strings.
+
+ Returns:
+ List[str]: Recovered sentence with the second item inferred.
+ """
+ allowed_recovery_phrases = list(double_word_items_full_list)
+ input_word_index = 0
+
+ for input_word in sentence_list:
+ for available_word in double_word_items_split_list:
+ if (
+ input_word == available_word
+ and double_word_items_full_dict[input_word] in allowed_recovery_phrases
+ ):
+ recovered_phrase = double_word_items_full_dict[input_word]
+ if (
+ input_word_index == 0
+ and sentence_list[input_word_index + 1]
+ != double_word_items_direct_dict[input_word]
+ ):
+ print(f"Infer double word: {input_word} -> {recovered_phrase}")
+ allowed_recovery_phrases.remove(recovered_phrase)
+ recovered_phrase_list = get_split_sentence_list(recovered_phrase)
+ sentence_list[input_word_index : input_word_index + 1] = (
+ recovered_phrase_list
+ )
+ elif (
+ input_word_index == len(sentence_list) - 1
+ and sentence_list[input_word_index - 1]
+ != double_word_items_direct_dict[input_word]
+ ):
+ print(f"Infer double word: {input_word} -> {recovered_phrase}")
+ allowed_recovery_phrases.remove(recovered_phrase)
+ recovered_phrase_list = get_split_sentence_list(recovered_phrase)
+ sentence_list[input_word_index : input_word_index + 1] = (
+ recovered_phrase_list
+ )
+ elif (
+ sentence_list[input_word_index - 1]
+ != double_word_items_direct_dict[input_word]
+ and sentence_list[input_word_index + 1]
+ != double_word_items_direct_dict[input_word]
+ ):
+ print(f"Infer double word: {input_word} -> {recovered_phrase}")
+ allowed_recovery_phrases.remove(recovered_phrase)
+ recovered_phrase_list = get_split_sentence_list(recovered_phrase)
+ sentence_list[input_word_index : input_word_index + 1] = (
+ recovered_phrase_list
+ )
+ input_word_index = input_word_index + 1
+ return sentence_list
+
+
+def get_damerau_levenshtein_distance(word_1: str, word_2: str) -> int:
+ """Get the damerau-levenshtein distance between two words for the similarity in spelling.
+
+ Args:
+ word_1 (str): First word
+ word_2 (str): Second word
+
+ Returns:
+        int: Damerau-Levenshtein distance between the two words.
+ """
+ return jf.damerau_levenshtein_distance(word_1, word_2)
+
+
+def get_levenshtein_soundex_distance(word_1: str, word_2: str) -> int:
+ """Get the levenshtein distance between the soundex encoding of two words for the similarity
+ in pronounciation.
+
+ Args:
+ word_1 (str): First word
+ word_2 (str): Second word
+
+ Returns:
+        int: Levenshtein distance between the soundex encodings of the two words.
+ """
+ soundex_word_1 = jf.soundex(word_1)
+ soundex_word_2 = jf.soundex(word_2)
+ return jf.levenshtein_distance(soundex_word_1, soundex_word_2)
+
+
+def handle_speech(guest_transcription, last_resort):
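+    """Parse an order transcription into a list of (count, item) tuples.
+
+    If parsing fails, attempt spelling and phonetic recovery, escalating to
+    last-resort recovery; return "unknown" if the order still cannot be parsed.
+    """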
+ sentence_list = get_split_sentence_list(guest_transcription)
+ print(sentence_list)
+ num_and_items = get_num_and_items(sentence_list)
+ print(num_and_items)
+
+ if num_and_items[0][0] == -1:
+ recovered_sentence_list = recover_sentence(
+ sentence_list, num_and_items[0][1], last_resort
+ )
+ recovered_num_and_items = get_num_and_items(recovered_sentence_list)
+ print(f"Recovered: {recovered_num_and_items}")
+ if recovered_num_and_items[0][0] == -1:
+ recovered_sentence_list = recover_sentence(
+ sentence_list, num_and_items[0][1], True
+ )
+ recovered_num_and_items = get_num_and_items(recovered_sentence_list)
+ print(f"Recovered: {recovered_num_and_items}")
+ if recovered_num_and_items[0][0] == -1:
+ return "unknown"
+ return recovered_num_and_items
+ else:
+ return num_and_items
diff --git a/tasks/restaurant/src/restaurant/state_machine.py b/tasks/restaurant/src/restaurant/state_machine.py
index 472109c2..1302009d 100644
--- a/tasks/restaurant/src/restaurant/state_machine.py
+++ b/tasks/restaurant/src/restaurant/state_machine.py
@@ -1,7 +1,8 @@
import smach
import smach_ros
from geometry_msgs.msg import Pose
-from lasr_skills import AskAndListen, GoToLocation, Say
+from lasr_skills import AskAndListen, GoToLocation, Rotate, Say, Wait
+from restaurant.speech.speech_handlers import handle_speech
from restaurant.states import Survey
from std_msgs.msg import Empty
@@ -41,7 +42,7 @@ def __init__(self, bar_pose_map: Pose) -> None:
Survey(),
transitions={
"customer_found": "GO_TO_CUSTOMER",
- "customer_not_found": "failed",
+ "customer_not_found": "SURVEY",
},
)
@@ -58,6 +59,30 @@ def __init__(self, bar_pose_map: Pose) -> None:
tts_phrase="Hello, I'm TIAGo. What can I get for you today?"
),
remapping={"transcribed_speech": "order_str"},
+ transitions={"succeeded": "PROCESS_ORDER", "failed": "failed"},
+ )
+
+ @smach.cb_interface(
+ input_keys=["order_str"],
+ output_keys=["order_str"],
+ outcomes=["succeeded", "failed"],
+ )
+ def speech_postprocess_cb(ud):
+ parsed_order = handle_speech(ud.order_str, False)
+ print(parsed_order)
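+                # Render the parsed order as text, naively pluralising item names when count > 1.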
+ order_string = ", ".join(
+ [
+ f"{count} {item if count == 1 or item.endswith('s') else item+'s'}"
+ for count, item in parsed_order
+ ]
+ )
+ ud.order_str = order_string
+ print(order_string)
+ return "succeeded"
+
+ smach.StateMachine.add(
+ "PROCESS_ORDER",
+ smach.CBState(speech_postprocess_cb),
transitions={"succeeded": "SAY_ORDER", "failed": "failed"},
)
@@ -65,7 +90,11 @@ def __init__(self, bar_pose_map: Pose) -> None:
"SAY_ORDER",
Say(format_str="Your order is: {}. I will deliver it shortly."),
remapping={"placeholders": "order_str"},
- transitions={"succeeded": "GO_TO_BAR", "failed": "failed"},
+ transitions={
+ "succeeded": "GO_TO_BAR",
+ "preempted": "failed",
+ "aborted": "failed",
+ },
)
smach.StateMachine.add(
@@ -76,15 +105,29 @@ def __init__(self, bar_pose_map: Pose) -> None:
smach.StateMachine.add(
"REQUEST_ITEMS",
- Say(format_str="Please get me {}"),
+ Say(
+ format_str="Please get me {}, I will turn around for you to load the order."
+ ),
remapping={"placeholders": "order_str"},
transitions={
- "suceeded": "succeeded",
+ "succeeded": "ROTATE_LOAD",
"aborted": "failed",
"preempted": "failed",
},
)
+ smach.StateMachine.add(
+ "ROTATE_LOAD",
+ Rotate(angle=180.0),
+ transitions={"succeeded": "WAIT_LOAD", "failed": "failed"},
+ )
+
+ smach.StateMachine.add(
+ "WAIT_LOAD",
+ Wait(5),
+ transitions={"succeeded": "RETURN_TO_CUSTOMER", "failed": "failed"},
+ )
+
smach.StateMachine.add(
"RETURN_TO_CUSTOMER",
GoToLocation(),
@@ -94,14 +137,28 @@ def __init__(self, bar_pose_map: Pose) -> None:
smach.StateMachine.add(
"SAY_TAKE_ORDER",
- Say(text="Here is your order"),
+ Say(
+ text="Here is your order, I will turn around for you to unload it."
+ ),
transitions={
- "succeeded": "GO_TO_SURVEY",
+ "succeeded": "ROTATE_UNLOAD",
"aborted": "failed",
"preempted": "failed",
},
)
+ smach.StateMachine.add(
+ "ROTATE_UNLOAD",
+ Rotate(angle=180.0),
+ transitions={"succeeded": "WAIT_UNLOAD", "failed": "failed"},
+ )
+
+ smach.StateMachine.add(
+ "WAIT_UNLOAD",
+ Wait(5),
+ transitions={"succeeded": "GO_TO_SURVEY", "failed": "failed"},
+ )
+
smach.StateMachine.add(
"GO_TO_SURVEY",
GoToLocation(location=bar_pose_map),
diff --git a/tasks/restaurant/src/restaurant/states/survey.py b/tasks/restaurant/src/restaurant/states/survey.py
index a8a0436c..3be8f62b 100644
--- a/tasks/restaurant/src/restaurant/states/survey.py
+++ b/tasks/restaurant/src/restaurant/states/survey.py
@@ -1,7 +1,235 @@
+import navigation_helpers
+import rospy
+import smach
+import smach_ros
+from geometry_msgs.msg import Pose, PoseStamped, PoseWithCovarianceStamped
+from lasr_skills import DetectGesture, PlayMotion, Rotate
+from lasr_vision_msgs.msg import CDRequest
+from lasr_vision_msgs.srv import CroppedDetection, CroppedDetectionRequest
class Survey(smach.StateMachine):
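+    """Search for a waving customer. Rotates through the configured survey angle
+    increments; at each angle, plays each survey head motion, runs person detection,
+    and computes an approach pose for a detected waving customer."""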
+ class HandleDetections(smach.StateMachine):
+ class GetResponse(smach.State):
+ def __init__(self):
+ super().__init__(
+ outcomes=["succeeded", "failed"],
+ input_keys=["responses"],
+ output_keys=[
+ "response",
+ "responses",
+ "person_point",
+ "cropped_image",
+ ],
+ )
+
+ def execute(self, userdata):
+ if len(userdata.responses[0].detections_3d) == 0:
+ rospy.logwarn("No response available, returning failed.")
+ return "failed"
+ response = userdata.responses[0].detections_3d.pop(0)
+ userdata.response = response
+ userdata.cropped_image = userdata.responses[0].cropped_imgs.pop(0)
+ userdata.person_point = response.point
+ return "succeeded"
+
+ class ComputeApproachPose(smach.State):
+
+ def __init__(self):
+
+ super().__init__(
+ outcomes=["succeeded", "failed"],
+ input_keys=["person_point"],
+ output_keys=["customer_approach_pose"],
+ )
+
+ def execute(self, userdata):
+ robot_pose_with_covariance = rospy.wait_for_message(
+ "/robot_pose", PoseWithCovarianceStamped
+ )
+ robot_pose = PoseStamped(
+ pose=robot_pose_with_covariance.pose.pose,
+ header=robot_pose_with_covariance.header,
+ )
+
+ person_pose = PoseStamped(
+ pose=Pose(
+ position=userdata.person_point,
+ orientation=robot_pose.pose.orientation,
+ ),
+ header=robot_pose.header,
+ )
+ approach_pose = navigation_helpers.get_pose_on_path(
+ robot_pose,
+ person_pose,
+ )
+ rospy.loginfo(approach_pose)
+
+ if approach_pose is None:
+ return "failed"
+
+ approach_pose.pose.orientation = navigation_helpers.compute_face_quat(
+ approach_pose.pose,
+ person_pose.pose,
+ )
+ userdata.customer_approach_pose = approach_pose.pose
+
+ return "succeeded"
+
+ def __init__(self):
+ super().__init__(
+ outcomes=["succeeded", "failed"],
+ input_keys=["responses"],
+ output_keys=["responses", "customer_approach_pose"],
+ )
+
+ with self:
+
+ smach.StateMachine.add(
+ "GET_RESPONSE",
+ self.GetResponse(),
+ transitions={"succeeded": "DETECT_GESTURE", "failed": "failed"},
+ )
+
+ smach.StateMachine.add(
+ "DETECT_GESTURE",
+ DetectGesture("waving"),
+ transitions={
+ "succeeded": "COMPUTE_APPROACH_POSE",
+ "failed": "GET_RESPONSE",
+ },
+ remapping={"img_msg": "cropped_image"},
+ )
+
+ smach.StateMachine.add(
+ "COMPUTE_APPROACH_POSE",
+ self.ComputeApproachPose(),
+ transitions={"succeeded": "succeeded", "failed": "GET_RESPONSE"},
+ )
+
def __init__(self) -> None:
- super().__init__(outcomes=["customer_found", "customer_not_found"])
+ super().__init__(
+ outcomes=["customer_found", "customer_not_found"],
+ output_keys=["customer_approach_pose"],
+ )
+
+ with self:
+
+ angle_iterator = smach.Iterator(
+ outcomes=["succeeded", "failed"],
+ it=rospy.get_param("/restaurant/survey_angle_increments"),
+ it_label="angle_increment",
+ input_keys=[],
+ output_keys=["customer_approach_pose"],
+ exhausted_outcome="failed",
+ )
+
+ with angle_iterator:
+
+ motion_iterator = smach.Iterator(
+ outcomes=["succeeded", "failed"],
+ it=rospy.get_param("/restaurant/survey_motions"),
+ it_label="motion_name",
+ input_keys=[],
+ output_keys=["customer_approach_pose"],
+ exhausted_outcome="failed",
+ )
+
+ with motion_iterator:
+
+ container_sm = smach.StateMachine(
+ outcomes=["succeeded", "failed", "continue"],
+ input_keys=["motion_name"],
+ output_keys=["customer_approach_pose"],
+ )
+
+ with container_sm:
+ smach.StateMachine.add(
+ "SURVEY_MOTION",
+ PlayMotion(),
+ transitions={
+ "succeeded": "DETECT",
+ "aborted": "failed",
+ "preempted": "failed",
+ },
+ )
+
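+                        # Detect the closest person with a YOLOv8 segmentation model, via the cropped detection service.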
+ smach.StateMachine.add(
+ "DETECT",
+ smach_ros.ServiceState(
+ "/vision/cropped_detection",
+ CroppedDetection,
+ request=CroppedDetectionRequest(
+ requests=[
+ CDRequest(
+ method="closest",
+ use_mask=True,
+ yolo_model="yolov8x-seg.pt",
+ yolo_model_confidence=0.5,
+ yolo_nms_threshold=0.3,
+ return_sensor_reading=False,
+ object_names=["person"],
+ polygons=[],
+ )
+ ]
+ ),
+ output_keys=["responses"],
+ response_slots=["responses"],
+ ),
+ transitions={
+ "succeeded": "HANDLE_DETECTIONS",
+ "aborted": "failed",
+ "preempted": "failed",
+ },
+ )
+
+ smach.StateMachine.add(
+ "HANDLE_DETECTIONS",
+ self.HandleDetections(),
+ transitions={
+ "succeeded": "succeeded",
+ "failed": "continue",
+ },
+ )
+
+ motion_iterator.set_contained_state(
+ "CONTAINER_STATE", container_sm, loop_outcomes=["continue"]
+ )
+
+ angle_container_sm = smach.StateMachine(
+ outcomes=["succeeded", "failed", "continue"],
+ input_keys=["angle_increment"],
+ output_keys=["customer_approach_pose"],
+ )
+ with angle_container_sm:
+
+ smach.StateMachine.add(
+ "ROTATE",
+ Rotate(),
+ transitions={"succeeded": "MOTION_ITERATOR"},
+ remapping={"angle": "angle_increment"},
+ )
+
+ smach.StateMachine.add(
+ "MOTION_ITERATOR",
+ motion_iterator,
+                    transitions={
+                        "succeeded": "succeeded",
+                        "failed": "continue",
+                    },
+ )
+ angle_iterator.set_contained_state(
+ "CONTAINER_STATE", angle_container_sm, loop_outcomes=["continue"]
+ )
+
+ smach.StateMachine.add(
+ "ANGLE_ITERATOR",
+ angle_iterator,
+ transitions={
+ "succeeded": "customer_found",
+ "failed": "customer_not_found",
+ },
+ )