
Commit 64a7495

m-barker, jws-1 and Sveali41 authored
Post RoboCup receptionist merge (#264)
Co-authored-by: Jared Swift <j.w.swift@outlook.com>
Co-authored-by: Siyao <sveali41@gmail.com>
1 parent dd5f855 commit 64a7495

File tree

22 files changed (+351, -764 lines)


common/helpers/markers/src/markers/__init__.py

Lines changed: 0 additions & 1 deletion
@@ -55,7 +55,6 @@ def create_and_publish_marker(
     publisher_counts[publisher] += 1
     marker_msg = create_marker(point_stamped, idx, r, g, b)
     publisher.publish(marker_msg)
-    rospy.sleep(2)  # Needed to prevent markers from being overwritten
     if name is not None:
         name_location = point_stamped.point
         name_location.z += 0.1
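
Note on the deleted sleep: RViz identifies markers by their (ns, id) pair and only overwrites a marker when a new one reuses the same pair, so publishing each marker with a distinct id removes the need for a delay. A minimal illustrative sketch (make_marker is a hypothetical stand-in, not this package's create_marker):

#!/usr/bin/env python3
# Illustrative sketch only: distinct marker ids prevent RViz from
# overwriting earlier markers, so no sleep between publishes is needed.
import rospy
from geometry_msgs.msg import PointStamped
from visualization_msgs.msg import Marker


def make_marker(point_stamped: PointStamped, idx: int, r: float, g: float, b: float) -> Marker:
    marker = Marker()
    marker.header = point_stamped.header
    marker.ns = "lasr_markers"  # hypothetical namespace
    marker.id = idx             # unique id per marker: no overwrites
    marker.type = Marker.SPHERE
    marker.action = Marker.ADD
    marker.pose.position = point_stamped.point
    marker.pose.orientation.w = 1.0
    marker.scale.x = marker.scale.y = marker.scale.z = 0.1
    marker.color.r, marker.color.g, marker.color.b, marker.color.a = r, g, b, 1.0
    return marker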

common/vision/lasr_vision_bodypix/nodes/bodypix_services.py

Lines changed: 0 additions & 17 deletions
@@ -117,23 +117,6 @@ def detect_wave(
         rospy.logerr(f"Error getting wave point: {e}")
         wave_position = PointStamped()

-    # if debug_publisher is not None:
-    #     cv2_gesture_img = cv2_img.msg_to_cv2_img(request.pcl_msg)
-    #     # Add text to the image
-    #     cv2.putText(
-    #         cv2_gesture_img,
-    #         gesture_to_detect,
-    #         (10, 30),
-    #         cv2.FONT_HERSHEY_SIMPLEX,
-    #         1,
-    #         (0, 255, 0),
-    #         2,
-    #         cv2.LINE_AA,
-    #     )
-    #     # Publish the image
-    #     debug_publisher.publish(cv2_img.cv2_img_to_msg(cv2_gesture_img))
-    # create_and_publish_marker(marker_pub, wave_position, r=0, g=1, b=0)
-
     is_waving = False if gesture_to_detect is None else True

     return DetectWaveResponse(
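
For reference, a hypothetical client-side call to a wave-detection service of this shape (the service name, srv import path, and response field below are assumptions for illustration, not taken from this commit):

#!/usr/bin/env python3
# Hypothetical usage sketch: "/bodypix/detect_wave" and "wave_detected"
# are assumed names, not defined by this diff.
import rospy
from lasr_vision_msgs.srv import DetectWave

rospy.init_node("detect_wave_client")
rospy.wait_for_service("/bodypix/detect_wave")
detect_wave = rospy.ServiceProxy("/bodypix/detect_wave", DetectWave)
response = detect_wave()  # request fields depend on the DetectWave.srv definition
rospy.loginfo(f"wave detected: {response.wave_detected}")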

common/vision/lasr_vision_clip/src/lasr_vision_clip/clip_utils.py

Lines changed: 0 additions & 1 deletion
@@ -1,6 +1,5 @@
 #!/usr/bin/env python3
 import torch
-import rospy
 import cv2
 import cv2_img
 import numpy as np

common/vision/lasr_vision_feature_extraction/nodes/service

Lines changed: 2 additions & 4 deletions
@@ -6,7 +6,6 @@ from lasr_vision_msgs.srv import (
 from lasr_vision_feature_extraction.categories_and_attributes import (
     CategoriesAndAttributes,
     CelebAMaskHQCategoriesAndAttributes,
-    DeepFashion2GeneralizedCategoriesAndAttributes,
 )

 from cv2_img import msg_to_cv2_img
@@ -48,7 +47,8 @@ def detect(
         head_frame,
         torso_frame,
         full_frame,
-        image_raw=request.image_raw,
+        head_mask,
+        torso_mask,
         cloth_predictor=cloth_predictor,
     )
     response = TorchFaceFeatureDetectionDescriptionResponse()
@@ -58,8 +58,6 @@ def detect(

 if __name__ == "__main__":
     # predictor will be global when inited, thus will be used within the function above.
-    # head_model = lasr_vision_feature_extraction.load_face_classifier_model()
-    # head_predictor = lasr_vision_feature_extraction.Predictor(head_model, torch.device('cpu'), CelebAMaskHQCategoriesAndAttributes)
     cloth_model = lasr_vision_feature_extraction.load_cloth_classifier_model()
     cloth_predictor = lasr_vision_feature_extraction.ClothPredictor(
         cloth_model, torch.device("cpu"), DeepFashion2GeneralizedCategoriesAndAttributes
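
For illustration, a sketch of how a caller might supply the new mask arguments, assuming (not shown in this diff) that the callee is predict_frame and that the masks are numpy arrays produced by an upstream segmentation step:

# Hypothetical caller sketch; the predictor construction mirrors the
# __main__ block above, but the frames and masks here are stand-ins.
import numpy as np
import torch
import lasr_vision_feature_extraction
from lasr_vision_feature_extraction.categories_and_attributes import (
    DeepFashion2GeneralizedCategoriesAndAttributes,
)

cloth_model = lasr_vision_feature_extraction.load_cloth_classifier_model()
cloth_predictor = lasr_vision_feature_extraction.ClothPredictor(
    cloth_model, torch.device("cpu"), DeepFashion2GeneralizedCategoriesAndAttributes
)

head_frame = np.zeros((128, 128, 3), dtype=np.uint8)         # stand-in crop
torso_frame = np.zeros((256, 128, 3), dtype=np.uint8)        # stand-in crop
full_frame = np.zeros((480, 640, 3), dtype=np.uint8)         # stand-in frame
head_mask = np.ones(head_frame.shape[:2], dtype=np.uint8)    # stand-in mask
torso_mask = np.ones(torso_frame.shape[:2], dtype=np.uint8)  # stand-in mask

result = lasr_vision_feature_extraction.predict_frame(
    head_frame,
    torso_frame,
    full_frame,
    head_mask,
    torso_mask,
    cloth_predictor=cloth_predictor,
)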

common/vision/lasr_vision_feature_extraction/src/lasr_vision_feature_extraction/__init__.py

Lines changed: 0 additions & 32 deletions
@@ -11,12 +11,10 @@
 import torchvision.models as models
 from lasr_vision_feature_extraction.categories_and_attributes import (
     CategoriesAndAttributes,
-    CelebAMaskHQCategoriesAndAttributes,
     DeepFashion2GeneralizedCategoriesAndAttributes,
 )
 from lasr_vision_feature_extraction.image_with_masks_and_attributes import (
     ImageWithMasksAndAttributes,
-    ImageOfPerson,
     ImageOfCloth,
 )
 from lasr_vision_msgs.srv import Vqa, VqaRequest
@@ -445,32 +443,6 @@ def predict(self, rgb_image: np.ndarray) -> ImageWithMasksAndAttributes:
         return image_obj


-def load_face_classifier_model():
-    cat_layers = CelebAMaskHQCategoriesAndAttributes.merged_categories.keys().__len__()
-    segment_model = UNetWithResnetEncoder(num_classes=cat_layers)
-    predictions = (
-        len(CelebAMaskHQCategoriesAndAttributes.attributes)
-        - len(CelebAMaskHQCategoriesAndAttributes.avoided_attributes)
-        + len(CelebAMaskHQCategoriesAndAttributes.mask_labels)
-    )
-    predict_model = MultiLabelResNet(
-        num_labels=predictions, input_channels=cat_layers + 3
-    )
-    model = CombinedModel(segment_model, predict_model, cat_layers=cat_layers)
-    model.eval()
-
-    r = rospkg.RosPack()
-    model, _, _, _ = load_torch_model(
-        model,
-        None,
-        path=path.join(
-            r.get_path("lasr_vision_feature_extraction"), "models", "face_model.pth"
-        ),
-        cpu_only=True,
-    )
-    return model
-
-
 def load_cloth_classifier_model():
     num_classes = len(DeepFashion2GeneralizedCategoriesAndAttributes.attributes)
     model = SegmentPredictorBbox(
@@ -558,9 +530,6 @@ def predict_frame(
     head_frame = pad_image_to_even_dims(head_frame)
     torso_frame = pad_image_to_even_dims(torso_frame)

-    # rst_person = ImageOfPerson.from_parent_instance(
-    #     head_predictor.predict(head_frame)
-    # ).describe()
     rst_cloth = ImageOfCloth.from_parent_instance(
         cloth_predictor.predict(torso_frame)
     ).describe()
@@ -597,7 +566,6 @@ def predict_frame(
         rst_person["hair_shape"] = "long hair"

     result = {
-        **rst_person,
         **rst_cloth,
     }

common/vision/lasr_vision_feature_extraction/src/lasr_vision_feature_extraction/categories_and_attributes.py

Lines changed: 0 additions & 160 deletions
@@ -10,166 +10,6 @@ class CategoriesAndAttributes:
     thresholds_pred: dict[str, float] = {}


-class CelebAMaskHQCategoriesAndAttributes(CategoriesAndAttributes):
-    mask_categories = [
-        "cloth",
-        "r_ear",
-        "hair",
-        "l_brow",
-        "l_eye",
-        "l_lip",
-        "mouth",
-        "neck",
-        "nose",
-        "r_brow",
-        "r_ear",
-        "r_eye",
-        "skin",
-        "u_lip",
-        "hat",
-        "l_ear",
-        "neck_l",
-        "eye_g",
-    ]
-    merged_categories = {
-        "ear": [
-            "l_ear",
-            "r_ear",
-        ],
-        "brow": [
-            "l_brow",
-            "r_brow",
-        ],
-        "eye": [
-            "l_eye",
-            "r_eye",
-        ],
-        "mouth": [
-            "l_lip",
-            "u_lip",
-            "mouth",
-        ],
-    }
-    _categories_to_merge = []
-    for key in sorted(list(merged_categories.keys())):
-        for cat in merged_categories[key]:
-            _categories_to_merge.append(cat)
-    for key in mask_categories:
-        if key not in _categories_to_merge:
-            merged_categories[key] = [key]
-    mask_labels = ["hair"]
-    selective_attributes = {
-        "facial_hair": [
-            "5_o_Clock_Shadow",
-            "Goatee",
-            "Mustache",
-            "No_Beard",
-            "Sideburns",
-        ],
-        "hair_colour": [
-            "Black_Hair",
-            "Blond_Hair",
-            "Brown_Hair",
-            "Gray_Hair",
-        ],
-        "hair_shape": [
-            "Straight_Hair",
-            "Wavy_Hair",
-        ],
-    }
-    plane_attributes = [
-        "Bangs",
-        "Eyeglasses",
-        "Wearing_Earrings",
-        "Wearing_Hat",
-        "Wearing_Necklace",
-        "Wearing_Necktie",
-        "Male",
-    ]
-    avoided_attributes = [
-        "Arched_Eyebrows",
-        "Bags_Under_Eyes",
-        "Big_Lips",
-        "Big_Nose",
-        "Bushy_Eyebrows",
-        "Chubby",
-        "Double_Chin",
-        "High_Cheekbones",
-        "Narrow_Eyes",
-        "Oval_Face",
-        "Pointy_Nose",
-        "Receding_Hairline",
-        "Rosy_Cheeks",
-        "Heavy_Makeup",
-        "Wearing_Lipstick",
-        "Attractive",
-        "Blurry",
-        "Mouth_Slightly_Open",
-        "Pale_Skin",
-        "Smiling",
-        "Young",
-    ]
-    attributes = [
-        "5_o_Clock_Shadow",
-        "Arched_Eyebrows",
-        "Attractive",
-        "Bags_Under_Eyes",
-        "Bald",
-        "Bangs",
-        "Big_Lips",
-        "Big_Nose",
-        "Black_Hair",
-        "Blond_Hair",
-        "Blurry",
-        "Brown_Hair",
-        "Bushy_Eyebrows",
-        "Chubby",
-        "Double_Chin",
-        "Eyeglasses",
-        "Goatee",
-        "Gray_Hair",
-        "Heavy_Makeup",
-        "High_Cheekbones",
-        "Male",
-        "Mouth_Slightly_Open",
-        "Mustache",
-        "Narrow_Eyes",
-        "No_Beard",
-        "Oval_Face",
-        "Pale_Skin",
-        "Pointy_Nose",
-        "Receding_Hairline",
-        "Rosy_Cheeks",
-        "Sideburns",
-        "Smiling",
-        "Straight_Hair",
-        "Wavy_Hair",
-        "Wearing_Earrings",
-        "Wearing_Hat",
-        "Wearing_Lipstick",
-        "Wearing_Necklace",
-        "Wearing_Necktie",
-        "Young",
-    ]
-
-    thresholds_mask: dict[str, float] = {}
-    thresholds_pred: dict[str, float] = {}
-
-    # set default thresholds:
-    for key in sorted(merged_categories.keys()):
-        thresholds_mask[key] = 0.5
-    for key in attributes + mask_labels:
-        if key not in avoided_attributes:
-            thresholds_pred[key] = 0.5
-
-    # set specific thresholds:
-    thresholds_mask["eye_g"] = 0.5
-    thresholds_pred["Eyeglasses"] = 0.5
-    thresholds_pred["Wearing_Earrings"] = 0.5
-    thresholds_pred["Wearing_Necklace"] = 0.5
-    thresholds_pred["Wearing_Necktie"] = 0.5
-
-
 class DeepFashion2GeneralizedCategoriesAndAttributes(CategoriesAndAttributes):
     mask_categories = [
         "short sleeve top",

common/vision/lasr_vision_feature_extraction/src/lasr_vision_feature_extraction/image_with_masks_and_attributes.py

Lines changed: 0 additions & 45 deletions
@@ -65,51 +65,6 @@ def _max_value_tuple(some_dict: dict[str, float]) -> tuple[str, float]:
     return max_key, some_dict[max_key]


-class ImageOfPerson(ImageWithMasksAndAttributes):
-    def __init__(
-        self,
-        image: np.ndarray,
-        masks: dict[str, np.ndarray],
-        attributes: dict[str, float],
-        categories_and_attributes: CategoriesAndAttributes,
-    ):
-        super().__init__(image, masks, attributes, categories_and_attributes)
-
-    @classmethod
-    def from_parent_instance(
-        cls, parent_instance: ImageWithMasksAndAttributes
-    ) -> "ImageOfPerson":
-        """
-        Creates an instance of ImageOfPerson using the properties of an
-        instance of ImageWithMasksAndAttributes.
-        """
-        return cls(
-            image=parent_instance.image,
-            masks=parent_instance.masks,
-            attributes=parent_instance.attributes,
-            categories_and_attributes=parent_instance.categories_and_attributes,
-        )
-
-    def describe(self) -> dict:
-        has_hair = self.attributes["hair"] - 0.5
-        hair_colour = _max_value_tuple(self.selective_attribute_dict["hair_colour"])[0]
-        hair_shape = _max_value_tuple(self.selective_attribute_dict["hair_shape"])[0]
-        facial_hair = 1 - self.attributes["No_Beard"] - 0.5
-        glasses = self.attributes["Eyeglasses"] - 0.5
-        hat = self.attributes["Wearing_Hat"] - 0.5
-
-        result = {
-            "has_hair": has_hair,
-            "hair_colour": hair_colour,
-            "hair_shape": hair_shape,
-            "facial_hair": facial_hair,
-            "glasses": glasses,
-            "hat": hat,
-        }
-
-        return result
-
-
 class ImageOfCloth(ImageWithMasksAndAttributes):
     def __init__(
         self,
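
ImageOfCloth keeps the same from_parent_instance pattern that the deleted ImageOfPerson used. A simplified, self-contained sketch of the pattern (toy classes, not the repository's):

# Toy illustration of the from_parent_instance pattern: re-wrap a parent
# instance's fields in a subclass so subclass-only methods (describe())
# become available on already-computed data.
class Parent:
    def __init__(self, image, masks, attributes):
        self.image = image
        self.masks = masks
        self.attributes = attributes


class Child(Parent):
    @classmethod
    def from_parent_instance(cls, parent: "Parent") -> "Child":
        return cls(parent.image, parent.masks, parent.attributes)

    def describe(self) -> dict:
        # subclass-specific interpretation of the shared fields
        return {"n_masks": len(self.masks)}


child = Child.from_parent_instance(Parent(image=None, masks={"hair": 1}, attributes={}))
print(child.describe())  # {'n_masks': 1}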

common/vision/lasr_vision_yolov8/src/lasr_vision_yolov8/yolo.py

Lines changed: 0 additions & 5 deletions
@@ -152,11 +152,6 @@ def detect_3d(
             f"Detected point: {detection.point} of object {detection.name}"
         )

-        # markers.create_and_publish_marker(
-        #     debug_point_publisher,
-        #     PointStamped(point=detection.point, header=pcl_map.header),
-        # )
-
         detected_objects.append(detection)

     # publish to debug topic
