Merge pull request #134 from pipeless-ai/yolo-world
Support several inference outputs for ONNX Runtime + YOLO World example
Showing 11 changed files with 267 additions and 25 deletions.
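This change extends the ONNX Runtime integration so that models with more than one output tensor can be used, and adds a YOLO World example stage (a PPE detector here) whose post-processing hook reads three named outputs. A rough sketch of the structure that hook receives in frame_data['inference_output'] — the output names come from the example below, the shapes are purely illustrative:

    import numpy as np

    # Illustrative only: the shapes are hypothetical, the names match the example hook below
    inference_output = {
        'boxes':  np.zeros((1, 100, 4), dtype=np.float32),    # one [x1, y1, x2, y2] box per candidate detection
        'labels': np.full((1, 100), -1, dtype=np.int64),      # class index, negative for padded slots
        'scores': np.full((1, 100), -1.0, dtype=np.float32),  # confidence, negative for padded slots
    }
    boxes = np.array(inference_output['boxes'][0])  # same access pattern the hook uses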
New file (100 lines added): post-processing hook that draws the YOLO World detections on the frame.
import cv2
import numpy as np


def hook(frame_data, _):
    frame = frame_data['original']
    model_output = frame_data['inference_output']
    if len(model_output) > 0:
        yolo_input_shape = (640, 640, 3)  # h,w,c
        boxes, scores, class_ids = postprocess_yolo_world(frame.shape, yolo_input_shape, model_output)
        class_labels = [yolo_classes[int(id)] for id in class_ids]
        for i in range(len(boxes)):
            draw_bbox(frame, boxes[i], class_labels[i], scores[i], color_palette[int(class_ids[i])])

        frame_data['modified'] = frame


#################################################
# Util functions to make the hook more readable #
#################################################
yolo_classes = ['hard hat', 'gloves', 'protective boot', 'reflective vest', 'person']
color_palette = np.random.uniform(0, 255, size=(len(yolo_classes), 3))


def draw_bbox(image, box, label='', score=None, color=(255, 0, 255), txt_color=(255, 255, 255)):
    lw = max(round(sum(image.shape) / 2 * 0.003), 2)  # line width scaled to the image size
    p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
    cv2.rectangle(image, p1, p2, color, thickness=lw, lineType=cv2.LINE_AA)
    if label:
        tf = max(lw - 1, 1)  # font thickness
        w, h = cv2.getTextSize(str(label), 0, fontScale=lw / 3, thickness=tf)[0]  # text width, height
        outside = p1[1] - h >= 3  # whether the label fits above the box
        p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
        cv2.rectangle(image, p1, p2, color, -1, cv2.LINE_AA)  # filled label background
        if score is not None:
            cv2.putText(image, f'{label} - {score}', (p1[0], p1[1] - 2 if outside else p1[1] + h + 2),
                        0, lw / 3, txt_color, thickness=tf, lineType=cv2.LINE_AA)
        else:
            cv2.putText(image, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2),
                        0, lw / 3, txt_color, thickness=tf, lineType=cv2.LINE_AA)


def postprocess_yolo_world(original_frame_shape, resized_img_shape, output):
    original_height, original_width, _ = original_frame_shape
    resized_height, resized_width, _ = resized_img_shape

    boxes = np.array(output['boxes'][0])
    classes = np.array(output['labels'][0])
    scores = np.array(output['scores'][0])

    # Filter out negative indexes (padding slots returned by the model)
    neg_indexes_classes = np.where(classes < 0)[0]
    neg_indexes_scores = np.where(scores < 0)[0]
    neg_indexes = np.concatenate((neg_indexes_classes, neg_indexes_scores))

    mask = np.ones(classes.shape, dtype=bool)
    mask[neg_indexes] = False

    boxes = boxes[mask]
    classes = classes[mask]
    scores = scores[mask]

    # Arrays to accumulate the results
    result_boxes = []
    result_classes = []
    result_scores = []

    # Calculate the scaling factor for the bounding box coordinates
    if original_height > original_width:
        scale_factor = original_height / resized_height
    else:
        scale_factor = original_width / resized_width

    # Rescale the output boxes to the original frame size
    for i, score in enumerate(scores):
        if score < 0.05:  # apply confidence threshold
            continue
        if not score < 1:
            continue  # Remove bad predictions that return a score of 1.0

        x1, y1, x2, y2 = boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3]

        # Calculate the scaled coordinates of the bounding box.
        # The original image was padded to be square before inference.
        if original_height > original_width:
            # We added pad on the width
            pad = (resized_width - original_width / scale_factor) // 2
            x1 = int((x1 - pad) * scale_factor)
            y1 = int(y1 * scale_factor)
            x2 = int((x2 - pad) * scale_factor)
            y2 = int(y2 * scale_factor)
        else:
            # We added pad on the height
            pad = (resized_height - original_height / scale_factor) // 2
            x1 = int(x1 * scale_factor)
            y1 = int((y1 - pad) * scale_factor)
            x2 = int(x2 * scale_factor)
            y2 = int((y2 - pad) * scale_factor)

        result_classes.append(classes[i])
        result_scores.append(score)
        result_boxes.append([x1, y1, x2, y2])

    return result_boxes, result_scores, result_classes
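As a concrete check of the rescaling logic: for a 1920x1080 frame and the 640x640 model input, scale_factor = 1920 / 640 = 3, the letterboxed image is 640x360, and the vertical pad is (640 - 1080 / 3) // 2 = 140 pixels; a box corner at y = 200 in model coordinates therefore maps back to (200 - 140) * 3 = 180 pixels in the original frame, while x coordinates are simply multiplied by 3.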
New file (50 lines added): pre-processing hook that resizes, pads and normalizes the frame into the YOLO World input format.
import cv2
import numpy as np


def is_cuda_available():
    return cv2.cuda.getCudaEnabledDeviceCount() > 0


"""
Resize and pad image. Uses CUDA when available
"""
def resize_and_pad(frame, target_dim, pad_top, pad_bottom, pad_left, pad_right):
    target_height, target_width = target_dim
    if is_cuda_available():
        # FIXME: the memory allocation here could make this even slower than running on CPU.
        # We must provide the frame from GPU memory to the hook.
        frame_gpu = cv2.cuda_GpuMat(frame)
        resized_frame_gpu = cv2.cuda.resize(frame_gpu, (target_width, target_height), interpolation=cv2.INTER_CUBIC)
        padded_frame_gpu = cv2.cuda.copyMakeBorder(resized_frame_gpu, pad_top, pad_bottom, pad_left, pad_right, cv2.BORDER_CONSTANT, value=(0, 0, 0))
        result = padded_frame_gpu.download()
        return result
    else:
        resized_frame = cv2.resize(frame, (target_width, target_height), interpolation=cv2.INTER_CUBIC)
        padded_frame = cv2.copyMakeBorder(resized_frame, pad_top, pad_bottom, pad_left, pad_right,
                                          borderType=cv2.BORDER_CONSTANT, value=(0, 0, 0))
        return padded_frame


def resize_with_padding(frame, target_dim):
    target_height, target_width, _ = target_dim
    frame_height, frame_width, _ = frame.shape

    width_ratio = target_width / frame_width
    height_ratio = target_height / frame_height
    # Choose the minimum scaling factor to maintain aspect ratio
    scale_factor = min(width_ratio, height_ratio)
    # Calculate new dimensions after resizing
    new_width = int(frame_width * scale_factor)
    new_height = int(frame_height * scale_factor)
    # Calculate padding dimensions
    pad_width = (target_width - new_width) // 2
    pad_height = (target_height - new_height) // 2

    padded_image = resize_and_pad(frame, (new_height, new_width), pad_height, pad_height, pad_width, pad_width)
    return padded_image


def hook(frame_data, _):
    frame = frame_data["original"].view()
    yolo_input_shape = (640, 640, 3)  # h,w,c
    frame = resize_with_padding(frame, yolo_input_shape)
    frame = np.array(frame) / 255.0  # Normalize pixel values
    frame = np.transpose(frame, axes=(2, 0, 1))  # Convert to c,h,w
    inference_inputs = frame.astype("float32")
    frame_data['inference_input'] = inference_inputs
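Not part of the commit, but a quick way to sanity-check the hook above is to feed it a synthetic frame; assuming a 1080p BGR frame, the tensor stored in frame_data['inference_input'] should come out as float32 with shape (3, 640, 640) and values in [0, 1]:

    import numpy as np

    # Hypothetical standalone check; 'hook' is the pre-processing hook defined above
    frame_data = {'original': np.zeros((1080, 1920, 3), dtype=np.uint8)}
    hook(frame_data, None)
    print(frame_data['inference_input'].shape)  # (3, 640, 640)
    print(frame_data['inference_input'].dtype)  # float32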
New file (7 lines added): inference configuration for the ONNX Runtime.
{
    "runtime": "onnx",
    "model_uri": "https://pipeless-public.s3.eu-west-3.amazonaws.com/yolow-l-ppe.onnx",
    "inference_params": {
        "execution_provider": "cpu"
    }
}
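The several outputs this PR adds support for can be inspected on the exported model itself with the onnxruntime package; a minimal sketch, assuming the model from model_uri has been downloaded locally as yolow-l-ppe.onnx:

    import onnxruntime as ort

    # Hypothetical inspection of a local copy of the model referenced in the config above
    session = ort.InferenceSession('yolow-l-ppe.onnx', providers=['CPUExecutionProvider'])
    print([i.name for i in session.get_inputs()])   # model input names
    print([o.name for o in session.get_outputs()])  # expected to include boxes, labels and scores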
The remaining changed files are not rendered in this view.