From 0a3c40cf16f0ce1fd755e2fd2d4d0a87152f4f43 Mon Sep 17 00:00:00 2001
From: Nathan
Date: Thu, 28 Mar 2024 17:22:44 -0400
Subject: [PATCH 01/20] added error signal compatibility

---
 .../turtlebot_vel_ctrl/vel_ctrl.py | 30 +++++++++++++------
 1 file changed, 21 insertions(+), 9 deletions(-)

diff --git a/ros2/TurtlebotController/turtlebot_vel_ctrl/turtlebot_vel_ctrl/vel_ctrl.py b/ros2/TurtlebotController/turtlebot_vel_ctrl/turtlebot_vel_ctrl/vel_ctrl.py
index 32f5d4669..8eb38b294 100644
--- a/ros2/TurtlebotController/turtlebot_vel_ctrl/turtlebot_vel_ctrl/vel_ctrl.py
+++ b/ros2/TurtlebotController/turtlebot_vel_ctrl/turtlebot_vel_ctrl/vel_ctrl.py
@@ -26,18 +26,30 @@ def camData_callback(self,msg_in):
         msg_out = Twist()
 
-        k = 1.0
-        err_z = msg_in.data
+        # No camera data found: set velocity to zero
+        if msg_in.data < -999.0:
+            msg_out.linear.x = 0.0
+            msg_out.linear.y = 0.0
+            msg_out.linear.z = 0.0
 
-        ang_input = err_z * k
+            msg_out.angular.x = 0.0
+            msg_out.angular.y = 0.0
+            msg_out.angular.z = 0.0
 
-        msg_out.linear.x = 0.1 #define constant speed
-        msg_out.linear.y = 0.0
-        msg_out.linear.z = 0.0
+        # One or both lanes are detected and the error is sent as usual
+        else:
+            k = 1.0
+            err_z = msg_in.data
 
-        msg_out.angular.x = 0.0
-        msg_out.angular.y = 0.0
-        msg_out.angular.z = ang_input
+            ang_input = err_z * k
+
+            msg_out.linear.x = 0.1  # define constant speed
+            msg_out.linear.y = 0.0
+            msg_out.linear.z = 0.0
+
+            msg_out.angular.x = 0.0
+            msg_out.angular.y = 0.0
+            msg_out.angular.z = ang_input
 
         self.vel_publisher.publish(msg_out)

From d37836fe43271f3120f3589d6d2420a0af0c4f40 Mon Sep 17 00:00:00 2001
From: "Vaibhav.hariani@gmail.com"
Date: Thu, 28 Mar 2024 19:03:47 -0400
Subject: [PATCH 02/20] Added ROS integration to the UI and a simple ROS2
 webcam publisher. Cleaned up some testing code from headaches of days long
 past.
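A note on the control law in [PATCH 01/20] above: it is a proportional steering law guarded by a stop sentinel. The sketch below factors that law into a pure helper so it can be unit-tested without spinning up a node; the names (error_to_twist, STOP_SENTINEL) and the keyword defaults are illustrative assumptions, not part of either patch.

    from geometry_msgs.msg import Twist

    # Assumed constant: mirrors the `msg_in.data < -999.0` check in the patch.
    STOP_SENTINEL = -999.0

    def error_to_twist(err_z: float, k: float = 1.0, speed: float = 0.1) -> Twist:
        """Stop on the no-camera sentinel, otherwise steer proportionally."""
        msg = Twist()  # every field defaults to 0.0, i.e. a full stop
        if err_z < STOP_SENTINEL:
            return msg
        msg.linear.x = speed       # constant forward speed, as in the patch
        msg.angular.z = k * err_z  # proportional steering input
        return msg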
--- UI/Homepage.py | 2 - "UI/pages/\360\237\233\243 Lane_Detection.py" | 85 +++++++++++++------ ros2/src/lanefollowingtest/cam_publisher.py | 63 ++++++++++++++ .../lanefollowingtest/camera_sanity_alt.py | 13 --- ros2/src/lanefollowingtest/copypaste.py | 43 ---------- 5 files changed, 122 insertions(+), 84 deletions(-) create mode 100644 ros2/src/lanefollowingtest/cam_publisher.py delete mode 100644 ros2/src/lanefollowingtest/camera_sanity_alt.py delete mode 100644 ros2/src/lanefollowingtest/copypaste.py diff --git a/UI/Homepage.py b/UI/Homepage.py index cea9bebcd..fab2d34da 100644 --- a/UI/Homepage.py +++ b/UI/Homepage.py @@ -2,8 +2,6 @@ from streamlit import runtime from streamlit.web import cli as stcli import sys -import cv2 - def runner(): st.header("Welcome to the IGVC Homepage!") diff --git "a/UI/pages/\360\237\233\243 Lane_Detection.py" "b/UI/pages/\360\237\233\243 Lane_Detection.py" index 6976ddf2d..7fa607102 100644 --- "a/UI/pages/\360\237\233\243 Lane_Detection.py" +++ "b/UI/pages/\360\237\233\243 Lane_Detection.py" @@ -1,35 +1,68 @@ import streamlit as st import cv2 +from sensor_msgs.msg import Image +import rclpy +from rclpy.node import Node +from rclpy.qos import qos_profile_sensor_data +from cv_bridge import CvBridge +import typing -def render_imgs(src, tabs): - # SRC is cv2.VideoCapture FOR NOW: Will need to be tweaked in the future to rossify - vidcap = cv2.VideoCapture(src) - frame_containers = [tab.empty() for tab in tabs] - while vidcap.isOpened(): - ret, frame = vidcap.read() - if not ret: - st.write("Video Capture Stopped") - else: - frame_containers[0].image(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB),channels="RGB") - frame_containers[1].image(cv2.cvtColor(frame, cv2.COLOR_BGR2HSV_FULL),channels="HSV") - frame_containers[2].image(cv2.resize(frame, (144, 144)),channels="BGR") - if cv2.waitKey(1) & 0xFF == ord("q"): - break - vidcap.release() - cv2.destroyAllWindows() - - -## See ../../ros2/src/lanefollowingtest/LaneDetection.py -## This page should render ALL images in there, as well as publish +class Image_Handler(Node): + + def __init__(self): + self._tabs = None + self._img = None + self._frame_containers = None + super().__init__('Streamlit_Image_Handler') + self._bridge = CvBridge() + self.camData_subscriber = self.create_subscription( + Image, + '/camera/image', + self.camData_callback, + qos_profile_sensor_data) + + def tabs(self,tabs): + self._tabs = tabs + # SRC is cv2.VideoCapture FOR NOW: Will need to be tweaked in the future to rossify + self._frame_containers = [tab.empty() for tab in self._tabs] + + + def camData_callback(self, msg_in): + raw = msg_in + self._img = self._bridge.imgmsg_to_cv2(raw) + self.render_imgs() + + def render_imgs(self): + # SRC is cv2.VideoCapture FOR NOW: Will need to be tweaked in the future to rossify + if self._img is not None: + for frame_container in self._frame_containers: + frame_container.image(cv2.cvtColor(self._img,cv2.COLOR_BGR2RGB),channels="RGB") + +@st.cache_resource +def image_instantiator(): + rclpy.init(args=None) + return Image_Handler() + + + +# See ../../ros2/src/lanefollowingtest/LaneDetection.py +# This page should render ALL images in there, as well as publish if __name__ == "__main__": st.set_page_config( page_title="Lane Detection", page_icon="🛣") - st.write("This should render all of the images related to Lane Detection, and relevant parameters.") - st.checkbox("Display Parameters") - tabs = st.tabs(["Original","HSV", "Sliding Windows"]) - render_imgs("/dev/video0", (tabs)) + st.write( + "This should render 
all of the images related to Lane Detection, and relevant parameters.") + render = st.checkbox("Display Parameters") + tabs = st.tabs(["Original", "HSV", "Sliding Windows"]) + + if not render: + ros_node = image_instantiator() + ros_node.tabs(tabs) + rclpy.spin(ros_node) + ros_node.destroy_node() + rclpy.shutdown() - ##I Don't even know if we'll want this, but it's a nice to have anyway - Restart = st.button("ESTOP",type="primary") \ No newline at end of file + # I Don't even know if we'll want this, but it's a nice to have anyway + Restart = st.button("ESTOP", type="primary") diff --git a/ros2/src/lanefollowingtest/cam_publisher.py b/ros2/src/lanefollowingtest/cam_publisher.py new file mode 100644 index 000000000..a707fee78 --- /dev/null +++ b/ros2/src/lanefollowingtest/cam_publisher.py @@ -0,0 +1,63 @@ +import rclpy +from rclpy.node import Node +from sensor_msgs.msg import Image +from cv_bridge import CvBridge +import cv2 + + +class WebcamImagePublisher(Node): + + def __init__(self): + super().__init__('webcam_image_publisher') + self.logger = self.get_logger() + + # Set desired frame rate (may need adjustment based on hardware) + self.frame_rate = 60 + + # Initialize OpenCV video capture object + self.cap = cv2.VideoCapture("/dev/video0") # 0 for default webcam + + if not self.cap.isOpened(): + self.logger.error("Failed to open webcam!") + rclpy.shutdown() + return + + # Initialize CvBridge instance + self.bridge = CvBridge() + + # Create image publisher + self.image_pub = self.create_publisher( + Image, '/camera/image', 10) # Adjust queue size if needed + + # Timer to capture and publish images + self.timer = self.create_timer( + 1 / self.frame_rate, self.capture_and_publish) + + self.logger.info("Webcam image publisher node started!") + + def capture_and_publish(self): + ret, cv_image = self.cap.read() + + if not ret: + self.logger.warn("Failed to capture image!") + return + + # Optionally resize the image (comment out if not needed) + # resized_image = cv2.resize(cv_image, (width, height)) + + # Convert OpenCV image to ROS image message + ros_image = self.bridge.cv2_to_imgmsg(cv_image, encoding="bgr8") + print("published") + # Publish the image + self.image_pub.publish(ros_image) + + +def main(): + rclpy.init() + node = WebcamImagePublisher() + rclpy.spin(node) + rclpy.shutdown() + + +if __name__ == '__main__': + main() diff --git a/ros2/src/lanefollowingtest/camera_sanity_alt.py b/ros2/src/lanefollowingtest/camera_sanity_alt.py deleted file mode 100644 index 13a197164..000000000 --- a/ros2/src/lanefollowingtest/camera_sanity_alt.py +++ /dev/null @@ -1,13 +0,0 @@ -import cv2 - -vidcap_left = cv2.VideoCapture("/dev/video3") -vidcap_left.set(3,640) -vidcap_left.set(4,480) - - -while True: - left = vidcap_left.read() - if(left[0]): - cv2.imshow("left", left[1]) - if cv2.waitKey(10) == 27: - break diff --git a/ros2/src/lanefollowingtest/copypaste.py b/ros2/src/lanefollowingtest/copypaste.py deleted file mode 100644 index ee2dc6eec..000000000 --- a/ros2/src/lanefollowingtest/copypaste.py +++ /dev/null @@ -1,43 +0,0 @@ -import cv2 -import threading - -class camThread(threading.Thread): - def __init__(self, previewName, cam_label): - threading.Thread.__init__(self) - self.previewName = previewName - self.cam_label = cam_label - def run(self): - print ("Starting " + self.previewName) - camPreview(self.previewName, self.cam_label) - -def camPreview(previewName, cam_label): - cv2.namedWindow(previewName) - cam = cv2.VideoCapture(cam_label) - if cam.isOpened(): # try to get the first frame - 
rval, frame = cam.read() - else: - rval = False - while True: - cam.set(3,640) - cam.set(4,480) - frame = cam.read() - if(frame[0]): - cv2.imshow(previewName, frame[1]) - key = cv2.waitKey(20) - if key == 27: # exit on ESC - break - - - # rval, frame = cam.read() - # if(rval): - # cv2.imshow(previewName, frame) - # key = cv2.waitKey(20) - # cam.release() - - cv2.destroyWindow(previewName) - -# Create two threads as follows -thread1 = camThread("Camera 1", "/dev/video1") -thread2 = camThread("Camera 2", "/dev/video3") -thread1.start() -thread2.start() \ No newline at end of file From 95b74535b93b26c6841b95ce44f08d2090fab2e9 Mon Sep 17 00:00:00 2001 From: "Vaibhav.hariani@gmail.com" Date: Thu, 28 Mar 2024 20:39:09 -0400 Subject: [PATCH 03/20] Modified LaneDetection.py to publish images (in a very clean manner), and added subscriber functionality to the UI --- "UI/pages/\360\237\233\243 Lane_Detection.py" | 57 +++++++------ ros2/src/lanefollowingtest/LaneDetection.py | 79 +++++++++++-------- 2 files changed, 82 insertions(+), 54 deletions(-) diff --git "a/UI/pages/\360\237\233\243 Lane_Detection.py" "b/UI/pages/\360\237\233\243 Lane_Detection.py" index 7fa607102..5c49fa3c3 100644 --- "a/UI/pages/\360\237\233\243 Lane_Detection.py" +++ "b/UI/pages/\360\237\233\243 Lane_Detection.py" @@ -7,36 +7,42 @@ from cv_bridge import CvBridge import typing +IMG_CATEGORIES = ["raw","tf","sliding"] + class Image_Handler(Node): def __init__(self): - self._tabs = None - self._img = None + self._tab_dict = None self._frame_containers = None super().__init__('Streamlit_Image_Handler') self._bridge = CvBridge() - self.camData_subscriber = self.create_subscription( - Image, - '/camera/image', - self.camData_callback, - qos_profile_sensor_data) + image_labels = ("raw_left", "raw_right", "tf_left", "tf_right", "sliding_left", "sliding_right") + self._subscribers = (self.create_subscription(Image, "/" + label, lambda msg: self.camData_callback(msg,label),qos_profile_sensor_data) for label in image_labels) + def tabs(self,tabs): - self._tabs = tabs - # SRC is cv2.VideoCapture FOR NOW: Will need to be tweaked in the future to rossify - self._frame_containers = [tab.empty() for tab in self._tabs] + self._tab_dict = tabs + + + def camData_callback(self, msg_in,label): + print("Put image from %s" % label) + img = self._bridge.imgmsg_to_cv2(msg_in) + self.render_imgs(img,label) + + def render_imgs(self,img,label): + #Specifies which camera the image is from + which_cam = 0 + if img is not None: + type, cam_label = label.split("_") + match cam_label: + case "left": + which_cam = 0 + case "right": + which_cam = 1 + tab_dict[type][cam_label].image(cv2.cvtColor(self._img,cv2.COLOR_BGR2RGB),channels="RGB") - def camData_callback(self, msg_in): - raw = msg_in - self._img = self._bridge.imgmsg_to_cv2(raw) - self.render_imgs() - def render_imgs(self): - # SRC is cv2.VideoCapture FOR NOW: Will need to be tweaked in the future to rossify - if self._img is not None: - for frame_container in self._frame_containers: - frame_container.image(cv2.cvtColor(self._img,cv2.COLOR_BGR2RGB),channels="RGB") @st.cache_resource def image_instantiator(): @@ -53,12 +59,15 @@ def image_instantiator(): page_icon="🛣") st.write( "This should render all of the images related to Lane Detection, and relevant parameters.") - render = st.checkbox("Display Parameters") - tabs = st.tabs(["Original", "HSV", "Sliding Windows"]) + render = st.checkbox("Display Camera Output") + tabs = st.tabs(IMG_CATEGORIES) + imgs_w_places = tuple(zip(tabs, [tab.columns(2) for 
tab in tabs])) + #Dictionary + tab_dict = dict(zip(IMG_CATEGORIES,imgs_w_places)) - if not render: + if render: ros_node = image_instantiator() - ros_node.tabs(tabs) + ros_node.tabs(tab_dict) rclpy.spin(ros_node) ros_node.destroy_node() rclpy.shutdown() @@ -66,3 +75,5 @@ def image_instantiator(): # I Don't even know if we'll want this, but it's a nice to have anyway Restart = st.button("ESTOP", type="primary") + + diff --git a/ros2/src/lanefollowingtest/LaneDetection.py b/ros2/src/lanefollowingtest/LaneDetection.py index 4038d8ecc..e82038171 100644 --- a/ros2/src/lanefollowingtest/LaneDetection.py +++ b/ros2/src/lanefollowingtest/LaneDetection.py @@ -6,18 +6,19 @@ import rclpy from rclpy.node import Node from rclpy.qos import qos_profile_sensor_data - +from sensor_msgs.msg import Image +from cv_bridge import CvBridge import cv2 import numpy as np # Inputs from both cameras vidcap_left = cv2.VideoCapture("/dev/video0") -vidcap_left.set(3,640) -vidcap_left.set(4,480) -vidcap_right = cv2.VideoCapture("/dev/video2") -vidcap_right.set(3,640) -vidcap_right.set(4,480) +vidcap_left.set(3, 640) +vidcap_left.set(4, 480) +vidcap_right = cv2.VideoCapture("/dev/video4") +vidcap_right.set(3, 640) +vidcap_right.set(4, 480) # These are constants nwindows = 9 @@ -32,8 +33,8 @@ def nothing(x): pass -class Individual_Follower(): +class Individual_Follower(): def __init__(self): self._fit = None @@ -124,7 +125,7 @@ def Plot_Line(self, smoothen=False, prevFrameCount=6): return result -#These are constants for the timer callback: Should be modified by UI +# These are constants for the timer callback: Should be modified by UI l_h = 0 l_s = 0 l_v = 200 @@ -136,7 +137,7 @@ def Plot_Line(self, smoothen=False, prevFrameCount=6): lower = np.array([l_h, l_s, l_v]) upper = np.array([u_h, u_s, u_v]) -#Coordinates for the 4 alignment points: again, should be handled by the UI +# Coordinates for the 4 alignment points: again, should be handled by the UI bl = (12, 355) tl = (66, 304) br = (635, 344) @@ -148,9 +149,13 @@ def Plot_Line(self, smoothen=False, prevFrameCount=6): matrix = cv2.getPerspectiveTransform(pts1, pts2) + + class Lane_Follower(Node): + GUI = True def __init__(self): + super().__init__('lane_detection_node') self._tolerance = 0 self._left_follower = Individual_Follower() @@ -162,29 +167,41 @@ def __init__(self): # Publisher for error from the lane lines relative to the camera FOV self.camData_publisher = self.create_publisher( Float64, '/cam_data', 10) + + #GUI Controller Initializer + if Lane_Follower.GUI: + self._bridge = CvBridge() + image_labels = ("raw_left", "raw_right", "tf_left", "tf_right", "sliding_left", "sliding_right") + self._publishers = {label: self.create_publisher(Image, "/" + label, 10) for label in image_labels} + + def img_publish(self, label, img_raw): + self._publishers[label].publish(self._bridge.cv2_to_imgmsg(img_raw,encoding="bgr8")) + - def measure_position_meters(self,left,right): + def measure_position_meters(self, left, right): left_x_pos = 0 right_x_pos = 0 - # Will be the same for left side & right side y_max = self._left_follower._binary_warped.shape[0] - + # Calculate left and right line positions at the bottom of the image - if left is not None: + if left is not None: left_fit = self._left_follower._fit left_x_pos = left_fit[0]*y_max**2 + \ left_fit[1]*y_max + left_fit[2] - cv2.imshow("Result Left", left) + if(Lane_Follower.GUI): + self.img_publish("sliding_left", left) + cv2.imshow("Result Left", left) if right is not None: right_fit = self._right_follower._fit 
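# (annotation, not part of the diff) The fit is a quadratic x(y): the line
# below evaluates x = fit[0]*y**2 + fit[1]*y + fit[2] at y_max, the bottom
# row of the warped image and hence the row closest to the robot;
# np.polyval(right_fit, y_max) is an equivalent one-liner.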
right_x_pos = right_fit[0]*y_max**2 + \ right_fit[1]*y_max + right_fit[2] - cv2.imshow("Result Right", right) - + if(Lane_Follower.GUI): + self.img_publish("sliding_right", right) + cv2.imshow("Result Right", right) center_lanes_x_pos = (left_x_pos + right_x_pos)//2 # Calculate the deviation between the center of the lane and the center of the picture @@ -193,27 +210,25 @@ def measure_position_meters(self,left,right): veh_pos = ( (self._left_follower._binary_warped.shape[1]//2) - center_lanes_x_pos) * xm_per_pix - if(left is None): + if (left is None): veh_pos += 91 elif (right is None): veh_pos -= 91 return veh_pos / 100 - def timer_callback(self): success_l, image_l = vidcap_left.read() success_r, image_r = vidcap_right.read() - images = [(image_l, "Left"), (image_r, "Right")] - if not(success_l and success_r): + images = [(image_l, "left"), (image_r, "right")] + if not (success_l and success_r): return for image in images: frame = image[0] # frame = cv2.resize(image[0], (640, 480)) # I might've cooked with this list comprehension - for point in (bl,tl,br,tr): - frame = cv2.circle(frame,point,5,(0,0,255),-1) - + for point in (bl, tl, br, tr): + frame = cv2.circle(frame, point, 5, (0, 0, 255), -1) transformed_frame = cv2.warpPerspective( frame, matrix, (640, 480)) # Object Detection @@ -221,19 +236,22 @@ def timer_callback(self): hsv_transformed_frame = cv2.cvtColor( transformed_frame, cv2.COLOR_BGR2HSV) mask = cv2.inRange(hsv_transformed_frame, lower, upper) - if image[1] == "Left": + if image[1] == "left": self._left_follower.set_binwarp(binwarp=mask) else: self._right_follower.set_binwarp(binwarp=mask) - - cv2.imshow("Original " + image[1], frame) - cv2.imshow("Bird's Eye View " + image[1], transformed_frame) + + if(Lane_Follower.GUI): + self.img_publish("raw_" +image[1],frame) + self.img_publish("tf_" + image[1],transformed_frame) + cv2.imshow("Original " + image[1], frame) + cv2.imshow("Bird's Eye View " + image[1], transformed_frame) result_left = self._left_follower.Plot_Line() result_right = self._right_follower.Plot_Line() msg_out = Float64() - #TODO: Is this the behavior we want? Or do we need it to do something else if one of the lines is invalid? + # TODO: Is this the behavior we want? Or do we need it to do something else if one of the lines is invalid? 
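# (annotation, not part of the diff) Sentinel mismatch with vel_ctrl.py from
# [PATCH 01/20]: the else-branch below publishes +1000.0 once the lanes have
# been lost for more than TOLERANCE frames, but the velocity controller only
# stops for msg_in.data < -999.0, so +1000.0 would instead reach the
# P-controller as a huge steering error. One of the two signs likely needs
# to flip.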
if (result_left is not None or result_right is not None): pos = self.measure_position_meters(result_left, result_right) print(pos) @@ -242,14 +260,13 @@ def timer_callback(self): else: TOLERANCE = 100 self._tolerance += 1 - if(self._tolerance > TOLERANCE): + if (self._tolerance > TOLERANCE): msg_out.data = 1000.0 self.camData_publisher.publish(msg_out) - if cv2.waitKey(10) == 27: return - + def main(args=None): rclpy.init(args=args) # Initialize ROS2 program From 1c2be3519ac556814641960367d5ae01479227f4 Mon Sep 17 00:00:00 2001 From: "Vaibhav.hariani@gmail.com" Date: Fri, 29 Mar 2024 11:54:17 -0400 Subject: [PATCH 04/20] minor changes --- UI/Homepage.py | 1 + "UI/pages/\360\237\233\243 Lane_Detection.py" | 17 +++++------------ ros2/src/lanefollowingtest/LaneDetection.py | 11 +++++------ 3 files changed, 11 insertions(+), 18 deletions(-) diff --git a/UI/Homepage.py b/UI/Homepage.py index fab2d34da..29d44a6e7 100644 --- a/UI/Homepage.py +++ b/UI/Homepage.py @@ -11,6 +11,7 @@ def runner(): "Streamlit supports fast reloads, just refresh the python script to get going!") st.write("Docs are here: https://docs.streamlit.io/library/api-reference") st.write("This is also extremely relevant: https://stackoverflow.com/questions/67382181/how-to-use-streamlit-in-ros") + st.write("For Siann: You should love yourself, now!") diff --git "a/UI/pages/\360\237\233\243 Lane_Detection.py" "b/UI/pages/\360\237\233\243 Lane_Detection.py" index 5c49fa3c3..81606c661 100644 --- "a/UI/pages/\360\237\233\243 Lane_Detection.py" +++ "b/UI/pages/\360\237\233\243 Lane_Detection.py" @@ -5,7 +5,6 @@ from rclpy.node import Node from rclpy.qos import qos_profile_sensor_data from cv_bridge import CvBridge -import typing IMG_CATEGORIES = ["raw","tf","sliding"] @@ -17,6 +16,7 @@ def __init__(self): super().__init__('Streamlit_Image_Handler') self._bridge = CvBridge() image_labels = ("raw_left", "raw_right", "tf_left", "tf_right", "sliding_left", "sliding_right") + #Multithread the subscribers to separate rosnodes. 
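# (annotation, not part of the diff) Two Python pitfalls in the generator
# expression below: `lambda msg: self.camData_callback(msg, label)` late-binds
# `label`, so every callback would see the last label in the tuple; and a
# generator is lazy, so no subscription is created until it is iterated,
# which never happens here. A plausible fix binds the label as a default
# argument and materializes a list:
#
#     self._subscribers = [
#         self.create_subscription(
#             Image, "/" + label,
#             lambda msg, label=label: self.camData_callback(msg, label),
#             qos_profile_sensor_data)
#         for label in image_labels]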
self._subscribers = (self.create_subscription(Image, "/" + label, lambda msg: self.camData_callback(msg,label),qos_profile_sensor_data) for label in image_labels) @@ -25,21 +25,14 @@ def tabs(self,tabs): def camData_callback(self, msg_in,label): - print("Put image from %s" % label) + st.write("Put image from %s" % label) img = self._bridge.imgmsg_to_cv2(msg_in) - self.render_imgs(img,label) + self.render_imgs(img,label.split('_')[0]) def render_imgs(self,img,label): #Specifies which camera the image is from - which_cam = 0 if img is not None: - type, cam_label = label.split("_") - match cam_label: - case "left": - which_cam = 0 - case "right": - which_cam = 1 - tab_dict[type][cam_label].image(cv2.cvtColor(self._img,cv2.COLOR_BGR2RGB),channels="RGB") + tab_dict[label].image(cv2.cvtColor(self._img,cv2.COLOR_BGR2RGB),channels="RGB") @@ -60,7 +53,7 @@ def image_instantiator(): st.write( "This should render all of the images related to Lane Detection, and relevant parameters.") render = st.checkbox("Display Camera Output") - tabs = st.tabs(IMG_CATEGORIES) + tabs = st.tabs(IMG_CATEGORIES) imgs_w_places = tuple(zip(tabs, [tab.columns(2) for tab in tabs])) #Dictionary tab_dict = dict(zip(IMG_CATEGORIES,imgs_w_places)) diff --git a/ros2/src/lanefollowingtest/LaneDetection.py b/ros2/src/lanefollowingtest/LaneDetection.py index e82038171..0e5ba6a8e 100644 --- a/ros2/src/lanefollowingtest/LaneDetection.py +++ b/ros2/src/lanefollowingtest/LaneDetection.py @@ -1,8 +1,6 @@ # from irobot_create_msgs.msg import WheelTicks, WheelTicks -from geometry_msgs.msg import Twist from std_msgs.msg import Float64 - import rclpy from rclpy.node import Node from rclpy.qos import qos_profile_sensor_data @@ -169,6 +167,7 @@ def __init__(self): Float64, '/cam_data', 10) #GUI Controller Initializer + #/raw_left, /raw_right, /tf_left, /tf_right, /sliding_left, /sliding_right if Lane_Follower.GUI: self._bridge = CvBridge() image_labels = ("raw_left", "raw_right", "tf_left", "tf_right", "sliding_left", "sliding_right") @@ -193,7 +192,7 @@ def measure_position_meters(self, left, right): if(Lane_Follower.GUI): self.img_publish("sliding_left", left) - cv2.imshow("Result Left", left) + cv2.imshow("Result Left", left) if right is not None: right_fit = self._right_follower._fit @@ -201,7 +200,7 @@ def measure_position_meters(self, left, right): right_fit[1]*y_max + right_fit[2] if(Lane_Follower.GUI): self.img_publish("sliding_right", right) - cv2.imshow("Result Right", right) + cv2.imshow("Result Right", right) center_lanes_x_pos = (left_x_pos + right_x_pos)//2 # Calculate the deviation between the center of the lane and the center of the picture @@ -244,8 +243,8 @@ def timer_callback(self): if(Lane_Follower.GUI): self.img_publish("raw_" +image[1],frame) self.img_publish("tf_" + image[1],transformed_frame) - cv2.imshow("Original " + image[1], frame) - cv2.imshow("Bird's Eye View " + image[1], transformed_frame) + cv2.imshow("Original " + image[1], frame) + cv2.imshow("Bird's Eye View " + image[1], transformed_frame) result_left = self._left_follower.Plot_Line() result_right = self._right_follower.Plot_Line() From 0ec3b771e76dbfcbd53233539baf79459155436b Mon Sep 17 00:00:00 2001 From: "Vaibhav.hariani@gmail.com" Date: Tue, 9 Apr 2024 11:40:29 -0400 Subject: [PATCH 05/20] Cleaned up the repository: Trying to find ways to work with a real time stream in streamlit --- .gitignore | 8 + "UI/pages/\360\237\233\243 Lane_Detection.py" | 72 -- .../turtlebot_vel_ctrl/LICENSE | 202 ------ .../turtlebot_vel_ctrl/package.xml | 18 - 
.../resource/turtlebot_vel_ctrl | 0 .../turtlebot_vel_ctrl/setup.cfg | 4 - .../turtlebot_vel_ctrl/setup.py | 26 - .../turtlebot_vel_ctrl/test/test_copyright.py | 25 - .../turtlebot_vel_ctrl/test/test_flake8.py | 25 - .../turtlebot_vel_ctrl/test/test_pep257.py | 23 - .../turtlebot_vel_ctrl/__init__.py | 0 .../turtlebot_vel_ctrl/vel_ctrl.py | 66 -- {UI => ros2/UI}/.streamlit/config.toml | 0 {UI => ros2/UI}/Homepage.py | 0 .../\360\237\233\221 Object_Detection.py" | 0 .../pages/\360\237\233\243 Lane_Detection.py" | 68 ++ {UI => ros2/UI}/ui_generics.py | 0 ros2/lanefollowingtest/LaneDetection.py | 342 --------- ros2/lanefollowingtest/SobelVidCam.py | 665 ------------------ ros2/lanefollowingtest/VidCam.py | 457 ------------ ros2/lanefollowingtest/whitepxlClass.py | 95 --- ros2/log/latest_build | 2 +- ros2/src/lanefollowingtest/LaneDetection.py | 2 +- ros2/src/lanefollowingtest/cam_publisher.py | 17 +- 24 files changed, 88 insertions(+), 2029 deletions(-) delete mode 100644 "UI/pages/\360\237\233\243 Lane_Detection.py" delete mode 100644 ros2/TurtlebotController/turtlebot_vel_ctrl/LICENSE delete mode 100644 ros2/TurtlebotController/turtlebot_vel_ctrl/package.xml delete mode 100644 ros2/TurtlebotController/turtlebot_vel_ctrl/resource/turtlebot_vel_ctrl delete mode 100644 ros2/TurtlebotController/turtlebot_vel_ctrl/setup.cfg delete mode 100644 ros2/TurtlebotController/turtlebot_vel_ctrl/setup.py delete mode 100644 ros2/TurtlebotController/turtlebot_vel_ctrl/test/test_copyright.py delete mode 100644 ros2/TurtlebotController/turtlebot_vel_ctrl/test/test_flake8.py delete mode 100644 ros2/TurtlebotController/turtlebot_vel_ctrl/test/test_pep257.py delete mode 100644 ros2/TurtlebotController/turtlebot_vel_ctrl/turtlebot_vel_ctrl/__init__.py delete mode 100644 ros2/TurtlebotController/turtlebot_vel_ctrl/turtlebot_vel_ctrl/vel_ctrl.py rename {UI => ros2/UI}/.streamlit/config.toml (100%) rename {UI => ros2/UI}/Homepage.py (100%) rename "UI/pages/\360\237\233\221 Object_Detection.py" => "ros2/UI/pages/\360\237\233\221 Object_Detection.py" (100%) create mode 100644 "ros2/UI/pages/\360\237\233\243 Lane_Detection.py" rename {UI => ros2/UI}/ui_generics.py (100%) delete mode 100644 ros2/lanefollowingtest/LaneDetection.py delete mode 100644 ros2/lanefollowingtest/SobelVidCam.py delete mode 100644 ros2/lanefollowingtest/VidCam.py delete mode 100644 ros2/lanefollowingtest/whitepxlClass.py diff --git a/.gitignore b/.gitignore index 51cc300c0..b7c783b60 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,11 @@ __pycache__ build ros2/lanefollowingtest/LaneVideo.mp4 +log/COLCON_IGNORE +log/latest +log/latest_version-check +log/version-check_2024-03-28_13-31-27/logger_all.log +ros2/log/latest_build +ros2/log/build_2024-03-28_13-35-11/logger_all.log +ros2/log/build_2024-03-28_13-39-36/logger_all.log +ros2/log/latest_build diff --git "a/UI/pages/\360\237\233\243 Lane_Detection.py" "b/UI/pages/\360\237\233\243 Lane_Detection.py" deleted file mode 100644 index 81606c661..000000000 --- "a/UI/pages/\360\237\233\243 Lane_Detection.py" +++ /dev/null @@ -1,72 +0,0 @@ -import streamlit as st -import cv2 -from sensor_msgs.msg import Image -import rclpy -from rclpy.node import Node -from rclpy.qos import qos_profile_sensor_data -from cv_bridge import CvBridge - -IMG_CATEGORIES = ["raw","tf","sliding"] - -class Image_Handler(Node): - - def __init__(self): - self._tab_dict = None - self._frame_containers = None - super().__init__('Streamlit_Image_Handler') - self._bridge = CvBridge() - image_labels = ("raw_left", "raw_right", 
"tf_left", "tf_right", "sliding_left", "sliding_right") - #Multithread the subscribers to separate rosnodes. - self._subscribers = (self.create_subscription(Image, "/" + label, lambda msg: self.camData_callback(msg,label),qos_profile_sensor_data) for label in image_labels) - - - def tabs(self,tabs): - self._tab_dict = tabs - - - def camData_callback(self, msg_in,label): - st.write("Put image from %s" % label) - img = self._bridge.imgmsg_to_cv2(msg_in) - self.render_imgs(img,label.split('_')[0]) - - def render_imgs(self,img,label): - #Specifies which camera the image is from - if img is not None: - tab_dict[label].image(cv2.cvtColor(self._img,cv2.COLOR_BGR2RGB),channels="RGB") - - - - -@st.cache_resource -def image_instantiator(): - rclpy.init(args=None) - return Image_Handler() - - - -# See ../../ros2/src/lanefollowingtest/LaneDetection.py -# This page should render ALL images in there, as well as publish -if __name__ == "__main__": - st.set_page_config( - page_title="Lane Detection", - page_icon="🛣") - st.write( - "This should render all of the images related to Lane Detection, and relevant parameters.") - render = st.checkbox("Display Camera Output") - tabs = st.tabs(IMG_CATEGORIES) - imgs_w_places = tuple(zip(tabs, [tab.columns(2) for tab in tabs])) - #Dictionary - tab_dict = dict(zip(IMG_CATEGORIES,imgs_w_places)) - - if render: - ros_node = image_instantiator() - ros_node.tabs(tab_dict) - rclpy.spin(ros_node) - ros_node.destroy_node() - rclpy.shutdown() - - - # I Don't even know if we'll want this, but it's a nice to have anyway - Restart = st.button("ESTOP", type="primary") - - diff --git a/ros2/TurtlebotController/turtlebot_vel_ctrl/LICENSE b/ros2/TurtlebotController/turtlebot_vel_ctrl/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/ros2/TurtlebotController/turtlebot_vel_ctrl/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/ros2/TurtlebotController/turtlebot_vel_ctrl/package.xml b/ros2/TurtlebotController/turtlebot_vel_ctrl/package.xml deleted file mode 100644 index 7be7e8f94..000000000 --- a/ros2/TurtlebotController/turtlebot_vel_ctrl/package.xml +++ /dev/null @@ -1,18 +0,0 @@ - - - - turtlebot_vel_ctrl - 0.0.0 - TODO: Package description - root - Apache-2.0 - - ament_copyright - ament_flake8 - ament_pep257 - python3-pytest - - - ament_python - - diff --git a/ros2/TurtlebotController/turtlebot_vel_ctrl/resource/turtlebot_vel_ctrl b/ros2/TurtlebotController/turtlebot_vel_ctrl/resource/turtlebot_vel_ctrl deleted file mode 100644 index e69de29bb..000000000 diff --git a/ros2/TurtlebotController/turtlebot_vel_ctrl/setup.cfg b/ros2/TurtlebotController/turtlebot_vel_ctrl/setup.cfg deleted file mode 100644 index 9060e4b44..000000000 --- a/ros2/TurtlebotController/turtlebot_vel_ctrl/setup.cfg +++ /dev/null @@ -1,4 +0,0 @@ -[develop] -script_dir=$base/lib/turtlebot_vel_ctrl -[install] -install_scripts=$base/lib/turtlebot_vel_ctrl diff --git a/ros2/TurtlebotController/turtlebot_vel_ctrl/setup.py b/ros2/TurtlebotController/turtlebot_vel_ctrl/setup.py deleted file mode 100644 index 33e25ed6d..000000000 --- a/ros2/TurtlebotController/turtlebot_vel_ctrl/setup.py +++ /dev/null @@ -1,26 +0,0 @@ -from setuptools import find_packages, setup - -package_name = 'turtlebot_vel_ctrl' - -setup( - name=package_name, - version='0.0.0', - packages=find_packages(exclude=['test']), - data_files=[ - ('share/ament_index/resource_index/packages', - ['resource/' + package_name]), - ('share/' + package_name, ['package.xml']), - ], - install_requires=['setuptools'], - zip_safe=True, - maintainer='root', - maintainer_email='root@todo.todo', - description='TODO: Package description', - license='Apache-2.0', - tests_require=['pytest'], - entry_points={ - 'console_scripts': [ - 'vel_ctrl = turtlebot_vel_ctrl.vel_ctrl:main' - ], - }, -) diff --git a/ros2/TurtlebotController/turtlebot_vel_ctrl/test/test_copyright.py b/ros2/TurtlebotController/turtlebot_vel_ctrl/test/test_copyright.py deleted file mode 100644 index 97a39196e..000000000 --- a/ros2/TurtlebotController/turtlebot_vel_ctrl/test/test_copyright.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2015 Open Source Robotics Foundation, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from ament_copyright.main import main -import pytest - - -# Remove the `skip` decorator once the source file(s) have a copyright header -@pytest.mark.skip(reason='No copyright header has been placed in the generated source file.') -@pytest.mark.copyright -@pytest.mark.linter -def test_copyright(): - rc = main(argv=['.', 'test']) - assert rc == 0, 'Found errors' diff --git a/ros2/TurtlebotController/turtlebot_vel_ctrl/test/test_flake8.py b/ros2/TurtlebotController/turtlebot_vel_ctrl/test/test_flake8.py deleted file mode 100644 index 27ee1078f..000000000 --- a/ros2/TurtlebotController/turtlebot_vel_ctrl/test/test_flake8.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2017 Open Source Robotics Foundation, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ament_flake8.main import main_with_errors -import pytest - - -@pytest.mark.flake8 -@pytest.mark.linter -def test_flake8(): - rc, errors = main_with_errors(argv=[]) - assert rc == 0, \ - 'Found %d code style errors / warnings:\n' % len(errors) + \ - '\n'.join(errors) diff --git a/ros2/TurtlebotController/turtlebot_vel_ctrl/test/test_pep257.py b/ros2/TurtlebotController/turtlebot_vel_ctrl/test/test_pep257.py deleted file mode 100644 index b234a3840..000000000 --- a/ros2/TurtlebotController/turtlebot_vel_ctrl/test/test_pep257.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2015 Open Source Robotics Foundation, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from ament_pep257.main import main -import pytest - - -@pytest.mark.linter -@pytest.mark.pep257 -def test_pep257(): - rc = main(argv=['.', 'test']) - assert rc == 0, 'Found code style errors / warnings' diff --git a/ros2/TurtlebotController/turtlebot_vel_ctrl/turtlebot_vel_ctrl/__init__.py b/ros2/TurtlebotController/turtlebot_vel_ctrl/turtlebot_vel_ctrl/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/ros2/TurtlebotController/turtlebot_vel_ctrl/turtlebot_vel_ctrl/vel_ctrl.py b/ros2/TurtlebotController/turtlebot_vel_ctrl/turtlebot_vel_ctrl/vel_ctrl.py deleted file mode 100644 index 8eb38b294..000000000 --- a/ros2/TurtlebotController/turtlebot_vel_ctrl/turtlebot_vel_ctrl/vel_ctrl.py +++ /dev/null @@ -1,66 +0,0 @@ -# from irobot_create_msgs.msg import WheelTicks, WheelTicks -from geometry_msgs.msg import Twist -from std_msgs.msg import Float64 - -import rclpy -from rclpy.node import Node -from rclpy.qos import qos_profile_sensor_data - -# publishing twist message top the turtle bot -# subscribing to whatever the velocity and angle topics - -class TurtleBot4VelCtrlNode(Node): - - def __init__(self): - super().__init__('turtlebot4_velctrl_node') - - self.camData_subscriber = self.create_subscription( - Float64, - '/cam_data', - self.camData_callback, - qos_profile_sensor_data) - - - self.vel_publisher = self.create_publisher(Twist,'robot_0/cmd_vel', 10) - - def camData_callback(self,msg_in): - msg_out = Twist() - - # no camera data is found. set velocity to zero - if msg_in.data < -999.0: - msg_out.linear.x = 0.0 - msg_out.linear.y = 0.0 - msg_out.linear.z = 0.0 - - msg_out.angular.x = 0.0 - msg_out.angular.y = 0.0 - msg_out.angular.z = 0.0 - - #1 or both lanes are detected and error is send as usual - else: - k = 1.0 - err_z = msg_in.data - - ang_input = err_z * k - - msg_out.linear.x = 0.1 #define constant speed - msg_out.linear.y = 0.0 - msg_out.linear.z = 0.0 - - msg_out.angular.x = 0.0 - msg_out.angular.y = 0.0 - msg_out.angular.z = ang_input - - self.vel_publisher.publish(msg_out) - -def main(args=None): - rclpy.init(args=args) # Initialize ROS2 program - node = TurtleBot4VelCtrlNode() # Instantiate Node - rclpy.spin(node) # Puts the node in an infinite loop - - # Clean shutdown should be at the end of every node - node.destroy_node() - rclpy.shutdown() - -if __name__ == '__main__': - main() diff --git a/UI/.streamlit/config.toml b/ros2/UI/.streamlit/config.toml similarity index 100% rename from UI/.streamlit/config.toml rename to ros2/UI/.streamlit/config.toml diff --git a/UI/Homepage.py b/ros2/UI/Homepage.py similarity index 100% rename from UI/Homepage.py rename to ros2/UI/Homepage.py diff --git "a/UI/pages/\360\237\233\221 Object_Detection.py" "b/ros2/UI/pages/\360\237\233\221 Object_Detection.py" similarity index 100% rename from "UI/pages/\360\237\233\221 Object_Detection.py" rename to "ros2/UI/pages/\360\237\233\221 Object_Detection.py" diff --git "a/ros2/UI/pages/\360\237\233\243 Lane_Detection.py" "b/ros2/UI/pages/\360\237\233\243 Lane_Detection.py" new file mode 100644 index 000000000..837d1e090 --- /dev/null +++ "b/ros2/UI/pages/\360\237\233\243 Lane_Detection.py" @@ -0,0 +1,68 @@ +import streamlit as st +import cv2 +from sensor_msgs.msg import Image +import rclpy +from rclpy.node import Node +from rclpy.qos import qos_profile_sensor_data +from cv_bridge import CvBridge + +class Image_Handler(Node): + + def __init__(self): + self._tabs = None + self._img = None + self._frame_containers = None + super().__init__('Streamlit_Image_Handler') + 
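# (annotation, not part of the diff) CvBridge converts between
# sensor_msgs/msg/Image and OpenCV/numpy arrays; imgmsg_to_cv2() is what
# camData_callback uses below to turn the incoming ROS message into
# something cv2 and st.image can render.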
self._bridge = CvBridge() + self.camData_subscriber = self.create_subscription( + Image, + '/camera/image', + self.camData_callback, + qos_profile_sensor_data) + + def tabs(self,tabs): + self._tabs = tabs + # SRC is cv2.VideoCapture FOR NOW: Will need to be tweaked in the future to rossify + self._frame_containers = [tab.empty() for tab in self._tabs] + + + def camData_callback(self, msg_in): + raw = msg_in + img = self._bridge.imgmsg_to_cv2(raw) + self._frame_containers[0].image(img) + st.write("IMAGE RECIEVED") + +@st.cache_data +def startup(): + rclpy.init() + return None + +def render(): + if("checkbox" not in st.session_state or st.session_state["checkbox"]): + + +# See ../../ros2/src/lanefollowingtest/LaneDetection.py +# This page should render ALL images in there, as well as publish +if __name__ == "__main__": + st.set_page_config( + page_title="Lane Detection", + page_icon="🛣") + startup() + + st.session_state["checkbox"] = st.checkbox("Render Video Feed",callback = render) + + st.write( + "This should render all of the images related to Lane Detection, and relevant parameters.") + tabs = st.tabs(["Original", "HSV", "Sliding Windows"]) + + if "node" not in st.session_state: + handler = Image_Handler() + handler.tabs(tabs) + st.session_state["node"] = handler + if(render): + rclpy.spin(st.session_state['node']) + st.session_state['node'].destroy_node() + rclpy.shutdown() + + # I Don't even know if we'll want this, but it's a nice to have anyway + Restart = st.button("ESTOP", type="primary") \ No newline at end of file diff --git a/UI/ui_generics.py b/ros2/UI/ui_generics.py similarity index 100% rename from UI/ui_generics.py rename to ros2/UI/ui_generics.py diff --git a/ros2/lanefollowingtest/LaneDetection.py b/ros2/lanefollowingtest/LaneDetection.py deleted file mode 100644 index 78a933de3..000000000 --- a/ros2/lanefollowingtest/LaneDetection.py +++ /dev/null @@ -1,342 +0,0 @@ -import cv2 -import numpy as np - -vidcap = cv2.VideoCapture("/dev/video0") -success, image = vidcap.read() - -def nothing(x): - pass - -class Lane_Follower(): - - def __init__(self): - self._left_fit = None - self._right_fit = None - self._binary_warped = None - - def set_binwarp(self, binwarp): - self._binary_warped = binwarp - - def Plot_line(self, smoothen=False,prevFrameCount=6): #used Udacity's code to plot the lines and windows over lanes - histogram = np.sum(self._binary_warped[self._binary_warped.shape[0]//2:, :], axis=0) - # histogram = np.sum(self._binary_warped[self._binary_warped.shape[0]//2:,:], axis=0) - # Create an output image to draw on and visualize the result - out_img = np.dstack((self._binary_warped, self._binary_warped, self._binary_warped))*255 - # Find the peak of the left and right halves of the histogram - # These will be the starting point for the left and right lines - midpoint = np.int32(histogram.shape[0]/2) - leftx_base = np.argmax(histogram[:midpoint]) - rightx_base = np.argmax(histogram[midpoint:]) + midpoint - lane_width= abs(rightx_base-leftx_base) - # Choose the number of sliding windows - nwindows = 9 - # Set height of windows - window_height = np.int32(self._binary_warped.shape[0]/nwindows) - # Identify the x and y positions of all nonzero pixels in the image - nonzero = self._binary_warped.nonzero() - nonzeroy = np.array(nonzero[0]) - nonzerox = np.array(nonzero[1]) - # Current positions to be updated for each window - leftx_current = leftx_base - rightx_current = rightx_base - # Set the width of the windows +/- margin - margin = 100 - # Set minimum number of pixels 
found to recenter window - minpix = 20 - # Create empty lists to receive left and right lane pixel indices - left_lane_inds = [] - right_lane_inds = [] - - # Step through the windows one by one - for window in range(nwindows): - # Identify window boundaries in x and y (and right and left) - win_y_low = self._binary_warped.shape[0] - (window+1)*window_height - win_y_high = self._binary_warped.shape[0] - window*window_height - win_xleft_low = leftx_current - margin - win_xleft_high = leftx_current + margin - win_xright_low = rightx_current - margin - win_xright_high = rightx_current + margin - # Draw the windows on the visualization image - cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high), - (0,255,0), 2) - cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high), - (0,255,0), 2) - # Identify the nonzero pixels in x and y within the window - good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & - (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0] - good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & - (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0] - # Append these indices to the lists - left_lane_inds.append(good_left_inds) - right_lane_inds.append(good_right_inds) - # If you found > minpix pixels, recenter next window on their mean position - if len(good_left_inds) > minpix: - leftx_current = np.int32(np.mean(nonzerox[good_left_inds])) - if len(good_right_inds) > minpix: - rightx_current = np.int32(np.mean(nonzerox[good_right_inds])) - - # Concatenate the arrays of indices - try: - left_lane_inds = np.concatenate(left_lane_inds) - right_lane_inds = np.concatenate(right_lane_inds) - except ValueError: - pass - - - # Extract left and right line pixel positions - leftx = nonzerox[left_lane_inds] - lefty = nonzeroy[left_lane_inds] - rightx = nonzerox[right_lane_inds] - righty = nonzeroy[right_lane_inds] - - - # Fit a second order polynomial to each - if(lefty.any() and leftx.any()): - self._left_fit = np.polyfit(lefty, leftx, 2) - else: - return None - if(righty.any() and rightx.any()): - self._right_fit = np.polyfit(righty, rightx, 2) - else: - return None - - # Ideas. 
Either use a from pervious point thing - # simple idea: just filter out the empty spots - - if(smoothen): - global fit_prev_left - global fit_prev_right - global fit_sum_left - global fit_sum_right - if(len(fit_prev_left)>prevFrameCount): - fit_sum_left-= fit_prev_left.pop(0) - fit_sum_right-= fit_prev_right.pop(0) - - fit_prev_left.append(self._left_fit) - fit_prev_right.append(self._right_fit) - fit_sum_left+=self._left_fit - fit_sum_right+= self._right_fit - - no_of_fit_values=len(fit_prev_left) - self._left_fit= fit_sum_left/no_of_fit_values - self._right_fit= fit_sum_right/no_of_fit_values - - - ploty = np.linspace(0, self._binary_warped.shape[0]-1, self._binary_warped.shape[0] ) - left_fitx = self._left_fit[0]*ploty**2 + self._left_fit[1]*ploty + self._left_fit[2] - right_fitx = self._right_fit[0]*ploty**2 + self._right_fit[1]*ploty + self._right_fit[2] - - out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0] - out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255] - - nonzero = self._binary_warped.nonzero() - nonzeroy = np.array(nonzero[0]) - nonzerox = np.array(nonzero[1]) - - window_img = np.zeros_like(out_img) - # Generate a polygon to illustrate the search window area - # And recast the x and y points into usable format for cv2.fillPoly() - left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))]) - left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, - ploty])))]) - left_line_pts = np.hstack((left_line_window1, left_line_window2)) - right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))]) - right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, - ploty])))]) - right_line_pts = np.hstack((right_line_window1, right_line_window2)) - - # Draw the lane onto the warped blank image - cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0)) - cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0)) - - left_line_pts = np.array([np.transpose(np.vstack([left_fitx, ploty]))], dtype=np.int32) - right_line_pts = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))], dtype=np.int32) - - cv2.polylines(out_img, left_line_pts, isClosed=False, color=(0, 255, 255), thickness=3) - cv2.polylines(out_img, right_line_pts, isClosed=False, color=(0, 255, 255), thickness=3) - - result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0) - - return out_img, result, left_fitx,right_fitx,ploty,self._left_fit, self._right_fit, \ - left_lane_inds,right_lane_inds,lane_width - - def measure_position_meters(self): - # Define conversion in x from pixels space to meters - - # center is roughly 0.005 - xm_per_pix = 1 # meters per pixel in x dimension - # Choose the y value corresponding to the bottom of the image - y_max = self._binary_warped.shape[0] - # Calculate left and right line positions at the bottom of the image - left_x_pos = self._left_fit[0]*y_max**2 + self._left_fit[1]*y_max + self._left_fit[2] - right_x_pos = self._right_fit[0]*y_max**2 + self._right_fit[1]*y_max + self._right_fit[2] - # Calculate the x position of the center of the lane - center_lanes_x_pos = (left_x_pos + right_x_pos)//2 - # Calculate the deviation between the center of the lane and the center of the picture - # The car is assumed to be placed in the center of the picture - # If the deviation is negative, the car is on the felt hand side of the center of the lane - veh_pos = ((self._binary_warped.shape[1]//2) - center_lanes_x_pos) * xm_per_pix - veh_pos = 
abs(veh_pos) / 40000 - return veh_pos - - - - - -if __name__ == "__main__": - cv2.namedWindow("Trackbars") - - cv2.createTrackbar("L - H", "Trackbars", 0, 255, nothing) - cv2.createTrackbar("L - S", "Trackbars", 0, 255, nothing) - cv2.createTrackbar("L - V", "Trackbars", 200, 255, nothing) - cv2.createTrackbar("U - H", "Trackbars", 255, 255, nothing) - cv2.createTrackbar("U - S", "Trackbars", 50, 255, nothing) - cv2.createTrackbar("U - V", "Trackbars", 255, 255, nothing) - Detector = Lane_Follower() - - while success: - success, image = vidcap.read() - frame = cv2.resize(image, (640,480)) - - # ## Choosing points for perspective transformation - # THESE ARE PRESET, MODIFYING - # tl = (222,387) - # bl = (70 ,472) - # tr = (400,380) - # br = (538,472) - bl = (12,355) - tl = (66,304) - br = (635,344) - tr = (595,308) - - - cv2.circle(frame, tl, 5, (0,0,255), -1) - cv2.circle(frame, bl, 5, (0,0,255), -1) - cv2.circle(frame, tr, 5, (0,0,255), -1) - cv2.circle(frame, br, 5, (0,0,255), -1) - - ## Aplying perspective transformation - pts1 = np.float32([tl, bl, tr, br]) - pts2 = np.float32([[0, 0], [0, 480], [640, 0], [640, 480]]) - - # Matrix to warp the image for birdseye window - matrix = cv2.getPerspectiveTransform(pts1, pts2) - transformed_frame = cv2.warpPerspective(frame, matrix, (640,480)) - - ### Object Detection - # Image Thresholding - hsv_transformed_frame = cv2.cvtColor(transformed_frame, cv2.COLOR_BGR2HSV) - - l_h = cv2.getTrackbarPos("L - H", "Trackbars") - l_s = cv2.getTrackbarPos("L - S", "Trackbars") - l_v = cv2.getTrackbarPos("L - V", "Trackbars") - u_h = cv2.getTrackbarPos("U - H", "Trackbars") - u_s = cv2.getTrackbarPos("U - S", "Trackbars") - u_v = cv2.getTrackbarPos("U - V", "Trackbars") - - lower = np.array([l_h,l_s,l_v]) - upper = np.array([u_h,u_s,u_v]) - mask = cv2.inRange(hsv_transformed_frame, lower, upper) - - Detector.set_binwarp(binwarp=mask) - - - #Histogram - histogram = np.sum(mask[mask.shape[0]//2:, :], axis=0) - midpoint = np.int32(histogram.shape[0]/2) - left_base = np.argmax(histogram[:midpoint]) - right_base = np.argmax(histogram[midpoint:]) + midpoint - - #Sliding Window - y = 472 - lx = [] - rx = [] - - # Set the width of the windows +/- margin - margin = 100 - # Set minimum number of pixels found to recenter window - minpix = 50 - - msk = mask.copy() - output = Detector.Plot_line() - if(output is not None): - out_img, result, left_fitx,right_fitx, \ - ploty,left_fit, right_fit, left_lane_inds,right_lane_inds,lane_width = output - pos = Detector.measure_position_meters() - print(pos) - while y>0: - nonzero = msk.nonzero() - nonzeroy = np.array(nonzero[0]) - nonzerox = np.array(nonzero[1]) - # print(nonzero) - - - ## Left threshold - img = mask[y-40:y, left_base-50:left_base+50] - contours, _ = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - for contour in contours: - M = cv2.moments(contour) - if M["m00"] != 0: - cx = int(M["m10"]/M["m00"]) - cy = int(M["m01"]/M["m00"]) - lx.append(left_base-50 + cx) - left_base = left_base-50 + cx - - ## Right threshold - img = mask[y-40:y, right_base-50:right_base+50] - contours, _ = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - for contour in contours: - M = cv2.moments(contour) - if M["m00"] != 0: - cx = int(M["m10"]/M["m00"]) - cy = int(M["m01"]/M["m00"]) - rx.append(right_base-50 + cx) - right_base = right_base-50 + cx - - - - cv2.rectangle(msk, (left_base-50,y), (left_base+50,y-40), (255,255,255), 2) - cv2.rectangle(msk, (right_base-50,y), (right_base+50,y-40), (255,255,255), 2) - y 
-= 40 - try: - leftx = nonzerox[lx] - lefty = nonzeroy[lx] - rightx = nonzerox[rx] - righty = nonzeroy[rx] - except: - print("error this time around, retrying") - # cv2.imshow("Outimg" , out_img) - cv2.imshow("R",result) - - cv2.imshow("Original", frame) - cv2.imshow("Bird's Eye View", transformed_frame) - cv2.imshow("Lane Detection - Image Thresholding", mask) - cv2.imshow("Lane Detection - Sliding Windows", msk) - # Display the result - if cv2.waitKey(10) == 27: - break - - # if bool(lx) and bool(rx): - # left_fit = np.polyfit(lefty, leftx, 2) - # right_fit = np.polyfit(righty, rightx, 2) - - # # Generate y values for the entire height of the image - # ploty = np.linspace(0, transformed_frame.shape[0] - 1, transformed_frame.shape[0]) - - # # Generate x values using the polynomial fits - # left_fitx = np.polyval(left_fit, ploty) - # right_fitx = np.polyval(right_fit, ploty) - - # # Create an image to draw the lane lines - # line_image = np.zeros_like(msk) - - # # Draw the left lane line - # for i in range(len(left_fitx)): - # cv2.circle(line_image, (int(left_fitx[i]), int(ploty[i])), 1, 255, -1) - - # # Draw the right lane line - # for i in range(len(right_fitx)): - # cv2.circle(line_image, (int(right_fitx[i]), int(ploty[i])), 1, 255, -1) - - # # Combine the original image with the drawn lane lines - # result = cv2.addWeighted(mask, 1, cv2.cvtColor(line_image, cv2.COLOR_GRAY2BGR), 0.3, 0) diff --git a/ros2/lanefollowingtest/SobelVidCam.py b/ros2/lanefollowingtest/SobelVidCam.py deleted file mode 100644 index 57acfd4cd..000000000 --- a/ros2/lanefollowingtest/SobelVidCam.py +++ /dev/null @@ -1,665 +0,0 @@ -import numpy as np -import cv2 -import matplotlib.pyplot as plt -from moviepy.editor import VideoFileClip -from queue import Queue -import os -import glob -from whitepxlClass import WhitePixelDetector - - - -class CameraCalibration: - def __init__(self, nx, ny, chessboard_dir,SHOW_PLOT=False): - self.NX = nx - self.NY = ny - self.CHESSBOARD_DIR = chessboard_dir - self.show_plot = SHOW_PLOT - - def get_chessboard_corners(self, imgs): - """Return image and object points from a set of chessboard images.""" - if not isinstance(imgs, list): - raise ValueError("imgs parameter needs to be a list.") - - # Initialize 3D object points - objp = np.zeros((self.NX * self.NY, 3), np.float32) - objp[:, :2] = np.mgrid[0:self.NX, 0:self.NY].T.reshape(-1, 2) - - imgps = [] - objps = [] - - for img in imgs: - gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - found, corners = cv2.findChessboardCorners(gray, (self.NX, self.NY), None) - if not found: - continue - imgps.append(corners) - objps.append(objp) - - return imgps, objps - - def chessboard_cam_calib(self, imgps, objps, img_size): - """Returns camera calibration matrix and distortion coefficients.""" - ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objps, imgps, img_size, None, None) - return mtx, dist - - def plots_and_calibration(self): - img_locs = glob.glob(self.CHESSBOARD_DIR) - # print(img_locs) - - # Read all images into a list - imgs = [cv2.imread(img_loc) for img_loc in img_locs] - - - - # Get size of images using one image as a sample, with width as the first element - img_size = imgs[0].shape[::-1][1:] - - # Calibrate camera and retrieve calibration matrix and distortion coefficients - imgps, objps = self.get_chessboard_corners(imgs) - MTX, DIST = self.chessboard_cam_calib(imgps, objps, img_size) - - if self.show_plot: - - # Set up figure for plotting - f, axarr = plt.subplots(len(imgs), 3) - f.set_size_inches(15, 30) - - # Loop through 
images, undistort them, and draw corners on undistorted versions. - for i, img in enumerate(imgs): - # Set column headings on figure - if i == 0: - axarr[i, 0].set_title("Original Image") - axarr[i, 1].set_title("Undistorted Image") - axarr[i, 2].set_title("Corners Drawn") - - # Generate new undistorted image - undist = cv2.undistort(img, MTX, DIST, None, MTX) - undist_copy = undist.copy() - - # Generate new image with corner points drawn - undist_grey = cv2.cvtColor(undist_copy, cv2.COLOR_BGR2GRAY) - found, corners = cv2.findChessboardCorners(undist_grey, (self.NX, self.NY), None) - - if found: - drawn = cv2.drawChessboardCorners(undist_copy, (self.NX, self.NY), corners, found) - else: - drawn = img - - # Plot images on figure - axarr[i, 0].imshow(img) - axarr[i, 0].axis('off') - axarr[i, 1].imshow(undist) - axarr[i, 1].axis('off') - axarr[i, 2].imshow(drawn) - axarr[i, 2].axis('off') - return MTX,DIST - - -def warp_image(img,src,dst): - img_size = (img.shape[1], img.shape[0]) - M= cv2.getPerspectiveTransform(src, dst) - inv= cv2.getPerspectiveTransform(dst, src) - warped= cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR) - return warped,inv - -def reverse_warping(img,M): - img_size = (img.shape[1], img.shape[0]) - unwarped= cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR) - return unwarped - -def channelwise_thresholding(image,thresh): - image = image*(255/np.max(image)) - # print(image) - # 2) Apply a threshold to the L channel - binary_output = np.zeros_like(image) - binary_output[(image > thresh[0]) & (image <= thresh[1])] = 1 - return binary_output - -def Custom_channel_converter(img): - - img1=cv2.cvtColor(img,cv2.COLOR_RGB2YCrCb)[:,:,0] # Y channel - img2=cv2.cvtColor(img,cv2.COLOR_RGB2YCrCb)[:,:,1] #Cr channel - img3=cv2.cvtColor(img,cv2.COLOR_RGB2HLS)[:,:,1] #L channel - img4=cv2.cvtColor(img,cv2.COLOR_RGB2HLS)[:,:,2] #S channel - return img1, img2, img3, img4 - -def sobel_image(img, orient='x', thresh_min=0, thresh_max=255, convert=True): - - - gray= img - if(convert): - gray= cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) - - sobel=None - if(orient=='x'): - sobel= cv2.Sobel(gray, cv2.CV_64F, 1,0) - else: - sobel= cv2.Sobel(gray, cv2.CV_64F, 0,1) - - sobel_abs= np.absolute(sobel) - sobel_8bit= np.uint8(255* sobel_abs/np.max(sobel_abs)) - binary_output= np.zeros_like(sobel_8bit) - binary_output[(sobel_8bit>=thresh_min) & (thresh_max>=sobel_8bit)]=1 - - return binary_output - -def sobel_gradient_image(img, thresh=(0, np.pi/2), convert=True): - gray= img - if(convert): - gray= cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) - - sobelx= cv2.Sobel(gray, cv2.CV_64F, 1,0, ksize=15) - sobely= cv2.Sobel(gray, cv2.CV_64F, 0,1, ksize=15) - - abs_sobelx= np.absolute(sobelx) - abs_sobely= np.absolute(sobely) - - grad= np.arctan2(abs_sobely, abs_sobelx) - - binary_output=np.zeros_like(grad) - binary_output[(grad>thresh[0])&(grad= win_y_low) & (nonzeroy < win_y_high) & - (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0] - good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & - (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0] - # Append these indices to the lists - left_lane_inds.append(good_left_inds) - right_lane_inds.append(good_right_inds) - # If you found > minpix pixels, recenter next window on their mean position - if len(good_left_inds) > minpix: - leftx_current = np.int32(np.mean(nonzerox[good_left_inds])) - if len(good_right_inds) > minpix: - rightx_current = np.int32(np.mean(nonzerox[good_right_inds])) - - # 
Concatenate the arrays of indices - left_lane_inds = np.concatenate(left_lane_inds) - right_lane_inds = np.concatenate(right_lane_inds) - - # Extract left and right line pixel positions - leftx = nonzerox[left_lane_inds] - lefty = nonzeroy[left_lane_inds] - rightx = nonzerox[right_lane_inds] - righty = nonzeroy[right_lane_inds] - - # Fit a second order polynomial to each - left_fit = np.polyfit(lefty, leftx, 2) - right_fit = np.polyfit(righty, rightx, 2) - - if(smoothen): - global fit_prev_left - global fit_prev_right - global fit_sum_left - global fit_sum_right - if(len(fit_prev_left)>prevFrameCount): - fit_sum_left-= fit_prev_left.pop(0) - fit_sum_right-= fit_prev_right.pop(0) - - fit_prev_left.append(left_fit) - fit_prev_right.append(right_fit) - fit_sum_left+=left_fit - fit_sum_right+= right_fit - - no_of_fit_values=len(fit_prev_left) - left_fit= fit_sum_left/no_of_fit_values - right_fit= fit_sum_right/no_of_fit_values - - - ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] ) - left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2] - right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2] - - out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0] - out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255] - - nonzero = binary_warped.nonzero() - nonzeroy = np.array(nonzero[0]) - nonzerox = np.array(nonzero[1]) - - window_img = np.zeros_like(out_img) - # Generate a polygon to illustrate the search window area - # And recast the x and y points into usable format for cv2.fillPoly() - left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))]) - left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, - ploty])))]) - left_line_pts = np.hstack((left_line_window1, left_line_window2)) - right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))]) - right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, - ploty])))]) - right_line_pts = np.hstack((right_line_window1, right_line_window2)) - - # Draw the lane onto the warped blank image - cv2.fillPoly(window_img, np.int32([left_line_pts]), (0,255, 0)) - cv2.fillPoly(window_img, np.int32([right_line_pts]), (0,255, 0)) - result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0) - - return out_img, result, left_fitx,right_fitx,ploty,left_fit, right_fit,left_lane_inds,right_lane_inds,lane_width - -def draw_lane(original_img, Combined_img, left_fitx, right_fitx, M): - new_img = np.copy(original_img) - warp_zero = np.zeros_like(Combined_img).astype(np.uint8) - color_warp = np.dstack((warp_zero, warp_zero, warp_zero)) - - h,w = Combined_img.shape - ploty = np.linspace(0, h-1, num=h) - - # print("left_fitx shape:", left_fitx.shape) - # print("ploty shape:", ploty.shape) - - if len(left_fitx) != len(ploty): - # Adjust the length of left_fitx to match the length of ploty - left_fitx = left_fitx[:len(ploty)] - - if len(right_fitx) != len(ploty): - # Adjust the length of left_fitx to match the length of ploty - right_fitx = right_fitx[:len(ploty)] - - - pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))]) - pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))]) - pts = np.hstack((pts_left, pts_right)) - - cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0)) - cv2.polylines(color_warp, np.int32([pts_left]), isClosed=False, color=(255,0,0), thickness=15) - cv2.polylines(color_warp, np.int32([pts_right]), isClosed=False, color=(255,0,0), 
thickness=15) - - return color_warp, new_img - -center_distances= Queue(maxsize=15) -distanceSum=0 -def get_car_position(l_fit, r_fit,w,h): - xm_per_pix=3.7/700 - - center_dist=0 - lane_center_position=0 - if r_fit is not None and l_fit is not None: - car_position = w/2 - l_fit_x_int = l_fit[0]*h**2 + l_fit[1]*h + l_fit[2] - r_fit_x_int = r_fit[0]*h**2 + r_fit[1]*h + r_fit[2] - lane_center_position = (r_fit_x_int + l_fit_x_int) /2 - center_dist = (car_position - lane_center_position) * xm_per_pix - - - global distanceSum - if(center_distances.full()): - el=center_distances.get() - distanceSum-=el - - center_distances.put(center_dist) - distanceSum+=center_dist - - no_of_distance_values=center_distances.qsize() - center_dist= distanceSum/no_of_distance_values - return center_dist,lane_center_position - -def get_direction(center_dist): - direction = '' - if center_dist > 0: - direction = 'right' - elif center_dist < 0: - direction = 'left' - return direction - -def Plot_details(laneImage,curv_rad,center_dist,width_lane,lane_center_position): - offest_top=0 - copy= np.zeros_like(laneImage) - - h = laneImage.shape[0] - font = cv2.FONT_HERSHEY_COMPLEX_SMALL - text = 'Curve radius: ' + '{:04.2f}'.format(curv_rad) + 'cm' - cv2.putText(laneImage, text, (40,70+offest_top), font, 1.5, (255,255,255), 2, cv2.LINE_AA) - cv2.putText(copy, text, (40,100+offest_top), font, 4.0, (255,255,255), 3, cv2.LINE_AA) - - abs_center_dist = abs(center_dist) - direction= get_direction(center_dist) - text = '{:04.3f}'.format(abs_center_dist) + 'm ' + direction + ' of center' -# cv2.putText(laneImage, 'steering '+direction, (40,110+offest_top), font, 1.5, (255,255,255), 2, cv2.LINE_AA) - cv2.putText(laneImage, '|', (640,710), font, 2.0, (255,255,255), 3, cv2.LINE_AA) - cv2.putText(laneImage, '|', (int(lane_center_position),680), font, 2.0, (255,0,0), 3, cv2.LINE_AA) - cv2.putText(laneImage, text, (40,120+offest_top), font, 1.5, (255,255,255), 2, cv2.LINE_AA) - - text = 'Lane Width: ' + '{:04.2f}'.format(width_lane) + 'm' - cv2.putText(laneImage, text, (40,170+offest_top), font, 1.5, (255,255,255), 2, cv2.LINE_AA) - cv2.putText(copy, text, (40,280+offest_top), font, 4.0, (255,255,255), 3, cv2.LINE_AA) - - return laneImage, copy - -width_lane_avg=[] -radius_values = Queue(maxsize=15) -radius_sum=0 - -def calc_radius_position(combined, l_fit, r_fit, l_lane_inds, r_lane_inds,lane_width): - - # Define conversions in x and y from pixels space to meters - ym_per_pix = (30*100)/720 # meters per pixel in y dimension - xm_per_pix = (3.7*100)/700 # meters per pixel in x dimension - left_curverad, right_curverad, center_dist, width_lane = (0, 0, 0, 0) - h = combined.shape[0] - w = combined.shape[1] - ploty = np.linspace(0, h-1, h) - y_eval = np.max(ploty) - - # Identify the x and y positions of all nonzero pixels in the image - nonzero = combined.nonzero() - nonzeroy = np.array(nonzero[0]) - nonzerox = np.array(nonzero[1]) - - # Extract left and right line pixel positions - leftx = nonzerox[l_lane_inds] - lefty = nonzeroy[l_lane_inds] - rightx = nonzerox[r_lane_inds] - righty = nonzeroy[r_lane_inds] - - if len(leftx) != 0 and len(rightx) != 0: - # Fit new polynomials to x,y in world space - left_fit_cr = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, 2) - right_fit_cr = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, 2) - - #applying the formula for - left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0]) - right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + 
right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0]) - - width_lane= lane_width*xm_per_pix - if(len(width_lane_avg) != 0): - avg_width=(sum(width_lane_avg)/len(width_lane_avg)) - if abs(avg_width-width_lane)<0.5: - width_lane_avg.append(width_lane) - else: - width_lane=avg_width - - - # Averaging radius value over past 15 frames - global radius_sum - if(radius_values.full()): - el=radius_values.get() - - radius_sum-=el - curve_radius= (left_curverad+right_curverad)/2 - radius_values.put(curve_radius) - radius_sum+=curve_radius - - no_of_radius_values=radius_values.qsize() - curve_radius= radius_sum/no_of_radius_values -# print(curve_radius, radius_sum,no_of_radius_values) - - center_dist,lane_center_position= get_car_position(l_fit,r_fit,w,h) #getting the car distance from the center - return curve_radius, center_dist,width_lane,lane_center_position - -def undistort(img,mtx,dist): - return cv2.undistort(img,mtx,dist, None, mtx) - - -def Lane_pipeline(img,smoothen,prevFrameCount): - undistorted_image= undistort(img,mtx,dist) - warped_image,M= warp_image(undistorted_image) - image_S_channel= cv2.cvtColor(warped_image, cv2.COLOR_RGB2HLS)[:,:,2] - - imgY, imgCr, imgb, imgS= Custom_channel_converter(warped_image) - - Ybinary= channelwise_thresholding(imgY,(215,255)) - Crbinary= channelwise_thresholding(imgCr,(215,255)) - Lbinary= channelwise_thresholding(imgb,(215,255)) - Sbinary= channelwise_thresholding(imgS,(200,255)) - combined = np.zeros_like(imgY) - - -# sobel_mag_image= sobel_mag(image_S_channel, (15,60), False) - # sobel_image1= sobel_image(image_S_channel,'x', 15,60, False) - # sobel_grad_image= sobel_gradient_image(image_S_channel, (0.5,1.8), False) - # combined[(Crbinary==1)|(Ybinary==1)|((Lbinary==1)&(Sbinary==1))] = 1 -# |((sobel_image1==1) & (sobel_grad_image==1)) -# plt.imshow(combined) -# combined[]=1 - -# |((sobel_image1==1)&(sobel_grad_image==1)) -# ((sobel_mag_image == 1) & (sobel_grad_image == 0)) - -# out_img,out_img1, left_fitx,right_fitx,ploty,left_curverad,right_curverad,center_dist= Plot_line(combined) - out_img,out_img1, left_fitx,right_fitx,ploty,left_fit, right_fit,left_lane_inds,right_lane_inds,lane_width= Plot_line(Ybinary,smoothen,prevFrameCount) - curverad,center_dist,width_lane,lane_center_position= calc_radius_position(combined,left_fit, right_fit,left_lane_inds,right_lane_inds,lane_width) - laneImage,new_img =draw_lane(img, combined, left_fitx, right_fitx, M) - unwarped_image= reverse_warping(laneImage,M) - laneImage = cv2.addWeighted(new_img, 1, unwarped_image, 0.5, 0) - laneImage, copy = Plot_details(laneImage,curverad,center_dist,width_lane,lane_center_position) - # print(center_dist) - return img,out_img,out_img1,unwarped_image,laneImage,combined,copy - - - -def CallPipeline(image): - smoothen= True - prevFrameCount=4 - rgb_image,out_img,out_img1,unwarped_image,laneImage,combined,data_copy= Lane_pipeline(image,smoothen,prevFrameCount) - - out_image = np.zeros((720,1280,3), dtype=np.uint8) - - #stacking up various images in one output Image - out_image[0:720,0:1280,:] = cv2.resize(laneImage,(1280,720)) #top-left - out_image[20:190,960:1260,:] = cv2.resize(np.dstack((combined*255, combined*255, combined*255)),(300,170))#side Panel - out_image[210:380,960:1260,:] = cv2.resize(out_img,(300,170))#side Panel -# out_image[400:570,960:1260,:] = cv2.resize(data_copy,(300,170))#bottom-left - return out_image - - - -def main(): - # Set parameters - cap = cv2.VideoCapture("/dev/video2") - - - nx = 9 - ny = 6 - chessboard_dir = './camera_cal_adesso/*.jpg' - - # 
Create an instance of the CameraCalibration class - calibrator = CameraCalibration(nx, ny, chessboard_dir) - - - # Run the calibration and plotting - MTX,DIST = calibrator.plots_and_calibration() - mtx = MTX - dist = DIST - - - ret = True - while ret: - ret, frame = cap.read() - lane_test_img = frame - - offset=10 - height, width= frame.shape[0], frame.shape[1] - - lane_test_undist = cv2.undistort(lane_test_img, MTX, DIST, None, MTX) - # lane_test_undist = cv2.cvtColor(lane_test_img, cv2.COLOR_BGR2RGB) - reg_distort = np.hstack((frame, lane_test_undist)) - white_pixel_detector = WhitePixelDetector(lane_test_undist) - - - - result = white_pixel_detector.process_frame() - if result: - frame, tl_pt, tr_pt, bl_pt, br_pt = result - # print("Top Left:", tl_pt) - # print("Top Right:", tr_pt) - # print("Bottom Left:", bl_pt) - # print("Bottom Right:", br_pt) - - src = np.float32([ - tl_pt, # Top-left corner - bl_pt, # Bottom-left corner - br_pt, # Bottom-right corner - tr_pt # Top-right corner - ]) - - dst=np.float32([(offset,0),(width-offset,0),(width-offset,height),(offset,height)]) - - unwarped_image,M= warp_image(lane_test_img,src,dst) - cv2.imshow("Bird's Eye View", unwarped_image) - - imgY, imgCr, imgL, imgS = Custom_channel_converter(unwarped_image) - - result_image = np.zeros((2 * imgY.shape[0], 2 * imgY.shape[1]), dtype=np.uint8) - - - # Place each frame in the result image - result_image[0:imgY.shape[0], 0:imgY.shape[1]] = imgY - result_image[0:imgCr.shape[0], imgY.shape[1]:] = imgCr - result_image[imgY.shape[0]:, 0:imgL.shape[1]] = imgL - result_image[imgY.shape[0]:, imgL.shape[1]:] = imgS - - - # Display labels on each section - cv2.putText(result_image, 'imgY', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2) - cv2.putText(result_image, 'imgCr', (imgY.shape[1] + 10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2) - cv2.putText(result_image, 'imgL', (10, imgY.shape[0] + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2) - cv2.putText(result_image, 'imgS', (imgY.shape[1] + 10, imgY.shape[0] + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2) - - - - Ybinary= channelwise_thresholding(imgY,(220,255)) - Crbinary= channelwise_thresholding(imgCr,(220,255)) - Lbinary= channelwise_thresholding(imgL,(220,252)) - Sbinary= channelwise_thresholding(imgS,(220,255)) - - Ybinary= cv2.bitwise_not(Ybinary) - Crbinary= cv2.bitwise_not(Crbinary) - Lbinary= cv2.bitwise_not(Lbinary) - Sbinary= cv2.bitwise_not(Sbinary) - combined = np.zeros_like(imgY) - - warped_image,M= warp_image(lane_test_img,src,dst) - - image_S_channel= cv2.cvtColor(warped_image, cv2.COLOR_RGB2HLS)[:,:,2] - - sobel_image1= sobel_image(image_S_channel,'x', 15,60, False) - sobel_grad_image= sobel_gradient_image(image_S_channel, (0.5,1.8), False) - combined[(Crbinary==1)|(Ybinary==1)|((Lbinary==1)&(Sbinary==1))] = 1 - combined = cv2.bitwise_not(combined) - - cv2.imshow("PLS",combined) - - - - # Create a blank image to display the binary images - result_binary_image = np.zeros((2 * Ybinary.shape[0], 2 * Ybinary.shape[1]), dtype=np.uint8) - - # Place each binary image in the result image - result_binary_image[0:Ybinary.shape[0], 0:Ybinary.shape[1]] = Ybinary - result_binary_image[0:Crbinary.shape[0], Ybinary.shape[1]:] = Crbinary - result_binary_image[Ybinary.shape[0]:, 0:Lbinary.shape[1]] = Lbinary - result_binary_image[Ybinary.shape[0]:, Lbinary.shape[1]:] = Sbinary - - # Display labels on each section - cv2.putText(result_binary_image, 'Ybinary', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2) - cv2.putText(result_binary_image, 'Crbinary', (Ybinary.shape[1] + 10, 
30), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2) - cv2.putText(result_binary_image, 'Lbinary', (10, Ybinary.shape[0] + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2) - cv2.putText(result_binary_image, 'Sbinary', (Ybinary.shape[1] + 10, Ybinary.shape[0] + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2) - - # Display the result binary image - cv2.imshow('Four Binary Images Display', result_binary_image) - cv2.imshow('Color Spaces', result_image) - - global fit_prev_left - global fit_prev_right - global fit_sum_left - global fit_sum_right - fit_prev_left=[] - fit_prev_right=[] - fit_sum_left=0 - fit_sum_right=0 - - out_img, out_img1, left_fitx,right_fitx,ploty,left_fit, right_fit,left_lane_inds,right_lane_inds,lane_width = Plot_line(Ybinary) - - - # Generate x and y values for plotting - ploty = np.linspace(0, result_binary_image.shape[0]-1, result_binary_image.shape[0]) - left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2] - right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2] - - for y, left_x, right_x in zip(ploty.astype(int), left_fitx.astype(int), right_fitx.astype(int)): - cv2.circle(out_img, (left_x, y), 2, (255, 0, 0), -1) # Draw left line in red - cv2.circle(out_img, (right_x, y), 2, (0, 255, 255), -1) # Draw right line in yellow - - # we may have to do some proccessing of our own. All it needs is something detected thats white. - cv2.imshow("PLYFIT",out_img) - - curverad,center_dist,width_lane,lane_center_position= calc_radius_position(Ybinary,left_fit, right_fit,left_lane_inds,right_lane_inds,lane_width) - laneImage,new_img =draw_lane(frame, Ybinary, left_fitx, right_fitx, M) - unwarped_image= reverse_warping(laneImage,M) - laneImage = cv2.addWeighted(new_img, 1, unwarped_image, 0.5, 0) - laneImage, copy = Plot_details(laneImage,curverad,center_dist,width_lane,lane_center_position) - print(center_dist) - - cv2.imshow("UMMA",laneImage) - - # # Display the combined frame - # cv2.imshow('Combined Video Feed', reg_distort) - - if cv2.waitKey(1) & 0xFF == ord('q'): - break - - - - - - - # Release the webcam and close all windows - cap.release() - cv2.destroyAllWindows() - - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/ros2/lanefollowingtest/VidCam.py b/ros2/lanefollowingtest/VidCam.py deleted file mode 100644 index b00d94eb8..000000000 --- a/ros2/lanefollowingtest/VidCam.py +++ /dev/null @@ -1,457 +0,0 @@ -import os -import glob - -import cv2 -import numpy as np -import matplotlib.pyplot as plt -from moviepy.editor import VideoFileClip -from whitepxlClass import WhitePixelDetector - - -GRADIENT_THRESH = (20, 100) -S_CHANNEL_THRESH = (80, 255) -L_CHANNEL_THRESH = (80, 255) - -# NEED TO BE -B_CHANNEL_THRESH = (150, 200) -L2_CHANNEL_THRESH = (200, 200) - - -class CameraCalibration: - def __init__(self, nx, ny, chessboard_dir,SHOW_PLOT=False): - self.NX = nx - self.NY = ny - self.CHESSBOARD_DIR = chessboard_dir - self.show_plot = SHOW_PLOT - - def get_chessboard_corners(self, imgs): - """Return image and object points from a set of chessboard images.""" - if not isinstance(imgs, list): - raise ValueError("imgs parameter needs to be a list.") - - # Initialize 3D object points - objp = np.zeros((self.NX * self.NY, 3), np.float32) - objp[:, :2] = np.mgrid[0:self.NX, 0:self.NY].T.reshape(-1, 2) - - imgps = [] - objps = [] - - for img in imgs: - gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - found, corners = cv2.findChessboardCorners(gray, (self.NX, self.NY), None) - if not found: - continue - imgps.append(corners) - objps.append(objp) - - return 
imgps, objps - - def chessboard_cam_calib(self, imgps, objps, img_size): - """Returns camera calibration matrix and distortion coefficients.""" - ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objps, imgps, img_size, None, None) - return mtx, dist - - def plots_and_calibration(self): - img_locs = glob.glob(self.CHESSBOARD_DIR) - print(img_locs) - - # Read all images into a list - imgs = [cv2.imread(img_loc) for img_loc in img_locs] - - - - # Get size of images using one image as a sample, with width as the first element - img_size = imgs[0].shape[::-1][1:] - - # Calibrate camera and retrieve calibration matrix and distortion coefficients - imgps, objps = self.get_chessboard_corners(imgs) - MTX, DIST = self.chessboard_cam_calib(imgps, objps, img_size) - - if self.show_plot: - - # Set up figure for plotting - f, axarr = plt.subplots(len(imgs), 3) - f.set_size_inches(15, 30) - - # Loop through images, undistort them, and draw corners on undistorted versions. - for i, img in enumerate(imgs): - # Set column headings on figure - if i == 0: - axarr[i, 0].set_title("Original Image") - axarr[i, 1].set_title("Undistorted Image") - axarr[i, 2].set_title("Corners Drawn") - - # Generate new undistorted image - undist = cv2.undistort(img, MTX, DIST, None, MTX) - undist_copy = undist.copy() - - # Generate new image with corner points drawn - undist_grey = cv2.cvtColor(undist_copy, cv2.COLOR_BGR2GRAY) - found, corners = cv2.findChessboardCorners(undist_grey, (self.NX, self.NY), None) - - if found: - drawn = cv2.drawChessboardCorners(undist_copy, (self.NX, self.NY), corners, found) - else: - drawn = img - - # Plot images on figure - axarr[i, 0].imshow(img) - axarr[i, 0].axis('off') - axarr[i, 1].imshow(undist) - axarr[i, 1].axis('off') - axarr[i, 2].imshow(drawn) - axarr[i, 2].axis('off') - return MTX,DIST - - -def seperate_hls(rgb_img): - hls = cv2.cvtColor(rgb_img, cv2.COLOR_RGB2HLS) - h = hls[:,:,0] - l = hls[:,:,1] - s = hls[:,:,2] - return h, l, s - -def seperate_lab(rgb_img): - lab = cv2.cvtColor(rgb_img, cv2.COLOR_RGB2Lab) - l = lab[:,:,0] - a = lab[:,:,1] - b = lab[:,:,2] - return l, a, b - -def seperate_luv(rgb_img): - luv = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2Luv) - - l = luv[:,:,0] - u = luv[:,:,1] - v = luv[:,:,2] - return l, u, v - -def binary_threshold_lab_luv(rgb_img, bthresh, lthresh): - l, a, b = seperate_lab(rgb_img) - l2, u, v = seperate_luv(rgb_img) - binary = np.zeros_like(l) - binary[ - ((b > bthresh[0]) & (b <= bthresh[1])) | - ((l2 > lthresh[0]) & (l2 <= lthresh[1])) - ] = 1 - return binary - -def binary_threshold_hls(rgb_img, sthresh, lthresh): - h, l, s = seperate_hls(rgb_img) - binary = np.zeros_like(h) - binary[ - ((s > sthresh[0]) & (s <= sthresh[1])) & - ((l > lthresh[0]) & (l <= lthresh[1])) - ] = 1 - return binary - -def gradient_threshold(channel, thresh): - # Take the derivative in x - sobelx = cv2.Sobel(channel, cv2.CV_64F, 1, 0) - # Absolute x derivative to accentuate lines away from horizontal - abs_sobelx = np.absolute(sobelx) - scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx)) - # Threshold gradient channel - sxbinary = np.zeros_like(scaled_sobel) - sxbinary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1 - return sxbinary - - -def histo_peak(histo): - """Find left and right peaks of histogram""" - midpoint = np.int32(histo.shape[0]/2) - leftx_base = np.argmax(histo[:midpoint]) - rightx_base = np.argmax(histo[midpoint:]) + midpoint - return leftx_base, rightx_base - -def get_lane_indices_sliding_windows(binary_warped, leftx_base, 
rightx_base, n_windows, margin, recenter_minpix): - """Get lane line pixel indices by using sliding window technique""" - # Create an output image to draw on and visualize the result - out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255 - out_img = out_img.copy() - # Set height of windows - window_height = np.int32(binary_warped.shape[0]/n_windows) - - # Identify the x and y positions of all nonzero pixels in the image - nonzero = binary_warped.nonzero() - nonzeroy = np.array(nonzero[0]) - nonzerox = np.array(nonzero[1]) - - # Create empty lists to receive left and right lane pixel indices - left_lane_inds = [] - right_lane_inds = [] - # Current positions to be updated for each window - leftx_current = leftx_base - rightx_current = rightx_base - - for window in range(n_windows): - # Identify window boundaries in x and y (and right and left) - win_y_low = binary_warped.shape[0] - (window + 1) * window_height - win_y_high = binary_warped.shape[0] - window * window_height - win_xleft_low = leftx_current - margin - win_xleft_high = leftx_current + margin - win_xright_low = rightx_current - margin - win_xright_high = rightx_current + margin - cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high), (0,255,0), 2) - cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high), (0,255,0), 2) - # Identify the nonzero pixels in x and y within the window - good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & - (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0] - good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & - (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0] - # Append these indices to the lists - left_lane_inds.append(good_left_inds) - right_lane_inds.append(good_right_inds) - # If you found > minpix pixels, recenter next window on their mean position - if len(good_left_inds) > recenter_minpix: - leftx_current = np.int32(np.mean(nonzerox[good_left_inds])) - if len(good_right_inds) > recenter_minpix: - rightx_current = np.int32(np.mean(nonzerox[good_right_inds])) - - # Concatenate the arrays of indices - left_lane_inds = np.concatenate(left_lane_inds) - right_lane_inds = np.concatenate(right_lane_inds) - return left_lane_inds, right_lane_inds, nonzerox, nonzeroy, out_img - -def get_lane_indices_from_prev_window(binary_warped, left_fit, right_fit, margin): - """Detect lane line by searching around detection of previous sliding window detection""" - nonzero = binary_warped.nonzero() - nonzeroy = np.array(nonzero[0]) - nonzerox = np.array(nonzero[1]) - - left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + - left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + - left_fit[1]*nonzeroy + left_fit[2] + margin))) - right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + - right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + - right_fit[1]*nonzeroy + right_fit[2] + margin))) - - # Again, extract left and right line pixel positions - leftx = nonzerox[left_lane_inds] - lefty = nonzeroy[left_lane_inds] - rightx = nonzerox[right_lane_inds] - righty = nonzeroy[right_lane_inds] - - # Fit a second order polynomial to each - left_fit = np.polyfit(lefty, leftx, 2) - right_fit = np.polyfit(righty, rightx, 2) - - # Generate x and y values for plotting - ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] ) - left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2] - 
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2] - return left_lane_inds, right_lane_inds, ploty, left_fitx, right_fitx - - - - - - -def main(): - # Set parameters - cap = cv2.VideoCapture("/dev/video2") - - - nx = 9 - ny = 6 - chessboard_dir = './camera_cal_adesso/*.jpg' - - # Create an instance of the CameraCalibration class - calibrator = CameraCalibration(nx, ny, chessboard_dir) - - - # Run the calibration and plotting - MTX,DIST = calibrator.plots_and_calibration() - - - ret = True - while ret: - ret, frame = cap.read() - - # cv2.imshow("INITAL FRAME", frame) - - - # TEST_IMG = "./test_images/straight_lines1.jpg" - lane_test_img = frame - lane_test_img_rgb = cv2.cvtColor(lane_test_img, cv2.COLOR_BGR2RGB) - lane_test_undist = cv2.undistort(lane_test_img_rgb, MTX, DIST, None, MTX) - reg_distort = np.hstack((frame, lane_test_undist)) - - # Display the combined frame - cv2.imshow('Combined Video Feed', reg_distort) - - # LAB and LUV channel threshold - s_binary = binary_threshold_lab_luv(lane_test_undist, B_CHANNEL_THRESH, L2_CHANNEL_THRESH) - - # Convert the frame to grayscale - gray = cv2.cvtColor(lane_test_undist, cv2.COLOR_BGR2GRAY) - gray = cv2.GaussianBlur(gray, (7, 7), 0) - - - - - # Threshold the frame to get white pixels - _, thresholded = cv2.threshold(gray, 220, 255, cv2.THRESH_BINARY) - - cv2.imshow("COPE",thresholded) - - - - # Gradient threshold on S channel - h, l, s = seperate_hls(lane_test_undist) - sxbinary = gradient_threshold(s, GRADIENT_THRESH) - - - # Combine two binary images to view their contribution in green and red - color_binary = np.dstack((sxbinary, s_binary, np.zeros_like(sxbinary))) * 255 - - - # bottom_row = np.hstack((sxbinary, color_binary)) - - # binary_images = np.vstack((top_row, bottom_row)) - - # # Display the combined frame - - - IMG_SIZE = lane_test_undist.shape[::-1][1:] - OFFSET = 300 - - white_pixel_detector = WhitePixelDetector(lane_test_undist) - - result = white_pixel_detector.process_frame() - if result: - frame, tl_pt, tr_pt, bl_pt, br_pt = result - print("Top Left:", tl_pt) - print("Top Right:", tr_pt) - print("Bottom Left:", bl_pt) - print("Bottom Right:", br_pt) - - - - - PRES_SRC_PNTS = np.float32([ - tl_pt, # Top-left corner - bl_pt, # Bottom-left corner - br_pt, # Bottom-right corner - tr_pt # Top-right corner - ]) - - PRES_DST_PNTS = np.float32([ - [OFFSET, 0], - [OFFSET, IMG_SIZE[1]], - [IMG_SIZE[0]-OFFSET, IMG_SIZE[1]], - [IMG_SIZE[0]-OFFSET, 0] -]) - M = cv2.getPerspectiveTransform(PRES_SRC_PNTS, PRES_DST_PNTS) - M_INV = cv2.getPerspectiveTransform(PRES_DST_PNTS, PRES_SRC_PNTS) - warped = cv2.warpPerspective(lane_test_undist, M, IMG_SIZE, flags=cv2.INTER_LINEAR) - - warped_cp = warped.copy() - warped_poly = cv2.polylines(warped_cp, np.int32([PRES_DST_PNTS]), True, (255,0,0), 3) - - - cv2.polylines(lane_test_undist, np.int32([PRES_SRC_PNTS]), True, (255, 0, 0), 3) - top_row = np.hstack((lane_test_undist, color_binary)) - cv2.imshow('2x2 Video Configuration', top_row) - ort_dots = np.hstack((frame, warped_cp)) - cv2.imshow('LANES', ort_dots) - - N_WINDOWS = 10 - MARGIN = 100 - RECENTER_MINPIX = 50 - - # Define conversions in x and y from pixels space to meters - YM_PER_PIX = 30 / 720 # meters per pixel in y dimension - XM_PER_PIX = 3.7 / 700 # meters per pixel in x dimension - - - - - - # Warp binary image of lane line - binary_warped = cv2.warpPerspective(s_binary, M, IMG_SIZE, flags=cv2.INTER_LINEAR) - histogram = np.sum(binary_warped[int(binary_warped.shape[0]/2):,:], axis=0) - print(histogram) - - # f, 
axarr = plt.subplots(2,2) - # f.set_size_inches(18, 10) - # axarr[0, 0].imshow(binary_warped, cmap='gray') - # axarr[0, 1].plot(histogram) - # axarr[0, 0].set_title("Warped Binary Lane Line") - # axarr[0, 1].set_title("Histogram of Lane line Pixels") - - - cv2.imshow("binary_warped",binary_warped) - - # leftx_base, rightx_base = histo_peak(histogram) - # left_lane_inds, right_lane_inds, nonzerox, nonzeroy, out_img = get_lane_indices_sliding_windows( - # binary_warped, leftx_base, rightx_base, N_WINDOWS, MARGIN, RECENTER_MINPIX) - - - # # Extract left and right line pixel positions - # leftx = nonzerox[left_lane_inds] - # lefty = nonzeroy[left_lane_inds] - # rightx = nonzerox[right_lane_inds] - # righty = nonzeroy[right_lane_inds] - - - # # Fit a second order polynomial to each - # left_fit = np.polyfit(lefty, leftx, 2) - # right_fit = np.polyfit(righty, rightx, 2) - - - # # Generate x and y values for plotting - # ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0]) - # left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2] - # right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2] - - # for y, left_x, right_x in zip(ploty.astype(int), left_fitx.astype(int), right_fitx.astype(int)): - # cv2.circle(out_img, (left_x, y), 2, (255, 0, 0), -1) # Draw left line in red - # cv2.circle(out_img, (right_x, y), 2, (0, 255, 255), -1) # Draw right line in yellow - - - - - # # we may have to do some proccessing of our own. All it needs is something detected thats white. - # cv2.imshow("PLYFIT",out_img) - - - - - - - - - - - - # Break the loop when 'q' key is pressed - if cv2.waitKey(1) & 0xFF == ord('q'): - break - - # Release the webcam and close all windows - cap.release() - cv2.destroyAllWindows() - - - - - # f, axarr = plt.subplots(2,2) - # f.set_size_inches(17, 10) - # axarr[0, 0].imshow(lane_test_img_rgb) - # axarr[0, 1].imshow(lane_test_undist) - # axarr[0, 0].set_title("Original Image") - # axarr[0, 1].set_title("Undistorted Image") - # axarr[0, 0].axis('off') - # axarr[0, 1].axis('off') - - - - - # Show the plots - # plt.show() - - -if __name__ == "__main__": - main() - diff --git a/ros2/lanefollowingtest/whitepxlClass.py b/ros2/lanefollowingtest/whitepxlClass.py deleted file mode 100644 index d0f7880f5..000000000 --- a/ros2/lanefollowingtest/whitepxlClass.py +++ /dev/null @@ -1,95 +0,0 @@ -import cv2 -import numpy as np -import math -from functools import partial - -class WhitePixelDetector: - def __init__(self,frame): - # self.cap = cv2.VideoCapture(video_source) - self.frame = frame - self.tl = (0, 0) # Top left - self.tr = (640, 0) - self.bl = (0, 480) - self.br = (640, 480) - self.tl_dist = partial(self.euclidean_distance, reference_point=self.tl) - self.tr_dist = partial(self.euclidean_distance, reference_point=self.tr) - self.bl_dist = partial(self.euclidean_distance, reference_point=self.bl) - self.br_dist = partial(self.euclidean_distance, reference_point=self.br) - - def euclidean_distance(self, coord, reference_point): - return math.sqrt((coord[0] - reference_point[0]) ** 2 + (coord[1] - reference_point[1]) ** 2) - - def detect_white_pixels(self): - gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY) - gray = cv2.GaussianBlur(gray, (7, 7), 0) - _, thresholded = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY) - contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) - white_pixel_coordinates = [point[0] for contour in contours for point in contour] - return white_pixel_coordinates - - def 
interpolate_and_draw_line(self,coordinates): - if len(coordinates) > 1: - coordinates_np = np.array(coordinates, dtype=np.int32) - coordinates_np = coordinates_np.reshape(-1, 1, 2) - cv2.polylines(self.frame, [coordinates_np], isClosed=True, color=(0, 0, 255), thickness=2) - - def process_frame(self): - # ret, frame = self.cap.read() - - height, width, _ = self.frame.shape - - top_left = (0, 0) - top_right = (width, 0) - bottom_left = (0, height) - bottom_right = (width, height) - - white_pixel_coordinates = self.detect_white_pixels() - - if white_pixel_coordinates: - sorted_coordinates_distance_tl = sorted(white_pixel_coordinates, key=self.tl_dist) - sorted_coordinates_distance_tr = sorted(white_pixel_coordinates, key=self.tr_dist) - sorted_coordinates_distance_bl = sorted(white_pixel_coordinates, key=self.bl_dist) - sorted_coordinates_distance_br = sorted(white_pixel_coordinates, key=self.br_dist) - tl_pt = sorted_coordinates_distance_tl[0] - tr_pt = sorted_coordinates_distance_tr[0] - bl_pt = sorted_coordinates_distance_bl[0] - br_pt = sorted_coordinates_distance_br[0] - - # Draw circles on the frame - cv2.circle(self.frame, tuple(tl_pt), 12, (0, 255, 0), -1) - cv2.circle(self.frame, tuple(tr_pt), 12, (0, 255, 0), -1) - cv2.circle(self.frame, tuple(bl_pt), 12, (0, 255, 0), -1) - cv2.circle(self.frame, tuple(br_pt), 12, (0, 255, 0), -1) - - # Interpolate to form a line and draw it on the frame - self.interpolate_and_draw_line(white_pixel_coordinates) - - return self.frame, tl_pt, tr_pt, bl_pt, br_pt - - return None - - # def release_capture(self): - # self.cap.release() - -# # Instantiate the class with the video source -# white_pixel_detector = WhitePixelDetector("/dev/video5") - -# while True: -# result = white_pixel_detector.process_frame() -# if result: -# frame, tl_pt, tr_pt, bl_pt, br_pt = result -# print("Top Left:", tl_pt) -# print("Top Right:", tr_pt) -# print("Bottom Left:", bl_pt) -# print("Bottom Right:", br_pt) -# cv2.imshow('Interpolated Line', frame) - -# # Check for a key press and break the loop if 'q' is pressed -# if cv2.waitKey(1) & 0xFF == ord('q'): -# break - -# # Release the webcam -# white_pixel_detector.release_capture() - -# # Close all windows -# cv2.destroyAllWindows() diff --git a/ros2/log/latest_build b/ros2/log/latest_build index 42edb5cc3..ebbe5d0f0 120000 --- a/ros2/log/latest_build +++ b/ros2/log/latest_build @@ -1 +1 @@ -build_2024-03-12_18-56-12 \ No newline at end of file +build_2024-03-28_13-39-36 \ No newline at end of file diff --git a/ros2/src/lanefollowingtest/LaneDetection.py b/ros2/src/lanefollowingtest/LaneDetection.py index 0e5ba6a8e..ece87be09 100644 --- a/ros2/src/lanefollowingtest/LaneDetection.py +++ b/ros2/src/lanefollowingtest/LaneDetection.py @@ -8,7 +8,7 @@ from cv_bridge import CvBridge import cv2 import numpy as np - +import LaneDetection as LD # Inputs from both cameras vidcap_left = cv2.VideoCapture("/dev/video0") diff --git a/ros2/src/lanefollowingtest/cam_publisher.py b/ros2/src/lanefollowingtest/cam_publisher.py index a707fee78..82ef40e01 100644 --- a/ros2/src/lanefollowingtest/cam_publisher.py +++ b/ros2/src/lanefollowingtest/cam_publisher.py @@ -6,7 +6,6 @@ class WebcamImagePublisher(Node): - def __init__(self): super().__init__('webcam_image_publisher') self.logger = self.get_logger() @@ -28,6 +27,11 @@ def __init__(self): # Create image publisher self.image_pub = self.create_publisher( Image, '/camera/image', 10) # Adjust queue size if needed + + # Create image publisher + self.hsv_pub = self.create_publisher( + Image, 
'/camera/image_hsv', 10) # Adjust queue size if needed + # Timer to capture and publish images self.timer = self.create_timer( @@ -42,14 +46,13 @@ def capture_and_publish(self): self.logger.warn("Failed to capture image!") return - # Optionally resize the image (comment out if not needed) - # resized_image = cv2.resize(cv_image, (width, height)) - # Convert OpenCV image to ROS image message - ros_image = self.bridge.cv2_to_imgmsg(cv_image, encoding="bgr8") - print("published") + raw_image = self.bridge.cv2_to_imgmsg(cv_image, encoding="passthrough") + hsv_img = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV) + hsv_image = self.bridge.cv2_to_imgmsg(hsv_img, encoding="passthrough") # Publish the image - self.image_pub.publish(ros_image) + self.image_pub.publish(raw_image) + self.hsv_pub.publish(hsv_image) def main(): From a7dd3a85dd4cb0015651806462cc2bd84628d3a8 Mon Sep 17 00:00:00 2001 From: "Vaibhav.hariani@gmail.com" Date: Wed, 10 Apr 2024 18:02:43 -0400 Subject: [PATCH 06/20] The U.I Lives! Squashed all existing bugs (As far as I'm aware), meaning that multiple topics are subscribed too, being rendered, and without fail or concern. This was fixed with a spin_once and then controlling the spin speed manually, rather than letting RCLPY do it. In addition, adding a delay to the system helps solve some issues involving tab switching too fast. --- .../pages/\360\237\233\243 Lane_Detection.py" | 53 +++++++++++-------- ros2/src/lanefollowingtest/cam_publisher.py | 23 ++++---- 2 files changed, 43 insertions(+), 33 deletions(-) diff --git "a/ros2/UI/pages/\360\237\233\243 Lane_Detection.py" "b/ros2/UI/pages/\360\237\233\243 Lane_Detection.py" index 837d1e090..1700568da 100644 --- "a/ros2/UI/pages/\360\237\233\243 Lane_Detection.py" +++ "b/ros2/UI/pages/\360\237\233\243 Lane_Detection.py" @@ -5,6 +5,8 @@ from rclpy.node import Node from rclpy.qos import qos_profile_sensor_data from cv_bridge import CvBridge +import time + class Image_Handler(Node): @@ -14,11 +16,17 @@ def __init__(self): self._frame_containers = None super().__init__('Streamlit_Image_Handler') self._bridge = CvBridge() - self.camData_subscriber = self.create_subscription( + self.imgData_subscriber = self.create_subscription( Image, '/camera/image', self.camData_callback, qos_profile_sensor_data) + self.hsvData_subscriber = self.create_subscription( + Image, + '/camera/image_hsv', + self.hsvData_callback, + qos_profile_sensor_data) + def tabs(self,tabs): self._tabs = tabs @@ -30,16 +38,12 @@ def camData_callback(self, msg_in): raw = msg_in img = self._bridge.imgmsg_to_cv2(raw) self._frame_containers[0].image(img) - st.write("IMAGE RECIEVED") -@st.cache_data -def startup(): - rclpy.init() - return None + def hsvData_callback(self, msg_in): + raw = msg_in + img = self._bridge.imgmsg_to_cv2(raw) + self._frame_containers[1].image(img) -def render(): - if("checkbox" not in st.session_state or st.session_state["checkbox"]): - # See ../../ros2/src/lanefollowingtest/LaneDetection.py # This page should render ALL images in there, as well as publish @@ -47,22 +51,27 @@ def render(): st.set_page_config( page_title="Lane Detection", page_icon="🛣") - startup() - - st.session_state["checkbox"] = st.checkbox("Render Video Feed",callback = render) + time.sleep(0.2) + render = st.checkbox("Render Video Feed") st.write( "This should render all of the images related to Lane Detection, and relevant parameters.") tabs = st.tabs(["Original", "HSV", "Sliding Windows"]) if "node" not in st.session_state: - handler = Image_Handler() - handler.tabs(tabs) - 
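+            # Cache the node in session_state so Streamlit reruns reuse one subscription instead of re-initializing rclpy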
st.session_state["node"] = handler - if(render): - rclpy.spin(st.session_state['node']) - st.session_state['node'].destroy_node() - rclpy.shutdown() - - # I Don't even know if we'll want this, but it's a nice to have anyway - Restart = st.button("ESTOP", type="primary") \ No newline at end of file + try: + rclpy.init() + except RuntimeError: + st.warning("something went wrong performance may be degraded. Try restarting fully") + finally: + handler = Image_Handler() + handler.tabs(tabs) + st.session_state["node"] = handler + if render: + while(True): + try: + rclpy.spin_once(st.session_state['node']) + time.sleep(0.01) + except: + st.warning("Something went wrong, perhaps tabs were clicked too quickly? Try restarting.") + break diff --git a/ros2/src/lanefollowingtest/cam_publisher.py b/ros2/src/lanefollowingtest/cam_publisher.py index 82ef40e01..e5b21f2e5 100644 --- a/ros2/src/lanefollowingtest/cam_publisher.py +++ b/ros2/src/lanefollowingtest/cam_publisher.py @@ -11,7 +11,7 @@ def __init__(self): self.logger = self.get_logger() # Set desired frame rate (may need adjustment based on hardware) - self.frame_rate = 60 + self.frame_rate = 30 # Initialize OpenCV video capture object self.cap = cv2.VideoCapture("/dev/video0") # 0 for default webcam @@ -26,11 +26,11 @@ def __init__(self): # Create image publisher self.image_pub = self.create_publisher( - Image, '/camera/image', 10) # Adjust queue size if needed + Image, '/camera/image', 5) # Adjust queue size if needed - # Create image publisher + # # Create image publisher self.hsv_pub = self.create_publisher( - Image, '/camera/image_hsv', 10) # Adjust queue size if needed + Image, '/camera/image_hsv', 5) # Adjust queue size if needed # Timer to capture and publish images @@ -45,14 +45,15 @@ def capture_and_publish(self): if not ret: self.logger.warn("Failed to capture image!") return - + raw_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB) + raw_image = cv2.resize(raw_image, (640,480)) + raw_image_msg = self.bridge.cv2_to_imgmsg(raw_image, "bgr8") # Convert OpenCV image to ROS image message - raw_image = self.bridge.cv2_to_imgmsg(cv_image, encoding="passthrough") - hsv_img = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV) - hsv_image = self.bridge.cv2_to_imgmsg(hsv_img, encoding="passthrough") - # Publish the image - self.image_pub.publish(raw_image) - self.hsv_pub.publish(hsv_image) + hsv_image = cv2.cvtColor(raw_image, cv2.COLOR_BGR2HSV) + hsv_image_msg = self.bridge.cv2_to_imgmsg(hsv_image, encoding="passthrough") + # # Publish the image + self.image_pub.publish(raw_image_msg) + self.hsv_pub.publish(hsv_image_msg) def main(): From 49a334f9259707788c45298813c304733c354b08 Mon Sep 17 00:00:00 2001 From: "Vaibhav.hariani@gmail.com" Date: Wed, 10 Apr 2024 18:44:43 -0400 Subject: [PATCH 07/20] Added Publisher, specified goals in Homepage.py --- "ros2/UI/ \360\237\232\227 Homepage.py" | 61 ++++++++++++++++ ros2/UI/Homepage.py | 28 -------- .../pages/\360\237\233\243 Lane_Detection.py" | 72 ++++++++++++------- 3 files changed, 109 insertions(+), 52 deletions(-) create mode 100644 "ros2/UI/ \360\237\232\227 Homepage.py" delete mode 100644 ros2/UI/Homepage.py diff --git "a/ros2/UI/ \360\237\232\227 Homepage.py" "b/ros2/UI/ \360\237\232\227 Homepage.py" new file mode 100644 index 000000000..27668693f --- /dev/null +++ "b/ros2/UI/ \360\237\232\227 Homepage.py" @@ -0,0 +1,61 @@ +import streamlit as st +from streamlit import runtime +from streamlit.web import cli as stcli +import sys + + +def runner(): + st.header("Welcome to the IGVC Homepage!") + 
st.markdown('''
+                Please select an option in the sidebar: it selects which UI page you'd like to run.
+
+
+                Streamlit supports fast reloads: just refresh the Python script to get going!
+
+
+                Docs are here: https://docs.streamlit.io/library/api-reference
+                ''')
+    st.divider()
+    st.markdown('''
+    For the freshmen: This UI is designed to serve multiple purposes:
+
+    #1. We need a way to render all of the OpenCV data we have: things like the lane or object detection.
+
+    #2. We need an easy way to tune parameters, without refreshing an entire page for every small tweak.
+
+    #3. We need to be able to control the state machines on the car: each of the function tests is effectively its own state machine.
+    We need to be able to control which test we're in, and visualize any important data in a direct manner.
+
+
+    #4. It makes us look professional, and adds quite a bit of useful fluff for our design report.
+    ''')
+    st.divider()
+    st.markdown('''
+    Of course, not a lot of this is in your control: we haven't built the state machines or finished all of our algorithms.
+    However, Lane Detection is in the perfect place for you to familiarize yourself with both Streamlit and the entire tech stack.
+    I'm detailing all immediate tasks here: Please allocate amongst yourselves.
+    I have taken the liberty of building a bespoke ros2 publisher and subscriber system (it was not trivial), which should provide a solid framework.
+    However, it will get extremely tedious to build a proper UI if you don't exploit some of the advantages of Python.
+    Read about list comprehension and get good.
+    #1: Connecting all datastreams from the lane_detection algorithm into here.
+    Everything that is cv.imshow() there should be rendered here, in a clean and effective manner.
+    #2: Parametrizing all constants in there: anything that had a slider should live here, and its value should be published.
+    #3: Beautifying, and building more tabs and systems so this is the only interface we need with the car.
+    We should not be opening or juggling multiple windows in front of the judges.
+    Debugging systems and sensor messages (e.g. IMU, CAN) should all live here.
+ + ''') + st.warning("FIRST PRIORITY: https://docs.streamlit.io/develop/api-reference/widgets/st.page_link CHANGE THE PAGES SO NO MORE EMOJIS") + + +# This allows the system to run without being in a dedicated streamlit session: +if __name__ == "__main__": + if runtime.exists(): + st.set_page_config( + page_title="IGVC Homepage", + page_icon="🚗") + st.sidebar.success("Select a System above.") + runner() + else: + sys.argv = ["streamlit", "run", sys.argv[0]] + sys.exit(stcli.main()) diff --git a/ros2/UI/Homepage.py b/ros2/UI/Homepage.py deleted file mode 100644 index 29d44a6e7..000000000 --- a/ros2/UI/Homepage.py +++ /dev/null @@ -1,28 +0,0 @@ -import streamlit as st -from streamlit import runtime -from streamlit.web import cli as stcli -import sys - -def runner(): - st.header("Welcome to the IGVC Homepage!") - st.write("Please select an option on the sidebar to the right.") - st.write("For the freshmen: You guys can beautify this up however you'd like") - st.write( - "Streamlit supports fast reloads, just refresh the python script to get going!") - st.write("Docs are here: https://docs.streamlit.io/library/api-reference") - st.write("This is also extremely relevant: https://stackoverflow.com/questions/67382181/how-to-use-streamlit-in-ros") - st.write("For Siann: You should love yourself, now!") - - - -# This allows the system to run without being in a dedicated streamlit session: -if __name__ == "__main__": - if runtime.exists(): - st.set_page_config( - page_title="IGVC Homepage", - page_icon="🚗") - st.sidebar.success("Select a demo above.") - runner() - else: - sys.argv = ["streamlit", "run", sys.argv[0]] - sys.exit(stcli.main()) diff --git "a/ros2/UI/pages/\360\237\233\243 Lane_Detection.py" "b/ros2/UI/pages/\360\237\233\243 Lane_Detection.py" index 1700568da..0f0191907 100644 --- "a/ros2/UI/pages/\360\237\233\243 Lane_Detection.py" +++ "b/ros2/UI/pages/\360\237\233\243 Lane_Detection.py" @@ -6,10 +6,10 @@ from rclpy.qos import qos_profile_sensor_data from cv_bridge import CvBridge import time +from std_msgs.msg import String - +## This class currently subscribes to some topics in cam_publisher, which I used to test. Please rewrite for lane Detection. class Image_Handler(Node): - def __init__(self): self._tabs = None self._img = None @@ -27,27 +27,41 @@ def __init__(self): self.hsvData_callback, qos_profile_sensor_data) - - def tabs(self,tabs): + def tabs(self, tabs): self._tabs = tabs # SRC is cv2.VideoCapture FOR NOW: Will need to be tweaked in the future to rossify self._frame_containers = [tab.empty() for tab in self._tabs] - def camData_callback(self, msg_in): raw = msg_in img = self._bridge.imgmsg_to_cv2(raw) self._frame_containers[0].image(img) - + def hsvData_callback(self, msg_in): raw = msg_in img = self._bridge.imgmsg_to_cv2(raw) self._frame_containers[1].image(img) - +#This class is your publisher: Flesh it out and integrate accordingly +class Publisher(Node): + def __init__(self): + super().__init__('Streamlit_Button_Publisher') + self.logger = self.get_logger() + self.button_pub = self.create_publisher(String, '/streamlit/button',10) + +def demo_publish(): + msg = String() + msg.data = "Streamlit Button!" 
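+    # String is just the simplest demo payload; any std_msgs type would work here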
+ st.session_state["pub_node"].button_pub.publish(msg) + st.success("Message Published!") + + + + # See ../../ros2/src/lanefollowingtest/LaneDetection.py -# This page should render ALL images in there, as well as publish +# This page should render ALL images in there, as well as publish important update data if __name__ == "__main__": + #How we Configure the page st.set_page_config( page_title="Lane Detection", page_icon="🛣") @@ -58,20 +72,30 @@ def hsvData_callback(self, msg_in): "This should render all of the images related to Lane Detection, and relevant parameters.") tabs = st.tabs(["Original", "HSV", "Sliding Windows"]) - if "node" not in st.session_state: - try: - rclpy.init() - except RuntimeError: - st.warning("something went wrong performance may be degraded. Try restarting fully") - finally: - handler = Image_Handler() - handler.tabs(tabs) - st.session_state["node"] = handler + + #This hunk initializes the ROS2 nodes without breaking anything :) + if "sub_node" not in st.session_state: + try: + rclpy.init() + except RuntimeError: + st.warning( + "something went wrong performance may be degraded. Try restarting fully.") + finally: + handler = Image_Handler() + handler.tabs(tabs) + st.session_state["sub_node"] = handler + if "pub_node" not in st.session_state: + st.session_state["pub_node"] = Publisher() + + + st.button("Ros Topic Publisher Demo!",on_click=demo_publish) + # As this is an infinite loop, it should only be run at the end of the file. if render: - while(True): - try: - rclpy.spin_once(st.session_state['node']) - time.sleep(0.01) - except: - st.warning("Something went wrong, perhaps tabs were clicked too quickly? Try restarting.") - break + while (True): + try: + rclpy.spin_once(st.session_state['sub_node']) + time.sleep(0.01) + except: + st.warning( + "Something went wrong, perhaps tabs were clicked too quickly? Try restarting.") + break From ba26d430aa13aa7e01f741ace6c78aa56c74fa3b Mon Sep 17 00:00:00 2001 From: "Vaibhav.hariani@gmail.com" Date: Fri, 12 Apr 2024 12:22:37 -0400 Subject: [PATCH 08/20] Added changes to Homepage & Lane_Detection: It Works! --- "ros2/UI/ \360\237\232\227 Homepage.py" | 5 +++- .../pages/\360\237\233\243 Lane_Detection.py" | 30 +++++++++---------- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git "a/ros2/UI/ \360\237\232\227 Homepage.py" "b/ros2/UI/ \360\237\232\227 Homepage.py" index 27668693f..3e463a50c 100644 --- "a/ros2/UI/ \360\237\232\227 Homepage.py" +++ "b/ros2/UI/ \360\237\232\227 Homepage.py" @@ -36,7 +36,10 @@ def runner(): I'm detailing all immediate tasks here: Please allocate amongst yourselves. I have taken the liberty of building a bespoke ros2 publisher and subscriber system (was not trivial), that should provide a solid framework. However, it will get extremely tedious to build a proper UI if you don't exploit some of the advantages of python. - Read about list comprehension and get good. + Read about list comprehension and get good. ''') + st.divider() + st.header("Tasks") + st.markdown( ''' #1: Connecting all datastreams from the lane_detection algorithm into here. Everything that is cv.imshow() is rendered in that, in a clean and effective manner. #2: Parametrizing all constants in that: anything that had a slider should be here, and it should be published. 
diff --git "a/ros2/UI/pages/\360\237\233\243 Lane_Detection.py" "b/ros2/UI/pages/\360\237\233\243 Lane_Detection.py" index 0f0191907..4aca1aa9d 100644 --- "a/ros2/UI/pages/\360\237\233\243 Lane_Detection.py" +++ "b/ros2/UI/pages/\360\237\233\243 Lane_Detection.py" @@ -16,31 +16,30 @@ def __init__(self): self._frame_containers = None super().__init__('Streamlit_Image_Handler') self._bridge = CvBridge() + self.imgData_subscriber = self.create_subscription( Image, '/camera/image', - self.camData_callback, + lambda msg: self.camData_callback(msg,(self._frame_containers[0][0], self._frame_containers[0][1])), qos_profile_sensor_data) + self.hsvData_subscriber = self.create_subscription( Image, '/camera/image_hsv', - self.hsvData_callback, + lambda msg: self.camData_callback(msg,(self._frame_containers[1][0], self._frame_containers[1][1])), qos_profile_sensor_data) def tabs(self, tabs): self._tabs = tabs - # SRC is cv2.VideoCapture FOR NOW: Will need to be tweaked in the future to rossify - self._frame_containers = [tab.empty() for tab in self._tabs] + self._frame_containers = [(tab.empty(),tab.empty()) for tab in self._tabs] - def camData_callback(self, msg_in): + #self._frame_containers[] is a container corresponding to one of the tabs: You can create + def camData_callback(self, msg_in,args): raw = msg_in img = self._bridge.imgmsg_to_cv2(raw) - self._frame_containers[0].image(img) + args[0].image(img) + args[1].image(img) - def hsvData_callback(self, msg_in): - raw = msg_in - img = self._bridge.imgmsg_to_cv2(raw) - self._frame_containers[1].image(img) #This class is your publisher: Flesh it out and integrate accordingly class Publisher(Node): @@ -72,8 +71,8 @@ def demo_publish(): "This should render all of the images related to Lane Detection, and relevant parameters.") tabs = st.tabs(["Original", "HSV", "Sliding Windows"]) - #This hunk initializes the ROS2 nodes without breaking anything :) + #Should not need to be tuoched if "sub_node" not in st.session_state: try: rclpy.init() @@ -87,15 +86,16 @@ def demo_publish(): if "pub_node" not in st.session_state: st.session_state["pub_node"] = Publisher() - - st.button("Ros Topic Publisher Demo!",on_click=demo_publish) - # As this is an infinite loop, it should only be run at the end of the file. +#This should also not need to be modified if render: while (True): - try: + try: rclpy.spin_once(st.session_state['sub_node']) time.sleep(0.01) except: st.warning( "Something went wrong, perhaps tabs were clicked too quickly? 
Try restarting.") break + else: + st.button("Ros Topic Publisher Demo!",on_click=demo_publish) + From 2d43bd7d40697330253f481096b86f82599dce70 Mon Sep 17 00:00:00 2001 From: Vaibhav Hariani Date: Fri, 12 Apr 2024 17:59:35 -0400 Subject: [PATCH 09/20] Fixed Lane_Detection, publishing all images from the turtlebot --- ros2/src/lanefollowingtest/LaneDetection.py | 79 ++++++++++----------- 1 file changed, 38 insertions(+), 41 deletions(-) diff --git a/ros2/src/lanefollowingtest/LaneDetection.py b/ros2/src/lanefollowingtest/LaneDetection.py index ece87be09..a1c73ece8 100644 --- a/ros2/src/lanefollowingtest/LaneDetection.py +++ b/ros2/src/lanefollowingtest/LaneDetection.py @@ -1,22 +1,22 @@ # from irobot_create_msgs.msg import WheelTicks, WheelTicks from std_msgs.msg import Float64 +from sensor_msgs.msg import Image import rclpy from rclpy.node import Node -from rclpy.qos import qos_profile_sensor_data -from sensor_msgs.msg import Image from cv_bridge import CvBridge import cv2 import numpy as np -import LaneDetection as LD + # Inputs from both cameras vidcap_left = cv2.VideoCapture("/dev/video0") -vidcap_left.set(3, 640) -vidcap_left.set(4, 480) -vidcap_right = cv2.VideoCapture("/dev/video4") -vidcap_right.set(3, 640) -vidcap_right.set(4, 480) +vidcap_left.set(3,640) +vidcap_left.set(4,480) +vidcap_right = cv2.VideoCapture("/dev/video2") +vidcap_right.set(3,640) +vidcap_right.set(4,480) + # These are constants nwindows = 9 @@ -31,9 +31,9 @@ def nothing(x): pass - class Individual_Follower(): + def __init__(self): self._fit = None self._binary_warped = None @@ -123,7 +123,7 @@ def Plot_Line(self, smoothen=False, prevFrameCount=6): return result -# These are constants for the timer callback: Should be modified by UI +#These are constants for the timer callback: Should be modified by UI l_h = 0 l_s = 0 l_v = 200 @@ -135,7 +135,7 @@ def Plot_Line(self, smoothen=False, prevFrameCount=6): lower = np.array([l_h, l_s, l_v]) upper = np.array([u_h, u_s, u_v]) -# Coordinates for the 4 alignment points: again, should be handled by the UI +#Coordinates for the 4 alignment points: again, should be handled by the UI bl = (12, 355) tl = (66, 304) br = (635, 344) @@ -147,13 +147,10 @@ def Plot_Line(self, smoothen=False, prevFrameCount=6): matrix = cv2.getPerspectiveTransform(pts1, pts2) - - class Lane_Follower(Node): GUI = True def __init__(self): - super().__init__('lane_detection_node') self._tolerance = 0 self._left_follower = Individual_Follower() @@ -165,42 +162,42 @@ def __init__(self): # Publisher for error from the lane lines relative to the camera FOV self.camData_publisher = self.create_publisher( Float64, '/cam_data', 10) - - #GUI Controller Initializer - #/raw_left, /raw_right, /tf_left, /tf_right, /sliding_left, /sliding_right if Lane_Follower.GUI: self._bridge = CvBridge() image_labels = ("raw_left", "raw_right", "tf_left", "tf_right", "sliding_left", "sliding_right") self._publishers = {label: self.create_publisher(Image, "/" + label, 10) for label in image_labels} + def img_publish(self, label, img_raw): - self._publishers[label].publish(self._bridge.cv2_to_imgmsg(img_raw,encoding="bgr8")) + if(self.GUI): + self._publishers[label].publish(self._bridge.cv2_to_imgmsg(img_raw,encoding="passthrough")) +# cv2.imshow(label, img_raw) + - def measure_position_meters(self, left, right): + def measure_position_meters(self,left,right): left_x_pos = 0 right_x_pos = 0 + # Will be the same for left side & right side y_max = self._left_follower._binary_warped.shape[0] - + # Calculate left and right line 
positions at the bottom of the image - if left is not None: + if left is not None: left_fit = self._left_follower._fit left_x_pos = left_fit[0]*y_max**2 + \ left_fit[1]*y_max + left_fit[2] + self.img_publish("sliding_left",left) - if(Lane_Follower.GUI): - self.img_publish("sliding_left", left) - cv2.imshow("Result Left", left) if right is not None: right_fit = self._right_follower._fit right_x_pos = right_fit[0]*y_max**2 + \ right_fit[1]*y_max + right_fit[2] - if(Lane_Follower.GUI): - self.img_publish("sliding_right", right) - cv2.imshow("Result Right", right) + self.img_publish("sliding_right",right) + + center_lanes_x_pos = (left_x_pos + right_x_pos)//2 # Calculate the deviation between the center of the lane and the center of the picture @@ -209,25 +206,27 @@ def measure_position_meters(self, left, right): veh_pos = ( (self._left_follower._binary_warped.shape[1]//2) - center_lanes_x_pos) * xm_per_pix - if (left is None): + if(left is None): veh_pos += 91 elif (right is None): veh_pos -= 91 return veh_pos / 100 + def timer_callback(self): success_l, image_l = vidcap_left.read() success_r, image_r = vidcap_right.read() images = [(image_l, "left"), (image_r, "right")] - if not (success_l and success_r): + if not(success_l and success_r): return for image in images: frame = image[0] # frame = cv2.resize(image[0], (640, 480)) # I might've cooked with this list comprehension - for point in (bl, tl, br, tr): - frame = cv2.circle(frame, point, 5, (0, 0, 255), -1) + for point in (bl,tl,br,tr): + frame = cv2.circle(frame,point,5,(0,0,255),-1) + transformed_frame = cv2.warpPerspective( frame, matrix, (640, 480)) # Object Detection @@ -239,18 +238,15 @@ def timer_callback(self): self._left_follower.set_binwarp(binwarp=mask) else: self._right_follower.set_binwarp(binwarp=mask) - - if(Lane_Follower.GUI): - self.img_publish("raw_" +image[1],frame) - self.img_publish("tf_" + image[1],transformed_frame) - cv2.imshow("Original " + image[1], frame) - cv2.imshow("Bird's Eye View " + image[1], transformed_frame) + + self.img_publish("raw_"+ image[1], frame) + self.img_publish("tf_" + image[1], transformed_frame) result_left = self._left_follower.Plot_Line() result_right = self._right_follower.Plot_Line() msg_out = Float64() - # TODO: Is this the behavior we want? Or do we need it to do something else if one of the lines is invalid? + #TODO: Is this the behavior we want? Or do we need it to do something else if one of the lines is invalid? if (result_left is not None or result_right is not None): pos = self.measure_position_meters(result_left, result_right) print(pos) @@ -259,13 +255,14 @@ def timer_callback(self): else: TOLERANCE = 100 self._tolerance += 1 - if (self._tolerance > TOLERANCE): + if(self._tolerance > TOLERANCE): msg_out.data = 1000.0 self.camData_publisher.publish(msg_out) + if cv2.waitKey(10) == 27: return - + def main(args=None): rclpy.init(args=args) # Initialize ROS2 program @@ -277,4 +274,4 @@ def main(args=None): if __name__ == '__main__': - main() + main() \ No newline at end of file From b201bb87d26ca76d7d90981f5466e538ad1b6a29 Mon Sep 17 00:00:00 2001 From: Vaibhav Hariani <62775035+Vaibhav-Hariani@users.noreply.github.com> Date: Mon, 15 Apr 2024 02:32:52 -0400 Subject: [PATCH 10/20] =?UTF-8?q?Rename=20=20=F0=9F=9A=97=20Homepage.py=20?= =?UTF-8?q?to=20Homepage.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixed it... 
--- "ros2/UI/ \360\237\232\227 Homepage.py" => ros2/UI/Homepage.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename "ros2/UI/ \360\237\232\227 Homepage.py" => ros2/UI/Homepage.py (100%) diff --git "a/ros2/UI/ \360\237\232\227 Homepage.py" b/ros2/UI/Homepage.py similarity index 100% rename from "ros2/UI/ \360\237\232\227 Homepage.py" rename to ros2/UI/Homepage.py From 8eb6918aa4fb9a2956e5e207a13797de3f81debc Mon Sep 17 00:00:00 2001 From: Vaibhav Hariani <62775035+Vaibhav-Hariani@users.noreply.github.com> Date: Mon, 15 Apr 2024 02:33:46 -0400 Subject: [PATCH 11/20] =?UTF-8?q?Rename=20=F0=9F=9B=91=20Object=5FDetectio?= =?UTF-8?q?n.py=20to=20Object=5FDetection.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Removing Emojis for Terminal Users. --- .../UI/pages/Object_Detection.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename "ros2/UI/pages/\360\237\233\221 Object_Detection.py" => ros2/UI/pages/Object_Detection.py (100%) diff --git "a/ros2/UI/pages/\360\237\233\221 Object_Detection.py" b/ros2/UI/pages/Object_Detection.py similarity index 100% rename from "ros2/UI/pages/\360\237\233\221 Object_Detection.py" rename to ros2/UI/pages/Object_Detection.py From 075d74f08745708e445c08aae38338f2540e4515 Mon Sep 17 00:00:00 2001 From: Vaibhav Hariani <62775035+Vaibhav-Hariani@users.noreply.github.com> Date: Mon, 15 Apr 2024 02:34:15 -0400 Subject: [PATCH 12/20] =?UTF-8?q?Rename=20=F0=9F=9B=A3=20Lane=5FDetection.?= =?UTF-8?q?py=20to=20Lane=5FDetection.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Removed Emojis for terminal users. --- .../UI/pages/Lane_Detection.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename "ros2/UI/pages/\360\237\233\243 Lane_Detection.py" => ros2/UI/pages/Lane_Detection.py (100%) diff --git "a/ros2/UI/pages/\360\237\233\243 Lane_Detection.py" b/ros2/UI/pages/Lane_Detection.py similarity index 100% rename from "ros2/UI/pages/\360\237\233\243 Lane_Detection.py" rename to ros2/UI/pages/Lane_Detection.py From 233047ddc5f421b7ea4cc80e6be743f474ebf560 Mon Sep 17 00:00:00 2001 From: "Vaibhav.hariani@gmail.com" Date: Tue, 16 Apr 2024 11:13:57 -0400 Subject: [PATCH 13/20] Blind attempt of Lane Determination: Will need to characterize, tune, and test (ESPECIALLY AROUND TURNS). 
--- ros2/src/lanefollowingtest/LaneDetection.py | 88 +++++++++++++-------- 1 file changed, 53 insertions(+), 35 deletions(-) diff --git a/ros2/src/lanefollowingtest/LaneDetection.py b/ros2/src/lanefollowingtest/LaneDetection.py index a1c73ece8..6931ecd88 100644 --- a/ros2/src/lanefollowingtest/LaneDetection.py +++ b/ros2/src/lanefollowingtest/LaneDetection.py @@ -11,11 +11,11 @@ # Inputs from both cameras vidcap_left = cv2.VideoCapture("/dev/video0") -vidcap_left.set(3,640) -vidcap_left.set(4,480) +vidcap_left.set(3, 640) +vidcap_left.set(4, 480) vidcap_right = cv2.VideoCapture("/dev/video2") -vidcap_right.set(3,640) -vidcap_right.set(4,480) +vidcap_right.set(3, 640) +vidcap_right.set(4, 480) # These are constants @@ -31,8 +31,8 @@ def nothing(x): pass -class Individual_Follower(): +class Individual_Follower(): def __init__(self): self._fit = None @@ -123,7 +123,7 @@ def Plot_Line(self, smoothen=False, prevFrameCount=6): return result -#These are constants for the timer callback: Should be modified by UI +# These are constants for the timer callback: Should be modified by UI l_h = 0 l_s = 0 l_v = 200 @@ -135,7 +135,7 @@ def Plot_Line(self, smoothen=False, prevFrameCount=6): lower = np.array([l_h, l_s, l_v]) upper = np.array([u_h, u_s, u_v]) -#Coordinates for the 4 alignment points: again, should be handled by the UI +# Coordinates for the 4 alignment points: again, should be handled by the UI bl = (12, 355) tl = (66, 304) br = (635, 344) @@ -155,6 +155,9 @@ def __init__(self): self._tolerance = 0 self._left_follower = Individual_Follower() self._right_follower = Individual_Follower() + # Determine which lane we're in: Left lane means the right image is dashed + self._Left_Lane = False + timer_period = 0.01 # seconds self.timer = self.create_timer(timer_period, self.timer_callback) @@ -164,40 +167,36 @@ def __init__(self): Float64, '/cam_data', 10) if Lane_Follower.GUI: self._bridge = CvBridge() - image_labels = ("raw_left", "raw_right", "tf_left", "tf_right", "sliding_left", "sliding_right") - self._publishers = {label: self.create_publisher(Image, "/" + label, 10) for label in image_labels} - + image_labels = ("raw_left", "raw_right", "tf_left", + "tf_right", "sliding_left", "sliding_right") + self._publishers = {label: self.create_publisher( + Image, "/" + label, 10) for label in image_labels} def img_publish(self, label, img_raw): - if(self.GUI): - self._publishers[label].publish(self._bridge.cv2_to_imgmsg(img_raw,encoding="passthrough")) + if (self.GUI): + self._publishers[label].publish( + self._bridge.cv2_to_imgmsg(img_raw, encoding="passthrough")) # cv2.imshow(label, img_raw) - - - def measure_position_meters(self,left,right): + def measure_position_meters(self, left, right): left_x_pos = 0 right_x_pos = 0 - # Will be the same for left side & right side y_max = self._left_follower._binary_warped.shape[0] - + # Calculate left and right line positions at the bottom of the image - if left is not None: + if left is not None: left_fit = self._left_follower._fit left_x_pos = left_fit[0]*y_max**2 + \ left_fit[1]*y_max + left_fit[2] - self.img_publish("sliding_left",left) - + self.img_publish("sliding_left", left) if right is not None: right_fit = self._right_follower._fit right_x_pos = right_fit[0]*y_max**2 + \ right_fit[1]*y_max + right_fit[2] - self.img_publish("sliding_right",right) - - + self.img_publish("sliding_right", right) center_lanes_x_pos = (left_x_pos + right_x_pos)//2 # Calculate the deviation between the center of the lane and the center of the picture @@ -206,26 +205,42 @@ 
def measure_position_meters(self,left,right):
         veh_pos = (
             (self._left_follower._binary_warped.shape[1]//2) - center_lanes_x_pos) * xm_per_pix
-        if(left is None):
+        if (left is None):
             veh_pos += 91
         elif (right is None):
             veh_pos -= 91
         return veh_pos / 100
 
+    def determine_lane_size(self, img):
+        # Taking in both warped images, determine which lane line is the longer one, and ergo the "solid line"
+        # This may struggle on turns, but might work depending: Will need to characterize
+        gray = cv2.split(img)[3]
+        # TODO, parametrize all constants here for tweaking in the UI
+        edges = cv2.Canny(gray, 50, 150)
+        lines = cv2.HoughLinesP(edges, 1, np.pi/180, 100,
+                                minLineLength=100, MaxLineGap=5)
+        m_length = 0
+        for line in lines:
+            x1, y1, x2, y2 = line[0]
+            length = (x1 - x2) ** 2 + (y1-y2) ** 2
+            m_length = max(m_length, length)
+        return m_length
 
     def timer_callback(self):
         success_l, image_l = vidcap_left.read()
         success_r, image_r = vidcap_right.read()
         images = [(image_l, "left"), (image_r, "right")]
-        if not(success_l and success_r):
+        left_buffer = -1
+        right_buffer = -1
+        if not (success_l and success_r):
             return
         for image in images:
             frame = image[0]
             # frame = cv2.resize(image[0], (640, 480))
             # I might've cooked with this list comprehension
-            for point in (bl,tl,br,tr):
-                frame = cv2.circle(frame,point,5,(0,0,255),-1)
+            for point in (bl, tl, br, tr):
+                frame = cv2.circle(frame, point, 5, (0, 0, 255), -1)
 
             transformed_frame = cv2.warpPerspective(
                 frame, matrix, (640, 480))
@@ -236,33 +251,36 @@ def timer_callback(self):
             mask = cv2.inRange(hsv_transformed_frame, lower, upper)
             if image[1] == "left":
                 self._left_follower.set_binwarp(binwarp=mask)
+                left_buffer = self.determine_lane_size(mask)
             else:
                 self._right_follower.set_binwarp(binwarp=mask)
-
-            self.img_publish("raw_"+ image[1], frame)
+                right_buffer = self.determine_lane_size(mask)
+            self.img_publish("raw_" + image[1], frame)
             self.img_publish("tf_" + image[1], transformed_frame)
 
         result_left = self._left_follower.Plot_Line()
         result_right = self._right_follower.Plot_Line()
         msg_out = Float64()
-        #TODO: Is this the behavior we want? Or do we need it to do something else if one of the lines is invalid?
+        # TODO: Is this the behavior we want? Or do we need it to do something else if one of the lines is invalid?
         if (result_left is not None or result_right is not None):
             pos = self.measure_position_meters(result_left, result_right)
             print(pos)
             msg_out.data = pos
             self.camData_publisher.publish(msg_out)
+            if(left_buffer > right_buffer and left_buffer > 0):
+                self._Left_Lane = True
+            elif (right_buffer > 0):
+                self._Left_Lane = False
         else:
             TOLERANCE = 100
             self._tolerance += 1
-            if(self._tolerance > TOLERANCE):
+            if (self._tolerance > TOLERANCE):
                 msg_out.data = 1000.0
                 self.camData_publisher.publish(msg_out)
-
         if cv2.waitKey(10) == 27:
             return
-
+
 
 def main(args=None):
     rclpy.init(args=args)  # Initialize ROS2 program
@@ -274,4 +292,4 @@ def main(args=None):
 
 
 if __name__ == '__main__':
-    main()
\ No newline at end of file
+    main()

From 7f8aeecad45a1bba3d1489e40d97ba7109da44c4 Mon Sep 17 00:00:00 2001
From: Vaibhav Hariani
Date: Wed, 17 Apr 2024 20:03:07 -0400
Subject: [PATCH 14/20] Finalized UI publisher (For some reason it was not committed before), and tested the lane determination system (It works!)
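
One structural note before the diff: the six create_subscription calls below are copy-paste near-duplicates, and it shows (sliding_subscriber_1 is assigned twice, so the second assignment quietly overwrites the attribute; the subscriptions themselves stay alive only because the node keeps its own references). Zipping the topics with their containers and binding through functools.partial is a tighter shape -- a rough sketch under those assumptions, with names of my own choosing:

    from functools import partial

    from cv_bridge import CvBridge
    from rclpy.node import Node
    from rclpy.qos import qos_profile_sensor_data
    from sensor_msgs.msg import Image

    class ImageGridHandler(Node):
        # One subscription per image topic, each bound to its own Streamlit container
        def __init__(self, topics, containers):
            super().__init__('streamlit_image_grid')
            self._bridge = CvBridge()
            # partial() freezes each container at creation time; a bare lambda here
            # would late-bind the comprehension variable and leave every callback
            # rendering into the last container
            self._subs = [
                self.create_subscription(
                    Image, topic,
                    partial(self.camData_callback, container=container),
                    qos_profile_sensor_data)
                for topic, container in zip(topics, containers)
            ]

        def camData_callback(self, msg, container):
            container.image(self._bridge.imgmsg_to_cv2(msg))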
--- ros2/UI/pages/Lane_Detection.py | 112 +++++++++++++------- ros2/UI/pages/Object_Detection.py | 2 +- ros2/src/lanefollowingtest/LaneDetection.py | 34 +++--- 3 files changed, 96 insertions(+), 52 deletions(-) diff --git a/ros2/UI/pages/Lane_Detection.py b/ros2/UI/pages/Lane_Detection.py index 4aca1aa9d..03cc379b0 100644 --- a/ros2/UI/pages/Lane_Detection.py +++ b/ros2/UI/pages/Lane_Detection.py @@ -8,59 +8,94 @@ import time from std_msgs.msg import String -## This class currently subscribes to some topics in cam_publisher, which I used to test. Please rewrite for lane Detection. +# This class currently subscribes to some topics in cam_publisher, which I used to test. Please rewrite for lane Detection. + + class Image_Handler(Node): def __init__(self): self._tabs = None self._img = None - self._frame_containers = None + self._columns = [] + self._frame_containers = [] super().__init__('Streamlit_Image_Handler') self._bridge = CvBridge() - self.imgData_subscriber = self.create_subscription( + self.lane_state_pub = self.create_subscription( + String, "lane_state", self.lane_state_callback, qos_profile_sensor_data) + + self.imgData_subscriber_1 = self.create_subscription( Image, - '/camera/image', - lambda msg: self.camData_callback(msg,(self._frame_containers[0][0], self._frame_containers[0][1])), + 'raw_left', + lambda msg: self.camData_callback( + msg, self._frame_containers[0][0]), qos_profile_sensor_data) - - self.hsvData_subscriber = self.create_subscription( + self.imgData_subscriber_2 = self.create_subscription( Image, - '/camera/image_hsv', - lambda msg: self.camData_callback(msg,(self._frame_containers[1][0], self._frame_containers[1][1])), + 'raw_right', + lambda msg: self.camData_callback( + msg, self._frame_containers[0][1]), + qos_profile_sensor_data) + self.transformed_subscriber_1 = self.create_subscription( + Image, + 'tf_left', + lambda msg: self.camData_callback( + msg, self._frame_containers[1][0]), + qos_profile_sensor_data) + self.transformed_subscriber_2 = self.create_subscription( + Image, + 'tf_right', + lambda msg: self.camData_callback( + msg, self._frame_containers[1][1]), + qos_profile_sensor_data) + self.sliding_subscriber_1 = self.create_subscription( + Image, + 'sliding_left', + lambda msg: self.camData_callback( + msg, self._frame_containers[2][0]), + qos_profile_sensor_data) + self.sliding_subscriber_1 = self.create_subscription( + Image, + 'sliding_right', + lambda msg: self.camData_callback( + msg, self._frame_containers[2][1]), qos_profile_sensor_data) def tabs(self, tabs): self._tabs = tabs - self._frame_containers = [(tab.empty(),tab.empty()) for tab in self._tabs] + self._columns = [(tab.columns(2)) for tab in self._tabs] + for column in self._columns: + tabs = (column[0].empty(), column[1].empty()) + self._frame_containers.append(tabs) - #self._frame_containers[] is a container corresponding to one of the tabs: You can create - def camData_callback(self, msg_in,args): + # self._frame_containers[] is a container corresponding to one of the tabs: You can create + def camData_callback(self, msg_in, args): raw = msg_in img = self._bridge.imgmsg_to_cv2(raw) - args[0].image(img) - args[1].image(img) + with args: + st.image(img) + + def lane_state_callback(self, msg): + st.write(msg.data) -#This class is your publisher: Flesh it out and integrate accordingly +# This class is your publisher: Flesh it out and integrate accordingly class Publisher(Node): def __init__(self): - super().__init__('Streamlit_Button_Publisher') - self.logger = self.get_logger() - 
self.button_pub = self.create_publisher(String, '/streamlit/button',10) + super().__init__('Streamlit_Button_Publisher') + self.logger = self.get_logger() + self.button_pub = self.create_publisher( + String, '/streamlit/button', 10) + def demo_publish(): msg = String() msg.data = "Streamlit Button!" - st.session_state["pub_node"].button_pub.publish(msg) - st.success("Message Published!") - - # See ../../ros2/src/lanefollowingtest/LaneDetection.py # This page should render ALL images in there, as well as publish important update data if __name__ == "__main__": - #How we Configure the page + # How we Configure the page st.set_page_config( page_title="Lane Detection", page_icon="🛣") @@ -69,10 +104,10 @@ def demo_publish(): render = st.checkbox("Render Video Feed") st.write( "This should render all of the images related to Lane Detection, and relevant parameters.") - tabs = st.tabs(["Original", "HSV", "Sliding Windows"]) + tabs = st.tabs(["Original", "Transformed", "Sliding Windows"]) - #This hunk initializes the ROS2 nodes without breaking anything :) - #Should not need to be tuoched + # This hunk initializes the ROS2 nodes without breaking anything :) + # Should not need to be tuoched if "sub_node" not in st.session_state: try: rclpy.init() @@ -83,19 +118,22 @@ def demo_publish(): handler = Image_Handler() handler.tabs(tabs) st.session_state["sub_node"] = handler - if "pub_node" not in st.session_state: - st.session_state["pub_node"] = Publisher() + + if "pub_node" not in st.session_state: + st.session_state["pub_node"] = Publisher() + + lane_state = st.empty() -#This should also not need to be modified +# This should also not need to be modified if render: while (True): - try: + # try: + with lane_state: rclpy.spin_once(st.session_state['sub_node']) - time.sleep(0.01) - except: - st.warning( - "Something went wrong, perhaps tabs were clicked too quickly? Try restarting.") - break + # time.sleep(0.01) + # except: + # st.warning( + # "Something went wrong, perhaps tabs were clicked too quickly? 
Try restarting.")
+        #     break
     else:
         st.button("Ros Topic Publisher Demo!", on_click=demo_publish)

diff --git a/ros2/UI/pages/Object_Detection.py b/ros2/UI/pages/Object_Detection.py
index 4368c6bd5..919c0b9c7 100644
--- a/ros2/UI/pages/Object_Detection.py
+++ b/ros2/UI/pages/Object_Detection.py
@@ -10,6 +10,6 @@ def image_source_renderer(image):
 if __name__ == "__main__":
     st.set_page_config(
-        page_title="Lane Detection",
+        page_title="Object Detection",
         page_icon="🛑")
 
diff --git a/ros2/src/lanefollowingtest/LaneDetection.py b/ros2/src/lanefollowingtest/LaneDetection.py
index 6931ecd88..c86299d0b 100644
--- a/ros2/src/lanefollowingtest/LaneDetection.py
+++ b/ros2/src/lanefollowingtest/LaneDetection.py
@@ -7,13 +7,13 @@
 from cv_bridge import CvBridge
 import cv2
 import numpy as np
-
+from std_msgs.msg import String
 
 # Inputs from both cameras
-vidcap_left = cv2.VideoCapture("/dev/video0")
+vidcap_left = cv2.VideoCapture("/dev/video3")
 vidcap_left.set(3, 640)
 vidcap_left.set(4, 480)
-vidcap_right = cv2.VideoCapture("/dev/video2")
+vidcap_right = cv2.VideoCapture("/dev/video1")
 vidcap_right.set(3, 640)
 vidcap_right.set(4, 480)
@@ -165,6 +165,7 @@ def __init__(self):
         # Publisher for error from the lane lines relative to the camera FOV
         self.camData_publisher = self.create_publisher(
             Float64, '/cam_data', 10)
+        self.lane_pubs = self.create_publisher(String, "lane_state",10)
         if Lane_Follower.GUI:
             self._bridge = CvBridge()
             image_labels = ("raw_left", "raw_right", "tf_left",
@@ -174,9 +175,9 @@ def __init__(self):
     def img_publish(self, label, img_raw):
         if (self.GUI):
+            new_img = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB)
             self._publishers[label].publish(
-                self._bridge.cv2_to_imgmsg(img_raw, encoding="passthrough"))
+                self._bridge.cv2_to_imgmsg(new_img, encoding="passthrough"))
 
     def measure_position_meters(self, left, right):
         left_x_pos = 0
@@ -214,16 +215,16 @@ def measure_position_meters(self, left, right):
     def determine_lane_size(self, img):
         # Taking in both warped images, determine which lane line is the longer one, and ergo the "solid line"
         # This may struggle on turns, but might work depending: Will need to characterize
-        gray = cv2.split(img)[3]
         # TODO, parametrize all constants here for tweaking in the UI
-        edges = cv2.Canny(gray, 50, 150)
+        edges = cv2.Canny(img, 50, 150)
         lines = cv2.HoughLinesP(edges, 1, np.pi/180, 100,
-                                minLineLength=100, MaxLineGap=5)
+                                minLineLength=100, maxLineGap=5)
         m_length = 0
-        for line in lines:
-            x1, y1, x2, y2 = line[0]
-            length = (x1 - x2) ** 2 + (y1-y2) ** 2
-            m_length = max(m_length, length)
+        if(lines is not None):
+            for line in lines:
+                x1, y1, x2, y2 = line[0]
+                length = (x1 - x2) ** 2 + (y1-y2) ** 2
+                m_length = max(m_length, length)
         return m_length
@@ -267,9 +268,14 @@ def timer_callback(self):
             print(pos)
             msg_out.data = pos
             self.camData_publisher.publish(msg_out)
-            if(left_buffer > right_buffer and left_buffer > 0):
+            msg = String()
+            if(left_buffer > right_buffer):
+                msg.data = "In Left lane"
+                self.lane_pubs.publish(msg)
                 self._Left_Lane = True
-            elif (right_buffer > 0):
+            elif (right_buffer > left_buffer):
+                msg.data = "In Right lane"
+                self.lane_pubs.publish(msg)
                 self._Left_Lane = False
         else:
             TOLERANCE = 100

From 0fe35db2cfa2936bed83f7838a50925248a568ee Mon Sep 17 00:00:00 2001
From: Vaibhav Hariani
Date: Wed, 17 Apr 2024 22:32:06 -0400
Subject: [PATCH 15/20] Finished up the UI side of things for the most part: Need to add custom ROS2
message that can be subscribed to in LaneDetection.py --- ros2/UI/pages/Lane_Detection.py | 253 +++++++++++++------- ros2/src/lanefollowingtest/LaneDetection.py | 3 +- 2 files changed, 166 insertions(+), 90 deletions(-) diff --git a/ros2/UI/pages/Lane_Detection.py b/ros2/UI/pages/Lane_Detection.py index 03cc379b0..d8aa77678 100644 --- a/ros2/UI/pages/Lane_Detection.py +++ b/ros2/UI/pages/Lane_Detection.py @@ -7,71 +7,42 @@ from cv_bridge import CvBridge import time from std_msgs.msg import String - +import numpy as np # This class currently subscribes to some topics in cam_publisher, which I used to test. Please rewrite for lane Detection. class Image_Handler(Node): - def __init__(self): - self._tabs = None - self._img = None - self._columns = [] - self._frame_containers = [] + + def __init__(self, containers): + self.reference_img = None + self.topics = ["/raw_left", "/raw_right", "/tf_left", + "/tf_right", "/sliding_left", "/sliding_right"] super().__init__('Streamlit_Image_Handler') self._bridge = CvBridge() - self.lane_state_pub = self.create_subscription( String, "lane_state", self.lane_state_callback, qos_profile_sensor_data) - - self.imgData_subscriber_1 = self.create_subscription( - Image, - 'raw_left', - lambda msg: self.camData_callback( - msg, self._frame_containers[0][0]), - qos_profile_sensor_data) - self.imgData_subscriber_2 = self.create_subscription( - Image, - 'raw_right', - lambda msg: self.camData_callback( - msg, self._frame_containers[0][1]), - qos_profile_sensor_data) - self.transformed_subscriber_1 = self.create_subscription( - Image, - 'tf_left', - lambda msg: self.camData_callback( - msg, self._frame_containers[1][0]), - qos_profile_sensor_data) - self.transformed_subscriber_2 = self.create_subscription( - Image, - 'tf_right', - lambda msg: self.camData_callback( - msg, self._frame_containers[1][1]), - qos_profile_sensor_data) - self.sliding_subscriber_1 = self.create_subscription( - Image, - 'sliding_left', - lambda msg: self.camData_callback( - msg, self._frame_containers[2][0]), - qos_profile_sensor_data) - self.sliding_subscriber_1 = self.create_subscription( - Image, - 'sliding_right', - lambda msg: self.camData_callback( - msg, self._frame_containers[2][1]), - qos_profile_sensor_data) - - def tabs(self, tabs): - self._tabs = tabs - self._columns = [(tab.columns(2)) for tab in self._tabs] - for column in self._columns: - tabs = (column[0].empty(), column[1].empty()) - self._frame_containers.append(tabs) + self._left_subscriber = self.create_subscription( + Image, self.topics[0], lambda msg: self.camData_callback(msg, containers[0], img_to_repl="THIS"), qos_profile_sensor_data) + self._right_subscriber = self.create_subscription( + Image, self.topics[1], lambda msg: self.camData_callback(msg, containers[1]), qos_profile_sensor_data) + self._tf_left_subscriber = self.create_subscription( + Image, self.topics[2], lambda msg: self.camData_callback(msg, containers[2]), qos_profile_sensor_data) + self._tf_right_subscriber = self.create_subscription( + Image, self.topics[3], lambda msg: self.camData_callback(msg, containers[3]), qos_profile_sensor_data) + self._sliding_left_subscriber = self.create_subscription( + Image, self.topics[4], lambda msg: self.camData_callback(msg, containers[4]), qos_profile_sensor_data) + self._sliding_right_subscriber = self.create_subscription( + Image, self.topics[5], lambda msg: self.camData_callback(msg, containers[5]), qos_profile_sensor_data) # self._frame_containers[] is a container corresponding to one of the tabs: You can create 
- def camData_callback(self, msg_in, args): + + def camData_callback(self, msg_in, img_location, img_to_repl=None): raw = msg_in - img = self._bridge.imgmsg_to_cv2(raw) - with args: + img = self._bridge.imgmsg_to_cv2(raw, desired_encoding="passthrough") + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + if (img_to_repl is not None): + self.reference_img = img + with img_location: st.image(img) def lane_state_callback(self, msg): @@ -87,9 +58,145 @@ def __init__(self): String, '/streamlit/button', 10) +def render_handler(): + tabs = st.tabs( + ["Original", "Birds Eye View", "Sliding Windows"]) + containers = [] + frame_containers = [] + for tab in tabs: + cols = tab.columns(2) + containers.append(cols[0]) + containers.append(cols[1]) + with cols[0]: + frame_containers.append(st.empty()) + with cols[1]: + frame_containers.append(st.empty()) + lane_state = st.empty() + # This hunk initializes the ROS2 nodes without breaking anything :) + # Should not need to be tuoched + if "sub_node" not in st.session_state and 'pub_node' not in st.session_state: + try: + rclpy.init() + except: + st.warning("Something Wrong With the Generator, try restarting?") + elif "sub_node" not in st.session_state: + handler = Image_Handler(frame_containers) + st.session_state["sub_node"] = handler + while (True): + with lane_state: + try: + rclpy.spin_once(st.session_state['sub_node']) + time.sleep(.01) + except: + st.warning( + "Something went wrong, perhaps tabs were clicked too quickly? Try restarting.") + break + + def demo_publish(): msg = String() msg.data = "Streamlit Button!" + st.session_state["pub_node"].button_pub.publish(msg) + st.success("Message Published!", icon="✅") + + +def get_from_prior(labels, defaults): + defaults = np.resize(defaults, len(labels)) + for i in range(len(labels)): + if labels[i] not in st.session_state: + st.session_state[labels[i]] = defaults[i] + else: + defaults[i] = st.session_state[labels[i]] + return defaults + + +def input_gen(func, labels, lowers, uppers, vals): + lowers = np.resize(lowers, len(labels)) + uppers = np.resize(uppers, len(labels)) + func_list = [] + for i in range(len(labels)): + func_list.append(func( + labels[i], lowers[i], uppers[i], vals[i])) + return func_list + + +def input_handler(): + # handles all output + # We use get_from_prior to ensure that, even if the render checkbox was flipped, data persists + # Input gen allows me to streamline generating a lot of objects by making it super duper quick + if "pub_node" not in st.session_state and "sub_node" not in st.session_state: + try: + rclpy.init() + except: + st.warning("Something Wrong With the Generator, try restarting?") + elif "pub_node" not in st.session_state: + st.session_state["pub_node"] = Publisher() + + tabs = st.tabs(["HSV Tweakers", "Coordinates", "Misc"]) + # HSV user input + with tabs[0]: + l, h = st.columns(2) + # lower HSV slider + L_labels = ["Lower Hue", "Lower Saturation", "Lower Value"] + default_vals = [0, 0, 200] + default_vals = get_from_prior(L_labels, default_vals) + + l_h, l_s, l_v = input_gen(l.slider, L_labels, [0], [255], default_vals) + U_labels = ["Upper Hue", "Upper Saturation", "Upper Value"] + default_vals = [255, 50, 255] + default_vals = get_from_prior(U_labels, default_vals) + # upper HSV slider + u_h, u_s, u_v = input_gen(h.slider, U_labels, [0], [255], default_vals) + # Coordinate input widget + with tabs[1]: + x, y = st.columns(2) + x_labels = ["Bottom Left x", "Top Left x", + "Bottom Right x", "Top Right x"] + default_vals = [12, 66, 635, 595] + default_vals = 
get_from_prior(x_labels, default_vals) + bl_x, tl_x, br_x, tr_x = input_gen( + x.number_input, x_labels, [0], [640], default_vals) + y_labels = ["Bottom Left y", "Top Left y", + "Bottom Right y", "Top Right y"] + default_vals = [355, 304, 344, 308] + default_vals = get_from_prior(y_labels, default_vals) + bl_y, tl_y, br_y, tr_y = input_gen( + y.number_input, y_labels, [0], [480], default_vals) + + # This is souced from LaneDetection + bl = (bl_x, bl_y) + tl = (tl_x, tl_y) + br = (br_x, br_y) + tr = (tr_x, tr_y) + pts1 = np.float32([tl, bl, tr, br]) + pts2 = np.float32([[0, 0], [0, 480], [640, 0], [640, 480]]) + # Matrix to warp the image for birdseye window + + # This is what we're really looking for, and what we might want to consider returning as a custom ros message... + # https://github.com/Box-Robotics/ros2_numpy + matrix = cv2.getPerspectiveTransform(pts1, pts2) + lower = np.array([l_h, l_s, l_v]) + upper = np.array([u_h, u_s, u_v]) + + img = None + if ("sub_node" in st.session_state): + img = st.session_state["sub_node"].reference_img + + if img is not None: + cols = st.columns(3) + cols[0].image(img, "Left reference image for transformation") + transformed_left = cv2.warpPerspective(img, matrix, (640, 480)) + hsv_transformed_left = cv2.cvtColor( + transformed_left, cv2.COLOR_BGR2HSV) + mask = cv2.inRange(hsv_transformed_left, lower, upper) + cols[1].image(transformed_left, "Left Birds Eye View Preview") + cols[2].image(mask, "Left Mask Preview") + +# TODO: Make this button publish the custom ROS2 Message :) + st.button("Publish!", on_click=demo_publish) + #TODO: Make this ubtton do soemthign + st.button("RESET TO DEFAULT") + # See ../../ros2/src/lanefollowingtest/LaneDetection.py @@ -100,40 +207,10 @@ def demo_publish(): page_title="Lane Detection", page_icon="🛣") time.sleep(0.2) - - render = st.checkbox("Render Video Feed") st.write( - "This should render all of the images related to Lane Detection, and relevant parameters.") - tabs = st.tabs(["Original", "Transformed", "Sliding Windows"]) - - # This hunk initializes the ROS2 nodes without breaking anything :) - # Should not need to be tuoched - if "sub_node" not in st.session_state: - try: - rclpy.init() - except RuntimeError: - st.warning( - "something went wrong performance may be degraded. Try restarting fully.") - finally: - handler = Image_Handler() - handler.tabs(tabs) - st.session_state["sub_node"] = handler - - if "pub_node" not in st.session_state: - st.session_state["pub_node"] = Publisher() - - lane_state = st.empty() - -# This should also not need to be modified + "This page is designed to control the lane detection functionality, and allow for quick edits to the algorithm.") + render = st.checkbox("Render Video Feed") if render: - while (True): - # try: - with lane_state: - rclpy.spin_once(st.session_state['sub_node']) - # time.sleep(0.01) - # except: - # st.warning( - # "Something went wrong, perhaps tabs were clicked too quickly? 
Try restarting.") - # break + render_handler() else: - st.button("Ros Topic Publisher Demo!", on_click=demo_publish) + input_handler() diff --git a/ros2/src/lanefollowingtest/LaneDetection.py b/ros2/src/lanefollowingtest/LaneDetection.py index c86299d0b..179afe397 100644 --- a/ros2/src/lanefollowingtest/LaneDetection.py +++ b/ros2/src/lanefollowingtest/LaneDetection.py @@ -175,9 +175,8 @@ def __init__(self): def img_publish(self, label, img_raw): if (self.GUI): - new_img = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB) self._publishers[label].publish( - self._bridge.cv2_to_imgmsg(new_img, encoding="passthrough")) + self._bridge.cv2_to_imgmsg(img_raw, encoding="passthrough")) def measure_position_meters(self, left, right): left_x_pos = 0 From 0986c5a634d7e33c882e90b606f1db2c674ac1df Mon Sep 17 00:00:00 2001 From: "Vaibhav.hariani@gmail.com" Date: Thu, 18 Apr 2024 12:47:43 -0400 Subject: [PATCH 16/20] Finished up the UI barring publisher, added daniels code --- ros2/UI/Homepage.py | 13 +- ros2/UI/pages/Lane_Detection.py | 251 +++++++++++++++--------------- ros2/UI/pages/Object_Detection.py | 4 +- ros2/UI/ui_generics.py | 7 + 4 files changed, 143 insertions(+), 132 deletions(-) diff --git a/ros2/UI/Homepage.py b/ros2/UI/Homepage.py index 3e463a50c..372f33ef7 100644 --- a/ros2/UI/Homepage.py +++ b/ros2/UI/Homepage.py @@ -2,8 +2,9 @@ from streamlit import runtime from streamlit.web import cli as stcli import sys +from ui_generics import * - +@st.cache_data # def runner(): st.header("Welcome to the IGVC Homepage!") st.markdown(''' @@ -21,7 +22,7 @@ def runner(): #1. We need a way to render all of the openCV data we have: This is stuff like the lane or object detection. - #2. We need a way to tune parameters in an easy way, without refreshing every single page for small little tweaks. + #2. We need a way to tune parameters in an easy way, without refreshing every single page for small little tweaks. #3. We need to be able to control the state machines on the car: Each of the function tests is effectively it's own state machine. We need to be able to control which test we're in, and visualize any important data in a direct manner. @@ -48,17 +49,13 @@ def runner(): Debugging systems, sensor messages (E.g IMU, CAN) should all live here. ''') - st.warning("FIRST PRIORITY: https://docs.streamlit.io/develop/api-reference/widgets/st.page_link CHANGE THE PAGES SO NO MORE EMOJIS") # This allows the system to run without being in a dedicated streamlit session: if __name__ == "__main__": if runtime.exists(): - st.set_page_config( - page_title="IGVC Homepage", - page_icon="🚗") - st.sidebar.success("Select a System above.") + sidebar() runner() else: - sys.argv = ["streamlit", "run", sys.argv[0]] + sys.argv = ["streamlit", "run", "--client.showSidebarNavigation=False" ,sys.argv[0]] sys.exit(stcli.main()) diff --git a/ros2/UI/pages/Lane_Detection.py b/ros2/UI/pages/Lane_Detection.py index d8aa77678..86c8b9d6b 100644 --- a/ros2/UI/pages/Lane_Detection.py +++ b/ros2/UI/pages/Lane_Detection.py @@ -8,6 +8,8 @@ import time from std_msgs.msg import String import numpy as np +from ui_generics import * + # This class currently subscribes to some topics in cam_publisher, which I used to test. Please rewrite for lane Detection. 
@@ -15,24 +17,26 @@ class Image_Handler(Node): def __init__(self, containers): self.reference_img = None - self.topics = ["/raw_left", "/raw_right", "/tf_left", + self.topics = ["/camera/image", "/raw_right", "/tf_left", "/tf_right", "/sliding_left", "/sliding_right"] super().__init__('Streamlit_Image_Handler') self._bridge = CvBridge() + self.containers = containers self.lane_state_pub = self.create_subscription( String, "lane_state", self.lane_state_callback, qos_profile_sensor_data) + self._left_subscriber = self.create_subscription( - Image, self.topics[0], lambda msg: self.camData_callback(msg, containers[0], img_to_repl="THIS"), qos_profile_sensor_data) + Image, self.topics[0], lambda msg: self.camData_callback(msg, self.containers[0], img_to_repl="THIS"), qos_profile_sensor_data) self._right_subscriber = self.create_subscription( - Image, self.topics[1], lambda msg: self.camData_callback(msg, containers[1]), qos_profile_sensor_data) + Image, self.topics[1], lambda msg: self.camData_callback(msg, self.containers[1]), qos_profile_sensor_data) self._tf_left_subscriber = self.create_subscription( - Image, self.topics[2], lambda msg: self.camData_callback(msg, containers[2]), qos_profile_sensor_data) + Image, self.topics[2], lambda msg: self.camData_callback(msg, self.containers[2]), qos_profile_sensor_data) self._tf_right_subscriber = self.create_subscription( - Image, self.topics[3], lambda msg: self.camData_callback(msg, containers[3]), qos_profile_sensor_data) + Image, self.topics[3], lambda msg: self.camData_callback(msg, self.containers[3]), qos_profile_sensor_data) self._sliding_left_subscriber = self.create_subscription( - Image, self.topics[4], lambda msg: self.camData_callback(msg, containers[4]), qos_profile_sensor_data) + Image, self.topics[4], lambda msg: self.camData_callback(msg, self.containers[4]), qos_profile_sensor_data) self._sliding_right_subscriber = self.create_subscription( - Image, self.topics[5], lambda msg: self.camData_callback(msg, containers[5]), qos_profile_sensor_data) + Image, self.topics[5], lambda msg: self.camData_callback(msg, self.containers[5]), qos_profile_sensor_data) # self._frame_containers[] is a container corresponding to one of the tabs: You can create @@ -58,45 +62,40 @@ def __init__(self): String, '/streamlit/button', 10) -def render_handler(): - tabs = st.tabs( - ["Original", "Birds Eye View", "Sliding Windows"]) - containers = [] - frame_containers = [] - for tab in tabs: - cols = tab.columns(2) - containers.append(cols[0]) - containers.append(cols[1]) - with cols[0]: - frame_containers.append(st.empty()) - with cols[1]: - frame_containers.append(st.empty()) - lane_state = st.empty() - # This hunk initializes the ROS2 nodes without breaking anything :) - # Should not need to be tuoched - if "sub_node" not in st.session_state and 'pub_node' not in st.session_state: - try: - rclpy.init() - except: - st.warning("Something Wrong With the Generator, try restarting?") - elif "sub_node" not in st.session_state: - handler = Image_Handler(frame_containers) - st.session_state["sub_node"] = handler - while (True): - with lane_state: - try: - rclpy.spin_once(st.session_state['sub_node']) +def render_handler(context): + rclpy.try_shutdown() + rclpy.init() + context.empty() + with context: + tabs = st.tabs( + ["Original", "Birds Eye View", "Sliding Windows"]) + containers = [] + frame_containers = [] + for tab in tabs: + cols = tab.columns(2) + containers.append(cols[0]) + containers.append(cols[1]) + with cols[0]: + frame_containers.append(st.empty()) 
+ with cols[1]: + frame_containers.append(st.empty()) + # This hunk initializes the ROS2 nodes without breaking anything :) + # Should not need to be tuoched + st.write(len(frame_containers)) + handler = get_publisher(frame_containers) + while (True): + # try: + rclpy.spin_once(handler) time.sleep(.01) - except: - st.warning( - "Something went wrong, perhaps tabs were clicked too quickly? Try restarting.") - break - - + # except: + # with st.empty(): + # st.warning("Something went wrong, unrender & rerender, close all other tabs.") + # break def demo_publish(): + publisher = get_publisher() msg = String() msg.data = "Streamlit Button!" - st.session_state["pub_node"].button_pub.publish(msg) + publisher.button_pub.publish(msg) st.success("Message Published!", icon="✅") @@ -120,83 +119,86 @@ def input_gen(func, labels, lowers, uppers, vals): return func_list -def input_handler(): - # handles all output - # We use get_from_prior to ensure that, even if the render checkbox was flipped, data persists - # Input gen allows me to streamline generating a lot of objects by making it super duper quick - if "pub_node" not in st.session_state and "sub_node" not in st.session_state: - try: - rclpy.init() - except: - st.warning("Something Wrong With the Generator, try restarting?") - elif "pub_node" not in st.session_state: - st.session_state["pub_node"] = Publisher() - - tabs = st.tabs(["HSV Tweakers", "Coordinates", "Misc"]) - # HSV user input - with tabs[0]: - l, h = st.columns(2) - # lower HSV slider - L_labels = ["Lower Hue", "Lower Saturation", "Lower Value"] - default_vals = [0, 0, 200] - default_vals = get_from_prior(L_labels, default_vals) - - l_h, l_s, l_v = input_gen(l.slider, L_labels, [0], [255], default_vals) - U_labels = ["Upper Hue", "Upper Saturation", "Upper Value"] - default_vals = [255, 50, 255] - default_vals = get_from_prior(U_labels, default_vals) - # upper HSV slider - u_h, u_s, u_v = input_gen(h.slider, U_labels, [0], [255], default_vals) - # Coordinate input widget - with tabs[1]: - x, y = st.columns(2) - x_labels = ["Bottom Left x", "Top Left x", - "Bottom Right x", "Top Right x"] - default_vals = [12, 66, 635, 595] - default_vals = get_from_prior(x_labels, default_vals) - bl_x, tl_x, br_x, tr_x = input_gen( - x.number_input, x_labels, [0], [640], default_vals) - y_labels = ["Bottom Left y", "Top Left y", - "Bottom Right y", "Top Right y"] - default_vals = [355, 304, 344, 308] - default_vals = get_from_prior(y_labels, default_vals) - bl_y, tl_y, br_y, tr_y = input_gen( - y.number_input, y_labels, [0], [480], default_vals) - - # This is souced from LaneDetection - bl = (bl_x, bl_y) - tl = (tl_x, tl_y) - br = (br_x, br_y) - tr = (tr_x, tr_y) - pts1 = np.float32([tl, bl, tr, br]) - pts2 = np.float32([[0, 0], [0, 480], [640, 0], [640, 480]]) - # Matrix to warp the image for birdseye window - - # This is what we're really looking for, and what we might want to consider returning as a custom ros message... 
- # https://github.com/Box-Robotics/ros2_numpy - matrix = cv2.getPerspectiveTransform(pts1, pts2) - lower = np.array([l_h, l_s, l_v]) - upper = np.array([u_h, u_s, u_v]) - - img = None - if ("sub_node" in st.session_state): - img = st.session_state["sub_node"].reference_img - - if img is not None: - cols = st.columns(3) - cols[0].image(img, "Left reference image for transformation") - transformed_left = cv2.warpPerspective(img, matrix, (640, 480)) - hsv_transformed_left = cv2.cvtColor( - transformed_left, cv2.COLOR_BGR2HSV) - mask = cv2.inRange(hsv_transformed_left, lower, upper) - cols[1].image(transformed_left, "Left Birds Eye View Preview") - cols[2].image(mask, "Left Mask Preview") - -# TODO: Make this button publish the custom ROS2 Message :) - st.button("Publish!", on_click=demo_publish) - #TODO: Make this ubtton do soemthign - st.button("RESET TO DEFAULT") - +def input_handler(context): + rclpy.try_shutdown() + rclpy.init() + with context: + # handles all output + # We use get_from_prior to ensure that, even if the render checkbox was flipped, data persists + # Input gen allows me to streamline generating a lot of objects by making it super duper quick + tabs = st.tabs(["HSV Tweakers", "Coordinates", "Misc"]) + # HSV user input + with tabs[0]: + l, h = st.columns(2) + # lower HSV slider + L_labels = ["Lower Hue", "Lower Saturation", "Lower Value"] + default_vals = [0, 0, 200] + default_vals = get_from_prior(L_labels, default_vals) + + l_h, l_s, l_v = input_gen(l.slider, L_labels, [0], [255], default_vals) + U_labels = ["Upper Hue", "Upper Saturation", "Upper Value"] + default_vals = [255, 50, 255] + default_vals = get_from_prior(U_labels, default_vals) + # upper HSV slider + u_h, u_s, u_v = input_gen(h.slider, U_labels, [0], [255], default_vals) + # Coordinate input widget + with tabs[1]: + x, y = st.columns(2) + x_labels = ["Bottom Left x", "Top Left x", + "Bottom Right x", "Top Right x"] + default_vals = [12, 66, 635, 595] + default_vals = get_from_prior(x_labels, default_vals) + bl_x, tl_x, br_x, tr_x = input_gen( + x.number_input, x_labels, [0], [640], default_vals) + y_labels = ["Bottom Left y", "Top Left y", + "Bottom Right y", "Top Right y"] + default_vals = [355, 304, 344, 308] + default_vals = get_from_prior(y_labels, default_vals) + bl_y, tl_y, br_y, tr_y = input_gen( + y.number_input, y_labels, [0], [480], default_vals) + + # This is souced from LaneDetection + bl = (bl_x, bl_y) + tl = (tl_x, tl_y) + br = (br_x, br_y) + tr = (tr_x, tr_y) + pts1 = np.float32([tl, bl, tr, br]) + pts2 = np.float32([[0, 0], [0, 480], [640, 0], [640, 480]]) + # Matrix to warp the image for birdseye window + + # This is what we're really looking for, and what we might want to consider returning as a custom ros message... 
+ # https://github.com/Box-Robotics/ros2_numpy + matrix = cv2.getPerspectiveTransform(pts1, pts2) + lower = np.array([l_h, l_s, l_v]) + upper = np.array([u_h, u_s, u_v]) + + pub = get_publisher(None) + img = pub.reference_img + + if img is not None: + cols = st.columns(3) + cols[0].image(img, "Left reference image for transformation") + transformed_left = cv2.warpPerspective(img, matrix, (640, 480)) + hsv_transformed_left = cv2.cvtColor( + transformed_left, cv2.COLOR_BGR2HSV) + mask = cv2.inRange(hsv_transformed_left, lower, upper) + cols[1].image(transformed_left, "Left Birds Eye View Preview") + cols[2].image(mask, "Left Mask Preview") + # TODO: Make this button publish the custom ROS2 Message :) + st.button("Publish!", on_click=demo_publish) + #TODO: Make this ubtton do soemthign + st.button("RESET TO DEFAULT") + +@st.cache_resource +def get_publisher(_tabs): + handler = Image_Handler(_tabs) + return handler + +@st.cache_resource +def get_subscriber(): + handler = Publisher() + return handler + # See ../../ros2/src/lanefollowingtest/LaneDetection.py @@ -206,11 +208,14 @@ def input_handler(): st.set_page_config( page_title="Lane Detection", page_icon="🛣") - time.sleep(0.2) + sidebar() + + time.sleep(0.1) st.write( "This page is designed to control the lane detection functionality, and allow for quick edits to the algorithm.") - render = st.checkbox("Render Video Feed") + render = st.checkbox("Render Video Feed",value=True) + display_holder = st.container() if render: - render_handler() + render_handler(display_holder) else: - input_handler() + input_handler(display_holder) diff --git a/ros2/UI/pages/Object_Detection.py b/ros2/UI/pages/Object_Detection.py index 919c0b9c7..0fb9f2289 100644 --- a/ros2/UI/pages/Object_Detection.py +++ b/ros2/UI/pages/Object_Detection.py @@ -1,5 +1,5 @@ import streamlit as st - +from ui_generics import * def image_source_renderer(image): pass @@ -12,4 +12,6 @@ def image_source_renderer(image): st.set_page_config( page_title="Object Detection", page_icon="🛑") + sidebar() + diff --git a/ros2/UI/ui_generics.py b/ros2/UI/ui_generics.py index 139597f9c..96f39283b 100644 --- a/ros2/UI/ui_generics.py +++ b/ros2/UI/ui_generics.py @@ -1,2 +1,9 @@ +import streamlit as st +def sidebar(): + with st.sidebar: + st.page_link("Homepage.py", label=":derelict_house_building: Homepage", icon=None, help="Opens Homepage", disabled=False, use_container_width=True) + st.page_link("pages/Lane_Detection.py", label=":motorway: Lane detection", icon=None, help="Opens Lane detection", disabled=False, use_container_width=True) + st.page_link("pages/Object_Detection.py", label=":octagonal_sign: Object detection", icon=None, help="Opens Object detection", disabled=False, use_container_width=True) + From ed9cf28a61c61179f978dc2d6e1caaf8c5a6160a Mon Sep 17 00:00:00 2001 From: "Vaibhav.hariani@gmail.com" Date: Thu, 18 Apr 2024 12:48:35 -0400 Subject: [PATCH 17/20] Replaced some testing code --- ros2/UI/pages/Lane_Detection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ros2/UI/pages/Lane_Detection.py b/ros2/UI/pages/Lane_Detection.py index 86c8b9d6b..b2a11d3ac 100644 --- a/ros2/UI/pages/Lane_Detection.py +++ b/ros2/UI/pages/Lane_Detection.py @@ -17,7 +17,7 @@ class Image_Handler(Node): def __init__(self, containers): self.reference_img = None - self.topics = ["/camera/image", "/raw_right", "/tf_left", + self.topics = ["/raw_left", "/raw_right", "/tf_left", "/tf_right", "/sliding_left", "/sliding_right"] super().__init__('Streamlit_Image_Handler') self._bridge = 
From ed9cf28a61c61179f978dc2d6e1caaf8c5a6160a Mon Sep 17 00:00:00 2001
From: "Vaibhav.hariani@gmail.com"
Date: Thu, 18 Apr 2024 12:48:35 -0400
Subject: [PATCH 17/20] Replaced some testing code

---
 ros2/UI/pages/Lane_Detection.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ros2/UI/pages/Lane_Detection.py b/ros2/UI/pages/Lane_Detection.py
index 86c8b9d6b..b2a11d3ac 100644
--- a/ros2/UI/pages/Lane_Detection.py
+++ b/ros2/UI/pages/Lane_Detection.py
@@ -17,7 +17,7 @@ class Image_Handler(Node):
 
     def __init__(self, containers):
         self.reference_img = None
-        self.topics = ["/camera/image", "/raw_right", "/tf_left",
+        self.topics = ["/raw_left", "/raw_right", "/tf_left",
                        "/tf_right", "/sliding_left", "/sliding_right"]
         super().__init__('Streamlit_Image_Handler')
         self._bridge = CvBridge()

From cccf83b59257e434e850426198246b414321be26 Mon Sep 17 00:00:00 2001
From: "Vaibhav.hariani@gmail.com"
Date: Thu, 18 Apr 2024 15:29:39 -0400
Subject: [PATCH 18/20] Removed some more testing code

---
 ros2/UI/pages/Lane_Detection.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/ros2/UI/pages/Lane_Detection.py b/ros2/UI/pages/Lane_Detection.py
index b2a11d3ac..0d33b3259 100644
--- a/ros2/UI/pages/Lane_Detection.py
+++ b/ros2/UI/pages/Lane_Detection.py
@@ -17,7 +17,7 @@ class Image_Handler(Node):
 
     def __init__(self, containers):
         self.reference_img = None
-        self.topics = ["/raw_left", "/raw_right", "/tf_left",
+        self.topics = ["/camera/image", "/raw_right", "/tf_left",
                        "/tf_right", "/sliding_left", "/sliding_right"]
         super().__init__('Streamlit_Image_Handler')
         self._bridge = CvBridge()
@@ -81,7 +81,6 @@ def render_handler(context):
         frame_containers.append(st.empty())
     # This hunk initializes the ROS2 nodes without breaking anything :)
     # Should not need to be tuoched
-    st.write(len(frame_containers))
     handler = get_publisher(frame_containers)
     while (True):
         # try:
@@ -186,7 +185,7 @@ def input_handler(context):
             cols[2].image(mask, "Left Mask Preview")
         # TODO: Make this button publish the custom ROS2 Message :)
         st.button("Publish!", on_click=demo_publish)
-        #TODO: Make this ubtton do soemthign
+        #TODO: Make this button do something (Read and write to file)
         st.button("RESET TO DEFAULT")
 
 @st.cache_resource

From f7e4d121ae48b1a5f9f675cdab924ed190214235 Mon Sep 17 00:00:00 2001
From: "Vaibhav.hariani@gmail.com"
Date: Thu, 18 Apr 2024 16:35:04 -0400
Subject: [PATCH 19/20] something changed & I can't tell you what it is

---
 ros2/UI/pages/Lane_Detection.py | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/ros2/UI/pages/Lane_Detection.py b/ros2/UI/pages/Lane_Detection.py
index 0d33b3259..dfdd4dd0b 100644
--- a/ros2/UI/pages/Lane_Detection.py
+++ b/ros2/UI/pages/Lane_Detection.py
@@ -17,7 +17,7 @@ class Image_Handler(Node):
 
     def __init__(self, containers):
         self.reference_img = None
-        self.topics = ["/camera/image", "/raw_right", "/tf_left",
+        self.topics = ["raw_left", "/raw_right", "/tf_left",
                        "/tf_right", "/sliding_left", "/sliding_right"]
         super().__init__('Streamlit_Image_Handler')
         self._bridge = CvBridge()
@@ -83,13 +83,9 @@ def render_handler(context):
     # Should not need to be tuoched
     handler = get_publisher(frame_containers)
     while (True):
-        # try:
-            rclpy.spin_once(handler)
-            time.sleep(.01)
-        # except:
-        #     with st.empty():
-        #         st.warning("Something went wrong, unrender & rerender, close all other tabs.")
-        #     break
+        rclpy.spin_once(handler)
+        time.sleep(.01)
+
 def demo_publish():
     publisher = get_publisher()
     msg = String()
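For reference, the pattern PATCH 16 through PATCH 19 converge on: Streamlit reruns the entire page script on every widget interaction, so get_publisher is wrapped in @st.cache_resource to keep a single ROS2 node alive across reruns, and render_handler then pumps its callbacks with rclpy.spin_once. A minimal sketch of that pairing, with illustrative node and topic names rather than the ones in this series:

    import rclpy
    import streamlit as st
    from std_msgs.msg import String

    @st.cache_resource
    def get_node():
        # Body runs once per session; later reruns return the cached node,
        # so rclpy is never initialized twice and subscriptions don't pile up.
        rclpy.init(args=None)
        node = rclpy.create_node('streamlit_bridge')  # illustrative name
        node.create_subscription(String, 'demo_topic',
                                 lambda msg: node.get_logger().info(msg.data), 10)
        return node

    node = get_node()
    while True:
        rclpy.spin_once(node)  # process one round of callbacks per iteration

input_handler's rclpy.try_shutdown() / rclpy.init() pair is the other half of this: it resets the ROS context when the page flips out of render mode, so the next pass starts clean instead of tripping over an already-initialized context.
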
From a7f2d9a63b065ddf5d31fe71445d08bdc1375b88 Mon Sep 17 00:00:00 2001
From: Vaibhav Hariani
Date: Thu, 18 Apr 2024 17:00:19 -0400
Subject: [PATCH 20/20] "Typos & Some running issues"

---
 ros2/UI/pages/Lane_Detection.py | 30 ++++++++++++++++--------------
 ros2/UI/ui_generics.py          |  4 ++--
 2 files changed, 18 insertions(+), 16 deletions(-)

diff --git a/ros2/UI/pages/Lane_Detection.py b/ros2/UI/pages/Lane_Detection.py
index dfdd4dd0b..07cdbb3b9 100644
--- a/ros2/UI/pages/Lane_Detection.py
+++ b/ros2/UI/pages/Lane_Detection.py
@@ -50,7 +50,9 @@ def camData_callback(self, msg_in, img_location, img_to_repl=None):
         st.image(img)
 
     def lane_state_callback(self, msg):
-        st.write(msg.data)
+        pass
+        # container = st.empty()
+        # container.write(msg.data)
 
 
 # This class is your publisher: Flesh it out and integrate accordingly
@@ -84,7 +86,6 @@ def render_handler(context):
     handler = get_publisher(frame_containers)
     while (True):
         rclpy.spin_once(handler)
-        time.sleep(.01)
 
 def demo_publish():
     publisher = get_publisher()
@@ -96,11 +97,11 @@ def demo_publish():
 
 
 def get_from_prior(labels, defaults):
     defaults = np.resize(defaults, len(labels))
-    for i in range(len(labels)):
-        if labels[i] not in st.session_state:
-            st.session_state[labels[i]] = defaults[i]
-        else:
-            defaults[i] = st.session_state[labels[i]]
+    # for i in range(len(labels)):
+    #     if labels[i] not in st.session_state:
+    #         st.session_state[labels[i]] = defaults[i]
+    #     else:
+    #         defaults[i] = st.session_state[labels[i]]
     return defaults
 
@@ -113,6 +114,11 @@ def input_gen(func, labels, lowers, uppers, vals):
             labels[i], lowers[i], uppers[i], vals[i]))
     return func_list
 
+def clear_session_state(*labels):
+    for labelset in labels:
+        for label in labelset:
+            del st.session_state[label]
+
 
 def input_handler(context):
     rclpy.try_shutdown()
@@ -129,7 +135,6 @@ def input_handler(context):
             L_labels = ["Lower Hue", "Lower Saturation", "Lower Value"]
             default_vals = [0, 0, 200]
             default_vals = get_from_prior(L_labels, default_vals)
-
             l_h, l_s, l_v = input_gen(l.slider, L_labels, [0], [255], default_vals)
             U_labels = ["Upper Hue", "Upper Saturation", "Upper Value"]
             default_vals = [255, 50, 255]
@@ -144,13 +149,13 @@ def input_handler(context):
             default_vals = [12, 66, 635, 595]
             default_vals = get_from_prior(x_labels, default_vals)
             bl_x, tl_x, br_x, tr_x = input_gen(
-                x.number_input, x_labels, [0], [640], default_vals)
+                x.slider, x_labels, [0], [640], default_vals)
             y_labels = ["Bottom Left y", "Top Left y",
                         "Bottom Right y", "Top Right y"]
             default_vals = [355, 304, 344, 308]
             default_vals = get_from_prior(y_labels, default_vals)
             bl_y, tl_y, br_y, tr_y = input_gen(
-                y.number_input, y_labels, [0], [480], default_vals)
+                y.slider, y_labels, [0], [480], default_vals)
 
         # This is sourced from LaneDetection
         bl = (bl_x, bl_y)
@@ -180,9 +185,8 @@ def input_handler(context):
             cols[1].image(transformed_left, "Left Birds Eye View Preview")
             cols[2].image(mask, "Left Mask Preview")
         # TODO: Make this button publish the custom ROS2 Message :)
+        #TODO: Make this button do something (Read and write to file)
         st.button("Publish!", on_click=demo_publish)
-        #TODO: Make this button do something (Read and write to file)
-        st.button("RESET TO DEFAULT")
 
 @st.cache_resource
 def get_publisher(_tabs):
@@ -204,8 +208,6 @@ def get_subscriber():
         page_title="Lane Detection",
         page_icon="🛣")
     sidebar()
-
-    time.sleep(0.1)
     st.write(
         "This page is designed to control the lane detection functionality, and allow for quick edits to the algorithm.")
     render = st.checkbox("Render Video Feed",value=True)
diff --git a/ros2/UI/ui_generics.py b/ros2/UI/ui_generics.py
index 96f39283b..9c5e7d1bd 100644
--- a/ros2/UI/ui_generics.py
+++ b/ros2/UI/ui_generics.py
@@ -4,6 +4,6 @@ def sidebar():
     with st.sidebar:
         st.page_link("Homepage.py", label=":derelict_house_building: Homepage", icon=None, help="Opens Homepage", disabled=False, use_container_width=True)
-        st.page_link("pages/Lane_Detection.py", label=":motorway: Lane detection", icon=None, help="Opens Lane detection", disabled=False, use_container_width=True)
-        st.page_link("pages/Object_Detection.py", label=":octagonal_sign: Object detection", icon=None, help="Opens Object detection", disabled=False, use_container_width=True)
+        st.page_link("pages/Lane_Detection.py", label=":motorway: Lane Detection", icon=None, help="Opens Lane detection", disabled=False, use_container_width=True)
+        st.page_link("pages/Object_Detection.py", label=":octagonal_sign: Object Detection", icon=None, help="Opens Object detection", disabled=False, use_container_width=True)