lane following with split cameras fully implemented #180

Merged
54 changes: 34 additions & 20 deletions ros2/src/lanefollowingtest/LaneDetection.py
@@ -13,14 +13,18 @@

# Inputs from both cameras
vidcap_left = cv2.VideoCapture("/dev/video0")
vidcap_right = cv2.VideoCapture("/dev/video2")

vidcap_left.set(3,640)
vidcap_left.set(4,480)
vidcap_right = cv2.VideoCapture("/dev/video4")
vidcap_right.set(3,640)
vidcap_right.set(4,480)

def nothing(x):
pass


class Individual_Follower():

def __init__(self):
self._fit = None
self._binary_warped = None
@@ -71,7 +75,7 @@ def Plot_Line(self, smoothen=False, prevFrameCount=6):
current = np.int32(np.mean(nonzerox[good_inds]))

# Concatenate the arrays of indices
if (lane_inds.size() > 0):
if (len(lane_inds) > 0):
lane_inds = np.concatenate(lane_inds)

# Extract line pixel positions
@@ -181,25 +185,33 @@ def measure_position_meters(self):
# The car is assumed to be placed in the center of the picture
# If the deviation is negative, the car is on the left-hand side of the center of the lane
veh_pos = (
(self._binary_warped.shape[1]//2) - center_lanes_x_pos) * xm_per_pix
(self._left_follower._binary_warped.shape[1]//2) - center_lanes_x_pos) * xm_per_pix
return veh_pos / 100

def timer_callback(self):
# TODO: Move these out into a separate calibrator class that saves values to a .csv
# THESE SHOULD NOT BE GENERATED ON EVERY TIMER CALLBACK, BAD DESIGN (ON MY PART)
cv2.namedWindow("Trackbars")
cv2.createTrackbar("L - H", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("L - S", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("L - V", "Trackbars", 200, 255, nothing)
cv2.createTrackbar("U - H", "Trackbars", 255, 255, nothing)
cv2.createTrackbar("U - S", "Trackbars", 50, 255, nothing)
cv2.createTrackbar("U - V", "Trackbars", 255, 255, nothing)
l_h = cv2.getTrackbarPos("L - H", "Trackbars")
l_s = cv2.getTrackbarPos("L - S", "Trackbars")
l_v = cv2.getTrackbarPos("L - V", "Trackbars")
u_h = cv2.getTrackbarPos("U - H", "Trackbars")
u_s = cv2.getTrackbarPos("U - S", "Trackbars")
u_v = cv2.getTrackbarPos("U - V", "Trackbars")
# cv2.namedWindow("Trackbars")
# cv2.createTrackbar("L - H", "Trackbars", 0, 255, nothing)
# cv2.createTrackbar("L - S", "Trackbars", 0, 255, nothing)
# cv2.createTrackbar("L - V", "Trackbars", 200, 255, nothing)
# cv2.createTrackbar("U - H", "Trackbars", 255, 255, nothing)
# cv2.createTrackbar("U - S", "Trackbars", 50, 255, nothing)
# cv2.createTrackbar("U - V", "Trackbars", 255, 255, nothing)
# l_h = cv2.getTrackbarPos("L - H", "Trackbars")
# l_s = cv2.getTrackbarPos("L - S", "Trackbars")
# l_v = cv2.getTrackbarPos("L - V", "Trackbars")
# u_h = cv2.getTrackbarPos("U - H", "Trackbars")
# u_s = cv2.getTrackbarPos("U - S", "Trackbars")
# u_v = cv2.getTrackbarPos("U - V", "Trackbars")
l_h = 0
l_s = 0
l_v = 200

u_h = 255
u_s = 50
u_v = 255

lower = np.array([l_h, l_s, l_v])
upper = np.array([u_h, u_s, u_v])
# ## Choosing points for perspective transformation
@@ -226,7 +238,8 @@ def timer_callback(self):
if not(success_l and success_r):
return
for image in images:
frame = cv2.resize(image[0], (640, 480))
frame = image[0]
# frame = cv2.resize(image[0], (640, 480))
# Draw the perspective-transform reference points for debugging (list comprehension run for its side effects)
[cv2.circle(frame, point, 5, (0, 0, 255), -1) for point in (bl, tl, br, tr)]
transformed_frame = cv2.warpPerspective(
@@ -240,6 +253,8 @@ def timer_callback(self):
self._left_follower.set_binwarp(binwarp=mask)
else:
self._right_follower.set_binwarp(binwarp=mask)
cv2.imshow("Original" + image[1], frame)
cv2.imshow("Bird's Eye View" + image[1], transformed_frame)

result_left = self._left_follower.Plot_Line()
result_right = self._right_follower.Plot_Line()
@@ -254,8 +269,7 @@ def timer_callback(self):
msg_out = Float64()
msg_out.data = pos
self.camData_publisher.publish(msg_out) # Publish the error
cv2.imshow("Original", frame)
cv2.imshow("Bird's Eye View", transformed_frame)

if cv2.waitKey(10) == 27:
return

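The TODO in timer_callback asks for the trackbar setup to live in a separate calibrator class that saves its values to a .csv instead of being rebuilt on every callback. A minimal sketch of that idea follows; the HSVCalibrator name, the hsv_thresholds.csv path, and the column layout are assumptions for illustration, not part of this PR.

import csv
import os

import cv2
import numpy as np


def nothing(x):
    pass


class HSVCalibrator:
    """Owns the trackbar window and persists the chosen HSV thresholds (sketch, not part of this PR)."""

    FIELDS = ["l_h", "l_s", "l_v", "u_h", "u_s", "u_v"]

    def __init__(self, csv_path="hsv_thresholds.csv"):
        self.csv_path = csv_path
        # Defaults match the values hard-coded in timer_callback
        self.values = dict(zip(self.FIELDS, [0, 0, 200, 255, 50, 255]))
        self._load()
        cv2.namedWindow("Trackbars")
        for name, val in self.values.items():
            cv2.createTrackbar(name, "Trackbars", val, 255, nothing)

    def _load(self):
        # Restore previously saved thresholds if a calibration file exists
        if os.path.exists(self.csv_path):
            with open(self.csv_path, newline="") as f:
                row = next(csv.DictReader(f))
                self.values = {k: int(row[k]) for k in self.FIELDS}

    def save(self):
        with open(self.csv_path, "w", newline="") as f:
            writer = csv.DictWriter(f, fieldnames=self.FIELDS)
            writer.writeheader()
            writer.writerow(self.values)

    def bounds(self):
        # Read the sliders once per call instead of recreating them every frame
        self.values = {k: cv2.getTrackbarPos(k, "Trackbars") for k in self.FIELDS}
        lower = np.array([self.values[k] for k in ("l_h", "l_s", "l_v")])
        upper = np.array([self.values[k] for k in ("u_h", "u_s", "u_v")])
        return lower, upper

The node could then construct one calibrator in its constructor, call bounds() inside timer_callback, and call save() on shutdown, so the sliders keep working without being recreated on every frame.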
24 changes: 24 additions & 0 deletions ros2/src/lanefollowingtest/camera_sanity.py
@@ -0,0 +1,24 @@
import cv2
import time


vidcap_right = cv2.VideoCapture("/dev/video1")

vidcap_left = cv2.VideoCapture("/dev/video3")

while True:
# time.sleep(.007)
right = vidcap_right.read()
if(right[0]):
cv2.imshow("right",right[1])
# time.sleep(.007)


left = vidcap_left.read()
if(left[0]):
cv2.imshow("left", left[1])



if cv2.waitKey(10) == 27:
break
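The device nodes differ between the scripts in this PR (/dev/video0 and /dev/video4 in LaneDetection.py versus /dev/video1 and /dev/video3 here), and V4L2 numbering can shift across reboots or USB ports. A quick probe like the one below, suggested only as a helper and not part of the PR, reports which nodes actually deliver frames before they get wired into LaneDetection.py:

import cv2

# Probe the first few V4L2 device nodes and report which ones return frames.
for idx in range(6):
    path = f"/dev/video{idx}"
    cap = cv2.VideoCapture(path)
    ok, frame = cap.read() if cap.isOpened() else (False, None)
    print(f"{path}: {frame.shape if ok else 'no frame'}")
    cap.release()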
13 changes: 13 additions & 0 deletions ros2/src/lanefollowingtest/camera_sanity_alt.py
@@ -0,0 +1,13 @@
import cv2

vidcap_left = cv2.VideoCapture("/dev/video3")
vidcap_left.set(3,640)
vidcap_left.set(4,480)


while True:
left = vidcap_left.read()
if(left[0]):
cv2.imshow("left", left[1])
if cv2.waitKey(10) == 27:
break
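The set(3, 640) / set(4, 480) calls used here and in LaneDetection.py pass OpenCV's numeric property IDs directly; 3 and 4 correspond to cv2.CAP_PROP_FRAME_WIDTH and cv2.CAP_PROP_FRAME_HEIGHT, so the named constants express the same thing more readably. This is a readability suggestion only, with identical behaviour:

import cv2

vidcap_left = cv2.VideoCapture("/dev/video3")
vidcap_left.set(cv2.CAP_PROP_FRAME_WIDTH, 640)   # equivalent to set(3, 640)
vidcap_left.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)  # equivalent to set(4, 480)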
43 changes: 43 additions & 0 deletions ros2/src/lanefollowingtest/copypaste.py
@@ -0,0 +1,43 @@
import cv2
import threading

class camThread(threading.Thread):
def __init__(self, previewName, cam_label):
threading.Thread.__init__(self)
self.previewName = previewName
self.cam_label = cam_label
def run(self):
print ("Starting " + self.previewName)
camPreview(self.previewName, self.cam_label)

def camPreview(previewName, cam_label):
cv2.namedWindow(previewName)
cam = cv2.VideoCapture(cam_label)
if cam.isOpened(): # try to get the first frame
rval, frame = cam.read()
else:
rval = False
while True:
cam.set(3,640)
cam.set(4,480)
frame = cam.read()
if(frame[0]):
cv2.imshow(previewName, frame[1])
key = cv2.waitKey(20)
if key == 27: # exit on ESC
break


# rval, frame = cam.read()
# if(rval):
# cv2.imshow(previewName, frame)
# key = cv2.waitKey(20)
# cam.release()

cv2.destroyWindow(previewName)

# Create two threads as follows
thread1 = camThread("Camera 1", "/dev/video1")
thread2 = camThread("Camera 2", "/dev/video3")
thread1.start()
thread2.start()
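camPreview above reapplies the resolution on every loop iteration and discards the rval, frame pair from its initial read. A tightened sketch with the same intent is below; it sets the properties once and unpacks the read() tuple. It has not been tested against this hardware, and calling cv2.imshow from multiple threads is not guaranteed to be safe on every HighGUI backend, so treat it as a suggestion rather than a drop-in replacement.

import cv2


def camPreview(previewName, cam_label):
    cv2.namedWindow(previewName)
    cam = cv2.VideoCapture(cam_label)
    # Configure the resolution once, before the capture loop
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    while cam.isOpened():
        rval, frame = cam.read()
        if rval:
            cv2.imshow(previewName, frame)
        if cv2.waitKey(20) == 27:  # exit on ESC
            break
    cam.release()
    cv2.destroyWindow(previewName)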