Car_Finding_Lane_Lines.py
import os
from collections import deque

import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
from moviepy.editor import VideoFileClip


def convert_hls(image):
    return cv2.cvtColor(image, cv2.COLOR_RGB2HLS)


def select_white_yellow(image):
    converted = convert_hls(image)
    # white color mask
    lower = np.uint8([  0, 200,   0])
    upper = np.uint8([255, 255, 255])
    white_mask = cv2.inRange(converted, lower, upper)
    # yellow color mask
    lower = np.uint8([10,   0, 100])
    upper = np.uint8([40, 255, 255])
    yellow_mask = cv2.inRange(converted, lower, upper)
    # combine the masks
    mask = cv2.bitwise_or(white_mask, yellow_mask)
    return cv2.bitwise_and(image, image, mask=mask)


def convert_gray_scale(image):
    return cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)


def apply_smoothing(image, kernel_size=15):
    # kernel_size must be positive and odd
    return cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)


def detect_edges(image, low_threshold=50, high_threshold=150):
    return cv2.Canny(image, low_threshold, high_threshold)


def select_region(image):
    # first, define the polygon by its vertices
    # image.shape[0]: y, image.shape[1]: x
    rows, cols = image.shape[:2]
    bottom_left  = [cols * 0.1, rows * 0.95]
    top_left     = [cols * 0.4, rows * 0.6]
    bottom_right = [cols * 0.9, rows * 0.95]
    top_right    = [cols * 0.6, rows * 0.6]
    # the vertices are an array of polygons (i.e. an array of arrays) and the data type must be integer
    vertices = np.array([[bottom_left, top_left, top_right, bottom_right]], dtype=np.int32)
    return filter_region(image, vertices)
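

# NOTE: `filter_region` is called by `select_region` above but is not defined anywhere
# in this file, so the script cannot run as shown. The sketch below is an assumed,
# conventional implementation of the region-of-interest mask (fill the polygon with
# white, then AND it with the edge image); it is a reconstruction, not necessarily the
# author's original helper.
def filter_region(image, vertices):
    mask = np.zeros_like(image)
    if len(mask.shape) == 2:
        # single-channel image (e.g. the Canny edge map)
        cv2.fillPoly(mask, vertices, 255)
    else:
        # multi-channel image: fill every channel
        cv2.fillPoly(mask, vertices, (255,) * mask.shape[2])
    return cv2.bitwise_and(image, mask)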


def hough_lines(image):
    # Returns the Hough line segments (not an image with lines drawn on it)
    return cv2.HoughLinesP(image, rho=1, theta=np.pi/180, threshold=20, minLineLength=20, maxLineGap=300)


def average_slope_intercept(lines):
    if lines is None:
        # cv2.HoughLinesP returns None when no line segments are found
        return None, None
    left_lines    = []  # (slope, intercept)
    left_weights  = []  # (length,)
    right_lines   = []  # (slope, intercept)
    right_weights = []  # (length,)
    for line in lines:
        for x1, y1, x2, y2 in line:
            if x2 == x1:
                continue  # ignore a vertical line
            slope = (y2 - y1) / (x2 - x1)
            intercept = y1 - slope * x1
            length = np.sqrt((y2 - y1)**2 + (x2 - x1)**2)
            if slope < 0:  # y is reversed in image
                left_lines.append((slope, intercept))
                left_weights.append(length)
            else:
                right_lines.append((slope, intercept))
                right_weights.append(length)
    # add more weight to longer lines
    left_lane  = np.dot(left_weights,  left_lines)  / np.sum(left_weights)  if len(left_weights)  > 0 else None
    right_lane = np.dot(right_weights, right_lines) / np.sum(right_weights) if len(right_weights) > 0 else None
    return left_lane, right_lane  # (slope, intercept), (slope, intercept)


def make_line_points(y1, y2, line):
    # Convert a line represented by slope and intercept into pixel points
    if line is None:
        return None
    slope, intercept = line
    # make sure everything is integer as cv2.line requires it
    x1 = int((y1 - intercept) / slope)
    x2 = int((y2 - intercept) / slope)
    y1 = int(y1)
    y2 = int(y2)
    return ((x1, y1), (x2, y2))


def lane_lines(image, lines):
    left_lane, right_lane = average_slope_intercept(lines)
    y1 = image.shape[0]  # bottom of the image
    y2 = y1 * 0.6        # slightly lower than the middle
    left_line = make_line_points(y1, y2, left_lane)
    right_line = make_line_points(y1, y2, right_lane)
    return left_line, right_line


def draw_lane_lines(image, lines, color=[255, 0, 0], thickness=20):
    # make a separate image to draw the lines on and combine it with the original later
    line_image = np.zeros_like(image)
    for line in lines:
        if line is not None:
            cv2.line(line_image, *line, color, thickness)
    # image1 * α + image2 * β + λ
    # image1 and image2 must be the same shape
    return cv2.addWeighted(image, 1.0, line_image, 0.95, 0.0)


QUEUE_LENGTH = 50


class LaneDetector:
    def __init__(self):
        self.left_lines = deque(maxlen=QUEUE_LENGTH)
        self.right_lines = deque(maxlen=QUEUE_LENGTH)

    def process(self, image):
        white_yellow = select_white_yellow(image)
        gray = convert_gray_scale(white_yellow)
        smooth_gray = apply_smoothing(gray)
        edges = detect_edges(smooth_gray)
        regions = select_region(edges)
        lines = hough_lines(regions)
        left_line, right_line = lane_lines(image, lines)

        def mean_line(line, lines):
            if line is not None:
                lines.append(line)
            if len(lines) > 0:
                # average the lines stored for recent frames to smooth the result
                line = np.mean(lines, axis=0, dtype=np.int32)
                # make sure it's a tuple of tuples, not a numpy array, for cv2.line to work
                line = tuple(map(tuple, line))
            return line

        left_line = mean_line(left_line, self.left_lines)
        right_line = mean_line(right_line, self.right_lines)
        return draw_lane_lines(image, (left_line, right_line))
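

# `matplotlib.image` and `matplotlib.pyplot` are imported above but never used in the
# original script. The helper below is a minimal sketch (not part of the original code)
# for checking the pipeline on a single frame; the function name and the image path you
# pass in are placeholders. It assumes an 8-bit RGB image (e.g. a JPEG frame), since the
# colour thresholds above are defined on the 0-255 range.
def show_lane_detection(image_path):
    detector = LaneDetector()
    image = mpimg.imread(image_path)  # load the frame as an RGB array
    result = detector.process(image)  # run the full detection pipeline on one frame
    plt.imshow(result)                # display the frame with the lane overlay
    plt.axis('off')
    plt.show()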
# def process_video(test_video, video_input, video_output):
# detector = LaneDetector()
# clip = VideoFileClip(os.path.join(test_videos, video_input))
# processed = clip.fl_image(detector.process)
# processed.write_videofile(os.path.join('output_videos', video_output), audio=False)
# def process_video(video_input, video_output):
# detector = LaneDetector()
# white_output = 'test_videos_output/' + video_output
# clip1 = VideoFileClip("test_videos/" + video_input)
# white_clip = clip1.fl_image(detector.process)
# white_clip.write_videofile(white_output, audio=False)


def process_video(video_input, video_output):
    detector = LaneDetector()
    clip = VideoFileClip(os.path.join('test_videos', video_input))
    # clip.fl_image(f) applies the frame filter f to every frame of the clip
    processed = clip.fl_image(detector.process)
    # write the processed clip to disk, dropping the audio track
    processed.write_videofile(os.path.join('output_videos', video_output), audio=False)


if __name__ == "__main__":
    process_video('Resource/challenge_input1.mp4', 'Resource/challenge_output1.mp4')
    process_video('Resource/challenge_input2.mp4', 'Resource/challenge_output2.mp4')
    process_video('Resource/challenge_input3.mp4', 'Resource/challenge_output3.mp4')