removed video capture from calibration method and replaced with code to get images from folder
gjcho committed Nov 10, 2023
1 parent 936fb91 commit ece078f
Showing 2 changed files with 189 additions and 18 deletions.
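In short, the webcam frame-grab loop (cv2.VideoCapture) inside the calibration method is replaced by reading chessboard photos off disk. A minimal sketch of that pattern, assuming an illustrative folder name and file extension, not taken from the commit itself:

import glob
import cv2

# Read every .jpg in the calibration folder instead of grabbing webcam frames.
images = []
for path in glob.glob('calib_images/*.jpg'):
    img = cv2.imread(path)
    if img is not None:  # skip files OpenCV cannot decode
        images.append(img)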
38 changes: 20 additions & 18 deletions calibration.py
@@ -12,6 +12,11 @@ def __init__(self):
self.images = []
self.objpoints = [] # 3d point in real world space
self.imgpoints = [] # 2d points in image plane.
self.images_dir = 'pathname'
self.images_format = '.jpg'
self.square_size = 0.0
self.width = 0
self.height = 0

def load_images(self, directory):
img_dir = directory  # Enter directory of all images
@@ -21,7 +26,7 @@ def load_images(self, directory):
img = cv2.imread(f1)
self.images.append(img)

def calibrate_camera(self, square_size, width, height):
def calibrate_chessboard(self, square_size, width, height):
chessboard_images = self.images
# use at least 10 images of the chessboard at different angles
# square_size: the size of each square of the actual chessboard in cm
@@ -34,47 +39,44 @@ def calibrate_camera(self, square_size, width, height):
objp = objp * square_size

# arrays from object points and image points from all images
self.objpoints = [] # 3d points in the real world space
self.imgpoints = [] # 2d points in the image plane

cap = cv2.VideoCapture(0)
found = 0
while(found < 2): # Here, 10 can be changed to whatever number you like to choose
ret, img = cap.read() # Capture frame-by-frame
#Iterate through all images
for fname in chessboard_images:
img = cv2.imread(str(fname))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Find the chess board corners
ret, corners = cv2.findChessboardCorners(gray, (7,6),None)
ret, corners = cv2.findChessboardCorners(gray, (width, height), None)

# If found, add object points, image points (after refining them)
if ret == True:
objpoints.append(objp) # Certainly, every loop objp is the same, in 3D.
if ret:
self.objpoints.append(objp) # Certainly, every loop objp is the same, in 3D.
corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
imgpoints.append(corners2)
self.imgpoints.append(corners2)

found += 1

# When everything done, release the capture
cap.release()
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(self.objpoints, self.imgpoints, gray.shape[::-1], None, None)
return mtx, dist
return [mtx, dist]

def undistortImage(self, image):
ret, cameraMatrix, dist, rvecs, tvecs = cv.calibrateCamera(self.objpoints, self.imgpoints, frameSize, None, None)
mtx, dist = self.calibrate_chessboard(self.square_size, self.width, self.height)

img = cv.imread(image)
h, w = img.shape[:2]
newCameraMatrix, roi = cv.getOptimalNewCameraMatrix(cameraMatrix, dist, (w,h), 1, (w,h))
newCameraMatrix, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h))


# Undistort
dst = cv.undistort(img, cameraMatrix, dist, None, newCameraMatrix)
dst = cv.undistort(img, mtx, dist, None, newCameraMatrix)

# crop the image
x, y, w, h = roi
dst = dst[y:y+h, x:x+w]
cv.imwrite('caliResult1.png', dst)

# Undistort with Remapping
mapx, mapy = cv.initUndistortRectifyMap(cameraMatrix, dist, None, newCameraMatrix, (w,h), 5)
mapx, mapy = cv.initUndistortRectifyMap(mtx, dist, None, newCameraMatrix, (w,h), 5)
dst = cv.remap(img, mapx, mapy, cv.INTER_LINEAR)

# crop the image
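For reference, a minimal driver sketch of how the updated calibration flow might be exercised end to end. The class name (Calibration), folder path, board dimensions, and square size are assumptions for illustration; they are not part of the diff:

# Hypothetical usage sketch (names and values are illustrative, not from the commit).
from calibration import Calibration  # assumes the class shown above is named Calibration

cal = Calibration()
cal.load_images('calib_images/')   # folder of chessboard photos loaded from disk
cal.square_size = 2.5              # assumed square edge length, in cm
cal.width, cal.height = 9, 6       # assumed inner-corner counts of the board

# calibrate_chessboard consumes the images loaded above and returns the
# camera matrix and distortion coefficients.
mtx, dist = cal.calibrate_chessboard(cal.square_size, cal.width, cal.height)
print("camera matrix:\n", mtx)
print("distortion coefficients:", dist.ravel())

# undistortImage re-runs the calibration internally and writes caliResult1.png.
cal.undistortImage('test_frame.jpg')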
169 changes: 169 additions & 0 deletions detector.py
@@ -0,0 +1,169 @@
import cv2
import math
import numpy as np
import apriltag
from target import Target


class Detector:

def __init__(self):
pass

def detectGameElement(self, array, objectsToDetect: list):
frame = array
results = dict(zip(objectsToDetect,
[None for i in range(len(objectsToDetect))]))
cone = {
"MEAN": [25.98, 241.47, 254.63],
"STDEV": [2.64, 26.68, 1.72]
}

colors = {
'CUBE': [[158, 255, 255], [110, 100, 100]],
"CONE": [[cone["MEAN"][0]+cone["STDEV"][0]*2,
cone["MEAN"][1]+cone["STDEV"][1]*2,
cone["MEAN"][2]+cone["STDEV"][2]*2],
[cone["MEAN"][0]-cone["STDEV"][0]*3,
cone["MEAN"][1]-cone["STDEV"][1]*3,
cone["MEAN"][2]-cone["STDEV"][2]*50]]

}
for object in objectsToDetect:
hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_frame,
np.array(colors[object][1]),
np.array(colors[object][0]))
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
morph = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
mask = cv2.medianBlur(mask, 5)
if (object == "CUBE"):
contours, hier = cv2.findContours(morph,
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)
contours = sorted(contours, key=cv2.contourArea)
cnt = None
# contour = contours[len(contours) -1]
for contour in contours:
print("Area", cv2.contourArea(contour))
if(cv2.contourArea(contour)>1000):
print("Added Area", cv2.contourArea(contour))
tx,ty,tw,th = cv2.boundingRect(contour)
#print(tx, ty, tw, th)
if (not (tx == 0 and ty == 0 and tw == frame.shape[1] and th == frame.shape[0])):
passed = False
if (tw * th > 200):
passed = True
if passed:
cnt = contour

if cnt is not None:
x, y, w, h = cv2.boundingRect(cnt)
cv2.rectangle(frame, (x, y), (x + w, y + h),
(0, 0, 255), 2)
cv2.circle(frame, (int(x + w / 2), int(y + h / 2)),
radius=0, color=(0, 0, 255), thickness=5)
cv2.putText(frame, object, (x, y - 5),
cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255),
2, cv2.LINE_AA)
results[object] = Target(cnt, object)

if (object == "CONE"):
straight_contours, hier = cv2.findContours(morph,
                                            cv2.RETR_EXTERNAL,
                                            cv2.CHAIN_APPROX_NONE)[:2]
straight_contours = sorted(straight_contours,
key=cv2.contourArea)
cnt = None

for contour in straight_contours:
if(cv2.contourArea(contour)>1000):
tx,ty,tw,th = cv2.boundingRect(contour)
#print(tx, ty, tw, th)
if (not (tx == 0 and ty == 0 and tw == frame.shape[1] and th == frame.shape[0])):
passed = False
if (tw * th > 200):
passed = True
if passed:
cnt = contour
temp_ctr_max = contour

if cnt is not None:
x, y, w, h = cv2.boundingRect(cnt)
cv2.rectangle(frame, (x, y),
(x + w, y + h), (0, 0, 255), 2)
cv2.circle(frame, (int(x + w/2),
int(y + h/2)), radius=0,
color=(0, 0, 255), thickness=5)
cv2.putText(frame, object, (x, y - 5),
cv2.FONT_HERSHEY_SIMPLEX,
1.0, (0, 0, 255), 2, cv2.LINE_AA)
results[object] = Target(cnt, object)
results[object] = Target(temp_ctr_max, object)

while True:
cv2.imshow("o", frame)
if cv2.waitKey(0):
break

cv2.destroyAllWindows()

return results

def detectColoredShape(self, array, rgb_col):
color_dict_HSV = {'black': [[180, 255, 30], [0, 0, 0]],
'white': [[180, 18, 255], [0, 0, 231]],
'red1': [[180, 255, 255], [159, 50, 70]],
'red2': [[9, 255, 255], [0, 50, 70]],
'green': [[89, 255, 255], [36, 50, 70]],
'blue': [[128, 255, 255], [90, 50, 70]],
'yellow': [[35, 255, 255], [25, 50, 70]],
'purple': [[158, 255, 255], [129, 50, 70]],
'orange': [[24, 255, 255], [10, 50, 70]],
'gray': [[180, 18, 230], [0, 0, 40]]}
frame = array
results = dict(zip(rgb_col, [None for i in range(len(rgb_col))]))

print(rgb_col)

for object in rgb_col:
hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_frame,
np.array(color_dict_HSV[object][1]),
np.array(color_dict_HSV[object][0]))
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
morph = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
contours, hier = cv2.findContours(morph,
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)[:2]
x = 0
y = 0
w = 0
h = 0
for contour in contours:
tx, ty, tw, th = cv2.boundingRect(contour)
print(tx, ty, tw, th)
if (tw * th > w * h and not
(tx == 0 and ty == 0 and
tw == frame.shape[1] and th == frame.shape[0])):
x = tx
y = ty
w = tw
h = th

results[object] = [x, y, w, h]
print("X: %2d, Y: %2d, W: %2d, H: %2d" % (x, y, w, h))
while True:
if cv2.waitKey(0):
break
cv2.destroyAllWindows()

results[object] = Target([x, y, w, h], object)

return results

def detectAprilTag(self, array):
results = []

return results
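detectAprilTag is only a stub in this commit (it returns an empty list). Since the file already imports apriltag, one possible way to fill it in, sketched with the swatbotics apriltag bindings; the tag family and the shape of the returned entries are assumptions, not part of the commit:

import cv2
import apriltag

def detect_april_tags(frame):
    # Sketch only: grayscale the frame and run the AprilTag detector over it.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    options = apriltag.DetectorOptions(families="tag36h11")  # assumed tag family
    detector = apriltag.Detector(options)

    results = []
    for det in detector.detect(gray):
        # Each detection exposes the tag id, its four corner points, and its center.
        results.append({"id": det.tag_id,
                        "corners": det.corners,
                        "center": det.center})
    return results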
