# NOTE(review): SOURCE is a whitespace-mangled git diff; the code below is the
# reconstructed and repaired content of FeatureExtraction.py and
# ImageEnhancement.py from that patch.

import math

import numpy as np
import scipy.signal


def m(x, y, f):
    """Modulating function from the iris-texture paper: a cosine wave over the
    radial distance sqrt(x^2 + y^2) at spatial frequency f."""
    return np.cos(2 * np.pi * f * math.sqrt(x ** 2 + y ** 2))


def gabor(x, y, dx, dy, f):
    """Even-symmetric Gabor-like spatial filter: a 2-D Gaussian envelope with
    standard deviations (dx, dy), modulated by m(x, y, f).

    Returns the filter value at offset (x, y) from the kernel centre.
    """
    envelope = (1 / (2 * math.pi * dx * dy)) * np.exp(
        -0.5 * (x ** 2 / dx ** 2 + y ** 2 / dy ** 2)
    )
    return envelope * m(x, y, f)


def spatial(f, dx, dy, size=8):
    """Sample the Gabor filter on a size x size grid centred at the origin.

    The default size=8 reproduces the original hard-coded 8x8 kernel exactly
    (offsets -4..3 in both axes); size was parameterized as a generalization.
    """
    sfilter = np.zeros((size, size))
    half = size // 2
    for i in range(size):
        for j in range(size):
            # (j - half, i - half) matches the original (-4+j, -4+i) indexing
            sfilter[i, j] = gabor(j - half, i - half, dx, dy, f)
    return sfilter


def get_vec(convolvedtrain1, convolvedtrain2, block=8):
    """Build the feature vector from two Gabor-filtered channels.

    The image is tiled into non-overlapping block x block cells; for every cell
    and each channel, the mean and the mean absolute deviation of the absolute
    response are appended.  For the standard 48x512 ROI this yields
    6 * 64 * 2 channels * 2 stats = 1536 values.

    Fix vs. original: the tile grid is derived from the array shape instead of
    being hard-coded to 6 rows x 64 columns, so ROIs of other (block-aligned)
    sizes also work; for a 48x512 input the output is identical.
    """
    feature_vec = []
    n_rows = convolvedtrain1.shape[0] // block  # 6 for a 48-row ROI
    n_cols = convolvedtrain1.shape[1] // block  # 64 for a 512-column ROI
    for i in range(n_rows):
        for j in range(n_cols):
            r0 = i * block
            c0 = j * block
            grid1 = convolvedtrain1[r0:r0 + block, c0:c0 + block]
            grid2 = convolvedtrain2[r0:r0 + block, c0:c0 + block]
            # Channel order preserved: channel 1 stats, then channel 2 stats.
            for grid in (grid1, grid2):
                absolute = np.absolute(grid)
                mean = np.mean(absolute)  # local energy of the cell
                feature_vec.append(mean)
                # mean absolute deviation around that energy, as in the paper
                feature_vec.append(np.mean(np.absolute(absolute - mean)))
    return feature_vec


def FeatureExtraction(enhanced):
    """Convolve each enhanced iris strip with two Gabor channels and collect
    per-block statistics.

    Parameters
    ----------
    enhanced : list of 2-D arrays, each at least 48 rows by 512 columns
        (the normalized, histogram-equalized iris images).

    Returns
    -------
    list of feature vectors, one per image; 1536 floats each for 48x512 ROIs.

    Fix vs. original: the `con1`/`con2` lists that accumulated every filtered
    image without ever being read (dead code, unbounded memory growth) were
    removed.
    """
    # Two spatial-filter channels; (f, dx, dy) values taken from the paper.
    filter1 = spatial(0.67, 3, 1.5)
    filter2 = spatial(0.67, 4, 1.5)

    feature_vector = []
    for img in enhanced:
        # Region of interest: the top 48 rows (closest to the pupil) over the
        # full 512-column width.
        img_roi = img[:48, :]

        filtered1 = scipy.signal.convolve2d(img_roi, filter1, mode='same')
        filtered2 = scipy.signal.convolve2d(img_roi, filter2, mode='same')

        feature_vector.append(get_vec(filtered1, filtered2))
    return feature_vector


def ImageEnhancement(normalized):
    """Histogram-equalize each normalized iris image.

    Each image is cast to uint8 first because cv2.equalizeHist only accepts
    8-bit single-channel input.
    """
    import cv2  # local import: OpenCV is only needed by this function

    enhanced = []
    for res in normalized:
        enhanced.append(cv2.equalizeHist(res.astype(np.uint8)))
    return enhanced
a/Iris-Recognition-master/Iris-Recognition-master/Images/Fig3.png b/Iris-Recognition-master/Iris-Recognition-master/Images/Fig3.png new file mode 100644 index 0000000..d5a0bfe Binary files /dev/null and b/Iris-Recognition-master/Iris-Recognition-master/Images/Fig3.png differ diff --git a/Iris-Recognition-master/Iris-Recognition-master/Images/Fig4.png b/Iris-Recognition-master/Iris-Recognition-master/Images/Fig4.png new file mode 100644 index 0000000..f7877ed Binary files /dev/null and b/Iris-Recognition-master/Iris-Recognition-master/Images/Fig4.png differ diff --git a/Iris-Recognition-master/Iris-Recognition-master/Images/Fig5.png b/Iris-Recognition-master/Iris-Recognition-master/Images/Fig5.png new file mode 100644 index 0000000..e95ac02 Binary files /dev/null and b/Iris-Recognition-master/Iris-Recognition-master/Images/Fig5.png differ diff --git a/Iris-Recognition-master/Iris-Recognition-master/Images/Fig6.png b/Iris-Recognition-master/Iris-Recognition-master/Images/Fig6.png new file mode 100644 index 0000000..3fc5a1c Binary files /dev/null and b/Iris-Recognition-master/Iris-Recognition-master/Images/Fig6.png differ diff --git a/Iris-Recognition-master/Iris-Recognition-master/Images/Fig7.png b/Iris-Recognition-master/Iris-Recognition-master/Images/Fig7.png new file mode 100644 index 0000000..92ac343 Binary files /dev/null and b/Iris-Recognition-master/Iris-Recognition-master/Images/Fig7.png differ diff --git a/Iris-Recognition-master/Iris-Recognition-master/Images/Fig8.png b/Iris-Recognition-master/Iris-Recognition-master/Images/Fig8.png new file mode 100644 index 0000000..40ddab6 Binary files /dev/null and b/Iris-Recognition-master/Iris-Recognition-master/Images/Fig8.png differ diff --git a/Iris-Recognition-master/Iris-Recognition-master/Images/code1.png b/Iris-Recognition-master/Iris-Recognition-master/Images/code1.png new file mode 100644 index 0000000..4a810e7 Binary files /dev/null and b/Iris-Recognition-master/Iris-Recognition-master/Images/code1.png 
differ diff --git a/Iris-Recognition-master/Iris-Recognition-master/Images/code2.png b/Iris-Recognition-master/Iris-Recognition-master/Images/code2.png new file mode 100644 index 0000000..07bc343 Binary files /dev/null and b/Iris-Recognition-master/Iris-Recognition-master/Images/code2.png differ diff --git a/Iris-Recognition-master/Iris-Recognition-master/Images/code3.png b/Iris-Recognition-master/Iris-Recognition-master/Images/code3.png new file mode 100644 index 0000000..b857264 Binary files /dev/null and b/Iris-Recognition-master/Iris-Recognition-master/Images/code3.png differ diff --git a/Iris-Recognition-master/Iris-Recognition-master/IrisLocalization.py b/Iris-Recognition-master/Iris-Recognition-master/IrisLocalization.py new file mode 100644 index 0000000..c6cabf9 --- /dev/null +++ b/Iris-Recognition-master/Iris-Recognition-master/IrisLocalization.py @@ -0,0 +1,76 @@ + +# coding: utf-8 + +import cv2 +import numpy as np +import glob +import math +from scipy.spatial import distance +from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA +import matplotlib.pyplot as plt +import pandas as pd +from sklearn import metrics + +def IrisLocalization(images): + #convert image to a color image + target = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in images] + boundary=[] #initialize empty list that will eventually contain all the images with boundaries + centers=[] #initialize empty list that will contain the centers of the boundary circles + for img in target: + + draw_img=img + + # remove noise by blurring the image + blur = cv2.bilateralFilter(img, 9,75,75) + img=blur + + #estimate the center of pupil + horizontalProjection = np.mean(img,0); + verticalProjection = np.mean(img,1); + center_x=horizontalProjection.argmin() + center_y=verticalProjection.argmin() + + #recalculate of pupil by concentrating on a 120X120 area + centrecrop_x = img[center_x-60:center_x+60] + centrecrop_y = img[center_y-60:center_y+60] + horizontalProjection = 
np.mean(centrecrop_y,0); + verticalProjection = np.mean(centrecrop_x,0); + crop_center_x=horizontalProjection.argmin() + crop_center_y=verticalProjection.argmin() + + cimg=img.copy() + cv2.circle(cimg,(crop_center_x,crop_center_y),1,(255,0,0),2) + + #apply Canny edge detector on the masked image + maskimage = cv2.inRange(img, 0, 70) + output = cv2.bitwise_and(img, maskimage) + edged = cv2.Canny(output, 100, 220) + + # Apply Hough transform to find potential boundaries of pupil + circles = cv2.HoughCircles(edged, cv2.HOUGH_GRADIENT, 10, 100) + + #define the center of the pupil + a = (crop_center_x,crop_center_y) + + out = img.copy() + min_dst=math.inf + for i in circles[0]: + #find the circle whose center is closest to the approx center found above + b=(i[0],i[1]) + dst = distance.euclidean(a, b) + if dst