
Commit

initial commit
alvonx committed May 8, 2021
0 parents commit 1b25e3c
Showing 14 changed files with 272 additions and 0 deletions.
3 changes: 3 additions & 0 deletions .idea/.gitignore
8 changes: 8 additions & 0 deletions .idea/alvonCV.iml
73 changes: 73 additions & 0 deletions .idea/inspectionProfiles/Project_Default.xml
6 changes: 6 additions & 0 deletions .idea/inspectionProfiles/profiles_settings.xml
4 changes: 4 additions & 0 deletions .idea/misc.xml
8 changes: 8 additions & 0 deletions .idea/modules.xml
6 changes: 6 additions & 0 deletions .idea/vcs.xml

Empty file added LICENCE.txt
Empty file.
Empty file added README.md
Empty file.
76 changes: 76 additions & 0 deletions alvonCV/FaceDetectionModule.py
@@ -0,0 +1,76 @@
import cv2
import time
import mediapipe as mp


class FaceDetector:
    def __init__(self, minDetectionConf=0.5):
        self.minDetectionConf = minDetectionConf
        self.mpFaceDetection = mp.solutions.face_detection
        self.mpDraw = mp.solutions.drawing_utils
        self.faceDetection = self.mpFaceDetection.FaceDetection(min_detection_confidence=self.minDetectionConf)

    def findFaces(self, img, draw=False):
        # MediaPipe expects RGB input, while OpenCV reads frames as BGR.
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.faceDetection.process(imgRGB)
        bboxs = []
        if self.results.detections:
            for id, detection in enumerate(self.results.detections):
                # Convert the relative bounding box to pixel coordinates.
                bboxC = detection.location_data.relative_bounding_box
                ih, iw, ic = img.shape
                bbox = int(bboxC.xmin * iw), int(bboxC.ymin * ih), \
                       int(bboxC.width * iw), int(bboxC.height * ih)
                bboxs.append([id, bbox, detection.score])

                # Keypoint 0 is the right eye in MediaPipe's face detection output.
                right_eye = (detection.location_data.relative_keypoints[0].x * iw,
                             detection.location_data.relative_keypoints[0].y * ih)
                if draw:
                    self.fancyBBoxDraw(img, bbox)
                    cv2.circle(img, (int(right_eye[0]), int(right_eye[1])), 1, (0, 0, 255), 1)
                    cv2.putText(img, f'Score: {int(detection.score[0] * 100)}', (bbox[0], bbox[1] - 20),
                                cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 2)

        return img, bboxs

    def fancyBBoxDraw(self, img, bbox, l=20, t=6):
        # Draw a thin rectangle with thicker corner markers of length l and thickness t.
        x, y, w, h = bbox
        x1, y1 = x + w, y + h

        cv2.rectangle(img, bbox, (255, 0, 255), 2)

        # top left x,y
        cv2.line(img, (x, y), (x + l, y), (255, 0, 255), t)
        cv2.line(img, (x, y), (x, y + l), (255, 0, 255), t)

        # top right x1,y
        cv2.line(img, (x1, y), (x1 - l, y), (255, 0, 255), t)
        cv2.line(img, (x1, y), (x1, y + l), (255, 0, 255), t)

        # bottom left x,y1
        cv2.line(img, (x, y1), (x + l, y1), (255, 0, 255), t)
        cv2.line(img, (x, y1), (x, y1 - l), (255, 0, 255), t)

        # bottom right x1,y1
        cv2.line(img, (x1, y1), (x1 - l, y1), (255, 0, 255), t)
        cv2.line(img, (x1, y1), (x1, y1 - l), (255, 0, 255), t)

        return img


def main():
    cap = cv2.VideoCapture(0)
    pTime = 0
    faceDetectorObj = FaceDetector()

    while True:
        success, img = cap.read()
        if not success:
            break
        img, bboxs = faceDetectorObj.findFaces(img, draw=True)
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, f'FPS: {int(fps)}', (20, 70), cv2.FONT_HERSHEY_PLAIN, 3, (0, 255, 0), 2)
        cv2.imshow("Image", img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
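
For context, a minimal usage sketch of FaceDetector as exposed at the package level (see alvonCV/__init__.py below). The image path "face.jpg" and the 0.6 confidence threshold are hypothetical placeholders, not part of this commit.

```python
# Minimal sketch: run FaceDetector on a single image instead of a webcam stream.
# "face.jpg" is a hypothetical placeholder path; adjust to a real image.
import cv2
from alvonCV import FaceDetector

detector = FaceDetector(minDetectionConf=0.6)
img = cv2.imread("face.jpg")
img, bboxs = detector.findFaces(img, draw=True)

# Each entry is [id, (x, y, w, h), score] in pixel coordinates.
for face_id, bbox, score in bboxs:
    print(face_id, bbox, score)

cv2.imshow("Faces", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
```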
58 changes: 58 additions & 0 deletions alvonCV/FaceMeshModule.py
@@ -0,0 +1,58 @@
import cv2
import mediapipe as mp
import time


class FaceMeshDetector:
    def __init__(self, staticMode=False, maxFaces=5, minDetectionConf=0.5, minTrackConf=0.5):
        self.staticMode = staticMode
        self.maxFaces = maxFaces
        self.minDetectionConf = minDetectionConf
        self.minTrackConf = minTrackConf

        self.mpDraw = mp.solutions.drawing_utils
        self.mpFaceMesh = mp.solutions.face_mesh
        # Keyword arguments avoid breakage if the FaceMesh constructor gains new positional parameters.
        self.facemesh = self.mpFaceMesh.FaceMesh(static_image_mode=self.staticMode,
                                                 max_num_faces=self.maxFaces,
                                                 min_detection_confidence=self.minDetectionConf,
                                                 min_tracking_confidence=self.minTrackConf)
        self.drawSpec = self.mpDraw.DrawingSpec(thickness=1, circle_radius=1)

    def findFaceMesh(self, img, draw=True):
        # MediaPipe expects RGB input, while OpenCV reads frames as BGR.
        self.imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.facemesh.process(self.imgRGB)
        faces = []
        if self.results.multi_face_landmarks:
            for faceLms in self.results.multi_face_landmarks:
                if draw:
                    # Note: FACE_CONNECTIONS was renamed (e.g. to FACEMESH_TESSELATION) in newer mediapipe releases.
                    self.mpDraw.draw_landmarks(img, faceLms, self.mpFaceMesh.FACE_CONNECTIONS,
                                               self.drawSpec, self.drawSpec)

                # Convert each normalized landmark to pixel coordinates.
                face = []
                for id, lm in enumerate(faceLms.landmark):
                    ih, iw, ic = img.shape
                    x, y = int(lm.x * iw), int(lm.y * ih)
                    face.append([x, y])
                faces.append(face)

        return img, faces


def main():
    cap = cv2.VideoCapture(0)
    pTime = 0
    detector = FaceMeshDetector()
    while True:
        success, img = cap.read()
        if not success:
            break

        img, faces = detector.findFaceMesh(img)
        if len(faces):
            print(len(faces))

        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, f'FPS: {int(fps)}', (20, 70), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
        cv2.imshow('Image', img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
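
Similarly, a short sketch of running FaceMeshDetector on a single frame; "face.jpg" is again a hypothetical placeholder path.

```python
# Minimal sketch: extract face-mesh landmarks from one image.
# "face.jpg" is a hypothetical placeholder path.
import cv2
from alvonCV import FaceMeshDetector

detector = FaceMeshDetector(staticMode=True, maxFaces=1)
img = cv2.imread("face.jpg")
img, faces = detector.findFaceMesh(img, draw=True)

if faces:
    # Each face is a list of [x, y] pixel coordinates, one per landmark.
    print(f"Detected {len(faces)} face(s); first face has {len(faces[0])} landmarks")

cv2.imshow("Face Mesh", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
```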
2 changes: 2 additions & 0 deletions alvonCV/__init__.py
@@ -0,0 +1,2 @@
from alvonCV.FaceMeshModule import FaceMeshDetector
from alvonCV.FaceDetectionModule import FaceDetector
Empty file added setup.cfg
Empty file.
28 changes: 28 additions & 0 deletions setup.py
@@ -0,0 +1,28 @@
from distutils.core import setup

setup(
    name='alvonCV',  # Package folder name
    packages=['alvonCV'],  # Choose the same value as "name"
    version='0.1',  # Start with a small number and increase it with every release
    license='MIT',  # Choose a license from https://help.github.com/articles/licensing-a-repository
    description='Computer Vision Helper Package',  # Short description of the library
    author='Deepak Singh',
    author_email='deepaksinghgs30@gmail.com',
    url='https://github.com/alvon-X/alvonCV',  # Link to the GitHub repository
    download_url='https://github.com/user/reponame/archive/v_01.tar.gz',  # Placeholder; point to a release tarball
    keywords=['Computer Vision', 'Face Mesh', 'Face Detection', 'alvon'],  # Keywords that describe the package
    install_requires=[  # Runtime dependencies
        'opencv-python',
        'mediapipe',
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',  # Choose "3 - Alpha", "4 - Beta" or "5 - Production/Stable"
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',  # Must match the license above
        'Programming Language :: Python :: 3.6',  # Python versions to support
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
)
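
As an aside, distutils is deprecated in newer Python releases; a minimal setuptools-based equivalent of this file might look like the sketch below. It reuses the same metadata from this commit and is not part of it.

```python
# Sketch of a setuptools-based setup.py carrying the same core metadata.
from setuptools import setup, find_packages

setup(
    name='alvonCV',
    version='0.1',
    license='MIT',
    description='Computer Vision Helper Package',
    author='Deepak Singh',
    packages=find_packages(),  # picks up the alvonCV package automatically
    install_requires=['opencv-python', 'mediapipe'],
)
```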
