Commit
Merge pull request #2 from Jayshah6699/main
Updated My Fork
Showing 54 changed files with 17,009 additions and 190 deletions.
@@ -0,0 +1,14 @@
## FACE RECOGNITION AND CLASSIFICATION

## The image used for training

![](face_rec/faces/Bharath%20C%20S.jpg)

## The image given as input for classification
![](face_rec/test1.PNG)

## The output image with label
![](face_rec/output_face_rec.PNG)
@@ -0,0 +1,91 @@
import face_recognition as fr
import os
import cv2
import numpy as np


def get_encoded_faces():
    """
    Looks through the faces folder and encodes all
    the faces it finds.
    :return: dict of (name, face encoding)
    """
    encoded = {}

    for dirpath, dnames, fnames in os.walk("./faces"):
        for f in fnames:
            if f.endswith(".jpg") or f.endswith(".png"):
                # Assumes each training image contains exactly one detectable face
                face = fr.load_image_file("faces/" + f)
                encoding = fr.face_encodings(face)[0]
                encoded[f.split(".")[0]] = encoding

    return encoded


def unknown_image_encoded(img):
    """
    Encode a face given the file name.
    """
    face = fr.load_image_file("faces/" + img)
    encoding = fr.face_encodings(face)[0]

    return encoding


def classify_face(im):
    """
    Find all of the faces in a given image and label
    them if they match a known face.
    :param im: str of file path
    :return: list of face names
    """
    faces = get_encoded_faces()
    faces_encoded = list(faces.values())
    known_face_names = list(faces.keys())

    img = cv2.imread(im, 1)
    #img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)
    #img = img[:, :, ::-1]

    face_locations = fr.face_locations(img)
    unknown_face_encodings = fr.face_encodings(img, face_locations)

    face_names = []
    for face_encoding in unknown_face_encodings:
        # See if the face is a match for the known face(s)
        matches = fr.compare_faces(faces_encoded, face_encoding)
        name = "Unknown"

        # Use the known face with the smallest distance to the new face
        face_distances = fr.face_distance(faces_encoded, face_encoding)
        best_match_index = np.argmin(face_distances)
        if matches[best_match_index]:
            name = known_face_names[best_match_index]

        face_names.append(name)

    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Draw a box around the face
        cv2.rectangle(img, (left - 20, top - 20), (right + 20, bottom + 20), (255, 0, 0), 2)

        # Draw a label with the name below the face
        cv2.rectangle(img, (left - 20, bottom - 15), (right + 20, bottom + 20), (255, 0, 0), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(img, name, (left - 20, bottom + 15), font, 1.0, (255, 255, 255), 2)

    # Display the resulting image until 'q' is pressed
    while True:
        cv2.imshow('Video', img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            return face_names


print(classify_face("test1.PNG"))
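For context, the script expects the layout implied by the README above: a faces/ folder of training images next to the script (one clearly visible face per file, with the file name used as the person's label) and the test image in the same directory. A sketch of that layout, with the script file name itself being illustrative:

```
face_rec/
├── face_rec.py            # the script above (name illustrative)
├── test1.PNG              # image passed to classify_face()
├── output_face_rec.PNG    # labelled output image referenced in the README
└── faces/
    └── Bharath C S.jpg    # one training image per person, file name = label
```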
Build own facial detection/own_facial_recogniton_notebook.ipynb: 176 additions, 0 deletions (large diff not rendered by default).
@@ -0,0 +1,5 @@
cmake
dlib
face_recognition
numpy
opencv-python
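These dependencies can typically be installed in one go with `pip install -r requirements.txt`; cmake (and a C++ toolchain) is listed because dlib is usually compiled from source during installation.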
@@ -0,0 +1,82 @@
The Chicago Crime dataset contains a summary of the reported crimes that occurred in the City of Chicago from 2001 to 2017.

The dataset was obtained from the Chicago Police Department's CLEAR (Citizen Law Enforcement Analysis and Reporting) system.

The dataset contains the following columns:

- ID: Unique identifier for the record.
- Case Number: The Chicago Police Department RD Number (Records Division Number), which is unique to the incident.
- Date: Date when the incident occurred.
- Block: Address where the incident occurred.
- IUCR: The Illinois Uniform Crime Reporting code.
- Primary Type: The primary description of the IUCR code.
- Description: The secondary description of the IUCR code, a subcategory of the primary description.
- Location Description: Description of the location where the incident occurred.
- Arrest: Indicates whether an arrest was made.
- Domestic: Indicates whether the incident was domestic-related as defined by the Illinois Domestic Violence Act.
- Beat: Indicates the beat where the incident occurred. A beat is the smallest police geographic area; each beat has a dedicated police beat car.
- District: Indicates the police district where the incident occurred.
- Ward: The ward (City Council district) where the incident occurred.
- Community Area: Indicates the community area where the incident occurred. Chicago has 77 community areas.
- FBI Code: Indicates the crime classification as outlined in the FBI's National Incident-Based Reporting System (NIBRS).
- X Coordinate: The x coordinate of the location where the incident occurred in the State of Illinois.
- Y Coordinate: The y coordinate of the location where the incident occurred in the State of Illinois.
- Year: Year the incident occurred.
- Updated On: Date and time the record was last updated.
- Latitude: The latitude of the location where the incident occurred. This location is shifted from the actual location for partial redaction but falls on the same block.
- Longitude: The longitude of the location where the incident occurred. This location is shifted from the actual location for partial redaction but falls on the same block.
- Location: The location where the incident occurred.

Prophet is open source software released by Facebook's Core Data Science team.

Prophet is a procedure for forecasting time series data based on an additive model where non-linear trends are fit with yearly, weekly, and daily seasonality, plus holiday effects.

Prophet works best with time series that have strong seasonal effects and several seasons of historical data.

For more information, see:

- https://research.fb.com/prophet-forecasting-at-scale/
- https://facebook.github.io/prophet/docs/quick_start.html#python-api

NOTE:

You must install the fbprophet package as follows: pip install fbprophet

If you encounter an error, try: conda install -c conda-forge fbprophet

Prophet implements an additive regression model with four elements:

- A piecewise linear trend: Prophet automatically detects change points in the data and identifies any changes in trend.
- A yearly seasonal component modeled using Fourier series.
- A weekly seasonal component.
- A holiday list that can be provided manually.

The additive regression model takes the form

Y = β_0 + Σ_{j=1}^{p} f_j(X_j) + ε

where the functions f_j(X_j) are unknown smoothing functions fit from the data.

Reference: https://research.fb.com/prophet-forecasting-at-scale/
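A minimal forecasting sketch in the spirit of the quick start linked above. The CSV file name and the daily-count aggregation are assumptions about how the data is prepared; Prophet itself only requires a DataFrame with `ds` (date) and `y` (value) columns:

```python
import pandas as pd
from fbprophet import Prophet  # pip install fbprophet

# Assumed preparation: count reported crimes per day and rename the columns
# to the ds/y schema that Prophet expects ("chicago_crime.csv" is a hypothetical file name).
df = pd.read_csv("chicago_crime.csv", parse_dates=["Date"])
daily = (df.set_index("Date")
           .resample("D")
           .size()
           .reset_index(name="y")
           .rename(columns={"Date": "ds"}))

m = Prophet()            # yearly and weekly seasonality are fit automatically
m.fit(daily)

future = m.make_future_dataframe(periods=365)   # forecast one year ahead
forecast = m.predict(future)
print(forecast[["ds", "yhat", "yhat_lower", "yhat_upper"]].tail())
```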
Binary file modified: Document matcher/output/document1.png/2-header-dilated-eroded.jpg (-14.3 KB, 54%).
@@ -0,0 +1 @@
web: sh setup.sh && streamlit run app.py
@@ -0,0 +1,3 @@
# ASL-prediction

![](screenshot.png)
@@ -0,0 +1 @@
{"class_name": "Sequential", "config": {"name": "sequential_1", "layers": [{"class_name": "Conv2D", "config": {"name": "conv2d_1", "trainable": true, "batch_input_shape": [null, 50, 50, 3], "dtype": "float32", "filters": 5, "kernel_size": [5, 5], "strides": [1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1], "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d_1", "trainable": true, "dtype": "float32", "pool_size": [4, 4], "padding": "valid", "strides": [4, 4], "data_format": "channels_last"}}, {"class_name": "Conv2D", "config": {"name": "conv2d_2", "trainable": true, "dtype": "float32", "filters": 15, "kernel_size": [5, 5], "strides": [1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1], "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d_2", "trainable": true, "dtype": "float32", "pool_size": [4, 4], "padding": "valid", "strides": [4, 4], "data_format": "channels_last"}}, {"class_name": "Flatten", "config": {"name": "flatten_1", "trainable": true, "dtype": "float32", "data_format": "channels_last"}}, {"class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "dtype": "float32", "units": 3, "activation": "softmax", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}]}, "keras_version": "2.3.1", "backend": "tensorflow"} |
@@ -0,0 +1,50 @@
import streamlit as st
from tensorflow.keras.models import model_from_json
from numpy import asarray
import numpy as np
from PIL import Image


# Load the saved model architecture and weights
json_file = open("ResNetModel.json", "r")
loaded_json_model = json_file.read()
json_file.close()

model = model_from_json(loaded_json_model)
model.load_weights("ResNetModelWeights.h5")
labels = list("ABCDEF")
st.title("Finger Sign Classification")

st.markdown("""
<style>
body {
    color: #000;
    background-color: white;
}
</style>
""", unsafe_allow_html=True)

st.set_option('deprecation.showfileUploaderEncoding', False)
uploaded_file = st.file_uploader("Upload image ", type=["png", "jpg", "jpeg"])

if st.button('Predict'):

    if uploaded_file is None:
        st.error("Please upload an image!")
    else:
        # Resize the upload to the 50x50 RGB input the model expects
        img = Image.open(uploaded_file).convert("RGB")
        img = img.resize((50, 50))
        img = asarray(img)

        print(img.shape)
        img = img.reshape((1, img.shape[0], img.shape[1], img.shape[2]))

        pred = labels[np.argmax(model.predict(img))]
        st.image(img[0], use_column_width=True)  # drop the batch dimension for display
        st.header("The uploaded image indicates the sign of alphabet " + pred)
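With ResNetModel.json and ResNetModelWeights.h5 placed alongside app.py, the app can be started locally with `streamlit run app.py`; the Procfile and setup.sh in this same changeset wire up the identical command for a Heroku-style deployment.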
@@ -0,0 +1,9 @@
mkdir -p ~/.streamlit/

echo "\
[server]\n\
port = $PORT\n\
enableCORS = false\n\
headless = true\n\
\n\
" > ~/.streamlit/config.toml