Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
41 changes: 41 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
## === Stage 1 --- Build dlib wheel ===
# Named stage so stage 2 can reference it robustly (instead of the
# positional '--from=0', which breaks if a stage is ever inserted).
FROM python:3.8 AS dlib-builder

## Build 'dlib' separately - it takes a long time and needs a lot of space
# --no-install-recommends and removing the apt lists keep the layer small.
RUN apt-get update \
    && apt-get install -y --no-install-recommends build-essential cmake \
    && rm -rf /var/lib/apt/lists/*
RUN mkdir -p /tmp/dlib
RUN pip wheel --use-pep517 --wheel-dir /tmp/dlib dlib


## === Stage 2 --- Build the actual container ===
FROM python:3.8

WORKDIR /app
EXPOSE 5000

# Install the 'dlib' wheel built in the first stage. Install and delete in
# the same RUN so the wheel does not persist in an intermediate layer.
COPY --from=dlib-builder /tmp/dlib /tmp/dlib
RUN pip install /tmp/dlib/dlib-*.whl && rm -rf /tmp/dlib

# libGL1 is required for opencv-python
RUN apt-get update \
    && apt-get install -y --no-install-recommends libgl1 \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

# Download YOLOv4 model files (also change mlapiconfig.docker.ini
# if you need some other models)
COPY get_models.sh .
RUN INSTALL_YOLOV3=no INSTALL_TINYYOLOV3=no INSTALL_YOLOV4=yes INSTALL_TINYYOLOV4=no ./get_models.sh

# Prevent re-installing if source has changed but not requirements.txt
COPY requirements.txt .
RUN pip install -r requirements.txt && rm -rf /root/.cache

# Copy all the files
COPY . .
RUN rm -rf .git

# Use the config customised for Docker
RUN cp mlapiconfig.docker.ini mlapiconfig.ini

CMD [ "python3", "./mlapi.py", "-c", "mlapiconfig.ini" ]
28 changes: 25 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,31 @@ Tip of the Hat
===============
A tip of the hat to [Adrian Rosebrock](https://www.pyimagesearch.com/about/) to get me started. His articles are great.

Running in a Docker container
=============================
A simple `Dockerfile` is included with this project. It is preconfigured for the *YOLOv4* model but can easily be changed to use any other available models.

To build it run:

```
git clone https://github.com/pliablepixels/mlapi.git
cd mlapi
docker build -t mlapi:local .
```

Once built you can run the container:

```
docker run --rm -it -e MLAPI_USER="myuser" -e MLAPI_PASSWORD="Passw0rd" -p 5000:5000 mlapi:local
```

To quickly test it run `curl http://localhost:5000/api/v1/health`, then follow the instructions below under the *Running* section.

Containerized Fork
==================
themoosman maintains a containerized fork of this [repo](https://github.com/themoosman/mlapi). This fork runs as a container and has been refactored to a WSGI (NGINX + Gunicorn + Flask) application. Please **do not** post questions about his containerized fork here. Please post issues in his fork.
------------------
[@themoosman](https://github.com/themoosman) maintains a [containerized fork of this mlapi](https://github.com/themoosman/mlapi).

This fork has been refactored to a WSGI (NGINX + Gunicorn + Flask) application. Please **do not** post questions about his containerized fork here. Please post issues in his fork.

Install
=======
Expand All @@ -37,7 +59,7 @@ Note that this package also needs OpenCV which is not installed by the above ste

Then:
```
git clone https://github.com/pliablepixels/mlapi
git clone https://github.com/pliablepixels/mlapi.git
cd mlapi
sudo -H pip3 install -r requirements.txt
```
Expand Down
2 changes: 1 addition & 1 deletion get_models.sh
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ INSTALL_TINYYOLOV4=${INSTALL_TINYYOLOV4:-yes}
INSTALL_CORAL_EDGETPU=${INSTALL_CORAL_EDGETPU:-no}

TARGET_DIR='./models'
WGET=$(which wget)
WGET="$(which wget) --progress=bar:force:noscroll"

# utility functions for color coded pretty printing
print_error() {
Expand Down
135 changes: 135 additions & 0 deletions mlapiconfig.docker.ini
Original file line number Diff line number Diff line change
@@ -0,0 +1,135 @@
[general]
# This is an optional file
# If specified, you can specify tokens with secret values in that file
# and only refer to the tokens in your main config file

secrets=./secrets.ini
#secrets=./secrets.mine

# port that mlapi will listen on. Default 5000
port=5000

# Maximum # of processes that will be forked
# to handle requests. Note that each process will
# have its own copy of the model, so memory can
# build up very quickly
# This number also dictates how many requests will be executed in parallel
# The rest will be queued

# For now, keep this to 1 if you are on a GPU
processes=1

# the secret key that will be used to sign
# JWT tokens. Make sure you change the value
# in your secrets.ini
mlapi_secret_key=!MLAPI_SECRET_KEY

# folder where images will be uploaded
# default ./images
images_path=./images

# folder where the user DB will be stored
db_path=./db

# If specified, will limit detected object size to this amount of
# the total image size passed. Can help avoiding weird detections
# You can specify as % or px. Default is px
# Remember the image is resized to 416x416 internally, so it is
# better to keep this in %
max_detection_size=100%

# You can now limit the # of detection process
# per target processor. If not specified, default is 1
# Other detection processes will wait to acquire lock

cpu_max_processes=3
tpu_max_processes=1
gpu_max_processes=1

# NEW: Time to wait in seconds per processor to be free, before
# erroring out. Default is 120 (2 mins)
cpu_max_lock_wait=120
tpu_max_lock_wait=120
gpu_max_lock_wait=120


[object]

# for Yolov3
#object_framework=opencv
#object_processor=cpu
#object_config=./models/yolov3/yolov3.cfg
#object_weights=./models/yolov3/yolov3.weights
#object_labels=./models/yolov3/coco.names

# for Tiny Yolov3
#object_framework=opencv
#object_processor=cpu
#object_config=./models/tinyyolov3/yolov3-tiny.cfg
#object_weights=./models/tinyyolov3/yolov3-tiny.weights
#object_labels=./models/tinyyolov3/coco.names

# for Yolov4
object_framework=opencv
object_processor=cpu
object_config=./models/yolov4/yolov4.cfg
object_weights=./models/yolov4/yolov4.weights
object_labels=./models/yolov4/coco.names

# for Tiny Yolov4
#object_framework=opencv
#object_processor=cpu
#object_config=./models/tinyyolov4/yolov4-tiny.cfg
#object_weights=./models/tinyyolov4/yolov4-tiny.weights
#object_labels=./models/tinyyolov4/coco.names

# for Google Coral Edge TPU
#object_framework=coral_edgetpu
#object_processor=tpu
#object_weights=./models/coral_edgetpu/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite
#object_labels=./models/coral_edgetpu/coco_indexed.names


[face]
face_detection_framework=dlib
face_recognition_framework=dlib
face_num_jitters=0
face_upsample_times=1
face_model=cnn
face_train_model=hog
face_recog_dist_threshold=0.6
face_recog_knn_algo=ball_tree

known_images_path=./known_faces
unknown_images_path=./unknown_faces

unknown_face_name=unknown face
save_unknown_faces=yes
save_unknown_faces_leeway_pixels=50

[alpr]

alpr_use_after_detection_only=yes
alpr_api_type=cloud

# -----| If you are using plate recognizer | ------
alpr_service=plate_recognizer
alpr_key=!PLATEREC_ALPR_KEY
platerec_stats=yes
#platerec_regions=['us','cn','kr']
platerec_min_dscore=0.1
platerec_min_score=0.2

# ----| If you are using openALPR |-----
#alpr_service=open_alpr
#alpr_key=!OPENALPR_ALPR_KEY
#openalpr_recognize_vehicle=1
#openalpr_country=us
#openalpr_state=ca
# openalpr returns percents, but we convert to between 0 and 1
#openalpr_min_confidence=0.3

# ----| If you are using openALPR command line |-----
openalpr_cmdline_binary=alpr
openalpr_cmdline_params=-j -d
openalpr_cmdline_min_confidence=0.3
5 changes: 3 additions & 2 deletions modules/common_params.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import os
import modules.log as g_log


Expand Down Expand Up @@ -71,7 +72,7 @@
},
'mlapi_secret_key':{
'section': 'general',
'default': None,
'default': os.getenv('MLAPI_SECRET_KEY'),
'type': 'string',
},

Expand Down Expand Up @@ -195,7 +196,7 @@
},
'alpr_key': {
'section': 'alpr',
'default': '',
'default': os.getenv('MLAPI_ALPR_KEY'),
'type': 'string',
},
'alpr_use_after_detection_only': {
Expand Down
38 changes: 21 additions & 17 deletions modules/db.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import os
from tinydb import TinyDB, Query, where
from passlib.hash import bcrypt
import modules.common_params as g
Expand All @@ -16,24 +17,27 @@ def __init__(self):
self.query = Query()
g.log.debug ('DB engine ready')
if not len(self.users):
g.log.debug ('Initializing default users')

print ('--------------- User Creation ------------')
print ('Please configure atleast one user:')
while True:
name = input ('user name:')
if not name:
print ('Error: username needed')
continue
p1 = getpass.getpass('Please enter password:')
if not p1:
print ('Error: password cannot be empty')
continue
p2 = getpass.getpass('Please re-enter password:')
if p1 != p2:
print ('Passwords do not match, please re-try')
continue
break
if os.getenv('MLAPI_USER') and os.getenv('MLAPI_PASSWORD'):
name = os.getenv('MLAPI_USER')
p1 = os.getenv('MLAPI_PASSWORD')
g.log.debug('Creating user from $MLAPI_USER: {}'.format(name))
else:
print ('Please configure at least one user:')
while True:
name = input ('User name:')
if not name:
print ('Error: username needed')
continue
p1 = getpass.getpass('Please enter password:')
if not p1:
print ('Error: password cannot be empty')
continue
p2 = getpass.getpass('Please re-enter password:')
if p1 != p2:
print ('Passwords do not match, please re-try')
continue
break
self.users.insert({'name':name, 'password':self._get_hash(p1)})
print ('------- User: {} created ----------------'.format(name))

Expand Down
1 change: 1 addition & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -13,3 +13,4 @@ scikit_learn
face_recognition
pyzm>=0.1.27
portalocker
opencv-python>=4.3