Commit
Merge pull request #18 from virasad/develop
Release v1.0.0 stable
a-sharifi authored Jul 5, 2022
2 parents 8e9c74b + b5a2f26 commit ecbd1d6
Showing 31 changed files with 1,624 additions and 0 deletions.
218 changes: 218 additions & 0 deletions .gitignore
@@ -0,0 +1,218 @@
### JetBrains template
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839

# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf

# Generated files
.idea/**/contentModel.xml

# Sensitive or high-churn files
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml

# Gradle
.idea/**/gradle.xml
.idea/**/libraries

# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/artifacts
# .idea/compiler.xml
# .idea/jarRepositories.xml
# .idea/modules.xml
# .idea/*.iml
# .idea/modules
# *.iml
# *.ipr

# CMake
cmake-build-*/

# Mongo Explorer plugin
.idea/**/mongoSettings.xml

# File-based project format
*.iws

# IntelliJ
out/

# mpeltonen/sbt-idea plugin
.idea_modules/

# JIRA plugin
atlassian-ide-plugin.xml

# Cursive Clojure plugin
.idea/replstate.xml

# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties

# Editor-based Rest Client
.idea/httpRequests

# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser

### Python template
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/
volumes/
dataset2/
2 changes: 2 additions & 0 deletions README.md
@@ -1 +1,3 @@
# semantic_segment_servicev2

Will be updated soon
66 changes: 66 additions & 0 deletions docker-compose.yml
@@ -0,0 +1,66 @@
version: "3.8"
services:
  inference:
    # network_mode: host
    container_name: "semantic-segment-inference2"
    build: "./inference"
    command: "uvicorn main:app --host 0.0.0.0 --port 5553 --reload"
    ports:
      - "5553:5553"
    volumes:
      - ./inference:/code
      - ./volumes/weights:/weights
    environment:
      - PORT=5556
      - WEIGHTS_DIR=/weights
    ipc: host
    shm_size: 1024M
    deploy:
      resources:
        reservations:
          devices:
            - capabilities: [ gpu ]
    networks:
      - segment-network

  train:
    # network_mode: host
    container_name: "semantic-segment-train2"
    build: "./train"
    command: "uvicorn main:app --host 0.0.0.0 --port 5554 --reload"
    ports:
      - "5554:5554"
    volumes:
      - ./train:/code
      - ./volumes/weights:/weights
      - ./volumes/dataset:/dataset
    environment:
      - RESPONSE_URL=http://web:8000/api/v1/train/done
      - LOGGER_URL=http://127.0.0.1:8000/logger
      - PORT=5554
      - IS_LOGGER_ON=False
      - WEIGHTS_DIR=/weights
    ipc: host
    shm_size: 1024M
    deploy:
      resources:
        reservations:
          devices:
            - capabilities: [ gpu ]
    networks:
      - segment-network

  redis:
    restart: always
    image: redis:latest
    expose:
      - "6379"
    volumes:
      - ./volumes/redis-data:/data
    networks:
      - segment-network

networks:
  segment-network:

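A minimal sketch of how the stack is typically brought up with this compose file, assuming Docker Compose v2 and the NVIDIA container runtime are available on the host:

docker compose up --build -d
docker compose logs -f inference

With the port mappings above, the inference API is then reachable on localhost:5553 and the train API on localhost:5554.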
8 changes: 8 additions & 0 deletions docker-gpu-install.sh
@@ -0,0 +1,8 @@
#!/usr/bin/env bash
# Install the NVIDIA container toolkit so Docker can expose GPUs to containers
distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \
&& curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \
&& curl -s -L https://nvidia.github.io/libnvidia-container/$distribution/libnvidia-container.list | \
sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list

sudo apt-get update
sudo apt-get install -y nvidia-docker2
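
A quick way to confirm GPU access after running this script is to launch a throwaway CUDA container; the image tag below is only an illustrative assumption, any available nvidia/cuda base tag will do:

sudo systemctl restart docker
sudo docker run --rm --gpus all nvidia/cuda:11.3.1-base-ubuntu20.04 nvidia-smi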
12 changes: 12 additions & 0 deletions inference/Dockerfile
@@ -0,0 +1,12 @@
FROM pytorch/pytorch:1.12.0-cuda11.3-cudnn8-devel
ENV TZ=UTC
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime

# Drop the stale CUDA apt source shipped with the base image (its signing key was rotated)
RUN rm /etc/apt/sources.list.d/cuda.list

# OpenCV runtime dependencies
RUN apt-get update && apt-get install -y ffmpeg libsm6 libxext6

WORKDIR /code/
COPY requirements.txt /code/
RUN pip install -r requirements.txt
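
The image is normally built through docker-compose, but it can also be built on its own from the repository root; the tag name below is arbitrary:

docker build -t semantic-segment-inference ./inference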
72 changes: 72 additions & 0 deletions inference/main.py
@@ -0,0 +1,72 @@
import io

import cv2
import numpy as np
from fastapi import FastAPI, File, UploadFile
from starlette.responses import StreamingResponse

from predict import InferenceSeg

# import imantics


app = FastAPI(
    title="Segmentation Model",
    description="API for segmentation model training and inference",
    version="0.1.0",
    contact={
        "name": "Virasad",
        "url": "https://virasad.ir",
        "email": "info@virasad.ir",
    },
    license_info={
        "name": "Apache 2.0",
        "url": "https://www.apache.org/licenses/LICENSE-2.0.html",
    },
)
detector = InferenceSeg('mobilenet_v2', 'imagenet')


@app.get("/")
def root():
    return {"message": "Welcome to the Segmentation API. Documentation is available at /docs"}


@app.post("/predict")
async def predict(image: UploadFile = File(...), return_image: bool = False, return_coco: bool = False):
    try:
        if return_image and return_coco:
            raise ValueError('return_image and return_coco cannot be True at the same time')

        contents = await image.read()
        # Decode the uploaded bytes into an OpenCV BGR image
        nparr = np.frombuffer(contents, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        response = detector.predict_data(img, return_image, return_coco)
        if return_image:
            return StreamingResponse(io.BytesIO(response), media_type='image/jpeg')
        elif return_coco:
            return {"results": response}
        return {"message": "Success"}
    except Exception as e:
        return {"message": str(e)}


@app.post("/n-classes")
async def set_classes(n_classes: int):
    detector.set_classes(n_classes)
    return {"message": "Success"}


@app.post("/set-size")
async def set_size(width: int, height: int):
    detector.set_size(width, height)
    return {"message": "Success"}


@app.post("/set-model")
async def set_model(model_path: str):
    detector.set_model(model_path)
    return {"message": "Success"}
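As a usage sketch (not part of this commit), the /predict endpoint can be exercised with curl once the inference container is up; the sample image path is hypothetical:

curl -X POST "http://localhost:5553/predict?return_coco=true" -F "image=@sample.jpg"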
Empty file added inference/model/__init__.py
Empty file.