Commit f20b45d

Author: dmitroprobachay

Update trt inference

1 parent d3169ca commit f20b45d

File tree

11 files changed: 264 additions & 353 deletions

docker/tensorrt/Dockerfile

Lines changed: 2 additions & 3 deletions
@@ -1,6 +1,6 @@
 # nVidia TensorRT Base Image
-ARG TRT_CONTAINER_VERSION=21.07
-FROM nvcr.io/nvidia/tensorrt:${TRT_CONTAINER_VERSION}-py3
+ARG TRT_CONTAINER_VERSION=21.12
+FROM nvcr.io/nvidia/pytorch:${TRT_CONTAINER_VERSION}-py3

 ENV DEBIAN_FRONTEND noninteractive
 ENV TZ=Europe/Kiev
@@ -22,7 +22,6 @@ RUN apt-get install -y libturbojpeg

 RUN pip install --upgrade pip
 RUN pip install pillow==8.0.1
-RUN pip install torch==1.11.0+cu115 torchvision==0.12.0+cu115 -f https://download.pytorch.org/whl/torch_stable.html
 RUN pip install setuptools
 RUN pip install "PyYAML>=5.3"
 RUN pip install "numpy>=1.16.*"
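The separate torch/torchvision pip install disappears because the new base image is expected to bundle PyTorch (and TensorRT) already. A quick sanity check, on the assumption that it is run inside the nvcr.io/nvidia/pytorch:21.12-py3 container:

# Sketch only; assumes the NGC PyTorch image ships both frameworks.
import torch      # expected to be preinstalled in the base image
import tensorrt   # likewise expected; no longer installed by this Dockerfile

print(torch.__version__, tensorrt.__version__, torch.cuda.is_available())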

examples/ju/benchmark/runtime-test-tensorrt.ipynb

Lines changed: 109 additions & 155 deletions
Large diffs are not rendered by default.

examples/ju/inference/number-plate-recognition-tensorrt.ipynb

Lines changed: 135 additions & 188 deletions
Large diffs are not rendered by default.

examples/py/benchmark/runtime-test.py

Lines changed: 4 additions & 0 deletions
@@ -5,6 +5,10 @@

 from _paths import nomeroff_net_dir
 from nomeroff_net import pipeline
+import faulthandler
+
+
+faulthandler.enable()

 warnings.filterwarnings("ignore")
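For reference, a minimal standalone sketch of what the added lines do: faulthandler makes hard crashes in native code (for example inside TensorRT/CUDA bindings) dump Python tracebacks instead of dying silently.

import faulthandler

# After this call, fatal signals such as SIGSEGV or SIGABRT raised in native
# extensions still print the Python traceback of every thread to stderr,
# which makes benchmark crashes much easier to locate.
faulthandler.enable()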

examples/py/model_convertors/convert_yolo_to_tensorrt.py

Lines changed: 8 additions & 2 deletions
@@ -5,6 +5,7 @@
 """
 import sys
 import os
+import pathlib
 import argparse
 import subprocess

@@ -16,7 +17,7 @@ def parse_args():
     ap = argparse.ArgumentParser()
     ap.add_argument("-f", "--filepath",
                     default=os.path.join(os.path.abspath(os.getcwd()),
-                                         "../../../data/model_repository/numberplate_options/1/model.onnx"),
+                                         "../../../data/model_repository/yolov5s/1/model.engine"),
                     required=False,
                     type=str,
                     help="Result onnx model filepath")
@@ -28,15 +29,20 @@ def main():
     args = parse_args()
     model_filepath = args["filepath"]

+    # make dirs
+    p = pathlib.Path(os.path.dirname(model_filepath))
+    p.mkdir(parents=True, exist_ok=True)
+
     # download and append to path yolo repo
     info = modelhub.download_repo_for_model("yolov5")
     repo_path = info["repo_path"]
     model_info = modelhub.download_model_by_name("yolov5")
     path_to_model = model_info["path"]
+    print(f'python3 ./export.py --weights={path_to_model} --include=engine --device 0 --dynamic;')
     res = path_to_model.replace(".pt", ".engine")
     # python3 ./export.py --weights yolov5s-2021-12-14.pt --include engine --device 0 --dinamic
     subprocess.call([f'cd {repo_path}; '
-                     f'python3 ./export.py --weights={path_to_model} --include=engine --device 0 --dinamic;'
+                     f'python3 ./export.py --weights={path_to_model} --include=engine --device 0;'
                      f'cp {res} {model_filepath}'], shell=True)
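A standalone sketch of the directory-preparation step added to main() (the path is just the script's new default, reused here for illustration): creating the target directory up front keeps the final cp of the exported .engine file from failing when the model_repository layout does not exist yet.

import os
import pathlib

model_filepath = "../../../data/model_repository/yolov5s/1/model.engine"
# create .../yolov5s/1/ (and any missing parents) before copying the engine into it
pathlib.Path(os.path.dirname(model_filepath)).mkdir(parents=True, exist_ok=True)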

nomeroff_net/pipelines/number_plate_classification_trt.py

Lines changed: 1 addition & 1 deletion
@@ -24,6 +24,6 @@ def forward(self, inputs: Any, **forward_parameters: Dict) -> Any:
         model_outputs = []
         for inp in inputs:
             model_output = self.detector.forward([inp])
-            model_output = unzip(model_output)
+            model_output = unzip(model_output)[0]
             model_outputs.append(model_output)
         return model_outputs
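An illustration of why the extra [0] matters, assuming unzip() behaves like a transpose, roughly list(zip(*...)); this is an assumption, not confirmed by the diff, and the field values are made up:

def unzip(fields):                 # stand-in for nomeroff_net's helper (assumption)
    return list(zip(*fields))

# detector.forward([inp]) returns one entry per output field for a batch of one:
model_output = (["eu"], [0.97])    # hypothetical (region, confidence) fields
print(unzip(model_output))         # [('eu', 0.97)]  - still wrapped in a batch list
print(unzip(model_output)[0])      # ('eu', 0.97)    - the single sample itself

With the index applied, model_outputs collects one flat tuple per input instead of a list of one-element batches.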

nomeroff_net/pipelines/number_plate_localization.py

Lines changed: 1 addition & 1 deletion
@@ -41,7 +41,7 @@ def preprocess(self, inputs: Any, **preprocess_parameters: Dict) -> Any:
     @no_grad()
     def forward(self, images: Any, **forward_parameters: Dict) -> Any:
         model_outputs = self.detector.predict(images)
-        return unzip([model_outputs, images])
+        return unzip([images, model_outputs])

     def postprocess(self, inputs: Any, **postprocess_parameters: Dict) -> Any:
         return inputs

nomeroff_net/pipelines/number_plate_localization_trt.py

Lines changed: 1 addition & 1 deletion
@@ -36,7 +36,7 @@ def preprocess(self, inputs: Any, **preprocess_parameters: Dict) -> Any:
     @no_grad()
     def forward(self, images: Any, **forward_parameters: Dict) -> Any:
         detected_images_bboxs = self.detector.predict(images)
-        return unzip([images, detected_images_bboxs])
+        return unzip([detected_images_bboxs, images])

     def postprocess(self, inputs: Any, **postprocess_parameters: Dict) -> Any:
         return inputs
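Both localization forwards above only swap the order of the two lists handed to unzip. Under the same transpose-style assumption about unzip, the order decides which element comes first in every per-image pair that the next pipeline stage receives:

def unzip(fields):              # stand-in for nomeroff_net's helper (assumption)
    return list(zip(*fields))

images = ["img0", "img1"]       # hypothetical inputs
bboxs = ["boxes0", "boxes1"]    # hypothetical detector output, one entry per image
print(unzip([images, bboxs]))   # [('img0', 'boxes0'), ('img1', 'boxes1')]
print(unzip([bboxs, images]))   # [('boxes0', 'img0'), ('boxes1', 'img1')]

Presumably each swap aligns the pair order with what the following stage of that particular pipeline expects.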

nomeroff_net/pipelines/number_plate_text_reading_trt.py

Lines changed: 1 addition & 1 deletion
@@ -27,5 +27,5 @@ def forward(self, inputs: Any, **forward_parameters: Dict) -> Any:
             model_inputs = self.detector.preprocess([image], [label], [line])
             model_output = self.detector.forward(model_inputs)
             model_output = self.detector.postprocess(model_output)
-            model_outputs.append(model_output)
+            model_outputs.append(model_output[0])
         return unzip([images, model_outputs, labels])

nomeroff_net/pipes/number_plate_classificators/options_detector_trt.py

Lines changed: 1 addition & 0 deletions
@@ -80,6 +80,7 @@ def run_engine(self, input_image):
             self.height,
             self.width
         ))
+        input_image = np.array(input_image)
         # Allocate host and device buffers
         bindings = []
         outputs = []
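A standalone sketch of what the new np.array(...) line guards against (shapes and the copy call are illustrative, not taken from the repository): the host-buffer copy in a typical TensorRT Python inference loop wants one contiguous ndarray, while the preprocessed batch may still arrive as nested Python lists.

import numpy as np

# hypothetical preprocessed batch, still nested Python lists after resizing/normalization
batch = [np.zeros((3, 64, 295), dtype=np.float32).tolist() for _ in range(2)]

input_image = np.array(batch, dtype=np.float32)  # one contiguous (N, C, H, W) array
flat = input_image.ravel()                       # the form np.copyto(host_mem, ...) would consume
print(input_image.shape, flat.dtype)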

nomeroff_net/pipes/number_plate_localizators/yolo_v5_detector.py

Lines changed: 1 addition & 1 deletion
@@ -44,4 +44,4 @@ def predict(self, imgs: List[np.ndarray], min_accuracy: float = 0.5) -> List:
                          for item in img_item.to_dict(orient="records")
                          if item["confidence"] > min_accuracy]
                         for img_item in model_outputs.pandas().xyxy]
-        return model_outputs
+        return np.array(model_outputs)

0 commit comments
