Merge pull request #14 from ziqinyeow/windows-exe
feat: tested fastapi exe in windows
ziqinyeow authored May 1, 2024
2 parents 9866919 + 1510246 commit 83fe4be
Showing 9 changed files with 215 additions and 22 deletions.
5 changes: 4 additions & 1 deletion Makefile
@@ -5,5 +5,8 @@ clean:
demo:
python demo.py

b:
windows:
pyinstaller -c -F --clean --hidden-import=cv2 --hidden-import=supervision --hidden-import=addict --hidden-import=chex --hidden-import=lap --hidden-import=optax --hidden-import=einshape --hidden-import=haiku --hidden-import=mediapy --name sidecar --specpath dist --distpath dist examples/fastapi-pyinstaller/server.py

mac:
pyinstaller -c -F --clean --name sidecar --specpath dist --distpath dist examples/fastapi-pyinstaller/server.py
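The extra --hidden-import flags in the windows target exist because PyInstaller's static import analysis can miss modules these libraries load dynamically. For readers who prefer driving PyInstaller from Python rather than make, a rough equivalent of the windows target is sketched below; the flags simply mirror the Makefile line above, and PyInstaller.__main__.run is PyInstaller's documented programmatic entry point.

```python
# Sketch only: a programmatic equivalent of `make windows`, assuming
# pyinstaller is installed in the current environment. The flags mirror the
# Makefile target above; nothing extra is added.
import PyInstaller.__main__

hidden_imports = [
    "cv2", "supervision", "addict", "chex", "lap",
    "optax", "einshape", "haiku", "mediapy",
]

PyInstaller.__main__.run([
    "-c", "-F", "--clean",
    *[f"--hidden-import={mod}" for mod in hidden_imports],
    "--name", "sidecar",
    "--specpath", "dist",
    "--distpath", "dist",
    "examples/fastapi-pyinstaller/server.py",
])
```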
7 changes: 6 additions & 1 deletion examples/fastapi-pyinstaller/README.md
@@ -6,9 +6,14 @@
git clone https://github.com/ziqinyeow/juxtapose
cd juxtapose
pip install .
pip uninstall juxtapose
pip uninstall juxtapose ultralytics yapf
pip install pyinstaller fastapi uvicorn[standard] python-multipart juxtematics

# mac
pyinstaller -c -F --clean --name sidecar --specpath dist --distpath dist examples/fastapi-pyinstaller/server.py

# windows
pyinstaller -c -F --clean --hidden-import=cv2 --hidden-import=supervision --hidden-import=addict --hidden-import=chex --hidden-import=lap --hidden-import=optax --hidden-import=einshape --hidden-import=haiku --hidden-import=mediapy --name sidecar --specpath dist --distpath dist examples/fastapi-pyinstaller/server.py
```

## How to run the exe
3 changes: 2 additions & 1 deletion examples/fastapi-pyinstaller/requirements.txt
@@ -1,3 +1,4 @@
fastapi
uvicorn[standard]
python-multipart
python-multipart
juxtematics
5 changes: 3 additions & 2 deletions examples/fastapi-pyinstaller/server.py
@@ -23,10 +23,11 @@
from juxtematics.constants import BODY_JOINTS_MAP
from fastapi.middleware.cors import CORSMiddleware
from tempfile import NamedTemporaryFile
import onnxruntime as ort

importing_time = time.time()

port = 8000
port = 1421

app = FastAPI(title="Juxt API", docs_url="/api/docs", openapi_url="/api/openapi.json")

@@ -41,7 +42,7 @@

@app.get("/")
def ok():
return {"status": "ok"}
return {"status": "ok", "gpu": ort.get_device() == "GPU"}


@app.get("/dir")
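For context, a minimal client-side sketch of the updated health check: the root route now reports whether onnxruntime sees a GPU, and the default port moved from 8000 to 1421. The sketch assumes the sidecar above is already running locally and uses only the route and response shape shown in the diff.

```python
# Sketch only: query the sidecar's health endpoint on the new default port.
import json
from urllib.request import urlopen

with urlopen("http://127.0.0.1:1421/") as resp:
    payload = json.load(resp)

# Expected shape per the handler above: {"status": "ok", "gpu": <bool>}
print(payload["status"], "| GPU detected by onnxruntime:", payload["gpu"])
```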
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.masonry.api"

[tool.poetry]
name = "juxtapose"
version = "0.0.24"
version = "0.0.25"
description = ""
authors = ["Zi Qin <ziqinyeow@gmail.com>"]
license = "MIT"
2 changes: 1 addition & 1 deletion src/juxtapose/__init__.py
@@ -1,4 +1,4 @@
__version__ = "0.0.24"
__version__ = "0.0.25"

from .rtm import RTM
from .detectors import RTMDet, YOLOv8
@@ -16,10 +16,132 @@
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_

# from timm.models.layers import DropPath, to_2tuple, trunc_normal_

from juxtapose.detectors.groundingdino.util.misc import NestedTensor

from itertools import repeat
import collections.abc
import math
import warnings


def drop_path(
x, drop_prob: float = 0.0, training: bool = False, scale_by_keep: bool = True
):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (
x.ndim - 1
) # work with diff dim tensors, not just 2D ConvNets
random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
if keep_prob > 0.0 and scale_by_keep:
random_tensor.div_(keep_prob)
return x * random_tensor


class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

def __init__(self, drop_prob: float = 0.0, scale_by_keep: bool = True):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
self.scale_by_keep = scale_by_keep

def forward(self, x):
return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)

def extra_repr(self):
return f"drop_prob={round(self.drop_prob,3):0.3f}"


# From PyTorch internals
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
return tuple(x)
return tuple(repeat(x, n))

return parse


to_2tuple = _ntuple(2)


def _trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0

if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2,
)

# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)

# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)

# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()

# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)

# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor


def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
# type: (Tensor, float, float, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
NOTE: this impl is similar to the PyTorch trunc_normal_, the bounds [a, b] are
applied while sampling the normal with mean/std applied, therefore a, b args
should be adjusted to match the range of mean, std args.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
with torch.no_grad():
return _trunc_normal_(tensor, mean, std, a, b)


class Mlp(nn.Module):
"""Multilayer perceptron."""
@@ -424,9 +546,9 @@ def __init__(
qk_scale=qk_scale,
drop=drop,
attn_drop=attn_drop,
drop_path=drop_path[i]
if isinstance(drop_path, list)
else drop_path,
drop_path=(
drop_path[i] if isinstance(drop_path, list) else drop_path
),
norm_layer=norm_layer,
)
for i in range(depth)
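This file replaces its timm imports with vendored copies of drop_path, DropPath, to_2tuple, and trunc_normal_, presumably so the PyInstaller build does not need to bundle timm. A small sanity-check sketch of the vendored helpers, assuming the definitions above are in scope (the file they live in is not named in this excerpt):

```python
# Sketch only: quick behavioural checks for the vendored timm replacements.
import torch

# DropPath is the identity outside training (and whenever drop_prob == 0).
dp = DropPath(drop_prob=0.2)
dp.eval()
x = torch.randn(4, 16, 32)
assert torch.equal(dp(x), x)

# trunc_normal_ fills the tensor in place, clamped to the default [-2, 2] bounds.
w = torch.empty(3, 5)
trunc_normal_(w, std=0.02)
assert w.min().item() >= -2.0 and w.max().item() <= 2.0

# to_2tuple broadcasts a scalar into a pair, as in the original timm helper.
assert to_2tuple(7) == (7, 7)
```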
@@ -8,7 +8,47 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import DropPath

# from timm.models.layers import DropPath


def drop_path(
x, drop_prob: float = 0.0, training: bool = False, scale_by_keep: bool = True
):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (
x.ndim - 1
) # work with diff dim tensors, not just 2D ConvNets
random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
if keep_prob > 0.0 and scale_by_keep:
random_tensor.div_(keep_prob)
return x * random_tensor


class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

def __init__(self, drop_prob: float = 0.0, scale_by_keep: bool = True):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
self.scale_by_keep = scale_by_keep

def forward(self, x):
return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)

def extra_repr(self):
return f"drop_prob={round(self.drop_prob,3):0.3f}"


class FeatureResizer(nn.Module):
@@ -127,7 +167,11 @@ def __init__(self, v_dim, l_dim, embed_dim, num_heads, dropout=0.1, cfg=None):
self._reset_parameters()

def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
return (
tensor.view(bsz, seq_len, self.num_heads, self.head_dim)
.transpose(1, 2)
.contiguous()
)

def _reset_parameters(self):
nn.init.xavier_uniform_(self.v_proj.weight)
@@ -171,7 +215,9 @@ def forward(self, v, l, attention_mask_v=None, attention_mask_l=None):
value_l_states = value_l_states.view(*proj_shape)

src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) # bs*nhead, nimg, ntxt
attn_weights = torch.bmm(
query_states, key_states.transpose(1, 2)
) # bs*nhead, nimg, ntxt

if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
@@ -191,7 +237,9 @@ def forward(self, v, l, attention_mask_v=None, attention_mask_l=None):
) # Do not increase 50000, data type half has quite limited range

attn_weights_T = attn_weights.transpose(1, 2)
attn_weights_l = attn_weights_T - torch.max(attn_weights_T, dim=-1, keepdim=True)[0]
attn_weights_l = (
attn_weights_T - torch.max(attn_weights_T, dim=-1, keepdim=True)[0]
)
if self.clamp_min_for_underflow:
attn_weights_l = torch.clamp(
attn_weights_l, min=-50000
@@ -204,7 +252,9 @@ def forward(self, v, l, attention_mask_v=None, attention_mask_l=None):
# mask vison for language
if attention_mask_v is not None:
attention_mask_v = (
attention_mask_v[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1)
attention_mask_v[:, None, None, :]
.repeat(1, self.num_heads, 1, 1)
.flatten(0, 1)
)
attn_weights_l.masked_fill_(attention_mask_v, float("-inf"))

@@ -213,7 +263,9 @@ def forward(self, v, l, attention_mask_v=None, attention_mask_l=None):
# mask language for vision
if attention_mask_l is not None:
attention_mask_l = (
attention_mask_l[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1)
attention_mask_l[:, None, None, :]
.repeat(1, self.num_heads, 1, 1)
.flatten(0, 1)
)
attn_weights.masked_fill_(attention_mask_l, float("-inf"))
attn_weights_v = attn_weights.softmax(dim=-1)
@@ -275,13 +327,21 @@ def __init__(
self.layer_norm_v = nn.LayerNorm(v_dim)
self.layer_norm_l = nn.LayerNorm(l_dim)
self.attn = BiMultiHeadAttention(
v_dim=v_dim, l_dim=l_dim, embed_dim=embed_dim, num_heads=num_heads, dropout=dropout
v_dim=v_dim,
l_dim=l_dim,
embed_dim=embed_dim,
num_heads=num_heads,
dropout=dropout,
)

# add layer scale for training stability
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.gamma_v = nn.Parameter(init_values * torch.ones((v_dim)), requires_grad=True)
self.gamma_l = nn.Parameter(init_values * torch.ones((l_dim)), requires_grad=True)
self.gamma_v = nn.Parameter(
init_values * torch.ones((v_dim)), requires_grad=True
)
self.gamma_l = nn.Parameter(
init_values * torch.ones((l_dim)), requires_grad=True
)

def forward(self, v, l, attention_mask_v=None, attention_mask_l=None):
v = self.layer_norm_v(v)
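The remaining changes in this file are line-length reformatting rather than behavioural changes. For intuition on the reformatted _shape helper above: it splits a (bsz, seq_len, embed_dim) projection into per-head batches. A standalone sketch with hypothetical sizes:

```python
# Sketch only: what the _shape reshape produces, with made-up dimensions.
import torch

bsz, seq_len, num_heads, head_dim = 2, 7, 4, 8
proj = torch.randn(bsz, seq_len, num_heads * head_dim)
per_head = proj.view(bsz, seq_len, num_heads, head_dim).transpose(1, 2).contiguous()
assert per_head.shape == (bsz, num_heads, seq_len, head_dim)
```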
5 changes: 3 additions & 2 deletions src/juxtapose/rtm.py
@@ -7,6 +7,7 @@
import supervision as sv

from typing import List, Union, Generator, Literal
import onnxruntime as ort

import torch

@@ -62,11 +63,11 @@ def __init__(
det: DETECTOR_TYPES = "rtmdet-m",
pose: POSE_ESTIMATOR_TYPES = "rtmpose-m",
tracker: TRACKER_TYPES = "bytetrack",
device: DEVICE_TYPES = "cuda" if torch.cuda.is_available() else "cpu",
device: DEVICE_TYPES = "cuda" if ort.get_device() == 'GPU' else "cpu",
annotator=Annotator(),
captions="person .",
) -> None:
if device == "cuda" and not torch.cuda.is_available():
if device == "cuda" and not ort.get_device() == "GPU":
LOGGER.info(f"Auto switch to CPU, as you are running without CUDA")
device = "cpu"

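A minimal usage sketch of the new device default in rtm.py: RTM now falls back to CPU unless onnxruntime reports a GPU build, instead of checking torch.cuda. This assumes RTM is imported from the package as re-exported in src/juxtapose/__init__.py above; the constructor arguments mirror the defaults shown in the diff.

```python
# Sketch only: rely on the new onnxruntime-based device auto-detection.
import onnxruntime as ort
from juxtapose import RTM

print("onnxruntime device:", ort.get_device())  # "GPU" or "CPU"

# With no explicit device argument, __init__ now defaults to "cuda" only when
# ort.get_device() == "GPU", and to "cpu" otherwise.
model = RTM(det="rtmdet-m", pose="rtmpose-m", tracker="bytetrack")
```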
