Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Code Refactor for Speed and Readability #9

Merged
merged 2 commits into from
Jun 9, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 16 additions & 18 deletions detect.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,16 +9,10 @@
targets_path = "utils/targets_c60.mat"

parser = argparse.ArgumentParser()
parser.add_argument("-image_folder", type=str, default="./1047.tif", help="path to images")
parser.add_argument("-output_folder", type=str, default="./output", help="path to outputs")
# Get data configuration
if platform == "darwin": # macos
parser.add_argument("-image_folder", type=str, default="./1047.tif", help="path to images")
parser.add_argument("-output_folder", type=str, default="./output", help="path to outputs")
cuda = torch.cuda.is_available()
else: # gcp
parser.add_argument("-image_folder", type=str, default="./1047.tif", help="path to images")
parser.add_argument("-output_folder", type=str, default="./output", help="path to outputs")
cuda = False

cuda = torch.cuda.is_available() if platform == "darwin" else False
# python3 detect.py -plot_flag 1
parser.add_argument("-plot_flag", type=bool, default=True)
parser.add_argument("-secondary_classifier", type=bool, default=False)
Expand All @@ -34,9 +28,9 @@

def detect(opt):
if opt.plot_flag:
os.system("rm -rf " + opt.output_folder + "_img")
os.makedirs(opt.output_folder + "_img", exist_ok=True)
os.system("rm -rf " + opt.output_folder)
os.system(f"rm -rf {opt.output_folder}_img")
os.makedirs(f"{opt.output_folder}_img", exist_ok=True)
os.system(f"rm -rf {opt.output_folder}")
os.makedirs(opt.output_folder, exist_ok=True)
device = torch.device("cuda:0" if cuda else "cpu")

Expand Down Expand Up @@ -138,7 +132,7 @@ def detect(opt):
# pred[:, 1] += y1
# preds.append(pred.unsqueeze(0))

if len(preds) > 0:
if preds:
detections = non_max_suppression(
torch.cat(preds, 1), opt.conf_thres, opt.nms_thres, mat_priors, img, model2, device
)
Expand All @@ -151,7 +145,7 @@ def detect(opt):
# Bounding-box colors
color_list = [[random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)] for _ in range(len(classes))]

if len(img_detections) == 0:
if not img_detections:
return

# Iterate through images and save plot of detections
Expand All @@ -175,10 +169,10 @@ def detect(opt):

# write results to .txt file
results_path = os.path.join(opt.output_folder, path.split("/")[-1])
if os.path.isfile(results_path + ".txt"):
os.remove(results_path + ".txt")
if os.path.isfile(f"{results_path}.txt"):
os.remove(f"{results_path}.txt")

results_img_path = os.path.join(opt.output_folder + "_img", path.split("/")[-1])
results_img_path = os.path.join(f"{opt.output_folder}_img", path.split("/")[-1])
with open(results_path.replace(".bmp", ".tif") + ".txt", "a") as file:
for i in unique_classes:
n = (detections[:, -1].cpu() == i).sum()
Expand Down Expand Up @@ -212,7 +206,11 @@ def detect(opt):
if opt.plot_flag:
from scoring import score

score.score(opt.output_folder + "/", "/Users/glennjocher/Downloads/DATA/xview/xView_train.geojson", ".")
score.score(
f"{opt.output_folder}/",
"/Users/glennjocher/Downloads/DATA/xview/xView_train.geojson",
".",
)


class ConvNetb(nn.Module):
Expand Down
6 changes: 3 additions & 3 deletions models.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ def create_modules(module_defs):

elif module_def["type"] == "route":
layers = [int(x) for x in module_def["layers"].split(",")]
filters = sum([output_filters[layer_i] for layer_i in layers])
filters = sum(output_filters[layer_i] for layer_i in layers)
modules.add_module("route_%d" % i, EmptyLayer())

elif module_def["type"] == "shortcut":
Expand Down Expand Up @@ -81,7 +81,7 @@ class YOLOLayer(nn.Module):
def __init__(self, anchors, nC, img_dim, anchor_idxs):
super(YOLOLayer, self).__init__()

anchors = [(a_w, a_h) for a_w, a_h in anchors] # (pixels)
anchors = list(anchors)
nA = len(anchors)

self.anchors = anchors
Expand Down Expand Up @@ -161,7 +161,7 @@ def forward(self, p, targets=None, requestPrecision=False, weight=None, epoch=No

# Mask outputs to ignore non-existing objects (but keep confidence predictions)
nM = mask.sum().float()
nGT = sum([len(x) for x in targets])
nGT = sum(len(x) for x in targets)
if nM > 0:
# wC = weight[torch.argmax(tcls, 1)] # weight class
# wC /= sum(wC)
Expand Down
92 changes: 44 additions & 48 deletions utils/datasets.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,15 +15,15 @@
class ImageFolder: # for eval-only
def __init__(self, path, batch_size=1, img_size=416):
if os.path.isdir(path):
self.files = sorted(glob.glob("%s/*.*" % path))
self.files = sorted(glob.glob(f"{path}/*.*"))
elif os.path.isfile(path):
self.files = [path]

self.nF = len(self.files) # number of image files
self.nB = math.ceil(self.nF / batch_size) # number of batches
self.batch_size = batch_size
self.height = img_size
assert self.nF > 0, "No images found in path %s" % path
assert self.nF > 0, f"No images found in path {path}"

# RGB normalization values
self.rgb_mean = np.array([60.134, 49.697, 40.746], dtype=np.float32).reshape((3, 1, 1))
Expand Down Expand Up @@ -57,11 +57,11 @@ def __len__(self):
class ListDataset: # for training
def __init__(self, path, batch_size=1, img_size=608, targets_path=""):
self.path = path
self.files = sorted(glob.glob("%s/*.bmp" % path))
self.files = sorted(glob.glob(f"{path}/*.bmp"))
self.nF = len(self.files) # number of image files
self.nB = math.ceil(self.nF / batch_size) # number of batches
self.batch_size = batch_size
assert self.nB > 0, "No images found in path %s" % path
assert self.nB > 0, f"No images found in path {path}"
self.height = img_size
# load targets
self.mat = scipy.io.loadmat(targets_path)
Expand Down Expand Up @@ -109,7 +109,7 @@ def __next__(self):

img_all = []
labels_all = []
for index, files_index in enumerate(range(ia, ib)):
for files_index in range(ia, ib):
# img_path = self.files[self.shuffled_vector[files_index]] # BGR
img_path = "%s/%g.bmp" % (self.path, self.shuffled_vector[files_index])
# img_path = '/Users/glennjocher/Downloads/DATA/xview/train_images/2294.bmp'
Expand Down Expand Up @@ -414,60 +414,56 @@ def random_affine(
imw = cv2.warpPerspective(
img, M, dsize=(height, height), flags=cv2.INTER_LINEAR, borderValue=borderValue
) # BGR order (YUV-equalized BGR means)
# borderValue = [40.746, 49.697, 60.134]) # RGB

# Return warped points also
if targets is not None:
if len(targets) > 0:
n = targets.shape[0]
points = targets[:, 1:5].copy()
area0 = (points[:, 2] - points[:, 0]) * (points[:, 3] - points[:, 1])

# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = points[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)

# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

# apply angle-based reduction
radians = a * math.pi / 180
reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
x = (xy[:, 2] + xy[:, 0]) / 2
y = (xy[:, 3] + xy[:, 1]) / 2
w = (xy[:, 2] - xy[:, 0]) * reduction
h = (xy[:, 3] - xy[:, 1]) * reduction
xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T

# reject warped points outside of image
np.clip(xy, 0, height, out=xy)
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))
i = (w > 4) & (h > 4) & (area / area0 > 0.1) & (ar < 10)

targets = targets[i]
targets[:, 1:5] = xy[i]

return imw, targets, M
else:
if targets is None:
return imw
if len(targets) > 0:
n = targets.shape[0]
points = targets[:, 1:5].copy()
area0 = (points[:, 2] - points[:, 0]) * (points[:, 3] - points[:, 1])

# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = points[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)

# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

# apply angle-based reduction
radians = a * math.pi / 180
reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
x = (xy[:, 2] + xy[:, 0]) / 2
y = (xy[:, 3] + xy[:, 1]) / 2
w = (xy[:, 2] - xy[:, 0]) * reduction
h = (xy[:, 3] - xy[:, 1]) * reduction
xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T

# reject warped points outside of image
np.clip(xy, 0, height, out=xy)
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))
i = (w > 4) & (h > 4) & (area / area0 > 0.1) & (ar < 10)

targets = targets[i]
targets[:, 1:5] = xy[i]

return imw, targets, M


def convert_tif2bmp(p="/Users/glennjocher/Downloads/DATA/xview/val_images_bmp"):
    """Convert every .tif image in directory *p* to .bmp, deleting each original.

    Args:
        p (str): path to a directory containing .tif images.

    Side effects: writes a .bmp next to each .tif and removes the source .tif.
    """
    import glob

    import cv2

    files = sorted(glob.glob(f"{p}/*.tif"))
    for i, f in enumerate(files):
        print("%g/%g" % (i + 1, len(files)))

        img = cv2.imread(f)

        cv2.imwrite(f.replace(".tif", ".bmp"), img)
        # os.remove avoids spawning a shell ("rm -rf" via os.system is both
        # slower and unsafe if the filename contains shell metacharacters)
        os.remove(f)
Loading