Skip to content

Commit

Permalink
experiment files kitti processing
Browse files Browse the repository at this point in the history
  • Loading branch information
SVivdich02 committed May 15, 2024
1 parent 7c80b21 commit 66ed043
Show file tree
Hide file tree
Showing 3 changed files with 88 additions and 165 deletions.
20 changes: 6 additions & 14 deletions main_calc_metrics_by_csv.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,13 +15,11 @@
import csv


def calculate_metrics(exec_id):
def calculate_metrics(file_name):
values_pres = []
values_recall = []
values_fScore = []
with open(
"experiment_{}_sem_voxel_offset0_T0l02_50.csv".format(exec_id), "r"
) as file:
with open(file_name, "r") as file:
reader = csv.DictReader(file)
for row in reader:
values_pres.append(row["precision"])
Expand Down Expand Up @@ -58,31 +56,25 @@ def calculate_metrics(exec_id):
if float(value) == 0.0:
fscore0 += 1

print("start exec_id={}".format(exec_id))
print(
"precision={}, 1={}, 0={}".format(
sum_pres/float(len(values_pres)), pres1, pres0
sum_pres/float(len(values_pres)), pres1/float(len(values_pres)), pres0/float(len(values_pres))
)
)
print(
"recall={}, 1={}, 0={}".format(
sum_recall/float(len(values_recall)), recall1, recall0
sum_recall/float(len(values_recall)), recall1/float(len(values_recall)), recall0/float(len(values_recall))
)
)
print(
"fscore={}, 1={}, 0={}".format(
sum_fScore/float(len(values_fScore)), fscore1, fscore0
sum_fScore/float(len(values_fScore)), fscore1/float(len(values_fScore)), fscore0/float(len(values_fScore))
)
)
print("finish exec_id={}".format(exec_id))


def main():
    """Entry point: aggregate precision/recall/F-score stats for one run.

    Reads the per-window metrics CSV produced for the a5b5 / T=0.025
    experiment (instance_threshold=50) and prints summary statistics.
    """
    calculate_metrics("experiment_2_a5b5_sem_voxel_offset0_T0l025_50.csv")


if __name__ == "__main__":
Expand Down
86 changes: 11 additions & 75 deletions main_kitti_processing.py
Original file line number Diff line number Diff line change
Expand Up @@ -166,15 +166,10 @@ def segment_pcds(config):
def process_kitti(
from_num,
to_num,
id_exec,
alpha_physical_distance,
beta_instance_distance,
T_normalized_cut,
):

reduce_detail_int_to_union_threshold = 0.5
reduce_detail_int_to_mask_threshold = 0.6

current_from_num = from_num

while current_from_num < to_num:
Expand All @@ -189,8 +184,8 @@ def process_kitti(
"alpha_physical_distance": alpha_physical_distance,
"beta_instance_distance": beta_instance_distance,
"T_normalized_cut": T_normalized_cut,
"reduce_detail_int_to_union_threshold": reduce_detail_int_to_union_threshold,
"reduce_detail_int_to_mask_threshold": reduce_detail_int_to_mask_threshold,
"reduce_detail_int_to_union_threshold": 0.5,
"reduce_detail_int_to_mask_threshold": 0.6,
"cam_name": "cam2",
"R": 18,
"nb_neighbors": 25,
Expand All @@ -201,8 +196,8 @@ def process_kitti(

result_tuple = segment_pcds(config)

file_name = "experiment_{}_sem_voxel_offset0_T0l02/start{}_end{}.pickle".format(
id_exec, config.start_index, config.end_index
file_name = "experiment_2_a5b5_sem_voxel_offset0_T0l025/start{}_end{}.pickle".format(
config.start_index, config.end_index
)
new_file = open(file_name, "w")
new_file.close()
Expand All @@ -215,79 +210,20 @@ def process_kitti(


def main():
    """Run the KITTI segmentation pipeline over point clouds 0..4540.

    Single experiment configuration (alpha=5, beta=5, T=0.025); results
    are persisted by process_kitti as one pickle per 4-cloud window.
    """
    start_pcd_num = 0
    end_pcd_num = 4540

    # Graph-cut affinity weights and normalized-cut stopping threshold
    # for the "a5b5 / T0l025" experiment.
    alpha_physical_distance = 5
    beta_instance_distance = 5
    T_normalized_cut = 0.025

    process_kitti(
        start_pcd_num,
        end_pcd_num,
        alpha_physical_distance,
        beta_instance_distance,
        T_normalized_cut,
    )


if __name__ == "__main__":
Expand Down
147 changes: 71 additions & 76 deletions main_kitti_processing_metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,94 +59,89 @@ def build_pred_inst_array(
def main():
    """Evaluate clustering results for several instance-size thresholds.

    For each threshold, walks the pickled per-window results of the
    a5b5 / T=0.025 experiment (clouds 0..4540, windows of 4), builds the
    predicted-instance array, computes precision/recall/F-score against
    the ground-truth instance labels, and appends one CSV row per window.
    Windows whose ground truth contains no instances are skipped.
    """
    from_num = 0
    to_num = 4540

    # Minimum cluster size (in points) for a cluster to count as an instance.
    instance_thresholds = [5, 20, 30, 50]

    for instance_threshold in instance_thresholds:
        print("Start to process instance_threshold={}".format(instance_threshold))

        current_from_num = from_num

        skipped = 0
        while current_from_num < to_num:
            start_index = current_from_num
            end_index = start_index + 4

            file_name = (
                "experiment_2_a5b5_sem_voxel_offset0_T0l025/start{}_end{}.pickle".format(
                    start_index, end_index
                )
            )

            with open(file_name, "rb") as file:
                data = pickle.load(file)

            # Pickle layout: positional list of dicts produced by segment_pcds.
            trace = data[4]["trace_graphcut"]
            clusters = data[5]["clusters_graphcut"]
            inst_label_array_for_clustering = data[6]["inst_label_array_for_clustering"]

            if (
                inst_label_array_for_clustering.sum() == 0
            ):  # no instances in this cloud window => skip it
                skipped += 1
                print(
                    "start_index={}, end_index={} skip".format(
                        start_index, end_index
                    )
                )
                current_from_num = end_index
                continue

            # Deep-copy mutable inputs so the builder cannot alter the
            # ground truth reused below.
            pred_inst_array = build_pred_inst_array(
                copy.deepcopy(inst_label_array_for_clustering),
                clusters,
                copy.deepcopy(trace),
                instance_threshold,
            )

            pred_labels = pred_inst_array
            gt_labels = inst_label_array_for_clustering
            tp_condition = "iou"
            precision_res = precision(pred_labels, gt_labels, tp_condition)
            recall_res = recall(pred_labels, gt_labels, tp_condition)
            fScore_res = fScore(pred_labels, gt_labels, tp_condition)

            # Label 0 is background/unlabeled, not an instance.
            gt_labels_unique = set(gt_labels)
            gt_labels_unique.discard(0)

            pred_labels_unique = set(pred_labels)
            pred_labels_unique.discard(0)

            # Append one row per window; file accumulates across runs.
            with open(
                "experiment_2_a5b5_sem_voxel_offset0_T0l025_{}.csv".format(instance_threshold),
                "a",
                newline="",
            ) as file:
                writer = csv.writer(file)

                writer.writerow(
                    [
                        str(start_index),
                        str(end_index),
                        str(precision_res),
                        str(recall_res),
                        str(fScore_res),
                        len(gt_labels_unique),
                        len(pred_labels_unique),
                        len(clusters),
                    ]
                )

            current_from_num = end_index

        print(skipped)
        print("Finish to process instance_threshold={}".format(instance_threshold))


if __name__ == "__main__":
Expand Down

0 comments on commit 66ed043

Please sign in to comment.