models/common_metrics.py (13 changes: 9 additions & 4 deletions)

@@ -126,7 +126,7 @@ def iou_per_class(preds, labels, num_classes=3):
         ious.append(iou)
     return ious
 
-def calculate_metrics(all_preds, all_targets, num_classes, total_pixels, correct_pixels):
+def calculate_metrics(all_preds, all_targets, num_classes, total_pixels, correct_pixels, mean_uncertainty=None):
     """
     Calculate validation metrics from predictions and targets.
 
@@ -175,6 +175,8 @@ def calculate_metrics(all_preds, all_targets, num_classes, total_pixels, correct_pixels):
     metrics = {
         "pixel_accuracy": pixel_accuracy,
     }
+    if mean_uncertainty is not None:
+        metrics["mean_uncertainty"] = mean_uncertainty
     avg_iou = 0
     for cls in range(num_classes):
         metrics[f"iou_{cls}"] = iou[cls]
@@ -193,14 +195,17 @@ def calculate_metrics(all_preds, all_targets, num_classes, total_pixels, correct_pixels):
     # metrics["confusion_matrix"] = cm.flatten().tolist()
 
     # Print results
-    print(f"\nPixel Accuracy: {pixel_accuracy:.4f}, mIoU: {avg_iou}")
+    if mean_uncertainty is not None:
+        print(f"\nPixel Accuracy: {pixel_accuracy:.4f}, mIoU: {avg_iou:.4f}, Mean Uncertainty: {mean_uncertainty:.4f}")
+    else:
+        print(f"\nPixel Accuracy: {pixel_accuracy:.4f}, mIoU: {avg_iou:.4f}")
     print(f"{'Class':<6} {'IoU':>6} {'Precision':>10} {'Recall':>8} {'F1':>6}")
     for cls in range(num_classes):
         print(f"{cls:<6} {iou[cls]:>6.3f} {precision[cls]:>10.3f} {recall[cls]:>8.3f} {f1[cls]:>6.3f}")
 
     return metrics
 
-def validate_all(model, val_loader, params_dict):
+def validate_all(model, val_loader, params_dict, mean_uncertainty=None):
     """
     Validation function for single-head U-Net models.
     Expects data loader to return (x, y) format.
@@ -257,7 +262,7 @@ def validate_all(model, val_loader, params_dict):
     avg_loss = total_loss / total_batches
 
     # Calculate metrics
-    metrics = calculate_metrics(all_preds, all_targets, params_dict["num_classes"], total_pixels, correct_pixels)
+    metrics = calculate_metrics(all_preds, all_targets, params_dict["num_classes"], total_pixels, correct_pixels, mean_uncertainty)
     if "loss" in params_dict:
         metrics["val_loss"] = avg_loss
         metrics["neg_val_loss"] = -avg_loss
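The new parameter is backward compatible: `mean_uncertainty` defaults to `None`, in which case neither the metrics dict nor the printed summary mentions uncertainty. Below is a minimal sketch of the pass-through behaviour, using made-up tensors and assuming flat prediction/target tensors are acceptable inputs (the elided body of `calculate_metrics` is not shown in this diff, so treat the shapes as hypothetical):

```python
import torch
from models.common_metrics import calculate_metrics  # path as in this PR

# Hypothetical inputs: 3-class segmentation, two 64x64 masks.
preds = torch.randint(0, 3, (2, 64, 64))
targets = torch.randint(0, 3, (2, 64, 64))
total_pixels = preds.numel()
correct_pixels = (preds == targets).sum().item()

# Old call sites keep working: no "mean_uncertainty" key appears.
m0 = calculate_metrics(preds, targets, 3, total_pixels, correct_pixels)
assert "mean_uncertainty" not in m0

# New call sites pass the pre-computed scalar straight through.
m1 = calculate_metrics(preds, targets, 3, total_pixels, correct_pixels,
                       mean_uncertainty=0.42)
assert m1["mean_uncertainty"] == 0.42
```

Passing a pre-computed scalar keeps the metric plumbing unchanged; `calculate_metrics` only records the value and never recomputes entropy itself.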
models/model_test.py (19 changes: 17 additions & 2 deletions)

@@ -17,6 +17,10 @@
 import argparse
 from tqdm import tqdm
 
+def single_pass_uncertainty(logits):
+    probs = torch.softmax(logits, dim=1)
+    entropy = -(probs * torch.log(probs + 1e-8)).sum(dim=1)
+    return entropy
 
 def save_inference_images(ibatch, save_inference_dir, results, inputs, outputs, preds, targets, batch_size, test_df, save_logits, num_classes):
     if isinstance(inputs, list):
@@ -105,6 +109,8 @@ def evaluate_on_test_set(
     print(f"Save Inference to: {save_inference_dir}")
 
     with torch.no_grad():
+        total_uncertainty = 0.0
+        total_pixels = 0
        results=[]
        for i, (inputs, targets) in enumerate(tqdm(test_loader, desc="Inference Progress")):
 
@@ -113,11 +119,18 @@
            inputs = inputs.to(device)
            outputs = model(inputs)
            '''
+            if isinstance(outputs, tuple):
+                logits = outputs[0]
+            else:
+                logits = outputs
+
+            uncertainty = single_pass_uncertainty(logits).cpu()
+            total_uncertainty += uncertainty.sum().item()
+            total_pixels += uncertainty.numel()
             if isinstance(outputs, tuple):
                 preds = torch.argmax(outputs[0], dim=1).cpu()
             else:
                 preds = torch.argmax(outputs, dim=1).cpu()
 
             targets = targets.cpu()
             all_preds.append(preds)
             all_targets.append(targets)
@@ -134,7 +147,9 @@
 
     print(f"Saved inference results to {csv_path}")
 
-    metrics = validate_all(model, test_loader, params_dict)
+    mean_uncertainty = (total_uncertainty / total_pixels
+                        if total_pixels > 0 else None)
+    metrics = validate_all(model, test_loader, params_dict, mean_uncertainty)
 
     if wandbrun:
         wandbrun.log(metrics)
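For reference, `single_pass_uncertainty` is single-forward-pass softmax entropy, H = -Σ_c p_c log p_c per pixel; it ranges from 0 (a one-hot softmax) to ln C (a uniform one). The sketch below, using made-up logits, checks both bounds and reproduces the pixel-weighted running mean that `evaluate_on_test_set` accumulates:

```python
import math
import torch

def single_pass_uncertainty(logits):
    # Copied from the diff above: per-pixel predictive entropy
    # H = -sum_c p_c * log(p_c); the 1e-8 guards against log(0).
    probs = torch.softmax(logits, dim=1)
    entropy = -(probs * torch.log(probs + 1e-8)).sum(dim=1)
    return entropy

# Dummy (B, C, H, W) logits with 3 classes, purely for illustration.
confident = torch.zeros(2, 3, 4, 4)
confident[:, 0] = 50.0                 # near one-hot softmax -> entropy ~ 0
uniform = torch.zeros(2, 3, 4, 4)      # equal logits -> entropy = ln(3)

print(single_pass_uncertainty(confident).mean().item())  # ~0.0
print(single_pass_uncertainty(uniform).mean().item())    # ~1.0986 == math.log(3)

# The same pixel-weighted running mean that evaluate_on_test_set builds:
total_uncertainty, total_pixels = 0.0, 0
for batch_logits in (confident, uniform):
    u = single_pass_uncertainty(batch_logits)
    total_uncertainty += u.sum().item()
    total_pixels += u.numel()
mean_uncertainty = total_uncertainty / total_pixels
print(mean_uncertainty, math.log(3) / 2)  # both ~0.5493
```

Because the loop accumulates the entropy sum and the pixel count separately, each batch contributes in proportion to its size, so a smaller final batch is not over-weighted.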