diff --git a/.gitignore b/.gitignore
index bec4f9f..af03091 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,10 @@
 .DS_Store
 /Data/fsoco_segmentation_train
 /Segmentation/Yolact_minimal
+/DepthEstimation/TestData
+/DepthEstimation/Lite-Mono
+/DepthEstimation/output
+/DepthEstimation/output_best
+/DepthEstimation/weights
 /SampleData/driverless.mp4
 *.txt
diff --git a/DepthEstimation/README.md b/DepthEstimation/README.md
new file mode 100644
index 0000000..d01f7d4
--- /dev/null
+++ b/DepthEstimation/README.md
@@ -0,0 +1,17 @@
+# Lite-Mono archive
+This model was deemed not good enough to keep using: it is too slow for real-time work, and its accuracy degrades at large depths.
+Clone
+```
+git clone git@github.com:noahzn/Lite-Mono.git
+```
+into this folder and run
+```
+python main.py
+```
+to produce depth estimates.
+
+A good test video source for /TestData/ is
+```
+https://www.youtube.com/watch?v=o5vES5QaeiQ
+```
+
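For reference, the script added below exposes a small CLI; the flags here are the ones defined in its argparse setup (add `--no-cuda` to force CPU):

```
python main.py --model tiny --video ./TestData/test1.mp4
python main.py --model best --output ./output_best
```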
diff --git a/DepthEstimation/main.py b/DepthEstimation/main.py
new file mode 100644
index 0000000..53f7675
--- /dev/null
+++ b/DepthEstimation/main.py
@@ -0,0 +1,171 @@
+import os
+import sys
+import argparse
+import numpy as np
+import cv2
+import torch
+from torchvision import transforms
+import matplotlib.cm as cm
+import matplotlib.colors as colors
+from PIL import Image
+
+sys.path.append('./Lite-Mono')
+import networks
+from layers import disp_to_depth  # NOTE: imported but not used below
+
+MODEL_CONFIGS = {
+    'tiny': {
+        'name': 'lite-mono-tiny',
+        'folder': 'lite-mono-tiny_640x192',
+        'description': 'Lightest/fastest (2.2M params, 640x192)'
+    },
+    'best': {
+        'name': 'lite-mono-8m',
+        'folder': 'lite-mono-8m_1024x320',
+        'description': 'Best quality/heaviest (8.7M params, 1024x320)'
+    }
+}
+
+
+class DepthEstimator:
+    def __init__(self, weightsFolder, model="lite-mono-tiny", useCuda=True):
+        self.device = torch.device("cuda" if torch.cuda.is_available() and useCuda else "cpu")
+
+        encoderPath = os.path.join(weightsFolder, "encoder.pth")
+        decoderPath = os.path.join(weightsFolder, "depth.pth")
+
+        encoderDict = torch.load(encoderPath, map_location=self.device)
+        decoderDict = torch.load(decoderPath, map_location=self.device)
+
+        self.feedHeight = encoderDict['height']
+        self.feedWidth = encoderDict['width']
+
+        print(f"Loading {model} model ({self.feedWidth}x{self.feedHeight}) on {self.device}")
+
+        self.encoder = networks.LiteMono(model=model, height=self.feedHeight, width=self.feedWidth)
+        modelDict = self.encoder.state_dict()
+        self.encoder.load_state_dict({k: v for k, v in encoderDict.items() if k in modelDict})
+        self.encoder.to(self.device)
+        self.encoder.eval()
+
+        self.depthDecoder = networks.DepthDecoder(self.encoder.num_ch_enc, scales=range(3))
+        depthModelDict = self.depthDecoder.state_dict()
+        self.depthDecoder.load_state_dict({k: v for k, v in decoderDict.items() if k in depthModelDict})
+        self.depthDecoder.to(self.device)
+        self.depthDecoder.eval()
+
+        self.toTensor = transforms.ToTensor()
+
+    def processFrame(self, frame):
+        originalHeight, originalWidth = frame.shape[:2]
+
+        frameRgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        framePil = Image.fromarray(frameRgb)
+        frameResized = framePil.resize((self.feedWidth, self.feedHeight), Image.LANCZOS)
+
+        inputTensor = self.toTensor(frameResized).unsqueeze(0).to(self.device)
+
+        with torch.no_grad():
+            features = self.encoder(inputTensor)
+            outputs = self.depthDecoder(features)
+
+        disp = outputs[("disp", 0)]
+        dispResized = torch.nn.functional.interpolate(
+            disp, (originalHeight, originalWidth), mode="bilinear", align_corners=False)
+
+        dispNp = dispResized.squeeze().cpu().numpy()
+
+        vmax = np.percentile(dispNp, 95)
+        normalizer = colors.Normalize(vmin=dispNp.min(), vmax=vmax)
+        mapper = cm.ScalarMappable(norm=normalizer, cmap='magma')
+        colormapped = (mapper.to_rgba(dispNp)[:, :, :3] * 255).astype(np.uint8)
+
+        return colormapped, dispNp
+
+    def processVideo(self, videoPath, outputFolder="output", saveFrames=True, displayLive=False):
+        os.makedirs(outputFolder, exist_ok=True)
+
+        cap = cv2.VideoCapture(videoPath)
+        if not cap.isOpened():
+            raise ValueError(f"Cannot open video: {videoPath}")
+
+        fps = cap.get(cv2.CAP_PROP_FPS)
+        frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+        print(f"Processing video: {frameCount} frames at {fps:.2f} FPS ({width}x{height})")
+
+        outputVideoPath = os.path.join(outputFolder, "depth_output.mp4")
+        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+        outVideo = cv2.VideoWriter(outputVideoPath, fourcc, fps, (width, height))
+
+        frameIdx = 0
+        while True:
+            ret, frame = cap.read()
+            if not ret:
+                break
+
+            depthColor, depthRaw = self.processFrame(frame)
+            depthBgr = cv2.cvtColor(depthColor, cv2.COLOR_RGB2BGR)
+
+            outVideo.write(depthBgr)
+
+            if saveFrames and frameIdx % 30 == 0:
+                cv2.imwrite(os.path.join(outputFolder, f"frame_{frameIdx:06d}_depth.png"), depthBgr)
+                np.save(os.path.join(outputFolder, f"frame_{frameIdx:06d}_depth.npy"), depthRaw)
+
+            if displayLive:
+                combined = np.hstack((frame, depthBgr))
+                cv2.imshow('Original | Depth', combined)
+                if cv2.waitKey(1) & 0xFF == ord('q'):
+                    break
+
+            frameIdx += 1
+            if frameIdx % 30 == 0:
+                print(f"Processed {frameIdx}/{frameCount} frames ({100*frameIdx/frameCount:.1f}%)")
+
+        cap.release()
+        outVideo.release()
+        if displayLive:
+            cv2.destroyAllWindows()
+
+        print(f"\nComplete! Processed {frameIdx} frames")
+        print(f"Output video: {outputVideoPath}")
+        return outputVideoPath
+
+
+def main():
+    parser = argparse.ArgumentParser(description='Lite-Mono Depth Estimation')
+    parser.add_argument('--model', type=str, default='tiny', choices=['tiny', 'best'],
+                        help='Model to use: tiny (fastest) or best (highest quality)')
+    parser.add_argument('--video', type=str, default='./TestData/test1.mp4',
+                        help='Path to input video')
+    parser.add_argument('--output', type=str, default=None,
+                        help='Output folder (default: ./output_<model>)')
+    parser.add_argument('--no-cuda', dest='cuda', action='store_false',
+                        help='Disable CUDA (used by default when available)')
+
+    args = parser.parse_args()
+
+    config = MODEL_CONFIGS[args.model]
+    weightsFolder = f"./weights/{config['folder']}"
+    outputFolder = args.output or f"./output_{args.model}"
+
+    print(f"Using model: {config['description']}")
+
+    if not os.path.exists(weightsFolder):
+        print(f"Error: Weights folder not found: {weightsFolder}")
+        print("Please download the model weights first")
+        return
+
+    if not os.path.exists(args.video):
+        print(f"Error: Video not found: {args.video}")
+        return
+
+    estimator = DepthEstimator(weightsFolder, model=config['name'], useCuda=args.cuda)
+    estimator.processVideo(args.video, outputFolder=outputFolder, saveFrames=True, displayLive=False)
+
+
+if __name__ == '__main__':
+    main()
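main.py imports `disp_to_depth` from Lite-Mono's layers module but never calls it; the script saves raw sigmoid disparity. A minimal sketch of how that conversion could look, assuming the monodepth2-style convention that Lite-Mono reuses; the min/max depth values are assumptions, and a monocular model's depth is only defined up to an unknown global scale:

```python
import numpy as np

def disp_to_rel_depth(disp_np, min_depth=0.1, max_depth=100.0):
    """Convert sigmoid disparity in [0, 1] to (relative) depth.

    Mirrors the disp_to_depth helper in Lite-Mono's layers.py
    (monodepth2 convention); depth values are not metrically scaled.
    """
    min_disp = 1.0 / max_depth
    max_disp = 1.0 / min_depth
    scaled_disp = min_disp + (max_disp - min_disp) * disp_np
    return 1.0 / scaled_disp

# e.g. on the arrays saved by processVideo():
# depth = disp_to_rel_depth(np.load("output/frame_000000_depth.npy"))
```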
diff --git a/PathPlanning/README.md b/PathPlanning/README.md
new file mode 100644
index 0000000..8c97bf2
--- /dev/null
+++ b/PathPlanning/README.md
@@ -0,0 +1,336 @@
+# Path Planning Module
+
+This module implements real-time path planning for the UCSC FSAE driverless vehicle, generating optimal racing paths at 25 Hz.
+
+**Based on**: AMZ Driverless (ETH Zurich) boundary estimation algorithm
+- **Paper**: "AMZ Driverless: The Full Autonomous Racing System" (Kabzan et al., 2019)
+- **Competition Results**: 1st place FSG 2017, 2018, FSI 2018
+- **Performance**: 28.48s fastest lap, 1.5g lateral acceleration
+- **Reference**: arXiv:1905.05150v1, Section 4.3
+- **GitHub**: https://github.com/AMZ-Driverless
+
+## Current Status (Dec 2025)
+
+**Overall Progress**: 3 of 5 core phases complete (60%)
+
+✅ **Completed**:
+- Delaunay Triangulation with vectorized performance (0.8-2.5ms, 400-1250 Hz)
+- Path Tree Population with breadth-first search and forward-arc filtering
+- Path Smoothing with cubic splines and Formula Student constraint validation
+
+⏳ **Remaining**:
+- Cost Function implementation (5 AMZ terms)
+- Beam Search pruning for path selection
+- Control Planning interface definition
+
+**Performance**: Currently well within the 40ms budget. Delaunay + smoothing = ~5-12ms total.
+
+## Module Overview
+
+### Core Files
+
+1. **config.py** - Configuration and constants
+   - Track constraints (min width 3m, turning radius 4.5m)
+   - Performance parameters (25 Hz, beam width)
+   - Cost function weights (qc, qw, qs, qcolor, ql)
+   - Vehicle dynamics parameters
+   - Sensor range (~10m)
+
+2. **delaunay.py** - Delaunay Triangulation ✅ **[COMPLETE]**
+   - Uses `scipy.spatial.Delaunay` for triangulation (NOT CGAL)
+   - Creates triangulation from cone [x, y] positions
+   - Extracts midpoint graph for waypoints with adjacency connections
+   - **Important**: Handles duplicate midpoints (interior edges shared by 2 triangles)
+   - Vectorized implementation for performance (>100 Hz on test data)
+   - Multi-track visualization utilities (matplotlib)
+   - **AMZ Implementation**: Used the CGAL library; we use scipy for simplicity
+   - **Performance**: 0.8-2.5ms for 10-30 cones (400-1250 Hz)
+   - **Status**: ✅ Complete
+
+3. **path_tree.py** - Path Tree Population (Breadth-First) ✅ **[COMPLETE]**
+   - Builds breadth-first tree of possible paths through the midpoint graph
+   - `find_nearest_waypoints()`: Selects k nearest waypoints in forward arc (±90°)
+   - `get_path_tree()`: Grows tree from vehicle position with configurable depth
+   - Prevents loops by tracking visited waypoints in each path
+   - Forward-only constraint (no backwards waypoints)
+   - Limited tree depth to meet 40ms cycle time constraint
+   - Rich visualization showing paths, cones, vehicle, and heading
+   - **AMZ approach**: Breadth-first manner, fixed iteration limit
+   - **Status**: ✅ Complete
+
+4. **cost_functions.py** - Path Cost Evaluation (5 AMZ Terms)
+
+   Implements the exact AMZ cost function with 5 weighted terms (a rough sketch follows this file list):
+
+   - **Term 1**: Maximum angle change between path segments
+     - Rationale: Sharp corners use multiple cones, so sudden large angle changes are unlikely
+
+   - **Term 2**: Track width standard deviation (~3m expected)
+     - Rationale: Rules specify a 3m minimum width, which is unlikely to change drastically
+
+   - **Term 3**: Cone spacing standard deviation (~5m max expected)
+     - Rationale: Cones are typically spaced evenly along boundaries
+
+   - **Term 4**: Wrong color probability (blue=left, yellow=right)
+     - Rationale: Uses probabilistic color estimates from perception
+     - Becomes zero if no color information is available
+
+   - **Term 5**: Path length deviation from sensor range (~10m)
+     - Rationale: Penalizes paths much shorter or longer than one sensor range
+
+   **Cost Function Formula**:
+   ```python
+   cost = (qc * normalize(max_angle_change)**2 +
+           qw * normalize(track_width_stddev)**2 +
+           qs * normalize(cone_spacing_stddev)**2 +
+           qcolor * normalize(wrong_color_prob)**2 +
+           ql * normalize(path_length_diff)**2)
+   ```
+
+   - Each term is normalized, squared, and weighted
+   - Weights (qc, qw, qs, qcolor, ql) are tuned experimentally
+   - **Status**: Not Started (Deadline: Dec 3)
+
+5. **beam_search.py** - Beam Search Pruning
+   - Prunes the path tree to keep only the top-k best paths at each level
+   - Prevents exponential growth of the search tree
+   - Limits computational complexity for real-time performance
+   - Integrates cost function evaluation
+   - **AMZ result**: Only 4.2% of paths left the track, mostly at >7m distance
+   - **Status**: Not Started (Deadline: Nov 29)
+
+6. **path_smoother.py** - Spline Interpolation ✅ **[COMPLETE]**
+   - `smooth_path()`: Smooths discrete waypoints using cubic splines (splprep/splev)
+   - Calculates curvature at each point: κ = (dx·d²y - dy·d²x) / (dx² + dy²)^1.5
+   - Validates against Formula Student constraints (min 9m turning diameter)
+   - Multi-panel visualization: path with curvature coloring, curvature profile, turn radius
+   - Pass/fail indicators for each test track
+   - Test suite includes 6 track configurations (straight, curve, hairpin, slalom, oval, s-curve)
+   - **AMZ approach**: Feeds the smoothed path to a pure pursuit controller
+   - **Status**: ✅ Complete
+
+7. **path_planner.py** - Main Integration Module
+   - 25 Hz planning loop (40ms cycle time)
+   - Integrates all 5 phases of the algorithm
+   - SLAM interface (input): receives cone positions at 25 Hz
+   - Control planning interface (output): sends path data at 25 Hz
+   - Real-time performance monitoring
+   - **AMZ timing**: 50ms in SLAM mode, 40ms in racing mode
+   - **Status**: Not Started (Deadline: Dec 6)
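To make the formula concrete, here is a minimal sketch of how the five terms might be combined once the term extractors exist, using the starting weights defined in config.py. The term-value and scale dictionaries are hypothetical placeholders, not implemented code:

```python
import config

def path_cost(term_values, scales):
    """Combine the 5 AMZ cost terms: normalize, square, weight, then sum.

    term_values: raw term values for one candidate path, e.g.
        {'angle': 0.8, 'width': 0.3, 'spacing': 0.9, 'color': 0.1, 'length': 2.0}
    scales: normalization constants per term (hypothetical; to be tuned).
    """
    weights = {'angle': config.qc, 'width': config.qw, 'spacing': config.qs,
               'color': config.qcolor, 'length': config.ql}
    cost = 0.0
    for term, raw in term_values.items():
        normalized = raw / scales[term]           # normalize
        cost += weights[term] * normalized ** 2   # square, then weight
    return cost
```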
+## Algorithm Pipeline
+
+Based on AMZ Section 4.3: Boundary Estimation
+
+```
+SLAM -> Cones -> Delaunay -> Waypoints -> Path Tree -> Beam Search -> Best Path -> Spline Smooth -> Control
+  |                                                                                                      |
+  +-------------------------------------- 25 Hz Loop (40ms cycle time) ---------------------------------+
+```
+
+### Phase 1: Graph Generation (Delaunay Triangulation)
+- **Input**: Cone positions [x, y] from SLAM
+- **Library**: scipy.spatial.Delaunay (AMZ uses CGAL)
+- **Process**:
+  1. Create Delaunay triangulation of all visible cones
+  2. Triangles maximize minimum internal angles
+  3. Extract midpoints of all triangle edges
+  4. Handle duplicate midpoints from shared interior edges
+- **Output**: Midpoint waypoint graph
+- **AMZ approach**: Discretizes X-Y space into connected triangles
+
+### Phase 2: Tree Population (Breadth-First Search)
+- **Input**: Waypoints + current vehicle position
+- **Process**:
+  1. Start from vehicle position
+  2. Build tree by connecting to surrounding edge midpoints
+  3. Each level represents one waypoint ahead
+  4. Branches represent alternative route choices
+  5. Limit depth by iteration count (computational budget)
+- **Output**: Tree of all possible paths
+- **AMZ approach**: Breadth-first manner; explores all possible center lines
+
+### Phase 3: Beam Search (Cost-Based Pruning)
+- **Input**: Path tree from Phase 2
+- **Process** (see the sketch after this phase):
+  1. Evaluate each path using the 5-term cost function
+  2. Normalize, square, and weight each cost term
+  3. Keep only the top-k paths with the lowest cost at each level
+  4. Discard the worst paths (beam search pruning)
+- **Output**: Top-k best paths
+- **AMZ result**: 4.2% invalid paths at FSG 2018, mostly >7m away
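A minimal sketch of the pruning step planned for beam_search.py, assuming the tree is exposed level by level (get_path_tree currently returns only the final level), a path is a list of waypoint tuples, and `evaluate_cost` is a hypothetical callable like the cost sketch above:

```python
import config

def beam_search_prune(levels, evaluate_cost, beam_width=config.BEAM_WIDTH):
    """Keep only the beam_width lowest-cost paths at each tree level.

    levels: list of levels; each level is a list of paths
            (a path is a list of (x, y) waypoint tuples).
    evaluate_cost: callable mapping a path to a scalar cost (hypothetical).
    """
    pruned_levels = []
    for paths in levels:
        ranked = sorted(paths, key=evaluate_cost)  # lowest cost first
        pruned_levels.append(ranked[:beam_width])  # discard the rest
    return pruned_levels
```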
+### Phase 4: Path Selection & Smoothing
+- **Input**: Best path waypoints from Phase 3
+- **Process**:
+  1. Select the path with the absolute lowest cost
+  2. Fit a spline through the discrete waypoints
+  3. Calculate curvature along the path
+- **Output**: Smooth continuous path with curvature
+- **AMZ approach**: Passes the result to a pure pursuit controller
+
+### Phase 5: Control Interface
+- **Input**: Smooth path + curvatures
+- **Process**:
+  1. Calculate velocity profile (optional; may be the control team's job - a sketch follows this section)
+  2. Package waypoints, velocities, curvatures
+  3. Send to control planning at 25 Hz
+- **Output**: Path data for control planning
+- **Coordinate with**: Ray Zou, Nathan Margolis (Control Planning team)
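If the velocity profile does land on this side, a common first cut is to cap speed by the lateral-acceleration limit implied by curvature, v = sqrt(a_lat / |κ|). A minimal sketch using config.VEHICLE_MAX_SPEED; the 1.5 g lateral limit is the AMZ figure quoted above and is only an assumption for our car, and longitudinal acceleration limits are ignored here:

```python
import numpy as np
import config

A_LAT_MAX = 1.5 * 9.81  # m/s^2 - AMZ-reported lateral limit; assumption for our vehicle

def velocity_profile(curvature):
    """Curvature-limited speed at each sample: v = sqrt(a_lat_max / |k|)."""
    kappa = np.maximum(np.abs(curvature), 1e-6)     # avoid divide-by-zero on straights
    v = np.sqrt(A_LAT_MAX / kappa)                  # cornering speed limit
    return np.minimum(v, config.VEHICLE_MAX_SPEED)  # respect the straight-line cap
```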
+## Running Tests
+
+Each module has standalone tests in its `if __name__ == "__main__"` block:
+
+```bash
+# Test Delaunay triangulation ✅
+python PathPlanning/delaunay.py
+
+# Test path tree ✅
+python PathPlanning/path_tree.py
+
+# Test path smoother ✅
+python PathPlanning/path_smoother.py
+
+# Test cost functions (not yet implemented)
+python PathPlanning/cost_functions.py
+
+# Test beam search (not yet implemented)
+python PathPlanning/beam_search.py
+
+# Test full integrated system (not yet implemented)
+python PathPlanning/path_planner.py
+```
+
+### Test Outputs
+
+**delaunay.py**:
+- Prints performance benchmarks for 5 test tracks (cones, time, Hz)
+- Shows a multi-panel visualization of the triangulation + waypoint graph
+- Validates vectorized implementation performance
+
+**path_tree.py**:
+- Generates all possible paths from the vehicle position through the waypoint graph
+- Visualizes up to 10 sample paths with different colors/styles
+- Shows vehicle position, heading arrow, cones, and available waypoints
+- Forward-arc filtering demonstration (±90° from vehicle heading)
+
+**path_smoother.py**:
+- Generates 6 test tracks (straight, simple curve, hairpin, slalom, oval, s-curve)
+- Shows a 3-panel visualization for each track:
+  - Path with curvature color coding (red = high curvature)
+  - Curvature profile vs. path progress
+  - Turn radius with Formula Student compliance zones
+- Prints pass/fail status for the 9m minimum turning diameter constraint
+
+## Performance Requirements
+
+- **Frequency**: 25 Hz (40ms cycle time)
+- **Hardware**: Raspberry Pi (limited CPU/memory)
+- **Real-time**: Must complete before the next SLAM update
+- **Robustness**: Handle noisy monocular vision data
+- **AMZ benchmark**: 50ms cycle time in SLAM mode; achieved 1.5g lateral acceleration
+
+### Computational Budget (Current Performance)
+
+- Delaunay triangulation: ✅ 0.8-2.5ms for 10-30 cones (400-1250 Hz)
+- Tree building: TBD (depends on max_depth and k_start parameters)
+- Cost evaluation: ⏳ Not yet implemented
+- Beam search: ⏳ Not yet implemented
+- Spline smoothing: ✅ ~5-10ms (acceptable for 25 Hz)
+- **Total target**: <40ms on Raspberry Pi
+- **Current progress**: 3/5 phases complete, well within budget so far
+
+## Key Implementation Differences from AMZ
+
+| Aspect | AMZ (ETH Zurich) | Our Implementation |
+|--------|------------------|--------------------|
+| Triangulation Library | CGAL (C++) | scipy.spatial.Delaunay (Python) |
+| Update Frequency | 50ms (20 Hz) SLAM mode | 40ms (25 Hz) target |
+| Hardware | Industrial PC + GPU | Raspberry Pi (CPU only) |
+| Sensors | LiDAR + Stereo Camera | Monocular Camera only |
+| Programming Language | C++ / ROS | Python / ROS |
+| Sensor Range | ~10m (LiDAR) | ~10m (monocular depth estimation) |
+
+**Key Challenge**: Monocular vision provides noisier/sparser cone detections than LiDAR, so our cost function needs to be more robust.
+
+## Integration Points
+
+### Input from Localization/SLAM
+- **Team**: Parker Costa, Hansika Nerusa, Chanchal Mukeshsingh
+- **Format**: TBD - coordinate on the cone position data structure (a proposed sketch follows below)
+  - Need: [x, y] positions in the global frame
+  - Need: Color probabilities [p_blue, p_yellow, p_orange, p_unknown]
+  - Need: Position uncertainties (covariance matrices)
+- **Update rate**: 25 Hz
+
+### Output to Control Planning
+- **Team**: Ray Zou, Nathan Margolis
+- **Format**: TBD - coordinate on the path/velocity data structure
+  - Provide: Waypoint coordinates [x, y] array
+  - Provide: Desired velocities at each waypoint (optional)
+  - Provide: Curvature at each waypoint
+  - Provide: Path confidence/quality metric
+- **Update rate**: 25 Hz
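Until the formats are agreed, a sketch of what the two messages could look like as plain dataclasses. Every field name here is a proposal mirroring the bullet lists above, not an agreed interface:

```python
from dataclasses import dataclass
from typing import Optional
import numpy as np

@dataclass
class ConeObservation:
    """Proposed SLAM -> path planning message (one per cone), 25 Hz."""
    position: np.ndarray    # [x, y] in the global frame
    color_probs: np.ndarray # [p_blue, p_yellow, p_orange, p_unknown]
    covariance: np.ndarray  # 2x2 position uncertainty

@dataclass
class PlannedPath:
    """Proposed path planning -> control message, 25 Hz."""
    waypoints: np.ndarray                    # (N, 2) [x, y] coordinates
    curvature: np.ndarray                    # (N,) curvature at each waypoint
    velocities: Optional[np.ndarray] = None  # (N,) desired speeds (optional)
    confidence: float = 1.0                  # path quality metric in [0, 1]
```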
+## Development Timeline
+
+| Task | Deadline | Status |
+|------|----------|--------|
+| Delaunay Triangulation (scipy) | Nov 8 | ✅ **COMPLETE** |
+| Test & Visualize | Nov 15 | ✅ **COMPLETE** |
+| Path Tree Population | Nov 22 | ✅ **COMPLETE** |
+| Beam Search Pruning | Nov 29 | ⏳ **NOT STARTED** |
+| Cost Functions (5 AMZ terms) | Dec 3 | ⏳ **NOT STARTED** |
+| Path Smoothing (scipy splines) | Dec 3 | ✅ **COMPLETE** |
+| Control Interface | Dec 6 | ⏳ **NOT STARTED** |
+
+## Dependencies
+
+```python
+import numpy as np
+from scipy.spatial import Delaunay                         # Delaunay triangulation
+from scipy.interpolate import splprep, splev, CubicSpline  # Path smoothing
+import matplotlib.pyplot as plt                            # Visualization
+```
+
+No CGAL required! Everything uses standard scipy/numpy.
+
+## AMZ Cost Function Tuning Guide
+
+The 5 cost weights need experimental tuning on your specific tracks:
+
+```python
+# In config.py - starting values (tune these!)
+qc = 1.0      # Angle change weight
+qw = 1.0      # Track width variance weight
+qs = 1.0      # Cone spacing variance weight
+qcolor = 2.0  # Color mismatch weight (higher = trust colors more)
+ql = 0.5      # Path length deviation weight
+```
+
+**Tuning strategy**:
+1. Start with all weights = 1.0
+2. Test on the simple_track and oval_track patterns
+3. Visualize which paths are selected
+4. Increase the weights for terms that should be more important
+5. qcolor should be high if perception gives good colors, low if it is noisy
+6. AMZ used squared and normalized costs, so relative weights matter more than absolute values
+
+## Team
+
+- Nathan Yee
+- Suhani Agarwal
+- Aedan Benavides
+
+## References
+
+**Primary**:
+- Kabzan et al., "AMZ Driverless: The Full Autonomous Racing System", 2019
+- Section 4.3: Boundary Estimation (our algorithm)
+- Section 5: MPC Control (Control Planning team's reference)
+
+**Additional**:
+- Delaunay, B., "Sur la sphère vide", 1934 (original Delaunay paper)
+- scipy.spatial.Delaunay documentation
+- Formula Student Germany Rules (track specifications)
diff --git a/PathPlanning/beam_search.py b/PathPlanning/beam_search.py
new file mode 100644
index 0000000..2741bab
--- /dev/null
+++ b/PathPlanning/beam_search.py
@@ -0,0 +1,9 @@
+"""
+Beam Search Path Pruning
+Deadline: Nov 29, 2025
+"""
+
+import numpy as np
+from typing import List
+# NOTE: path_tree defines no PathNode class; paths are plain lists of (x, y) tuples
+import config
diff --git a/PathPlanning/config.py b/PathPlanning/config.py
new file mode 100644
index 0000000..7142bcd
--- /dev/null
+++ b/PathPlanning/config.py
@@ -0,0 +1,52 @@
+"""
+Path Planning Configuration
+Contains all constants and parameters for the path planning system
+"""
+
+# Performance Requirements
+PLANNING_FREQUENCY_HZ = 25  # Hz - must compute a new path 25 times/sec
+CYCLE_TIME_MS = 40  # ms - max computation time (1000/25)
+
+# Track Constraints (from Formula Student Driverless rules)
+MIN_TRACK_WIDTH = 3.0  # meters (DD.2.2.2.c)
+MIN_TURNING_DIAMETER = 9.0  # meters (DD.2.2.2.d)
+MIN_TURNING_RADIUS = MIN_TURNING_DIAMETER / 2  # 4.5 meters
+MAX_STRAIGHT_LENGTH = 80.0  # meters (DD.2.2.2.a)
+LAP_LENGTH_MIN = 200.0  # meters (DD.2.2.2.e)
+LAP_LENGTH_MAX = 500.0  # meters (DD.2.2.2.e)
+
+# Cone Colors (from DD.1.1)
+CONE_COLOR_BLUE = 0  # Left track border
+CONE_COLOR_YELLOW = 1  # Right track border
+CONE_COLOR_ORANGE_SMALL = 2  # Entry/exit lanes
+CONE_COLOR_ORANGE_LARGE = 3  # Start/finish lines
+
+# Beam Search Parameters
+BEAM_WIDTH = 10  # Number of best paths to keep at each tree level
+MAX_TREE_DEPTH = 15  # Maximum number of waypoints to look ahead
+
+# Cost Function Weights
+WEIGHT_BOUNDARY_VIOLATION = 1000.0  # Heavily penalize leaving the track
+WEIGHT_TURNING_RADIUS = 100.0  # Penalize sharp turns
+WEIGHT_CURVATURE_CHANGE = 50.0  # Prefer smooth paths
+WEIGHT_DISTANCE = 1.0  # Prefer shorter paths
+WEIGHT_CENTER_LINE = 10.0  # Prefer staying near the center of the track
+
+# AMZ 5-term cost weights (starting values from the README tuning guide)
+qc = 1.0  # Angle change weight
+qw = 1.0  # Track width variance weight
+qs = 1.0  # Cone spacing variance weight
+qcolor = 2.0  # Color mismatch weight (higher = trust colors more)
+ql = 0.5  # Path length deviation weight
+
+# Path Smoothing Parameters
+SPLINE_DEGREE = 3  # Cubic spline
+SPLINE_SMOOTHING_FACTOR = 0.1  # Higher = smoother; 0 = pass exactly through waypoints
+
+# Vehicle Dynamics (estimated - coordinate with Control Planning team)
+VEHICLE_MAX_SPEED = 20.0  # m/s (72 km/h - from competitor data)
+VEHICLE_MAX_ACCELERATION = 15.0  # m/s^2 (0-100 km/h in ~2s)
+
+# Noise/Robustness Parameters
+MAX_CONE_POSITION_ERROR = 0.5  # meters - expected SLAM noise from monocular vision
+MIN_CONES_FOR_VALID_PATH = 4  # Minimum cones needed to plan a path
diff --git a/PathPlanning/cost_functions.py b/PathPlanning/cost_functions.py
new file mode 100644
index 0000000..66ef316
--- /dev/null
+++ b/PathPlanning/cost_functions.py
@@ -0,0 +1,8 @@
+"""
+Cost Function Heuristics for Path Planning
+Deadline: Dec 3, 2025
+"""
+
+import numpy as np
+from typing import List, Tuple
+import config
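cost_functions.py is still empty; as a starting point, minimal sketches of two of the five terms (Term 1, and a waypoint-spacing proxy for Term 3) operating on a path given as an (N, 2) array of waypoints. These are unvalidated drafts, not the final implementations:

```python
import numpy as np

def max_angle_change(path):
    """Term 1: largest heading change (radians) between consecutive segments."""
    path = np.asarray(path, dtype=float)
    if len(path) < 3:
        return 0.0
    segs = np.diff(path, axis=0)                   # segment vectors
    headings = np.arctan2(segs[:, 1], segs[:, 0])  # segment headings
    dh = np.diff(headings)
    dh = np.arctan2(np.sin(dh), np.cos(dh))        # wrap to [-pi, pi]
    return float(np.max(np.abs(dh)))

def spacing_stddev(path):
    """Term 3 proxy: std dev of distances between consecutive waypoints
    (the real term uses cone spacing, which needs cone association)."""
    path = np.asarray(path, dtype=float)
    if len(path) < 2:
        return 0.0
    dists = np.linalg.norm(np.diff(path, axis=0), axis=1)
    return float(np.std(dists))
```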
diff --git a/PathPlanning/delaunay.py b/PathPlanning/delaunay.py
new file mode 100644
index 0000000..8d7343d
--- /dev/null
+++ b/PathPlanning/delaunay.py
@@ -0,0 +1,135 @@
+from scipy.spatial import Delaunay
+from collections import defaultdict
+import numpy as np
+import matplotlib.pyplot as plt
+import time
+
+def get_midpoints(cones):
+    # Get a midpoint graph from the Delaunay triangulation
+    points = np.array(cones)
+    tri = Delaunay(points)
+    waypoint_graph = defaultdict(set)
+
+    # Vectorized midpoint computation for speedup (the graph construction below still loops)
+    # Get coordinates of each point
+    p1 = points[tri.simplices[:, 0]]
+    p2 = points[tri.simplices[:, 1]]
+    p3 = points[tri.simplices[:, 2]]
+
+    # Compute midpoints of each edge and store
+    wayp1p2 = ((p1 + p2) / 2)
+    wayp1p3 = ((p1 + p3) / 2)
+    wayp2p3 = ((p2 + p3) / 2)
+
+    for i in range(len(wayp1p2)):
+        wp1, wp2, wp3 = wayp1p2[i], wayp1p3[i], wayp2p3[i]
+        # Convert to tuples (hashable for dict keys)
+        wp1_tuple = tuple(np.round(wp1, decimals=6))
+        wp2_tuple = tuple(np.round(wp2, decimals=6))
+        wp3_tuple = tuple(np.round(wp3, decimals=6))
+        # Each waypoint connects to the other 2 in the triangle
+        waypoint_graph[wp1_tuple].add(wp2_tuple)
+        waypoint_graph[wp1_tuple].add(wp3_tuple)
+        waypoint_graph[wp2_tuple].add(wp1_tuple)
+        waypoint_graph[wp2_tuple].add(wp3_tuple)
+        waypoint_graph[wp3_tuple].add(wp1_tuple)
+        waypoint_graph[wp3_tuple].add(wp2_tuple)
+
+    # Stack all the waypoint arrays into one and drop duplicates (shared interior edges)
+    waypoints = np.unique(np.vstack([wayp1p2, wayp1p3, wayp2p3]), axis=0)
+
+    return waypoints, waypoint_graph, tri
+
+def visualize(test_cases):
+    # Visualize one or more tracks with triangulation and waypoint graph
+    # test_cases: list of (name, cone_data) tuples
+
+    n = len(test_cases)
+    cols = min(n, 3)
+    rows = (n + cols - 1) // cols
+    fig, axes = plt.subplots(rows, cols, figsize=(6*cols, 5*rows), squeeze=False)
+    axes = axes.flatten()
+
+    for i, (name, cone_data) in enumerate(test_cases):
+        ax = axes[i]
+        points = np.array(cone_data)
+        waypoints, waypoint_graph, tri = get_midpoints(cone_data)
+
+        # Plot triangulation and cones
+        ax.triplot(points[:, 0], points[:, 1], tri.simplices, 'b-', linewidth=0.5)
+        ax.plot(points[:, 0], points[:, 1], 'ro', markersize=6, label="Cones")
+        ax.plot(waypoints[:, 0], waypoints[:, 1], 'go', markersize=4, label="Waypoints")
+
+        # Draw waypoint graph connections
+        for wp, neighbors in waypoint_graph.items():
+            for neighbor in neighbors:
+                ax.plot([wp[0], neighbor[0]], [wp[1], neighbor[1]], 'g-', linewidth=0.8, alpha=0.4)
+
+        ax.set_title(f'{name} ({len(points)} cones)')
+        ax.legend(fontsize=8)
+        ax.grid(True, alpha=0.3)
+        ax.set_aspect('equal', adjustable='box')
+
+    # Hide unused subplots
+    for j in range(n, len(axes)):
+        axes[j].set_visible(False)
+
+    plt.tight_layout()
+    plt.show()
+
+if __name__ == "__main__":
+    # Test data option 1: Simple track-like pattern
+    simple_track = [
+        [0, 0], [1, 0], [2, 0.5], [3, 1], [4, 1.5],
+        [4, -0.5], [3, -1], [2, -1.5], [1, -1], [0, -0.5]
+    ]
+
+    # Test data option 2: Oval/loop track (more realistic)
+    oval_track = [
+        # Outer boundary
+        [0, 0], [2, -1], [4, -1.5], [6, -1.5], [8, -1], [10, 0],
+        [10, 2], [8, 3], [6, 3.5], [4, 3.5], [2, 3], [0, 2],
+        # Inner boundary
+        [2, 0.5], [4, 0], [6, 0], [8, 0.5], [8, 1.5], [6, 2],
+        [4, 2], [2, 1.5]
+    ]
+
+    # Test data option 3: Slalom/chicane pattern
+    slalom = [
+        [0, 0], [1, 1], [2, -1], [3, 1.5], [4, -1.5],
+        [5, 2], [6, -2], [7, 2.5], [8, -2.5], [9, 0],
+        [0, -0.5], [1, -2], [2, 1], [3, -2.5], [4, 2],
+        [5, -3], [6, 2.5], [7, -3.5], [8, 3], [9, 0.5]
+    ]
+
+    # Test data option 4: Grid pattern (stress test)
+    grid = []
+    for x in range(0, 10, 2):
+        for y in range(-4, 5, 2):
+            grid.append([x, y])
+
+    # Test data option 5: Random scattered (realistic chaos)
+    import random
+    random.seed(42)
+    random_scatter = [[random.uniform(0, 10), random.uniform(-3, 3)] for _ in range(30)]
+
+    # Choose which test data to use
+    test_cases = [
+        ("Random 30", random_scatter),
+        ("Simple Track", simple_track),
+        ("Slalom", slalom),
+        ("Grid", grid),
+        ("Oval Track", oval_track)
+    ]
+
+    for name, cone_data in test_cases:
+        points = np.array(cone_data)
+
+        start = time.perf_counter()
+        waypoints, waypoint_graph, tri = get_midpoints(cone_data)
+        elapsed = time.perf_counter() - start
+
+        print(f"{name:15} | Cones: {len(points):3} | Time: {elapsed*1000:6.2f} ms | Hz: {1/elapsed:6.1f}")
+
+    # Visualize all test tracks
+    visualize(test_cases)
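One gotcha when consuming get_midpoints: the graph keys are midpoints rounded to 6 decimals, while the waypoints array is unrounded, so look-ups must apply the same rounding. A small usage sketch (the four cone positions are arbitrary):

```python
import numpy as np
from delaunay import get_midpoints

cones = [[0, 0], [2, 0], [1, 2], [3, 2]]
waypoints, graph, tri = get_midpoints(cones)

# Keys are rounded tuples, so round before querying the adjacency set
wp_key = tuple(np.round(waypoints[0], decimals=6))
print(f"{wp_key} connects to {graph[wp_key]}")
```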
diff --git a/PathPlanning/path_planner.py b/PathPlanning/path_planner.py
new file mode 100644
index 0000000..07c1e09
--- /dev/null
+++ b/PathPlanning/path_planner.py
@@ -0,0 +1,10 @@
+import numpy as np
+from delaunay import get_midpoints
+from path_tree import get_path_tree
+import cost_functions  # stub for now; exports nothing yet
+import beam_search     # stub for now; exports nothing yet
+from path_smoother import smooth_path
+import config
+
+def test():
+    return 1
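path_planner.py is the not-yet-started integration module; a minimal sketch of the 25 Hz cycle it is meant to contain, wiring together the pieces that already exist. `select_best_path` stands in for the unimplemented cost and beam-search stages and is hypothetical, as are the max_depth/k_start values:

```python
import time
import numpy as np
import config
from path_tree import get_path_tree
from path_smoother import smooth_path

def planning_cycle(cones, vehicle_pos, vehicle_heading, select_best_path):
    """One 40ms planning cycle: cones in, smoothed path + curvature out."""
    start = time.perf_counter()

    paths = get_path_tree(cones, vehicle_pos, vehicle_heading,
                          max_depth=4, k_start=2)
    if not paths:
        return None  # not enough forward waypoints to plan

    best = select_best_path(paths)  # cost evaluation + beam search, TBD
    path, curvature = smooth_path(np.array(best))

    elapsed_ms = (time.perf_counter() - start) * 1000
    if elapsed_ms > config.CYCLE_TIME_MS:
        print(f"Warning: cycle took {elapsed_ms:.1f} ms (> {config.CYCLE_TIME_MS} ms budget)")
    return path, curvature

# placeholder until the real stages land, e.g.:
# planning_cycle(cones, [3, -0.5], 0.0, select_best_path=lambda ps: max(ps, key=len))
```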
diff --git a/PathPlanning/path_smoother.py b/PathPlanning/path_smoother.py
new file mode 100644
index 0000000..679cfcb
--- /dev/null
+++ b/PathPlanning/path_smoother.py
@@ -0,0 +1,145 @@
+import numpy as np
+from scipy.interpolate import splprep, splev
+import matplotlib.pyplot as plt
+import config
+
+def smooth_path(waypoints, num_points=100):
+    # u: parameter values of the original waypoints
+    #    that measure how far along the path each waypoint is,
+    #    given as a normalized value [0, 1]
+    # tck: tuple (knots, coefficients, degree of spline)
+    tck, u = splprep([waypoints[:, 0], waypoints[:, 1]], s=0, k=3)
+
+    # get num_points evenly spaced points
+    u_fine = np.linspace(0, 1, num_points)
+
+    # get smoothed x, y coordinates and path
+    x_smooth, y_smooth = splev(u_fine, tck)
+    smoothed = np.column_stack([x_smooth, y_smooth])  # renamed so it doesn't shadow this function
+
+    # get curvature (curvature = 1/radius)
+    dx_dt, dy_dt = splev(u_fine, tck, der=1)
+    d2x_dt2, d2y_dt2 = splev(u_fine, tck, der=2)
+
+    curvature = (dx_dt * d2y_dt2 - dy_dt * d2x_dt2) / (dx_dt**2 + dy_dt**2)**1.5
+
+    return smoothed, curvature
+
+def visualize(test_cases):
+    """
+    Visualize smooth paths with curvature analysis.
+
+    Args:
+        test_cases: List of (name, waypoints) tuples
+    """
+    n = len(test_cases)
+    fig, axes = plt.subplots(n, 3, figsize=(18, 5*n), squeeze=False)
+
+    for i, (name, waypoints) in enumerate(test_cases):
+        # Smooth the path
+        smooth, curvature = smooth_path(waypoints, num_points=200)
+
+        # Calculate turn radius (clipped for visualization)
+        radius = np.zeros_like(curvature)
+        for j, k in enumerate(curvature):
+            radius[j] = 1.0 / abs(k) if abs(k) > 0.001 else 100.0
+        radius = np.clip(radius, 0, 50)
+
+        # Calculate path progress percentage
+        path_progress = np.linspace(0, 100, len(curvature))
+
+        # Plot 1: Path with curvature coloring
+        ax1 = axes[i, 0]
+        scatter = ax1.scatter(smooth[:, 0], smooth[:, 1],
+                              c=np.abs(curvature), cmap='RdYlGn_r',
+                              s=20, vmin=0, vmax=0.3)
+        ax1.plot(waypoints[:, 0], waypoints[:, 1], 'ko-',
+                 markersize=8, linewidth=1, alpha=0.5, label='Input waypoints')
+        ax1.set_title(f'{name} - Smooth Path (colored by curvature)', fontweight='bold')
+        ax1.set_xlabel('X (m)')
+        ax1.set_ylabel('Y (m)')
+        ax1.axis('equal')
+        ax1.grid(True, alpha=0.3)
+        ax1.legend()
+        cbar = plt.colorbar(scatter, ax=ax1)
+        cbar.set_label('|Curvature| (1/m)')
+
+        # Plot 2: Curvature profile
+        ax2 = axes[i, 1]
+        ax2.plot(path_progress, curvature, 'b-', linewidth=2)
+        ax2.axhline(y=0, color='k', linestyle='-', linewidth=0.5)
+        ax2.axhline(y=0.222, color='r', linestyle='--', linewidth=1.5,
+                    label='Max allowed (9m diameter)')  # 0.222 = 1 / 4.5m min radius
+        ax2.axhline(y=-0.222, color='r', linestyle='--', linewidth=1.5)
+        ax2.fill_between(path_progress, -0.222, 0.222, alpha=0.2, color='green')
+        ax2.set_title('Curvature Profile', fontweight='bold')
+        ax2.set_xlabel('Path Progress (%)')
+        ax2.set_ylabel('Curvature (1/m)')
+        ax2.grid(True, alpha=0.3)
+        ax2.legend()
+
+        # Plot 3: Turn radius
+        ax3 = axes[i, 2]
+        ax3.plot(path_progress, radius, 'g-', linewidth=2)
+        ax3.fill_between(path_progress, radius, 50, alpha=0.3, color='green')
+        ax3.axhline(y=4.5, color='r', linestyle='--', linewidth=1.5,
+                    label='Min allowed (4.5m)')
+        ax3.fill_between(path_progress, 0, 4.5, alpha=0.2, color='red')
+        ax3.set_title('Turn Radius', fontweight='bold')
+        ax3.set_xlabel('Path Progress (%)')
+        ax3.set_ylabel('Radius (m)')
+        ax3.set_ylim([0, 50])
+        ax3.grid(True, alpha=0.3)
+        ax3.legend()
+
+        # Print statistics
+        max_curv = np.max(np.abs(curvature))
+        min_radius = 1.0 / max_curv if max_curv > 0 else float('inf')
+        status = "✓ PASS" if max_curv <= 0.222 else "✗ FAIL"
+        print(f"{name:20s} | Max curvature: {max_curv:.4f} | Min radius: {min_radius:.2f}m | {status}")
+
+    plt.tight_layout()
+    plt.show()
+
+if __name__ == "__main__":
+    # Test track configurations
+    straight_line = np.array([
+        [0, 0], [5, 0], [10, 0], [15, 0], [20, 0]
+    ])
+
+    simple_curve = np.array([
+        [0, 0], [5, 0], [10, 2], [15, 5], [20, 5]
+    ])
+
+    hairpin = np.array([
+        [0, 0], [10, 0], [15, 5], [15, 10],
+        [10, 15], [5, 15], [0, 10], [0, 5]
+    ])
+
+    slalom = np.array([
+        [0, 0], [5, 2], [10, 0], [15, -2],
+        [20, 0], [25, 2], [30, 0]
+    ])
+
+    oval_track = np.array([
+        [0, 0], [10, 0], [20, 0], [25, 5], [25, 10],
+        [20, 15], [10, 15], [0, 15], [-5, 10], [-5, 5]
+    ])
+
+    s_curve = np.array([
+        [0, 0], [5, 2], [10, 5], [15, 7],
+        [20, 7], [25, 5], [30, 2], [35, 0]
+    ])
+
+    # Test cases
+    test_cases = [
+        ("Straight Line", straight_line),
+        ("Simple Curve", simple_curve),
+        ("Hairpin Turn", hairpin),
+        ("Slalom", slalom),
+        ("Oval Track", oval_track),
+        ("S-Curve", s_curve)
+    ]
+
+    # Visualize
+    visualize(test_cases)
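A quick programmatic check of the completed smoother against the config constraint, mirroring the pass/fail logic in visualize():

```python
import numpy as np
import config
from path_smoother import smooth_path

waypoints = np.array([[0, 0], [5, 0], [10, 2], [15, 5], [20, 5]])  # "simple curve" track
path, curvature = smooth_path(waypoints, num_points=200)

max_allowed = 1.0 / config.MIN_TURNING_RADIUS  # 1/4.5 ≈ 0.222 1/m
print("PASS" if np.max(np.abs(curvature)) <= max_allowed else "FAIL")
```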
diff --git a/PathPlanning/path_tree.py b/PathPlanning/path_tree.py
new file mode 100644
index 0000000..1aa1f9e
--- /dev/null
+++ b/PathPlanning/path_tree.py
@@ -0,0 +1,191 @@
+"""
+Path Tree Population Module
+Deadline: Nov 22, 2025
+"""
+
+import numpy as np
+import matplotlib.pyplot as plt
+import random
+import config
+from delaunay import get_midpoints
+
+# Needs vehicle_heading in RADIANS from localization.
+# Find the k nearest waypoints in the forward arc (±90°) of the vehicle.
+def find_nearest_waypoints(vehicle_pos, vehicle_heading, waypoints, k):
+    # compute vector from vehicle position to each waypoint
+    vectors = waypoints - vehicle_pos
+
+    # compute angles of each waypoint using arctan2 for 4-quadrant calc
+    angles = np.arctan2(vectors[:, 1], vectors[:, 0])
+    angle_diff = angles - vehicle_heading
+
+    # normalize angles to the range [-pi, pi]
+    angle_diff = np.arctan2(np.sin(angle_diff), np.cos(angle_diff))
+
+    # create boolean array to keep all waypoints that are in front of the car and apply filter
+    forward_mask = np.abs(angle_diff) < (np.pi / 2)
+    masked_waypoints = waypoints[forward_mask]
+
+    # compute the magnitude of each vector using np.linalg.norm and get k shortest distances
+    distances = np.linalg.norm(masked_waypoints - vehicle_pos, axis=1)
+    k_indices = np.argsort(distances)[:k]
+
+    # return the k nearest waypoints
+    k_waypoints = masked_waypoints[k_indices]
+    return [tuple(np.round(wp, decimals=6)) for wp in k_waypoints]
+
+# Generate path tree from cone positions (main API function)
+def get_path_tree(cones, vehicle_pos, vehicle_heading, max_depth, k_start):
+    # get the possible waypoints
+    waypoints, waypoint_graph, tri = get_midpoints(cones)
+
+    # get the k nearest waypoints
+    starting_waypoints = find_nearest_waypoints(vehicle_pos, vehicle_heading, waypoints, k_start)
+
+    # Helper function to check if waypoint is forward of vehicle
+    def is_forward(waypoint):
+        # Check if waypoint is in the forward half-plane from the vehicle
+        vec = np.array(waypoint) - np.array(vehicle_pos)
+        # Gets vector in the direction of the vehicle
+        forward_dir = np.array([np.cos(vehicle_heading), np.sin(vehicle_heading)])
+        # If the two vectors point in a similar direction, their dot product will be > 0
+        return np.dot(vec, forward_dir) > 0
+
+    # NOTE: paths that dead-end before max_depth are dropped (see the sketch at the end of this diff)
+    current_level = [[wp] for wp in starting_waypoints]
+    for _ in range(max_depth):
+        next_level = []
+        for path in current_level:
+            last = path[-1]
+            for next_wp in waypoint_graph.get(last, set()):
+                # Only add waypoints that are forward AND not already visited
+                if next_wp not in path and is_forward(next_wp):
+                    next_level.append(path + [next_wp])
+
+        current_level = next_level
+
+    return current_level
+
+# Visualize the paths with cones and vehicle position
+def visualize_paths(paths, cones, vehicle_pos, vehicle_heading, max_paths=None):
+
+    plt.figure(figsize=(14, 10))
+
+    # Convert to numpy array for easier plotting
+    cones = np.array(cones)
+    vehicle_pos = np.array(vehicle_pos)
+
+    # Get all possible waypoints from Delaunay triangulation
+    waypoints, _, _ = get_midpoints(cones)
+
+    # Filter to only forward-facing waypoints (±90° arc)
+    vectors = waypoints - vehicle_pos
+    angles = np.arctan2(vectors[:, 1], vectors[:, 0])
+    angle_diff = angles - vehicle_heading
+    angle_diff = np.arctan2(np.sin(angle_diff), np.cos(angle_diff))
+    forward_mask = np.abs(angle_diff) < (np.pi / 2)
+    masked_waypoints = waypoints[forward_mask]
+
+    # Plot only masked waypoints (small but visible)
+    plt.scatter(masked_waypoints[:, 0], masked_waypoints[:, 1], c='lightgray', s=30,
+                marker='o', edgecolors='gray', linewidth=0.5,
+                label='Available Waypoints', zorder=2, alpha=0.7)
+
+    # Plot cones
+    plt.scatter(cones[:, 0], cones[:, 1], c='red', s=150,
+                marker='o', edgecolors='darkred', linewidth=2,
+                label='Cones', zorder=5, alpha=0.8)
+
+    # Plot vehicle position
+    plt.scatter(vehicle_pos[0], vehicle_pos[1], c='blue', s=500,
+                marker='*', edgecolors='black', linewidth=2,
+                label='Vehicle', zorder=10)
+
+    # Plot vehicle heading arrow
+    arrow_length = 0.8
+    dx = arrow_length * np.cos(vehicle_heading)
+    dy = arrow_length * np.sin(vehicle_heading)
+    plt.arrow(vehicle_pos[0], vehicle_pos[1], dx, dy,
+              head_width=0.25, head_length=0.2, fc='blue', ec='black',
+              linewidth=1.5, zorder=10)
+
+    # Randomly sample paths to display if specified
+    if max_paths and len(paths) > max_paths:
+        paths_to_show = random.sample(paths, max_paths)
+    else:
+        paths_to_show = paths
+
+    # Plot all paths with different colors and styles
+    colors = ['green', 'orange', 'purple', 'cyan', 'magenta',
+              'lime', 'pink', 'gold', 'brown', 'navy']
+    line_styles = ['-', '--', '-.', ':']  # Solid, dashed, dash-dot, dotted
+
+    for i, path in enumerate(paths_to_show):
+        if len(path) == 0:
+            continue
+
+        # Convert path to numpy array for plotting
+        path_array = np.array(path)
+        color = colors[i % len(colors)]
+        linestyle = line_styles[i % len(line_styles)]
+
+        # Plot path as connected line with style variation
+        plt.plot(path_array[:, 0], path_array[:, 1],
+                 color=color, linewidth=2.5, alpha=0.8, linestyle=linestyle,
+                 marker='o', markersize=7, markeredgecolor='black',
+                 markeredgewidth=0.8, zorder=3)
+
+        # Add path number at the end of each path
+        end_point = path_array[-1]
+        plt.text(end_point[0], end_point[1], f' {i+1}',
+                 fontsize=10, fontweight='bold', color=color,
+                 bbox=dict(boxstyle='round,pad=0.3', facecolor='white',
+                           edgecolor=color, alpha=0.8), zorder=15)
+
+        # Highlight starting waypoint with larger marker
+        start_point = path_array[0]
+        plt.scatter(start_point[0], start_point[1],
+                    c=color, s=200, marker='s', edgecolors='black',
+                    linewidth=2, zorder=8, alpha=0.9)
+
+    # Add path count to legend
+    if len(paths) > 0:
+        if max_paths and len(paths) > max_paths:
+            label_text = f'{len(paths_to_show)}/{len(paths)} paths shown'
+        else:
+            label_text = f'{len(paths)} paths generated'
+        plt.plot([], [], color='gray', linewidth=2.5, label=label_text)
+
+    plt.xlabel('X position (m)', fontsize=13, fontweight='bold')
+    plt.ylabel('Y position (m)', fontsize=13, fontweight='bold')
+    plt.title('Path Planning Visualization', fontsize=16, fontweight='bold', pad=20)
+    plt.legend(loc='best', fontsize=11, framealpha=0.9)
+    plt.grid(True, alpha=0.4, linestyle='--')
+    plt.axis('equal')
+    plt.tight_layout()
+    plt.show()
+
+
+if __name__ == "__main__":
+
+    oval_track = [
+        # Outer boundary
+        [0, 0], [2, -1], [4, -1.5], [6, -1.5], [8, -1], [10, 0],
+        [10, 2], [8, 3], [6, 3.5], [4, 3.5], [2, 3], [0, 2],
+        # Inner boundary
+        [2, 0.5], [4, 0], [6, 0], [8, 0.5], [8, 1.5], [6, 2],
+        [4, 2], [2, 1.5]
+    ]
+
+    vehicle_pos = [3, -0.5]  # On the racing line between inner/outer cones
+    vehicle_heading = np.radians(0)  # Heading straight down the track
+
+    paths = get_path_tree(
+        cones=oval_track,
+        vehicle_pos=vehicle_pos,
+        vehicle_heading=vehicle_heading,
+        max_depth=4,
+        k_start=2
+    )
+
+    visualize_paths(paths, oval_track, vehicle_pos, vehicle_heading, max_paths=10)
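As noted in the get_path_tree comment above, paths that dead-end before max_depth are silently discarded, and an empty list comes back if every branch dies. A rough variant of the expansion loop that keeps those partial paths instead; whether partial paths should then be scored by the cost function is an open design question:

```python
def expand_tree_keep_partial(waypoint_graph, starting_waypoints, is_forward, max_depth):
    """Like the loop in get_path_tree, but retains dead-end (partial) paths.

    Takes the graph/start/filter pieces directly rather than recomputing them.
    """
    finished = []
    current_level = [[wp] for wp in starting_waypoints]
    for _ in range(max_depth):
        next_level = []
        for path in current_level:
            extensions = [path + [wp] for wp in waypoint_graph.get(path[-1], set())
                          if wp not in path and is_forward(wp)]
            if extensions:
                next_level.extend(extensions)
            else:
                finished.append(path)  # dead end: keep the partial path
        current_level = next_level
    return finished + current_level
```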