diff --git a/config/config_det.template.yaml b/config/config_det.template.yaml
index a57e84f..f595362 100644
--- a/config/config_det.template.yaml
+++ b/config/config_det.template.yaml
@@ -1,11 +1,9 @@
-###################################
 ##### Detection by inference #####
 ####### template #######
 # Automatic detection of Mineral Extraction Sites (MES) in images
 
 # 1-Prepare the tiles geometry according to the AOI and zoom level
 prepare_data.py:
-  srs: EPSG:2056 # Projection of the input file
   datasets:
     shapefile: ./data/AoI/swissimage_footprint_SWISSIMAGE_YEAR.shp
   output_folder: ./output/det/SWISSIMAGE_YEAR
@@ -13,12 +11,16 @@ prepare_data.py:
 
 # 2-Request tiles according to the provided AOI and tiles parameters
 generate_tilesets.py:
-  debug_mode: False
+  debug_mode:
+    enable: False # sample of tiles
+    nb_tiles_max: 5000
+  working_directory: .
   datasets:
-    aoi_tiles_geojson: ./output/det/SWISSIMAGE_YEAR/tiles.geojson
-    orthophotos_web_service:
-      type: XYZ # supported values: 1. MIL = Map Image Layer 2. WMS 3. XYZ
-      url: https://wmts.geo.admin.ch/1.0.0/ch.swisstopo.swissimage-product/default/SWISSIMAGE_YEAR/3857/{z}/{x}/{y}.jpeg
+    aoi_tiles: ./output/det/SWISSIMAGE_YEAR/tiles.geojson
+    image_source:
+      type: XYZ # supported values: 1. MIL = Map Image Layer 2. WMS 3. XYZ 4. FOLDER
+      year: SWISSIMAGE_YEAR # supported values: 1. multi-year (tiles of different year), 2. (i.e. 2020)
+      location: https://wmts.geo.admin.ch/1.0.0/ch.swisstopo.swissimage-product/default/{year}/3857/{z}/{x}/{y}.jpeg
   output_folder: ./output/det/SWISSIMAGE_YEAR
   tile_size: 256 # per side, in pixels
   overwrite: True
@@ -30,28 +32,28 @@ generate_tilesets.py:
     contributor: swisstopo
     url: https://swisstopo.ch
     license:
-      name: Unknown
-      url:
+      name: unknown
+      url: unknown
+  categories_file: ./data/category_ids.json
 
-# 3-Perform the object detection based on the optimized trained model
+# Object detection with the optimised trained model
 make_detections.py:
-  working_folder: ./output/det/SWISSIMAGE_YEAR
+  working_directory: ./output/det/SWISSIMAGE_YEAR
   log_subfolder: logs
   sample_tagged_img_subfolder: sample_tagged_images
-  COCO_files:           # relative paths, w/ respect to the working_folder
+  COCO_files: # relative paths, w/ respect to the working_folder
     oth: COCO_oth.json
-  detectron2_config_file: ../../../config/detectron2_config_dqry.yaml # path relative to the working_folder
+  detectron2_config_file: ../../../config/detectron2_config.yaml # path relative to the working_folder
   model_weights:
-    pth_file: ../../../models/model_0002999.pth # trained model minimizing the validation loss curve,
-                                                # monitoring of the training process via tensorboard (tensorboard --logdir )
-                                                # for the provided parameters model_0002999.pth is chosen
-  image_metadata_json: ./output/det/SWISSIMAGE_YEAR/img_metadata.json
-  rdp_simplification:   # rdp = Ramer-Douglas-Peucker
+    pth_file: ../../../models/model_0002999.pth # trained model minimising the validation loss curve, monitor the training process via tensorboard (tensorboard --logdir )
+  image_metadata_json: img_metadata.json
+  rdp_simplification: # rdp = Ramer-Douglas-Peucker
     enabled: True
-    epsilon: 2.0   # cf. https://rdp.readthedocs.io/en/latest/
+    epsilon: 2.0 # cf. https://rdp.readthedocs.io/en/latest/
   score_lower_threshold: 0.3
+  remove_det_overlap: False # if several detections overlap (IoU > 0.5), only the one with the highest confidence score is retained
 
-# 4-Filtering and merging detection polygons to improve results
+# Filter and merge detection polygons to improve results
 filter_detections.py:
   year: SWISSIMAGE_YEAR
   detections: ./output/det/SWISSIMAGE_YEAR/oth_detections_at_0dot3_threshold.gpkg
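
Note on the new remove_det_overlap key above: its behaviour is only described in the inline comment (detections overlapping with IoU > 0.5 keep only the highest-scoring one). Below is a minimal Python sketch of that rule on toy geometries, assuming shapely is available; the helper names and sample boxes are invented for illustration and this is not the repository's implementation.

from shapely.geometry import box

def iou(a, b):
    # Intersection over union of two shapely geometries.
    union = a.union(b).area
    return a.intersection(b).area / union if union > 0 else 0.0

def keep_best_non_overlapping(detections, iou_threshold=0.5):
    # detections: list of (geometry, score); for every group of mutually
    # overlapping detections (IoU above the threshold), keep the highest score.
    kept = []
    for geom, score in sorted(detections, key=lambda d: d[1], reverse=True):
        if all(iou(geom, kept_geom) <= iou_threshold for kept_geom, _ in kept):
            kept.append((geom, score))
    return kept

# Two heavily overlapping squares plus a distant one: the 0.6-score square is dropped.
dets = [(box(0, 0, 10, 10), 0.9), (box(1, 1, 11, 11), 0.6), (box(20, 20, 30, 30), 0.4)]
print(len(keep_best_non_overlapping(dets)))  # 2
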
diff --git a/config/config_track.yaml b/config/config_track.yaml
index fe1d230..1ac5aec 100644
--- a/config/config_track.yaml
+++ b/config/config_track.yaml
@@ -1,16 +1,15 @@
-################################################
 ### Identifying and tracking MES over time ###
 
 # Track object in different datasets over years
 track_detections.py:
-  years: <[YEAR1, YEAR2, YEAR3,...]> # Provide a list of years used for detection
+  years: [YEAR1, YEAR2, YEAR3,...] # Provide a list of years used for detection
   datasets:
     detection: ./data/oth_detections_at_0dot3_threshold_year-{year}_score-0dot95_area-5000_elevation-1200_distance-10.geojson # Final detection file, produced by filter_detections.py
   output_folder: ./output/track/oth_detections_at_0dot3_threshold_score-0dot95_area-5000_elevation-1200_distance-10
 
 # Plots
 plots.py:
-  object_id: <[ID_OBJECT1, ID_OBJECT2, ID_OBJECT3,...]> # Provide a list of id_object defined by track_detections.py
+  object_id: [ID_OBJECT1, ID_OBJECT2, ID_OBJECT3,...] # Provide a list of id_object defined by track_detections.py
   plots: ['area-year']
   datasets:
     detection: ./output/track/oth_detections_at_0dot3_threshold_score-0dot95_area-5000_elevation-1200_distance-10/detections_years.geojson # Object tracking file, produced by track_detections.py
diff --git a/config/config_trne.yaml b/config/config_trne.yaml
index db63a25..b907efd 100644
--- a/config/config_trne.yaml
+++ b/config/config_trne.yaml
@@ -1,8 +1,7 @@
-#############################################
 ####### Model training and evaluation #######
 # Training of automatic detection of Mineral Extraction Sites (MES) in images with a provided ground truth
 
-# 1-Prepare the tiles geometry according to the AOI and zoom level
+# Produce tile geometries based on the AoI extent and zoom level
 prepare_data.py:
   srs: EPSG:2056
   datasets:
@@ -12,21 +11,44 @@ prepare_data.py:
     # empty_tiles_year: # If "empty_tiles_aoi" selected then provide a year. Choice: (1) numeric (i.e. 2020), (2) [year1, year2] (random selection of a year within a given year range)
     # empty_tiles_shp: ./data/empty_tiles/ # Provided shapefile of selected empty tiles. Only one 'empty_tiles' option can be selected
     # category: # If it exists, indicate the attribute column name of the label class
-  output_folder: ./output/trne
-  zoom_level: 16 #z, keep between 15 and 18
+  output_folder: ./output/trne/
+  zoom_level: 16 # z, keep between 15 and 18
 
-# 2-Fetch of tiles (online server) and split into 3 datasets: train, test, validation
+# Fetch of tiles (online server) and split into 3 datasets: train, test, validation
 generate_tilesets.py:
-  debug_mode: False # sample of tiles
+  debug_mode:
+    enable: False # sample of tiles
+    nb_tiles_max: 2000
+  working_directory: .
   datasets:
-    aoi_tiles_geojson: ./output/trne/tiles.geojson
-    ground_truth_labels_geojson: ./output/trne/labels.geojson
-    orthophotos_web_service:
-      type: XYZ # supported values: 1. MIL = Map Image Layer 2. WMS 3. XYZ
-      url: https://wmts.geo.admin.ch/1.0.0/ch.swisstopo.swissimage-product/default/2020/3857/{z}/{x}/{y}.jpeg # Models are trained on 2020 (default year) SWISSIMAGE mosaic
-  output_folder: ./output/trne
+    aoi_tiles: output/trne/tiles.geojson
+    ground_truth_labels: output/trne/labels.geojson
+    # fp_labels:
+    #   fp_shp: output_trne/FP.geojson
+    #   frac_trn: 0.7 # fraction of fp tiles to add to the trn dataset, then the remaining tiles will be split in 2 and added to tst and val datasets
+    image_source:
+      # #############
+      # type: FOLDER
+      # year: multi-year # Optional, supported values: 1. multi-year (tiles of different year), 2. (i.e. 2020)
+      # location:
+      # srs: "EPSG:3857"
+      # #############
+      # type: WMS # supported values: 1. MIL = Map Image Layer 2. WMS 3. XYZ 4. FOLDER
+      # location: https://wms.geo.admin.ch/service
+      # layers: ch.swisstopo.swissimage
+      # srs: "EPSG:2056"
+      # ############
+      type: XYZ # supported values: 1. MIL = Map Image Layer 2. WMS 3. XYZ 4. FOLDER
+      year: 2020 # supported values: 1. multi-year (tiles of different year), 2. (i.e. 2020)
+      location: https://wmts.geo.admin.ch/1.0.0/ch.swisstopo.swissimage-product/default/{year}/3857/{z}/{x}/{y}.jpeg
+      # ############
+    # empty_tiles: # add empty tiles to datasets
+    #   tiles_frac: 0.5 # fraction (relative to the number of tiles intersecting labels) of empty tiles to add
+    #   frac_trn: 0.7 # fraction of empty tiles to add to the trn dataset, then the remaining tiles will be split in 2 and added to tst and val datasets
+    #   keep_oth_tiles: False # keep tiles in oth dataset not intersecting oth labels
+  output_folder: output/trne
   tile_size: 256 # per side, in pixels
-  overwrite: False
+  overwrite: True
   n_jobs: 10
   COCO_metadata:
     year: 2021
@@ -35,51 +57,53 @@ generate_tilesets.py:
     contributor: swisstopo
     url: https://swisstopo.ch
     license:
-      name: Unknown
-      url:
+      name: unknown
+      url: unknown
 
-# 3-Train the model with the detectron2 algorithm
-# Monitor the training process via tensorboard (tensorboard --logdir ). Choice of the optimized model: minimisation of the validation loss curve
+# Train the model with the detectron2 algorithm
 train_model.py:
-  working_folder: ./output/trne
+  working_directory: ./output/trne/
   log_subfolder: logs
   sample_tagged_img_subfolder: sample_tagged_images
   COCO_files: # relative paths, w/ respect to the working_folder
     trn: COCO_trn.json
     val: COCO_val.json
     tst: COCO_tst.json
-  detectron2_config_file: ../../config/detectron2_config_dqry.yaml # path relative to the working_folder
+  detectron2_config_file: ../../config/detectron2_config.yaml # path relative to the working_folder
   model_weights:
     model_zoo_checkpoint_url: COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml
 
-# 4-Perform the object detection based on the optimized trained model
+# Object detection with the optimised trained model
 make_detections.py:
-  working_folder: ./output/trne
+  working_directory: ./output/trne/
   log_subfolder: logs
   sample_tagged_img_subfolder: sample_tagged_images
-  COCO_files:           # relative paths, w/ respect to the working_folder
+  COCO_files: # relative paths, w/ respect to the working_folder
    trn: COCO_trn.json
    val: COCO_val.json
    tst: COCO_tst.json
-  detectron2_config_file: ../../config/detectron2_config_dqry.yaml # path relative to the working_folder
+  detectron2_config_file: ../../config/detectron2_config.yaml # path relative to the working_folder
   model_weights:
-    pth_file: ./logs/model_0002999.pth # trained model minimizing the validation loss curve,
-                                       # monitoring of the training process via tensorboard (tensorboard --logdir )
-                                       # for the provided parameters model_0002999.pth is chosen
-  image_metadata_json: ./output/trne/img_metadata.json
-  rdp_simplification:   # rdp = Ramer-Douglas-Peucker
-    enabled: true
-    epsilon: 2.0   # cf. https://rdp.readthedocs.io/en/latest/
+    pth_file: ./logs/model_0002999.pth # trained model minimising the validation loss curve, monitor the training process via tensorboard (tensorboard --logdir )
+  image_metadata_json: img_metadata.json
+  rdp_simplification: # rdp = Ramer-Douglas-Peucker
+    enabled: True
+    epsilon: 2.0 # cf. https://rdp.readthedocs.io/en/latest/
   score_lower_threshold: 0.05
+  remove_det_overlap: False # if several detections overlap (IoU > 0.5), only the one with the highest confidence score is retained
 
-# 5-Evaluate the quality of the detection for the different datasets with metrics calculation
+# Evaluate the detection quality for the different datasets by calculating metrics
 assess_detections.py:
+  working_directory: ./output/trne/
   datasets:
-    ground_truth_labels_geojson: ./output/trne/labels.geojson
-    image_metadata_json: ./output/trne/img_metadata.json
-    split_aoi_tiles_geojson: ./output/trne/split_aoi_tiles.geojson # aoi = Area of Interest
+    ground_truth_labels: labels.geojson
+    split_aoi_tiles: split_aoi_tiles.geojson # aoi = Area of Interest
+    categories: category_ids.json
     detections:
-      trn: ./output/trne/trn_detections_at_0dot05_threshold.gpkg
-      val: ./output/trne/val_detections_at_0dot05_threshold.gpkg
-      tst: ./output/trne/tst_detections_at_0dot05_threshold.gpkg
-  output_folder: ./output/trne
\ No newline at end of file
+      trn: trn_detections_at_0dot05_threshold.gpkg
+      val: val_detections_at_0dot05_threshold.gpkg
+      tst: tst_detections_at_0dot05_threshold.gpkg
+  output_folder: .
+  iou_threshold: 0.1
+  area_threshold: 50 # area under which the polygons are discarded from assessment
+  metrics_method: macro-average # 1: macro-average ; 2: macro-weighted-average ; 3: micro-average
\ No newline at end of file
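
Note on the new metrics_method key in assess_detections.py: it lists three aggregation choices. Below is a minimal, self-contained sketch of what the three options usually mean, with invented class names and counts (not project data) and interpreting the weighted variant as support-weighted.

from collections import namedtuple

Counts = namedtuple("Counts", "tp fp fn")
# Hypothetical per-class detection counts, purely for illustration.
per_class = {"class_a": Counts(tp=80, fp=20, fn=10), "class_b": Counts(tp=5, fp=5, fn=15)}

def precision(c):
    return c.tp / (c.tp + c.fp) if (c.tp + c.fp) else 0.0

# macro-average: compute the metric per class, then take the unweighted mean.
macro = sum(precision(c) for c in per_class.values()) / len(per_class)

# macro-weighted-average: same, but weight each class by its support (tp + fn).
support = {k: c.tp + c.fn for k, c in per_class.items()}
weighted = sum(precision(c) * support[k] for k, c in per_class.items()) / sum(support.values())

# micro-average: pool the counts over all classes, then compute the metric once.
pooled = Counts(tp=sum(c.tp for c in per_class.values()),
                fp=sum(c.fp for c in per_class.values()),
                fn=sum(c.fn for c in per_class.values()))
micro = precision(pooled)

print(round(macro, 2), round(weighted, 2), round(micro, 2))  # 0.65 0.75 0.77 - rare classes weigh more in the plain macro average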