Merge pull request #104 from IGNF/upgrade-deps
Upgrade dependencies and clean up code/packaging
leavauchier authored Apr 15, 2024
2 parents 289fc15 + cc55d16 commit 057a134
Showing 31 changed files with 392 additions and 267 deletions.
14 changes: 14 additions & 0 deletions .dockerignore
@@ -0,0 +1,14 @@
.hydra
.vscode
htmlcov

# Caches
**/__pycache__
.pytest_cache


# Distribution / packaging
build/
dist/
*.egg-info
*.egg
8 changes: 8 additions & 0 deletions .flake8
@@ -1,2 +1,10 @@
[flake8]
max-line-length = 99
show_source = True
format = pylint
extend-ignore = E203
exclude =
.git
__pycache__
data/*
logs/*
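For context: `extend-ignore = E203` keeps flake8 compatible with black, which formats slices with non-trivial bounds with spaces around the colon — exactly what E203 ("whitespace before ':'") would otherwise flag. A tiny, self-contained illustration:

```python
# black formats slices with non-trivial bounds like this; without ignoring E203,
# flake8 would flag the whitespace before the colon.
def trim(items, offset):
    return items[offset + 1 : len(items) - offset]


print(trim(list(range(10)), 2))  # [3, 4, 5, 6, 7]
```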
16 changes: 8 additions & 8 deletions .github/workflows/cicd.yaml
@@ -85,30 +85,30 @@ jobs:
- name: Get version number
id: tag
run: |
echo "::set-output name=version::$(docker run lidar_prod grep '__version__' package_metadata.yaml| cut -d\" -f2)"
echo "::set-output name=date::$(date '+%Y.%m.%d')"
echo "VERSION=$(python -m lidar_prod.version)" >> $GITHUB_ENV
echo "DATE=$(date '+%Y.%m.%d')" >> $GITHUB_ENV
# show possible tags, for debugging purposes
- name: Print tag
run: |
echo "${{steps.tag.outputs.version}}"
echo "${{steps.tag.outputs.date}}"
echo "${{env.VERSION}}"
echo "${{env.DATE}}"
- name: push main docker on nexus (tagged with a date)
# we push an image from the main branch to nexus when it has been updated (push or accepted pull request)
if: ((github.ref_name == 'main') && (github.event_name == 'push'))
run: |
docker tag lidar_prod $nexus_server/lidar_hd/lidar_prod:${{steps.tag.outputs.version}}-${{steps.tag.outputs.date}}
docker tag lidar_prod $nexus_server/lidar_hd/lidar_prod:${{ env.VERSION }}-${{ env.DATE }}
docker login $nexus_server --username svc_lidarhd --password ${{ secrets.PASSWORD_SVC_LIDARHD }}
docker push $nexus_server/lidar_hd/lidar_prod:${{steps.tag.outputs.version}}-${{steps.tag.outputs.date}}
docker push $nexus_server/lidar_hd/lidar_prod:${{ env.VERSION }}-${{ env.DATE }}
- name: push branch docker on nexus (tagged with the branch name)
# we push an image from a branch to nexus when it's pushed
if: ((github.event_name == 'push') && (github.ref_name != 'main'))
run: |
docker tag lidar_prod $nexus_server/lidar_hd/lidar_prod:${{steps.tag.outputs.version}}-${{github.ref_name}}
docker tag lidar_prod $nexus_server/lidar_hd/lidar_prod:${{ env.VERSION }}-${{github.ref_name}}
docker login $nexus_server --username svc_lidarhd --password ${{ secrets.PASSWORD_SVC_LIDARHD }}
docker push $nexus_server/lidar_hd/lidar_prod:${{steps.tag.outputs.version}}-${{github.ref_name}}
docker push $nexus_server/lidar_hd/lidar_prod:${{ env.VERSION }}-${{github.ref_name}}
- name: Clean dangling docker images
if: always() # always do it, even if something failed
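The tagging step above now reads the version via `python -m lidar_prod.version` instead of grepping `package_metadata.yaml`. The version module itself is not shown in this diff; a minimal sketch of what such a module could look like (layout and value are assumptions, not the repository's actual file):

```python
# Hypothetical sketch of lidar_prod/version.py -- not part of this diff.
__version__ = "0.0.0"  # placeholder; the real value lives in the repository

if __name__ == "__main__":
    # lets the CI step read the version with `python -m lidar_prod.version`
    print(__version__)
```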
35 changes: 8 additions & 27 deletions .github/workflows/gh-pages.yaml
@@ -23,34 +23,15 @@ jobs:

# See https://github.com/conda-incubator/setup-miniconda#caching-environments

# Setup empty conda environment
- name: Setup a conda-incubator with an empty conda env
uses: conda-incubator/setup-miniconda@v2
# See https://github.com/marketplace/actions/setup-micromamba
- name: install
uses: mamba-org/setup-micromamba@v1.4.3
with:
python-version: 3.9.12
miniforge-variant: Mambaforge
miniforge-version: latest
use-mamba: true
# Environment to create and activate for next steps
activate-environment: lidar_prod

# Cache the env
# See https://github.com/conda-incubator/setup-miniconda#caching-environments
- name: Get Date
id: get-date
run: echo "::set-output name=today::$(/bin/date -u '+%Y%m%d')"
shell: bash

- name: Cache conda environment
uses: actions/cache@v2
with:
path: ${{ env.CONDA }}/envs
key: conda-${{ runner.os }}--${{ runner.arch }}--${{ steps.get-date.outputs.today }}-${{ hashFiles('setup_env/requirements.yml') }}-${{ hashFiles('setup_env/requirements.txt') }}
id: cache

- name: Update environment if there was no cached env.
run: mamba env update -n lidar_prod -f setup_env/requirements.yml
if: steps.cache.outputs.cache-hit != 'true'
environment-file: environment.yml
environment-name: lidar_prod # activate the environment
cache-environment: true
cache-downloads: true
generate-run-shell: true

- name: replace BD_UNI credentials
run: cp configs/bd_uni_connection_params/credentials_template.yaml configs/bd_uni_connection_params/credentials.yaml
2 changes: 0 additions & 2 deletions .pre-commit-config.yaml
@@ -15,7 +15,6 @@ repos:
rev: 24.3.0
hooks:
- id: black
args: [--line-length, "99"]

# python code analysis
- repo: https://github.com/PyCQA/flake8
@@ -28,7 +27,6 @@
rev: 5.13.2
hooks:
- id: isort
args: ["--profile", "black", --line-length, "99"]

# yaml formatting
- repo: https://github.com/pre-commit/mirrors-prettier
3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,8 @@
# main

- Update pdal version to 2.6
- Code and packaging cleanup

## 1.10.0
- Add support for EPSG reference other than 2154

4 changes: 2 additions & 2 deletions Dockerfile
@@ -12,10 +12,10 @@ RUN apt-get update \
# to use docker caching if requirements files were not updated.
# Dir needs to be "/tmp" for micromamba to find the pip requirements...
WORKDIR /tmp
COPY ./setup_env/ .
COPY environment.yml environment.yml

# install the python packages via anaconda
RUN micromamba create --file /tmp/requirements.yml
RUN micromamba create --file /tmp/environment.yml

# Sets the environment name (since it is not named "base")
# This ensures that env is activated when using "docker run ..."
23 changes: 13 additions & 10 deletions docs/source/conf.py
@@ -13,8 +13,8 @@
import os
import sys

import yaml
from hydra.experimental import compose, initialize
import tomli
from hydra import compose, initialize
from omegaconf import OmegaConf

# from unittest import mock
@@ -24,15 +24,17 @@
abs_root_path = os.path.abspath(rel_root_path)
sys.path.insert(0, abs_root_path)

from lidar_prod.version import __version__ # noqa: E402

# -- Project information -----------------------------------------------------
with open(os.path.join(abs_root_path, "package_metadata.yaml"), "r") as f:
pm = yaml.safe_load(f)
with open(os.path.join(abs_root_path, "pyproject.toml"), "rb") as f:
data = tomli.load(f)

release = pm["__version__"]
project = pm["__name__"]
author = pm["__author__"]
copyright = pm["__copyright__"]

release = __version__
project = data["project"]["name"]
author = ", ".join([a["name"] for a in data["project"]["authors"]])
copyright = data["metadata"]["copyright"]

# -- YAML main to print the config into ---------------------------------------------------
# We need to concatenate configs into a single file using hydra
@@ -55,7 +57,7 @@
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
needs_sphinx = "4.5"
needs_sphinx = "7.2"
extensions = [
"sphinx.ext.napoleon", # Supports google-style docstrings
"sphinx.ext.autodoc", # auto-generates doc fgrom docstrings
@@ -64,7 +66,8 @@
"sphinx.ext.githubpages", # creates .nojekyll file to publish the doc on GitHub Pages.
"myst_parser", # supports markdown syntax for doc pages, and link to markdown pages
"sphinx_paramlinks", # allow to reference params, which is done in pytorch_lightning
"sphinxnotes.mock", # ignore third-parties directive suche as "testcode" - see "mock_directive" args below
"sphinxnotes.mock", # ignore third-parties directive suche as "testcode" -
# see "mock_directive" args below
"sphinxcontrib.mermaid", # enable mermaid schema
]

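For reference, the new `conf.py` pulls project metadata from `pyproject.toml` with `tomli`. A self-contained sketch of that lookup, using illustrative TOML content (the field values below are placeholders, not the repository's actual metadata):

```python
import tomli  # stdlib tomllib can be used instead on Python >= 3.11

# Illustrative TOML only -- the real metadata lives in the repository's pyproject.toml.
PYPROJECT = """
[project]
name = "lidar_prod"
authors = [{ name = "IGN" }]

[metadata]
copyright = "IGN"
"""

data = tomli.loads(PYPROJECT)
project = data["project"]["name"]
author = ", ".join(a["name"] for a in data["project"]["authors"])
copyright_notice = data["metadata"]["copyright"]
print(project, author, copyright_notice)
```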
23 changes: 20 additions & 3 deletions setup_env/requirements.yml → environment.yml
@@ -9,20 +9,37 @@ dependencies:
- black # code formatting
- isort # import sorting
- flake8 # code analysis
- rstcheck==3.3.*
# --------- geo --------- #
- conda-forge:python-pdal==3.2.*
- conda-forge:pdal==2.5.*
- conda-forge:pdal==2.6.*
- numpy
- scikit-learn
- geopandas
- pyproj
- laspy
# --------- others --------- #
- psycopg2 # database interaction
- jupyterlab # better jupyter notebooks
- pudb # debugger
- rich==11.2.* # rich text formatting
- pytest>=7.1.2 # tests
- pytest-cov==3.0.*
# --------- torch --------- #
# --------- documentation --------- #
- sphinx==7.2.*
- recommonmark==0.7.*
- sphinx_rtd_theme==2.0.*
- docutils==0.20.*
- pip:
- -r requirements.txt
# --------- postgis-toolkit --------- #
# - postgis-toolkit
# --------- hydra --------- #
- hydra-core==1.1.*
- hydra-colorlog==1.1.*
- optuna==2.10.*
# --------- Documentation --------- #
- myst_parser==2.0.*
- sphinx_paramlinks==0.6.*
- sphinxnotes-mock==1.0.*
- sphinx-argparse==0.4.*
- sphinxcontrib-mermaid==0.9.*
14 changes: 7 additions & 7 deletions lidar_prod/application.py
@@ -121,15 +121,15 @@ def apply_building_module(config: DictConfig, src_las_path: str, dest_las_path:
bd_uni_connection_params: BDUniConnectionParams = hydra.utils.instantiate(
config.bd_uni_connection_params
)

bv_cfg = config.building_validation.application
bv = BuildingValidator(
shp_path=config.building_validation.application.shp_path,
shp_path=bv_cfg.shp_path,
bd_uni_connection_params=bd_uni_connection_params,
cluster=config.building_validation.application.cluster,
bd_uni_request=config.building_validation.application.bd_uni_request,
data_format=config.building_validation.application.data_format,
thresholds=config.building_validation.application.thresholds,
use_final_classification_codes=config.building_validation.application.use_final_classification_codes,
cluster=bv_cfg.cluster,
bd_uni_request=bv_cfg.bd_uni_request,
data_format=bv_cfg.data_format,
thresholds=bv_cfg.thresholds,
use_final_classification_codes=bv_cfg.use_final_classification_codes,
)
bv.run(tmp_las_path)

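The change above is a pure readability refactor: the nested config node is aliased once instead of repeating the full path for every argument. A minimal sketch of the same pattern with OmegaConf (the toy config below is illustrative, not the project's actual configuration):

```python
from omegaconf import OmegaConf

# Toy config mirroring the nesting used in apply_building_module (values are placeholders).
config = OmegaConf.create(
    {
        "building_validation": {
            "application": {"shp_path": None, "use_final_classification_codes": True}
        }
    }
)

# Alias the deeply nested node once, then read its fields without repeating the full path.
bv_cfg = config.building_validation.application
print(bv_cfg.shp_path, bv_cfg.use_final_classification_codes)
```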
4 changes: 3 additions & 1 deletion lidar_prod/optimization.py
@@ -5,7 +5,9 @@

from lidar_prod.commons import commons
from lidar_prod.tasks.basic_identification_optimization import BasicIdentifierOptimizer
from lidar_prod.tasks.building_validation_optimization import BuildingValidationOptimizer
from lidar_prod.tasks.building_validation_optimization import (
BuildingValidationOptimizer,
)
from lidar_prod.tasks.utils import BDUniConnectionParams

log = logging.getLogger(__name__)
59 changes: 40 additions & 19 deletions lidar_prod/tasks/basic_identification.py
@@ -3,7 +3,9 @@
"""

from __future__ import annotations # to recognize IoU as a type by itself (in __add__())
from __future__ import (
annotations, # to recognize IoU as a type by itself (in __add__())
)

import logging
from typing import Union
@@ -39,9 +41,11 @@ def __add__(self, other_iou: IoU):
)

def __str__(self):
return "IoU: {:0.3f} | true positive: {:,} | false negative: {:,} | false positive: {:,}".format(
self.iou, self.true_positive, self.false_negative, self.false_positive
s = (
f"IoU: {self.iou:0.3f} | true positive: {self.true_positive:,} | "
+ f"false negative: {self.false_negative:,} | false positive: {self.false_positive:,}"
)
return s

@staticmethod
def combine_iou(iou_list: list):
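The `IoU` summary printed above is built from true-positive, false-negative and false-positive counts. A rough sketch, not the repository's implementation, of how such an IoU can be derived from two boolean masks (the pattern used later via `IoU.iou_by_mask`):

```python
import numpy as np


def iou_from_masks(pred: np.ndarray, target: np.ndarray) -> float:
    """Illustrative IoU between two boolean masks of the same shape."""
    true_positive = np.count_nonzero(pred & target)
    false_positive = np.count_nonzero(pred & ~target)
    false_negative = np.count_nonzero(~pred & target)
    return true_positive / (true_positive + false_positive + false_negative)


# Example: the masks agree on 2 of the 4 points marked positive in either -> IoU = 0.5
pred = np.array([True, True, True, False])
target = np.array([True, True, False, True])
print(f"IoU: {iou_from_masks(pred, target):0.3f}")
```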
@@ -73,18 +77,21 @@ def __init__(
target_result_code: Union[int, list] = None,
) -> None:
"""
BasicIdentifier sets all points with a value from a column above a threshold to another value in another column
BasicIdentifier sets all points with a value from a column above a threshold to another
value in another column
args:
threshold: above the threshold, a point is set
proba_column: the column the threshold is compared against
result_column: the column to store the result
result_code: the value the point will be set to
evaluate_iou: True if we want to evaluate the IoU of that selection
target_column: if we want to evaluate the IoU, this is the column with the real results to compare against
target_result_code: if we want to evaluate the IoU, this is/are the code(s) of the target.
Can be an int or a list of int, if we want an IoU but target_result_code
is not provided then result_code is used instead.
target_column: if we want to evaluate the IoU, this is the column with the real
results to compare against
target_result_code: if we want to evaluate the IoU, this is/are the code(s) of
the target. Can be an int or a list of int, if we want an IoU
but target_result_code is not provided then result_code is used
instead.
"""
self.threshold = threshold
self.proba_column = proba_column
@@ -115,20 +122,34 @@ def identify(self, las_data: laspy.lasdata.LasData) -> None:
self.iou = IoU.iou_by_mask(threshold_mask, target_mask)

# MONKEY PATCHING !!! for debugging
# if self.result_code == 1: # unclassified
# if self.result_code == 1: # unclassified
# truth_mask = las_data.points["classification"] == 1
# else: # vegetation
# else: # vegetation
# truth_mask = np.isin(las_data.points["classification"], [3, 4, 5])

# print("threshold_mask size: ", np.count_nonzero(threshold_mask), "truth_mask size: ", np.count_nonzero(truth_mask))

# self.result_code = self.result_code if self.result_code ==1 else 11
# print(
# "threshold_mask size: ",
# np.count_nonzero(threshold_mask),
# "truth_mask size: ",
# np.count_nonzero(truth_mask),
# )

# las_data.points[self.result_column][np.logical_and(truth_mask, threshold_mask)] = self.result_code # correct values
# las_data.points[self.result_column][np.logical_and(truth_mask, ~threshold_mask)] = self.result_code+1 # false positive
# las_data.points[self.result_column][np.logical_and(~truth_mask, threshold_mask)] = self.result_code+2 # false negative
# self.result_code = self.result_code if self.result_code == 1 else 11

# las_data.points[self.result_column][
# np.logical_and(truth_mask, threshold_mask)
# ] = self.result_code # correct values
# las_data.points[self.result_column][np.logical_and(truth_mask, ~threshold_mask)] = (
# self.result_code + 1
# ) # false positive
# las_data.points[self.result_column][np.logical_and(~truth_mask, threshold_mask)] = (
# self.result_code + 2
# ) # false negative
# print(
# "true positive: ", np.count_nonzero(np.logical_and(truth_mask, threshold_mask)),
# "False positive: ", np.count_nonzero(np.logical_and(truth_mask, ~threshold_mask)),
# "False negative: ", np.count_nonzero(np.logical_and(~truth_mask, threshold_mask))
# "true positive: ",
# np.count_nonzero(np.logical_and(truth_mask, threshold_mask)),
# "False positive: ",
# np.count_nonzero(np.logical_and(truth_mask, ~threshold_mask)),
# "False negative: ",
# np.count_nonzero(np.logical_and(~truth_mask, threshold_mask)),
# )
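To make the reflowed docstring above concrete: `BasicIdentifier` compares one dimension against a threshold and writes a code into another dimension. A hedged, self-contained sketch with plain numpy arrays standing in for LAS dimensions (names, values and the comparison operator below are illustrative, not the class's actual logic):

```python
import numpy as np

threshold = 0.5
proba = np.array([0.1, 0.7, 0.9, 0.4])          # stand-in for the proba_column dimension
result = np.zeros(proba.shape, dtype=np.uint8)  # stand-in for the result_column dimension
result_code = 6                                 # hypothetical classification code

# Points above the threshold receive the result code.
threshold_mask = proba > threshold
result[threshold_mask] = result_code
print(result)  # -> [0 6 6 0]
```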
4 changes: 2 additions & 2 deletions lidar_prod/tasks/basic_identification_optimization.py
@@ -33,8 +33,8 @@ def __init__(
target_column: the column with the target results to compare against
n_trials: number of trials to get the best IoU
target_result_code: the code(s) defining the points with the target results.
Can be an int or a list of int, if we want an IoU but target_result_code
is not provided then result_code is used instead.
Can be an int or a list of int, if we want an IoU but
target_result_code is not provided then result_code is used instead.
"""

self.study = optuna.create_study(
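`BasicIdentifierOptimizer` searches for the threshold that maximizes IoU with optuna. A minimal sketch of that idea on synthetic data (the objective, data and search range below are invented for illustration; the real optimizer works on LAS dimensions through `BasicIdentifier` and `IoU`):

```python
import numpy as np
import optuna

rng = np.random.default_rng(0)
proba = rng.random(1_000)                              # synthetic "probability" dimension
target = (proba + rng.normal(0.0, 0.2, 1_000)) > 0.6   # synthetic ground-truth mask


def objective(trial: optuna.Trial) -> float:
    threshold = trial.suggest_float("threshold", 0.0, 1.0)
    pred = proba > threshold
    tp = np.count_nonzero(pred & target)
    fp = np.count_nonzero(pred & ~target)
    fn = np.count_nonzero(~pred & target)
    union = tp + fp + fn
    return tp / union if union else 0.0


study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=50)
print(study.best_params)  # best threshold found, e.g. around 0.6
```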