diff --git a/src/raster/r.timeseries.locations/r.timeseries.locations.html b/src/raster/r.timeseries.locations/r.timeseries.locations.html
index 7571769a..e5e4b83b 100644
--- a/src/raster/r.timeseries.locations/r.timeseries.locations.html
+++ b/src/raster/r.timeseries.locations/r.timeseries.locations.html
@@ -4,6 +4,20 @@
DESCRIPTION
coherent sub-units based on a continuous raster map and break values for
subdivisions. It is written for locations in NVEs time series database.
+The tool supports the following methods to create subdivision maps:
+
+ - percentile: creates sub-units from the continuous_subdivision_map
+ using user-given percentiles for intervals.
+ - linear: creates sub-units from the continuous_subdivision_map
+ using linear scaled intervals.
+ - database: creates sub-units by applying interval breaks that are
+   pre-defined in the database to the continuous_subdivision_map.
+ - map_units: creates sub-units from the unique categories of the
+ continuous_subdivision_map. The continuous_subdivision_map
+ should thus contain integer values. Floating point values will be
+   rounded according to the quantization rules in r.stats.
+
+
EXAMPLES
Subdivide watersheds using altitude bands and breaks from DB
@@ -17,13 +31,26 @@ Subdivide watersheds using altitude bands and breaks from DB
--o --v
+Subdivide glacier lines using altitude pixels
+
+
+r.timeseries.locations locations_url="MSSQL:driver=FreeTDS;server=tst.sql.server.com;port=1433;database=testdatabase" \
+ where="parent_id IS NULL" locations=glacier_lines continuous_subdivision_map="DTM" \
+ round_to_closest=1 method=database domain_id=80 layer="dbo.region" type="line" \
+ locations_subunits="glacier_lines_altitude" nprocs=20 \
+ keepass_file="/hdata/fjernanalyse3/CopernicusUtviklingOgTest/data/actinia_secrets.kdbx" keepass_title=mssql \
+ --o --v
+
+
SEE ALSO
-v.in.ogr,
+r.mapcalc,
+r.stats,
r.to.vect,
+r.univar,
+v.in.ogr,
v.to.rast,
-r.mapcalc,
AUTHOR
diff --git a/src/raster/r.timeseries.locations/r.timeseries.locations.py b/src/raster/r.timeseries.locations/r.timeseries.locations.py
index a3359819..0afb4421 100644
--- a/src/raster/r.timeseries.locations/r.timeseries.locations.py
+++ b/src/raster/r.timeseries.locations/r.timeseries.locations.py
@@ -1,10 +1,9 @@
#!/usr/bin/env python3
-"""
-MODULE: r.timeseries.locations
+"""MODULE: r.timeseries.locations
AUTHOR(S): Stefan Blumentrath
-PURPOSE: Manage locations for time series in NVE time series DB
-COPYRIGHT: (C) 2023 by Stefan Blumentrath
+PURPOSE: Manage locations for time series in NVE TimeSeries DB
+COPYRIGHT: (C) 2023-2025 by NVE, Stefan Blumentrath
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -19,7 +18,7 @@
"""
# %module
-# % description: Manage locations for time series in NVE time series DB
+# % description: Manage locations for time series in NVE TimeSeries DB
# % keyword: NVE
# % keyword: import
# % keyword: export
@@ -60,6 +59,9 @@
# % description: Layer name of the OGR data source to import
# %end
+# %option G_OPT_V_TYPE
+# %end
+
# %option G_OPT_DB_WHERE
# %end
@@ -74,7 +76,7 @@
# %option
# % key: method
-# % options: percentile,linear,database
+# % options: percentile,linear,database,map_units
# % required: no
# % multiple: no
# % description: Method for generating subunits
@@ -141,6 +143,7 @@
# % collective: locations_subunits,method,continuous_subdivision_map
# %end
+import json
import os
import sys
from functools import partial
@@ -150,18 +153,23 @@
import numpy as np
-def round_to_closest(x, y):
- """Round value x to closest y"""
+def round_to_closest(x: float | np.array, y: float) -> tuple:
+ """Round value x to closest y."""
if not y:
return x
return tuple(np.round(np.array(x).astype(float) / y, 0).astype(type(y)) * y)
def keepass_to_env(
- keepass_file, keepass_pwd, title, username_var, password_var, first=True
-):
- """Write KeePass entries into environment variables"""
-
+ keepass_file: str,
+ keepass_pwd: str,
+ title: str,
+ username_var: str,
+ password_var: str,
+ *,
+ first: bool = True,
+) -> None:
+ """Write KeePass entries into environment variables."""
kp = PyKeePass(keepass_file, password=keepass_pwd)
entry = kp.find_entries(title=title, first=first)
os.environ[username_var] = entry.username
@@ -169,17 +177,22 @@ def keepass_to_env(
def create_graph(
- raster_map, values, min_value="-Inf", max_value="Inf", epsilon=0.00000001
-):
- """Create a map calculator graph() function from a mapname and a list of range values
+ raster_map: str,
+ values: tuple[int],
+ min_value: float | str = "-Inf",
+ max_value: float | str = "Inf",
+ epsilon: float = 0.00000001,
+) -> str:
+ """Create a map calculator graph() function from a mapname and a list of range values.
+
:param raster_map: Name of the raster map to create the graph function for
:type raster_map: str
:param values: tuple with break points to build the graph with
:type values: tuple
- :param min: Minimum value of the input raster map
- :type min: float
- :param max: Maximum value of the input raster map
- :type max: float
+ :param min_value: Minimum value of the input raster map
+ :type min_value: float | str
+ :param max_value: Maximum value of the input raster map
+ :type max_value: float | str
:param epsilon: minimal value to dstigush break boundaries
:type epsilon: float
:return: A string with a graph() function for r.mapcalc
@@ -196,8 +209,39 @@ def create_graph(
return f"graph({raster_map},{','.join(value_list)})"
-def range_dict_from_db(grass_options):
- """Read user defined breaks for each ID from DB
+def range_dict_from_map_combination(grass_options: dict, range_scale: int) -> dict:
+ """Create ranges from the combination of location and continuous_subdivision_map.
+
+ :param options: The GRASS GIS options dict from g.parser
+ :type options: dict
+ :param range_scale: Integer to scale the locations map with
+ :type range_scale: int
+ :return: A dict with class breaks per ID
+ """
+ stats = Module(
+ "r.stats",
+ flags="cin",
+ input=[grass_options["locations"], grass_options["continuous_subdivision_map"]],
+ stdout_=PIPE,
+ )
+
+ map_stats = np.genfromtxt(
+ stats.outputs.stdout.split("\n"),
+ delimiter=" ",
+ names=["cat", "sub_cat", "n"],
+ dtype=None,
+ encoding="UTF8",
+ )
+ map_stats = {
+ int(m["cat"] * range_scale + m["sub_cat"]): (int(m["sub_cat"]), int(m["n"]))
+ for m in map_stats
+ }
+ return dict(sorted(map_stats.items()))
+
+
+def range_dict_from_db(grass_options: dict) -> dict:
+ """Read user defined breaks for each ID from DB.
+
:param options: The GRASS GIS options dict from g.parser
:type options: dict
:return: A dict with class breaks per ID
@@ -205,7 +249,7 @@ def range_dict_from_db(grass_options):
# Get data from DB
conn = pyodbc.connect(
grass_options["locations_url"].replace("MSSQL:", "")
- + f";UID={os.environ.get('MSSQLSPATIAL_UID')};PWD={os.environ.get('MSSQLSPATIAL_PWD')}"
+ + f";UID={os.environ.get('MSSQLSPATIAL_UID')};PWD={os.environ.get('MSSQLSPATIAL_PWD')}",
)
cursor = conn.cursor()
res = cursor.execute(
@@ -213,7 +257,7 @@ def range_dict_from_db(grass_options):
FROM {grass_options["layer"]}
WHERE domain_id = {grass_options["domain_id"]} AND parent_id IS NOT NULL
ORDER BY parent_id, minimum_elevation_m, maximum_elevation_m
-;"""
+;""",
)
range_table = res.fetchall()
conn.close()
@@ -223,14 +267,16 @@ def range_dict_from_db(grass_options):
if row[0] in range_dict:
range_dict[row[0]].append(row[1:])
else:
- range_dict[row[0]] = [(row[1:])]
+ range_dict[row[0]] = [row[1:]]
return range_dict
-def range_dict_from_statistics(grass_options):
- """Compute breaks from statistics of the continuous_subdivision_map
- according to user given method, class_number and rounding precision
+def range_dict_from_statistics(grass_options: dict) -> dict:
+ """Compute breaks from statistics of the continuous_subdivision_map.
+
+ Breaks are computed according to user given method, class_number and
+ rounding precision.
:param options: The GRASS GIS options dict from g.parser
:type options: dict
:return: A dict with class breaks per ID
@@ -240,8 +286,9 @@ def range_dict_from_statistics(grass_options):
if grass_options["method"] == "percentile":
univar_percentile = list(
np.round(
- np.linspace(0, 100, num=class_number + 1, endpoint=True), 0
- ).astype(int)
+ np.linspace(0, 100, num=class_number + 1, endpoint=True),
+ 0,
+ ).astype(int),
)
univar_flags = "et"
@@ -280,10 +327,13 @@ def range_dict_from_statistics(grass_options):
int(s["zone"]): list(
round_to_closest(
np.linspace(
- s["min"], s["max"], num=class_number + 1, endpoint=True
+ s["min"],
+ s["max"],
+ num=class_number + 1,
+ endpoint=True,
),
float(grass_options["round_to_closest"]),
- )
+ ),
)
for s in univar_stats
if not np.isnan(s["max"])
@@ -298,8 +348,8 @@ def range_dict_from_statistics(grass_options):
}
-def main():
- """Do the main work"""
+def main() -> None:
+ """Do the main work."""
locations = options["locations"]
where = f"domain_id = {options['domain_id']} AND parent_id IS NULL"
continuous_subdivision_map = options["continuous_subdivision_map"]
@@ -309,11 +359,13 @@ def main():
if "KEEPASS_PWD" not in os.environ:
gs.fatal(
_(
- "The KeePass password needs to be provided through environment variable 'KEEPASS_PWD'"
- )
+ "The KeePass password needs to be provided through environment variable 'KEEPASS_PWD'",
+ ),
)
gs.verbose(
- _("Trying to get keepass entries from <{}>").format(options["keepass_file"])
+ _("Trying to get keepass entries from <{}>").format(
+ options["keepass_file"],
+ ),
)
keepass_to_env(
options["keepass_file"],
@@ -328,7 +380,6 @@ def main():
Module(
"v.in.ogr",
flags="o",
- # Until GDAL 3.6 is available UID and PWD have to be provided in the connection string
input=options["locations_url"] + f";Tables={schema}.{layer}",
layer=layer if schema == "dbo" else f"{schema}.{layer}",
where=where + " AND " + options["where"] if options["where"] else where,
@@ -351,7 +402,7 @@ def main():
"v.to.rast",
input=locations,
output=locations,
- type="area",
+ type=options["type"],
use="attr",
attribute_column="id",
label_column="name",
@@ -360,19 +411,53 @@ def main():
gs.raster.raster_history(locations, overwrite=True)
if options["locations_subunits"]:
- if options["method"] == "database":
- range_dict = range_dict_from_db(options)
- elif options["method"] in ["percentile", "linear"]:
- range_dict = range_dict_from_statistics(options)
-
- create_sub_graph = partial(
- create_graph,
- continuous_subdivision_map,
- min_value=-9999,
- max_value=9999,
- epsilon=0.000001,
- )
- mc_expression = f"""{options["locations_subunits"]}=int(graph({locations},{", ".join(f"{cat}, int({create_sub_graph(values)})" for cat, values in range_dict.items())}))"""
+ if options["method"] == "map_units":
+ map_stats = gs.parse_key_val(
+ Module(
+ "r.info",
+ flags="gr",
+ map=options["locations_subunits"],
+ stdout_=PIPE,
+ ).outputs.stdout,
+ )
+ if not map_stats["max"]:
+ map_stats = json.loads(
+ Module(
+ "r.univar",
+ map=options["locations_subunits"],
+ format="json",
+ stdout_=PIPE,
+ ).outputs.stdout,
+ )
+ range_scale = int(
+ np.pow(
+ 10,
+ len(
+ str(
+ int(np.min([0, np.floor(float(map_stats["min"]))]))
+ + int(np.ceil(float(map_stats["max"]))),
+ ),
+ ),
+ ),
+ )
+ mc_expression = (
+ f"{locations} * {range_scale} + int({options['locations_subunits']})"
+ )
+ range_dict = range_dict_from_map_combination(options, range_scale)
+ else:
+ if options["method"] == "database":
+ range_dict = range_dict_from_db(options)
+ elif options["method"] in {"percentile", "linear"}:
+ range_dict = range_dict_from_statistics(options)
+
+ create_sub_graph = partial(
+ create_graph,
+ continuous_subdivision_map,
+ min_value=-9999,
+ max_value=9999,
+ epsilon=0.000001,
+ )
+ mc_expression = f"""{options["locations_subunits"]}=int(graph({locations},{", ".join(f"{cat}, int({create_sub_graph(values)})" for cat, values in range_dict.items())}))"""
if int(options["nprocs"]) > 1:
Module(
@@ -393,7 +478,7 @@ def main():
[
"\n".join([f"{row[0]}\t{row[1]} - {row[2]}" for row in val])
for val in range_dict.values()
- ]
+ ],
),
)
gs.raster.raster_history(options["locations_subunits"], overwrite=True)
@@ -427,7 +512,7 @@ def main():
# Load geometries of subunits to MS SQL
conn = pyodbc.connect(
options["locations_url"].replace("MSSQL:", "")
- + f";UID={os.environ.get('MSSQLSPATIAL_UID')};PWD={os.environ.get('MSSQLSPATIAL_PWD')}"
+ + f";UID={os.environ.get('MSSQLSPATIAL_UID')};PWD={os.environ.get('MSSQLSPATIAL_PWD')}",
)
cursor = conn.cursor()
cursor.executemany(