Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 29 additions & 2 deletions src/raster/r.timeseries.locations/r.timeseries.locations.html
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,20 @@ <h2>DESCRIPTION</h2>
coherent sub-units based on a continuous raster map and break values for
subdivisions. It is written for locations in NVEs time series database.

The tool supports the following <b>method</b>s to create subdivision maps:
<ul>
<li><em>percentile</em>: creates sub-units from the <b>continuous_subdivision_map</b>
using user-given <b>percentiles</b> for intervals.</li>
<li><em>linear</em>: creates sub-units from the <b>continuous_subdivision_map</b>
using linear scaled intervals.</li>
<li><em>database</em>: creates sub-units from intervals that are pre-defined in
the database using the <b>continuous_subdivision_map</b>.</li>
<li><em>map_units</em>: creates sub-units from the unique categories of the
<b>continuous_subdivision_map</b>. The <b>continuous_subdivision_map</b>
should thus contain integer values. Floating point values will be
rounded according to quantization rules in <em>r.stats</em>.</li>
</ul>

<h2>EXAMPLES</h2>

<h3>Subdivide watersheds using altitude bands and breaks from DB</h3>
Expand All @@ -17,13 +31,26 @@ <h3>Subdivide watersheds using altitude bands and breaks from DB</h3>
--o --v
</pre></div>

<h3>Subdivide glacier lines using altitude pixels</h3>

<div class="code"><pre>
r.timeseries.locations locations_url="MSSQL:driver=FreeTDS;server=tst.sql.server.com;port=1433;database=testdatabase" \
where="parent_id IS NULL" locations=glacier_lines continuous_subdivision_map="DTM" \
round_to_closest=1 method=database domain_id=80 layer="dbo.region" type="line" \
locations_subunits="glacier_lines_altitude" nprocs=20 \
keepass_file="/hdata/fjernanalyse3/CopernicusUtviklingOgTest/data/actinia_secrets.kdbx" keepass_title=mssql \
--o --v
</pre></div>

<h2>SEE ALSO</h2>

<em>
<a href="r.mapcalc.html">r.mapcalc</a>,
<a href="r.stats.html">r.stats</a>,
<a href="r.to.vect.html">r.to.vect</a>,
<a href="r.univar.html">r.univar</a>,
<a href="v.in.ogr.html">v.in.ogr</a>,
<a href="v.to.rast.html">v.to.rast</a>
</em>

<h2>AUTHOR</h2>
Expand Down
191 changes: 138 additions & 53 deletions src/raster/r.timeseries.locations/r.timeseries.locations.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,9 @@
#!/usr/bin/env python3

"""MODULE: r.timeseries.locations
AUTHOR(S): Stefan Blumentrath
PURPOSE: Manage locations for time series in NVE TimeSeries DB
COPYRIGHT: (C) 2023-2025 by NVE, Stefan Blumentrath

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
Expand All @@ -19,7 +18,7 @@
"""

# %module
# % description: Manage locations for time series in NVE time series DB
# % description: Manage locations for time series in NVE TimeSeries DB
# % keyword: NVE
# % keyword: import
# % keyword: export
Expand Down Expand Up @@ -60,6 +59,9 @@
# % description: Layer name of the OGR data source to import
# %end

# %option G_OPT_V_TYPE
# %end

# %option G_OPT_DB_WHERE
# %end

Expand All @@ -74,7 +76,7 @@

# %option
# % key: method
# % options: percentile,linear,database
# % options: percentile,linear,database,map_units
# % required: no
# % multiple: no
# % description: Method for generating subunits
Expand Down Expand Up @@ -141,6 +143,7 @@
# % collective: locations_subunits,method,continuous_subdivision_map
# %end

import json
import os
import sys
from functools import partial
Expand All @@ -150,36 +153,46 @@
import numpy as np


def round_to_closest(x, y):
"""Round value x to closest y"""
def round_to_closest(x: float | np.array, y: float) -> tuple:
"""Round value x to closest y."""
if not y:
return x
return tuple(np.round(np.array(x).astype(float) / y, 0).astype(type(y)) * y)


def keepass_to_env(
    keepass_file: str,
    keepass_pwd: str,
    title: str,
    username_var: str,
    password_var: str,
    *,
    first: bool = True,
) -> None:
    """Write credentials from a KeePass entry into environment variables.

    :param keepass_file: Path to the KeePass database file (.kdbx)
    :param keepass_pwd: Master password for the KeePass database
    :param title: Title of the KeePass entry to look up
    :param username_var: Environment variable that receives the username
    :param password_var: Environment variable that receives the password
    :param first: Return only the first matching entry (forwarded to
        PyKeePass.find_entries)
    :raises ValueError: If no entry with the given title exists
    """
    kp = PyKeePass(keepass_file, password=keepass_pwd)
    entry = kp.find_entries(title=title, first=first)
    # find_entries returns None when nothing matches; fail with a clear
    # message instead of an opaque AttributeError below.
    if entry is None:
        raise ValueError(f"No KeePass entry with title <{title}> found in <{keepass_file}>")
    os.environ[username_var] = entry.username
    os.environ[password_var] = entry.password


def create_graph(
raster_map, values, min_value="-Inf", max_value="Inf", epsilon=0.00000001
):
"""Create a map calculator graph() function from a mapname and a list of range values
raster_map: str,
values: tuple[int],
min_value: float | str = "-Inf",
max_value: float | str = "Inf",
epsilon: float = 0.00000001,
) -> str:
"""Create a map calculator graph() function from a mapname and a list of range values.

:param raster_map: Name of the raster map to create the graph function for
:type raster_map: str
:param values: tuple with break points to build the graph with
:type values: tuple
:param min: Minimum value of the input raster map
:type min: float
:param max: Maximum value of the input raster map
:type max: float
:param min_value: Minimum value of the input raster map
:type min_value: float | str
:param max_value: Maximum value of the input raster map
:type max_value: float | str
:param epsilon: minimal value to distinguish break boundaries
:type epsilon: float
:return: A string with a graph() function for r.mapcalc
Expand All @@ -196,24 +209,55 @@ def create_graph(
return f"graph({raster_map},{','.join(value_list)})"


def range_dict_from_db(grass_options):
"""Read user defined breaks for each ID from DB
def range_dict_from_map_combination(grass_options: dict, range_scale: int) -> dict:
    """Create ranges from the combination of location and continuous_subdivision_map.

    Runs r.stats over the locations map and the continuous subdivision map
    and builds one combined category per (location, sub-unit) pair as
    ``location_cat * range_scale + sub_unit_cat``.

    :param grass_options: The GRASS GIS options dict from g.parser
    :type grass_options: dict
    :param range_scale: Integer to scale the locations map with
    :type range_scale: int
    :return: A dict keyed by combined category (sorted ascending) mapping to
        (sub_unit_category, cell_count) tuples
    """
    # r.stats "c" prints cell counts and "n" suppresses NULL cells;
    # "i" presumably quantizes floating-point maps to integer categories
    # (matches the "quantization rules in r.stats" note in the HTML doc) —
    # TODO confirm against the r.stats manual.
    stats = Module(
        "r.stats",
        flags="cin",
        input=[grass_options["locations"], grass_options["continuous_subdivision_map"]],
        stdout_=PIPE,
    )

    # Parse the space-separated "cat sub_cat count" lines into a structured
    # array; dtype=None lets numpy infer column types.
    map_stats = np.genfromtxt(
        stats.outputs.stdout.split("\n"),
        delimiter=" ",
        names=["cat", "sub_cat", "n"],
        dtype=None,
        encoding="UTF8",
    )
    # Combined key: location category shifted by range_scale plus the
    # sub-unit category, so both remain recoverable from one integer.
    map_stats = {
        int(m["cat"] * range_scale + m["sub_cat"]): (int(m["sub_cat"]), int(m["n"]))
        for m in map_stats
    }
    return dict(sorted(map_stats.items()))


def range_dict_from_db(grass_options: dict) -> dict:
"""Read user defined breaks for each ID from DB.

:param options: The GRASS GIS options dict from g.parser
:type options: dict
:return: A dict with class breaks per ID
"""
# Get data from DB
conn = pyodbc.connect(
grass_options["locations_url"].replace("MSSQL:", "")
+ f";UID={os.environ.get('MSSQLSPATIAL_UID')};PWD={os.environ.get('MSSQLSPATIAL_PWD')}"
+ f";UID={os.environ.get('MSSQLSPATIAL_UID')};PWD={os.environ.get('MSSQLSPATIAL_PWD')}",
)
cursor = conn.cursor()
res = cursor.execute(
f"""SELECT parent_id, id, minimum_elevation_m, maximum_elevation_m
FROM {grass_options["layer"]}
WHERE domain_id = {grass_options["domain_id"]} AND parent_id IS NOT NULL
ORDER BY parent_id, minimum_elevation_m, maximum_elevation_m
;"""
;""",
)
range_table = res.fetchall()
conn.close()
Expand All @@ -223,14 +267,16 @@ def range_dict_from_db(grass_options):
if row[0] in range_dict:
range_dict[row[0]].append(row[1:])
else:
range_dict[row[0]] = [(row[1:])]
range_dict[row[0]] = [row[1:]]

return range_dict


def range_dict_from_statistics(grass_options):
"""Compute breaks from statistics of the continuous_subdivision_map
according to user given method, class_number and rounding precision
def range_dict_from_statistics(grass_options: dict) -> dict:
"""Compute breaks from statistics of the continuous_subdivision_map.

Breaks are computed according to user given method, class_number and
rounding precision.
:param options: The GRASS GIS options dict from g.parser
:type options: dict
:return: A dict with class breaks per ID
Expand All @@ -240,8 +286,9 @@ def range_dict_from_statistics(grass_options):
if grass_options["method"] == "percentile":
univar_percentile = list(
np.round(
np.linspace(0, 100, num=class_number + 1, endpoint=True), 0
).astype(int)
np.linspace(0, 100, num=class_number + 1, endpoint=True),
0,
).astype(int),
)
univar_flags = "et"

Expand Down Expand Up @@ -280,10 +327,13 @@ def range_dict_from_statistics(grass_options):
int(s["zone"]): list(
round_to_closest(
np.linspace(
s["min"], s["max"], num=class_number + 1, endpoint=True
s["min"],
s["max"],
num=class_number + 1,
endpoint=True,
),
float(grass_options["round_to_closest"]),
)
),
)
for s in univar_stats
if not np.isnan(s["max"])
Expand All @@ -298,8 +348,8 @@ def range_dict_from_statistics(grass_options):
}


def main():
"""Do the main work"""
def main() -> None:
"""Do the main work."""
locations = options["locations"]
where = f"domain_id = {options['domain_id']} AND parent_id IS NULL"
continuous_subdivision_map = options["continuous_subdivision_map"]
Expand All @@ -309,11 +359,13 @@ def main():
if "KEEPASS_PWD" not in os.environ:
gs.fatal(
_(
"The KeePass password needs to be provided through environment variable 'KEEPASS_PWD'"
)
"The KeePass password needs to be provided through environment variable 'KEEPASS_PWD'",
),
)
gs.verbose(
_("Trying to get keepass entries from <{}>").format(options["keepass_file"])
_("Trying to get keepass entries from <{}>").format(
options["keepass_file"],
),
)
keepass_to_env(
options["keepass_file"],
Expand All @@ -328,7 +380,6 @@ def main():
Module(
"v.in.ogr",
flags="o",
# Until GDAL 3.6 is available UID and PWD have to be provided in the connection string
input=options["locations_url"] + f";Tables={schema}.{layer}",
layer=layer if schema == "dbo" else f"{schema}.{layer}",
where=where + " AND " + options["where"] if options["where"] else where,
Expand All @@ -351,7 +402,7 @@ def main():
"v.to.rast",
input=locations,
output=locations,
type="area",
type=options["type"],
use="attr",
attribute_column="id",
label_column="name",
Expand All @@ -360,19 +411,53 @@ def main():
gs.raster.raster_history(locations, overwrite=True)

if options["locations_subunits"]:
if options["method"] == "database":
range_dict = range_dict_from_db(options)
elif options["method"] in ["percentile", "linear"]:
range_dict = range_dict_from_statistics(options)

create_sub_graph = partial(
create_graph,
continuous_subdivision_map,
min_value=-9999,
max_value=9999,
epsilon=0.000001,
)
mc_expression = f"""{options["locations_subunits"]}=int(graph({locations},{", ".join(f"{cat}, int({create_sub_graph(values)})" for cat, values in range_dict.items())}))"""
if options["method"] == "map_units":
map_stats = gs.parse_key_val(
Module(
"r.info",
flags="gr",
map=options["locations_subunits"],
stdout_=PIPE,
).outputs.stdout,
)
if not map_stats["max"]:
map_stats = json.loads(
Module(
"r.univar",
map=options["locations_subunits"],
format="json",
stdout_=PIPE,
).outputs.stdout,
)
range_scale = int(
np.pow(
10,
len(
str(
int(np.min([0, np.floor(float(map_stats["min"]))]))
+ int(np.ceil(float(map_stats["max"]))),
),
),
),
)
mc_expression = (
f"{locations} * {range_scale} + int({options['locations_subunits']})"
)
range_dict = range_dict_from_map_combination(options, range_scale)
else:
if options["method"] == "database":
range_dict = range_dict_from_db(options)
elif options["method"] in {"percentile", "linear"}:
range_dict = range_dict_from_statistics(options)

create_sub_graph = partial(
create_graph,
continuous_subdivision_map,
min_value=-9999,
max_value=9999,
epsilon=0.000001,
)
mc_expression = f"""{options["locations_subunits"]}=int(graph({locations},{", ".join(f"{cat}, int({create_sub_graph(values)})" for cat, values in range_dict.items())}))"""

if int(options["nprocs"]) > 1:
Module(
Expand All @@ -393,7 +478,7 @@ def main():
[
"\n".join([f"{row[0]}\t{row[1]} - {row[2]}" for row in val])
for val in range_dict.values()
]
],
),
)
gs.raster.raster_history(options["locations_subunits"], overwrite=True)
Expand Down Expand Up @@ -427,7 +512,7 @@ def main():
# Load geometries of subunits to MS SQL
conn = pyodbc.connect(
options["locations_url"].replace("MSSQL:", "")
+ f";UID={os.environ.get('MSSQLSPATIAL_UID')};PWD={os.environ.get('MSSQLSPATIAL_PWD')}"
+ f";UID={os.environ.get('MSSQLSPATIAL_UID')};PWD={os.environ.get('MSSQLSPATIAL_PWD')}",
)
cursor = conn.cursor()
cursor.executemany(
Expand Down
Loading